/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2023 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "linux-low.h"
#include "gdbsupport/x86-xstate.h"
#include "nat/x86-xstate.h"
#include "nat/gdb_ptrace.h"
#include "nat/amd64-linux-siginfo.h"

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "gdbsupport/agent.h"
#include "tracepoint.h"
#include "nat/linux-nat.h"
#include "nat/x86-linux.h"
#include "nat/x86-linux-dregs.h"
#include "linux-x86-tdesc.h"
static target_desc_up tdesc_amd64_linux_no_xml;
static target_desc_up tdesc_i386_linux_no_xml;

static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
/* Backward compatibility for gdb without XML support.  */

static const char xmltarget_i386_linux_no_xml[] = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

static const char xmltarget_amd64_linux_no_xml[] = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#include <sys/procfs.h>

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
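/* Illustration only (a sketch, not part of the build): with the
   constants above, the fs base of a stopped thread can be read with a
   single ptrace request, along the lines of:

     unsigned long long base;
     if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
       ... use base ...

   ps_get_thread_area and low_get_thread_area below rely on exactly
   this request on 64-bit targets.  */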
/* Linux target op definitions for the x86 architecture.
   This is initialized assuming an amd64 target.
   'low_arch_setup' will correct it for i386 or amd64 targets.  */

class x86_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

  void process_qsupported (gdb::array_view<const char * const> features) override;

  bool supports_tracepoints () override;

  bool supports_fast_tracepoints () override;

  int install_fast_tracepoint_jump_pad
    (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
     CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
     CORE_ADDR *trampoline, ULONGEST *trampoline_size,
     unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
     CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
     char *err) override;

  int get_min_fast_tracepoint_insn_len () override;

  struct emit_ops *emit_ops () override;

  int get_ipa_tdesc_idx () override;
protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  int low_decr_pc_after_break () override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  bool low_stopped_by_watchpoint () override;

  CORE_ADDR low_stopped_data_address () override;

  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */

  /* Need to fix up i386 siginfo if host is amd64.  */
  bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
			  int direction) override;

  arch_process_info *low_new_process () override;

  void low_delete_process (arch_process_info *info) override;

  void low_new_thread (lwp_info *) override;

  void low_delete_thread (arch_lwp_info *) override;

  void low_new_fork (process_info *parent, process_info *child) override;

  void low_prepare_to_resume (lwp_info *lwp) override;

  int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;

  bool low_supports_range_stepping () override;

  bool low_supports_catch_syscall () override;

  void low_get_syscall_trapinfo (regcache *regcache, int *sysno) override;

private:

  /* Update the target descriptions of all processes; a new GDB has
     connected, and it may or may not support xml target
     descriptions.  */
  void update_xmltarget ();
};
/* The singleton target ops object.  */

static x86_target the_x86_target;

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};
#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8
static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1,			/* MPX registers BND0 ... BND3.  */
  -1, -1,				/* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)
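/* Illustration only (a sketch, not part of the build): each regmap
   entry is the byte offset of a GDB register within the kernel's
   `struct user' general-purpose register block, so the fill/store
   routines below boil down to:

     collect_register (regcache, regno, (char *) buf + i386_regmap[regno]);

   with -1 entries marking registers that have no slot in that block.  */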
#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif
#ifdef __x86_64__

/* Returns true if THREAD belongs to a x86-64 process, per the tdesc.  */

static int
is_64bit_tdesc (thread_info *thread)
{
  struct regcache *regcache = get_thread_regcache (thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
  gdb_assert (lwp != nullptr);
  int use_64bit = is_64bit_tdesc (get_lwp_thread (lwp));

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

int
x86_target::low_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
  gdb_assert (lwp != nullptr);

  int use_64bit = is_64bit_tdesc (get_lwp_thread (lwp));

  if (use_64bit)
    {
      void *base;

      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid_of (thr),
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}
bool
x86_target::low_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc (current_thread))
    return false;
#endif

  return regno >= I386_NUM_REGS;
}

bool
x86_target::low_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc (current_thread))
    return false;
#endif

  return regno >= I386_NUM_REGS;
}

static void
collect_register_i386 (struct regcache *regcache, int regno, void *buf)
{
  collect_register (regcache, regno, buf);

#ifdef __x86_64__
  /* In case of x86_64 -m32, collect_register only writes 4 bytes, but the
     space reserved in buf for the register is 8 bytes.  Make sure the entire
     reserved space is initialized.  */

  gdb_assert (register_size (regcache->tdesc, regno) == 4);

  if (regno == find_regno (regcache->tdesc, "eax"))
    {
      /* Sign extend EAX value to avoid potential syscall restart
	 problems.

	 See amd64_linux_collect_native_gregset() in
	 gdb/amd64-linux-nat.c for a detailed explanation.  */
      *(int64_t *) buf = *(int32_t *) buf;
    }
  else
    /* Zero-extend.  */
    *(uint64_t *) buf = *(uint32_t *) buf;
#endif
}
static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register_i386 (regcache, i, ((char *) buf) + i386_regmap[i]);

  /* Handle ORIG_EAX, which is not in i386_regmap.  */
  collect_register_i386 (regcache, find_regno (regcache->tdesc, "orig_eax"),
			 ((char *) buf) + ORIG_EAX * REGSIZE);
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * REGSIZE);
}
static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */
static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};
bool
x86_target::low_supports_breakpoints ()
{
  return true;
}

CORE_ADDR
x86_target::low_get_pc (regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t pc;

      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      uint32_t pc;

      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

void
x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t newpc = pc;

      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      uint32_t newpc = pc;

      supply_register_by_name (regcache, "eip", &newpc);
    }
}

int
x86_target::low_decr_pc_after_break ()
{
  return 1;
}

static const gdb_byte x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

bool
x86_target::low_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  read_memory (pc, &c, 1);
  if (c == 0xCC)
    return true;

  return false;
}
/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };

/* Breakpoint/Watchpoint support.  */

bool
x86_target::supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return true;
    default:
      return false;
    }
}
int
x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			      int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

int
x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			      int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}
bool
x86_target::low_stopped_by_watchpoint ()
{
  struct process_info *proc = current_process ();
  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

CORE_ADDR
x86_target::low_stopped_data_address ()
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;

  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
				   &addr))
    return addr;
  return 0;
}

/* Called when a new process is created.  */

arch_process_info *
x86_target::low_new_process ()
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a process is being deleted.  */

void
x86_target::low_delete_process (arch_process_info *info)
{
  xfree (info);
}

void
x86_target::low_new_thread (lwp_info *lwp)
{
  /* This comes from nat/.  */
  x86_linux_new_thread (lwp);
}

void
x86_target::low_delete_thread (arch_lwp_info *alwp)
{
  /* This comes from nat/.  */
  x86_linux_delete_thread (alwp);
}
/* Target routine for new_fork.  */

void
x86_target::low_new_fork (process_info *parent, process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernel before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug registers mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug registers mirror will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

void
x86_target::low_prepare_to_resume (lwp_info *lwp)
{
  /* This comes from nat/.  */
  x86_linux_prepare_to_resume (lwp);
}

/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* Convert a ptrace/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to
   INF.  */

bool
x86_target::low_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc (current_thread))
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_X32);
#endif

  return false;
}
/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of sw_usable_bytes (bytes 464..471) hold the OS
  enabled extended state mask, which is the same as the extended control
  register 0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this
  mask together with the mask saved in the xstate_hdr_bytes to determine
  what states the processor/OS supports and what state, used or
  initialized, the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464
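/* Illustration only (a sketch, not part of the build): given a raw
   XSAVE buffer read via PTRACE_GETREGSET with NT_X86_XSTATE, the
   enabled-feature mask can be pulled out of the sw_usable_bytes area
   like so:

     uint64_t xcr0;
     memcpy (&xcr0, (char *) xsave_buf + I386_LINUX_XSAVE_XCR0_OFFSET,
	     sizeof (xcr0));

   x86_linux_read_description below does the equivalent indexing on a
   uint64_t array.  */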
/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;
/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  static int xsave_len;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return i386_linux_read_description (X86_XSTATE_X87);
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml.get ();
      else
#endif
	return tdesc_i386_linux_no_xml.get ();
    }
  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  xsave_len = x86_xsave_length ();

	  /* Use PTRACE_GETREGSET if it is available.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = xsave_len;
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    i387_set_xsave_mask (xcr0, xsave_len);

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
					      !is_elf64);

      if (tdesc == NULL)
	tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
      return tdesc;
#endif
    }
  else
    {
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);

      if (tdesc == NULL)
	tdesc = i386_linux_read_description (X86_XSTATE_SSE);

      return tdesc;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}
/* Update the target descriptions of all processes; a new GDB has
   connected, and it may or may not support xml target descriptions.  */

void
x86_target::update_xmltarget ()
{
  scoped_restore_current_thread restore_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_process ([this] (process_info *proc) {
    int pid = proc->pid;

    /* Look up any thread of this process.  */
    switch_to_thread (find_any_thread_of_pid (pid));

    low_arch_setup ();
  });
}
/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

void
x86_target::process_qsupported (gdb::array_view<const char * const> features)
{
  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;

  for (const char *feature : features)
    {
      if (startswith (feature, "xmlRegisters="))
	{
	  char *copy = xstrdup (feature + 13);

	  char *saveptr;
	  for (char *p = strtok_r (copy, ",", &saveptr);
	       p != NULL;
	       p = strtok_r (NULL, ",", &saveptr))
	    {
	      if (strcmp (p, "i386") == 0)
		{
		  use_xml = 1;
		  break;
		}
	    }

	  free (copy);
	}
    }

  update_xmltarget ();
}
/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif

static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const regs_info *
x86_target::get_regs_info ()
{
#ifdef __x86_64__
  if (is_64bit_tdesc (current_thread))
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}
/* Initialize the target description for the architecture of the
   inferior.  */

void
x86_target::low_arch_setup ()
{
  current_process ()->tdesc = x86_linux_read_description ();
}

bool
x86_target::low_supports_catch_syscall ()
{
  return true;
}

/* Fill *SYSNO with the syscall nr trapped.  This should only be
   called if LWP got a SYSCALL_SIGTRAP.  */

void
x86_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}
bool
x86_target::supports_tracepoints ()
{
  return true;
}
static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}
static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */
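/* A sketch of the pad layout built below (added for orientation;
   derived from the code that follows, not an upstream comment):

     push all GPRs, flags and tpaddr	(saved register block)
     sub $0x18,%rsp			(collecting_t object)
     lock cmpxchg ...			(take the collector spin-lock)
     call gdb_collect through %rax
     clear the spin-lock, restore state
     relocated copy of the original instruction
     jmp back to tpaddr + orig_size  */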
static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i;
  int64_t loffset;
  int offset;

  CORE_ADDR buildaddr = *jump_entry;
  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  memcpy (buf + i, &tpaddr, 8);
  i += 8;
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08");	/* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);
  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");	/* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);
  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;
  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;
  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	   /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	   /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
						   %ecx,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);
  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");	/* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);

  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);
  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);
  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      target_write_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
bool
x86_target::supports_fast_tracepoints ()
{
  return true;
}

int
x86_target::install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
					      CORE_ADDR tpaddr,
					      CORE_ADDR collector,
					      CORE_ADDR lockaddr,
					      ULONGEST orig_size,
					      CORE_ADDR *jump_entry,
					      CORE_ADDR *trampoline,
					      ULONGEST *trampoline_size,
					      unsigned char *jjump_pad_insn,
					      ULONGEST *jjump_pad_insn_size,
					      CORE_ADDR *adjusted_insn_addr,
					      CORE_ADDR *adjusted_insn_addr_end,
					      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc (current_thread))
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}
/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

int
x86_target::get_min_fast_tracepoint_insn_len ()
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /*  On x86-64, 5-byte jump instructions with a 4-byte offset are always
      used for fast tracepoints.  */
  if (is_64bit_tdesc (current_thread))
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';

      /* On x86, if trampolines are available, then 4-byte jump instructions
	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
	 with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
	return 4;
      else
	{
	  /* GDB has no channel to explain to user why a shorter fast
	     tracepoint is not possible, but at least make GDBserver
	     mention that something has gone awry.  */
	  if (!warned_about_fast_tracepoints)
	    {
	      warning ("4-byte fast tracepoints not available; %s", errbuf);
	      warned_about_fast_tracepoints = 1;
	    }
	  return 5;
	}
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
	 has not loaded yet.  */
      return 0;
    }
}
static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  threads_debug_printf ("Adding %d bytes of insn at %s",
			len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
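/* Illustration only (a sketch, not part of the build): a use such as

     EMIT_ASM (amd64_add,
	       "add (%rsp),%rax\n\t"
	       "lea 0x8(%rsp),%rsp");

   copies the instructions between the start_amd64_add and
   end_amd64_add labels into the inferior's code buffer via add_insns,
   while the leading jmp keeps the host from ever executing them in
   place.  */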
#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}

static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}
static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}
static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48;  buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function being in the shared library, may be
     >31-bits away off the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
	 a register, so avoid it if possible.  Use r10, since it is
	 call-clobbered, we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%r10 */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%r10 */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}
static void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
	    "cmp %rax,(%rsp)\n\t"
	    "jne .Lamd64_eq_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_eq_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_ne_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ne_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
	    "cmp %rax,(%rsp)\n\t"
	    "jnl .Lamd64_lt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_lt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    ".Lamd64_ge_jump:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
static emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };

#endif /* __x86_64__ */
static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	      "push %ebp\n\t"
	      "mov %esp,%ebp\n\t"
	      "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	      "mov 12(%ebp),%ecx\n\t"
	      "mov %eax,(%ecx)\n\t"
	      "mov %ebx,0x4(%ecx)\n\t"
	      "xor %eax,%eax\n\t"
	      "pop %ebx\n\t"
	      "pop %ebp\n\t"
	      "ret");
}

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	      "subl %eax,(%esp)\n\t"
	      "sbbl %ebx,4(%esp)\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t");
}

static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		  "cbtw\n\t"
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "movzbl %cl,%eax");
}

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx\n\t");
}

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}
static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}
static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	      "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	      "mov %eax,4(%esp)\n\t"
	      "mov 8(%ebp),%eax\n\t"
	      "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	      "mov %eax,%ecx\n\t"
	      "mov %ebx,%edx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "push %edx\n\t"
	      "push %ecx");
}

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space.  */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx.  */
	      "push %eax\n\t"
	      /* Reserve a bit of stack space for arguments.  */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position.  (Note that
		 we can't assume function won't scribble on its
		 arguments, so don't try to restore from this.)  */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top.  */
	      "pop %eax");
}
static void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check low half first, more likely to be decider  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check low half first, more likely to be decider  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Llt_jump\n\t"
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnl .Llt_fallthru\n\t"
	      ".Llt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jle .Lle_jump\n\t"
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnle .Lle_fallthru\n\t"
	      ".Lle_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lgt_jump\n\t"
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jng .Lgt_fallthru\n\t"
	      ".Lgt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jge .Lge_jump\n\t"
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnge .Lge_fallthru\n\t"
	      ".Lge_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}
static emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };


emit_ops *
x86_target::emit_ops ()
{
#ifdef __x86_64__
  if (is_64bit_tdesc (current_thread))
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}
/* Implementation of target ops method "sw_breakpoint_from_kind".  */

const gdb_byte *
x86_target::sw_breakpoint_from_kind (int kind, int *size)
{
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}

bool
x86_target::low_supports_range_stepping ()
{
  return true;
}
int
x86_target::get_ipa_tdesc_idx ()
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);
  const struct target_desc *tdesc = regcache->tdesc;

#ifdef __x86_64__
  return amd64_get_ipa_tdesc_idx (tdesc);
#endif

  if (tdesc == tdesc_i386_linux_no_xml.get ())
    return X86_TDESC_SSE;

  return i386_get_ipa_tdesc_idx (tdesc);
}

/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_x86_target;
void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  tdesc_amd64_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_amd64_linux_no_xml.get (),
			   amd64_linux_read_description (X86_XSTATE_SSE_MASK,
							 false));
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif

  tdesc_i386_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_i386_linux_no_xml.get (),
			   i386_linux_read_description (X86_XSTATE_SSE_MASK));
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}