/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2023 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "server.h"
#include <signal.h>
#include <limits.h>
#include <inttypes.h>
#include "linux-low.h"
#include "i387-fp.h"
#include "x86-low.h"
#include "gdbsupport/x86-xstate.h"
#include "nat/x86-xstate.h"
#include "nat/gdb_ptrace.h"

#ifdef __x86_64__
#include "nat/amd64-linux-siginfo.h"
#endif

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "gdbsupport/agent.h"
#include "tdesc.h"
#include "tracepoint.h"
#include "ax.h"
#include "nat/linux-nat.h"
#include "nat/x86-linux.h"
#include "nat/x86-linux-dregs.h"
#include "linux-x86-tdesc.h"

#ifdef __x86_64__
static target_desc_up tdesc_amd64_linux_no_xml;
#endif
static target_desc_up tdesc_i386_linux_no_xml;
/* Templates for a 5-byte "jmp rel32" and a 4-byte operand-size-prefixed
   "jmp rel16"; the zeroed displacement bytes are patched in when a jump
   pad is installed.  */
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };

/* Backward compatibility for gdb without XML support.  */

static const char xmltarget_i386_linux_no_xml[] = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char xmltarget_amd64_linux_no_xml[] = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif
#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/uio.h>

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
/* Linux target op definitions for the x86 architecture.
   This is initialized assuming an amd64 target.
   'low_arch_setup' will correct it for i386 or amd64 targets.  */

class x86_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

  void process_qsupported (gdb::array_view<const char * const> features) override;

  bool supports_tracepoints () override;

  bool supports_fast_tracepoints () override;

  int install_fast_tracepoint_jump_pad
    (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
     CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
     CORE_ADDR *trampoline, ULONGEST *trampoline_size,
     unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
     CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
     char *err) override;

  int get_min_fast_tracepoint_insn_len () override;

  struct emit_ops *emit_ops () override;

  int get_ipa_tdesc_idx () override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  int low_decr_pc_after_break () override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  bool low_stopped_by_watchpoint () override;

  CORE_ADDR low_stopped_data_address () override;

  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */

  /* Need to fix up i386 siginfo if host is amd64.  */
  bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
			  int direction) override;

  arch_process_info *low_new_process () override;

  void low_delete_process (arch_process_info *info) override;

  void low_new_thread (lwp_info *) override;

  void low_delete_thread (arch_lwp_info *) override;

  void low_new_fork (process_info *parent, process_info *child) override;

  void low_prepare_to_resume (lwp_info *lwp) override;

  int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;

  bool low_supports_range_stepping () override;

  bool low_supports_catch_syscall () override;

  void low_get_syscall_trapinfo (regcache *regcache, int *sysno) override;

private:

  /* Update the target description of all processes; a new GDB has
     connected, and it may or may not support xml target
     descriptions.  */
  void update_xmltarget ();
};

/* The singleton target ops object.  */

static x86_target the_x86_target;
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};
#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* st0 ... st7 */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* fctrl ... fop */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* xmm0 ... xmm7 */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* xmm8 ... xmm15 */
  -1,					/* mxcsr */
  ORIG_RAX * 8,
  21 * 8, 22 * 8,			/* fs_base, gs_base */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* ymm0 ... ymm15 (AVX) */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1,			/* MPX registers BND0 ... BND3.  */
  -1, -1,				/* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1					/* pkru */
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif
#ifdef __x86_64__

/* Returns true if THREAD belongs to an x86-64 process, per the tdesc.  */

static int
is_64bit_tdesc (thread_info *thread)
{
  struct regcache *regcache = get_thread_regcache (thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
  gdb_assert (lwp != nullptr);
  int use_64bit = is_64bit_tdesc (get_lwp_thread (lwp));

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    /* DESC has the layout of the kernel's struct user_desc; desc[1]
       is the segment's base address.  */
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

int
x86_target::low_get_thread_area (int lwpid, CORE_ADDR *addr)
{
  lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
  gdb_assert (lwp != nullptr);
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc (get_lwp_thread (lwp));

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid_of (thr),
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}
bool
x86_target::low_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc (current_thread))
    return false;
#endif

  return regno >= I386_NUM_REGS;
}

bool
x86_target::low_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc (current_thread))
    return false;
#endif

  return regno >= I386_NUM_REGS;
}
static void
collect_register_i386 (struct regcache *regcache, int regno, void *buf)
{
  collect_register (regcache, regno, buf);

#ifdef __x86_64__
  /* In case of x86_64 -m32, collect_register only writes 4 bytes, but the
     space reserved in buf for the register is 8 bytes.  Make sure the entire
     reserved space is initialized.  */

  gdb_assert (register_size (regcache->tdesc, regno) == 4);

  if (regno == RAX)
    {
      /* Sign extend EAX value to avoid potential syscall restart
	 problems.

	 See amd64_linux_collect_native_gregset() in
	 gdb/amd64-linux-nat.c for a detailed explanation.  */
      *(int64_t *) buf = *(int32_t *) buf;
    }
  else
    {
      /* Zero-extend.  */
      *(uint64_t *) buf = *(uint32_t *) buf;
    }
#endif
}
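
/* A concrete illustration of the sign-extension above (the values are
   hypothetical): when a 32-bit inferior is stopped inside an
   interrupted syscall, EAX may hold a negative error such as
   0xfffffe00 (-512, i.e. -ERESTARTSYS).  Zero-extending would store
   0x00000000fffffe00 into the 64-bit RAX slot, and the kernel's
   syscall-restart check, which inspects the full 64-bit value, would
   no longer see it as negative.  Sign-extending stores
   0xfffffffffffffe00 and preserves the restart behavior.  */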
static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register_i386 (regcache, i, ((char *) buf) + i386_regmap[i]);

  /* Handle ORIG_EAX, which is not in i386_regmap.  */
  collect_register_i386 (regcache, find_regno (regcache->tdesc, "orig_eax"),
			 ((char *) buf) + ORIG_EAX * REGSIZE);
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * REGSIZE);
}
static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};
bool
x86_target::low_supports_breakpoints ()
{
  return true;
}

CORE_ADDR
x86_target::low_get_pc (regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t pc;

      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      uint32_t pc;

      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

void
x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t newpc = pc;

      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      uint32_t newpc = pc;

      supply_register_by_name (regcache, "eip", &newpc);
    }
}

int
x86_target::low_decr_pc_after_break ()
{
  return 1;
}

/* The int3 instruction, the canonical x86 software breakpoint.  */
static const gdb_byte x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

bool
x86_target::low_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  read_memory (pc, &c, 1);
  if (c == 0xCC)
    return true;

  return false;
}
/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };

/* Breakpoint/Watchpoint support.  */

bool
x86_target::supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return true;
    default:
      return false;
    }
}
int
x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			      int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

int
x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			      int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

bool
x86_target::low_stopped_by_watchpoint ()
{
  struct process_info *proc = current_process ();
  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

CORE_ADDR
x86_target::low_stopped_data_address ()
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
				   &addr))
    return addr;
  return 0;
}
/* Called when a new process is created.  */

arch_process_info *
x86_target::low_new_process ()
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a process is being deleted.  */

void
x86_target::low_delete_process (arch_process_info *info)
{
  xfree (info);
}

void
x86_target::low_new_thread (lwp_info *lwp)
{
  /* This comes from nat/.  */
  x86_linux_new_thread (lwp);
}

void
x86_target::low_delete_thread (arch_lwp_info *alwp)
{
  /* This comes from nat/.  */
  x86_linux_delete_thread (alwp);
}

/* Target routine for new_fork.  */

void
x86_target::low_new_fork (process_info *parent, process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernel before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

void
x86_target::low_prepare_to_resume (lwp_info *lwp)
{
  /* This comes from nat/.  */
  x86_linux_prepare_to_resume (lwp);
}

/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* Convert a ptrace/host siginfo object into/from the siginfo in the
   layout of the inferior's architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to
   INF.  */

bool
x86_target::low_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc (current_thread))
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_X32);
#endif

  return false;
}
static int use_xml;

/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of sw_usable_bytes, i.e. bytes [464..471], are the
  OS enabled extended state mask, which is the same as the extended
  control register 0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can
  use this mask together with the mask saved in the xstate_hdr_bytes to
  determine what states the processor/OS supports and what state, used
  or initialized, the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464
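
/* An illustrative sketch (not used by the code below, and XSAVE_BUF is
   a hypothetical name): given a raw XSAVE buffer filled in via
   PTRACE_GETREGSET with NT_X86_XSTATE, the OS-enabled feature mask can
   be read straight out of the sw_usable_bytes area:

     uint64_t xcr0;
     memcpy (&xcr0, xsave_buf + I386_LINUX_XSAVE_XCR0_OFFSET,
	     sizeof (xcr0));
     if ((xcr0 & X86_XSTATE_AVX) != 0)
       ... the OS saves AVX (ymm) state for this thread ...

   x86_linux_read_description below performs the same read, indexing
   the buffer as an array of uint64_t instead of memcpy'ing.  */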
/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;
/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  static int xsave_len;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return i386_linux_read_description (X86_XSTATE_X87);
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml.get ();
      else
#endif
	return tdesc_i386_linux_no_xml.get ();
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  xsave_len = x86_xsave_length ();

	  /* Use PTRACE_GETREGSET if it is available.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = xsave_len;
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    i387_set_xsave_mask (xcr0, xsave_len);

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	{
	  tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
						!is_elf64);
	}

      if (tdesc == NULL)
	tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
      return tdesc;
#endif
    }
  else
    {
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);

      if (tdesc == NULL)
	tdesc = i386_linux_read_description (X86_XSTATE_SSE);

      return tdesc;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}
/* Update the target description of all processes; a new GDB has
   connected, and it may or may not support xml target
   descriptions.  */

void
x86_target::update_xmltarget ()
{
  scoped_restore_current_thread restore_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_process ([this] (process_info *proc) {
    int pid = proc->pid;

    /* Look up any thread of this process.  */
    switch_to_thread (find_any_thread_of_pid (pid));

    low_arch_setup ();
  });
}
/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

void
x86_target::process_qsupported (gdb::array_view<const char * const> features)
{
  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;

  for (const char *feature : features)
    {
      if (startswith (feature, "xmlRegisters="))
	{
	  char *copy = xstrdup (feature + 13);

	  char *saveptr;
	  for (char *p = strtok_r (copy, ",", &saveptr);
	       p != NULL;
	       p = strtok_r (NULL, ",", &saveptr))
	    {
	      if (strcmp (p, "i386") == 0)
		{
		  use_xml = 1;
		  break;
		}
	    }

	  free (copy);
	}
    }

  update_xmltarget ();
}
/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif
static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const regs_info *
x86_target::get_regs_info ()
{
#ifdef __x86_64__
  if (is_64bit_tdesc (current_thread))
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}
/* Initialize the target description for the architecture of the
   inferior.  */

void
x86_target::low_arch_setup ()
{
  current_process ()->tdesc = x86_linux_read_description ();
}

bool
x86_target::low_supports_catch_syscall ()
{
  return true;
}

/* Fill *SYSNO with the syscall nr trapped.  This should only be called
   if LWP got a SYSCALL_SIGTRAP.  */

void
x86_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}

bool
x86_target::supports_tracepoints ()
{
  return true;
}
static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}

static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
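
/* For example (a sketch, not called anywhere): after

     unsigned char opbuf[16];
     int len = push_opcode (opbuf, "48 89 e6");   (mov %rsp,%rsi)

   LEN is 3 and OPBUF holds { 0x48, 0x89, 0xe6 }.  strtoul stops at the
   first non-hex character, so each space-separated token contributes
   one byte, and an empty or exhausted string ends the loop.  */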
#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  memcpy (buf + i, &tpaddr, 8);
  i += 8;
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08");	/* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movabs <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");	/* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movabs <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function, being in the shared library, may be more
     than 2^31 bytes away from the jump pad, so a 32-bit relative call
     won't necessarily reach it.  Call through a register instead.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */
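
/* Putting the amd64 builder above together, the finished pad looks
   roughly like this (a sketch; the size of the relocated instruction
   varies):

     push %rsp .. push %r8; pushfq; push <tpaddr>   ; saved-regs block
     sub $0x18,%rsp                                 ; collecting_t
     lock cmpxchg loop on <lockaddr>                ; take spin-lock
     %rdi = tpoint, %rsi = base of saved regs
     callq *%rax                                    ; gdb_collect
     clear <lockaddr>; add $0x18,%rsp; pop all regs ; unwind
     <relocated original instruction>
     jmp <tpaddr + orig_size>                       ; back to program  */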
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	/* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	/* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this uses cmpxchg, which is not available on the
     original i386.  If we cared about that, this could use xchg
     instead.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");	/* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");	/* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);

  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      target_write_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
bool
x86_target::supports_fast_tracepoints ()
{
  return true;
}

int
x86_target::install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
					      CORE_ADDR tpaddr,
					      CORE_ADDR collector,
					      CORE_ADDR lockaddr,
					      ULONGEST orig_size,
					      CORE_ADDR *jump_entry,
					      CORE_ADDR *trampoline,
					      ULONGEST *trampoline_size,
					      unsigned char *jjump_pad_insn,
					      ULONGEST *jjump_pad_insn_size,
					      CORE_ADDR *adjusted_insn_addr,
					      CORE_ADDR *adjusted_insn_addr_end,
					      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc (current_thread))
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}
/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

int
x86_target::get_min_fast_tracepoint_insn_len ()
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (is_64bit_tdesc (current_thread))
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';

      /* On x86, if trampolines are available, then 4-byte jump instructions
	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
	 with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
	return 4;
      else
	{
	  /* GDB has no channel to explain to user why a shorter fast
	     tracepoint is not possible, but at least make GDBserver
	     mention that something has gone awry.  */
	  if (!warned_about_fast_tracepoints)
	    {
	      warning ("4-byte fast tracepoints not available; %s", errbuf);
	      warned_about_fast_tracepoints = 1;
	    }
	  return 5;
	}
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
	 has not loaded yet.  */
      return 0;
    }
}
static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  threads_debug_printf ("Adding %d bytes of insn at %s",
			len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
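
/* To make the EMIT_ASM trick concrete, a sketch of what
   EMIT_ASM (amd64_pop, "pop %rax") expands to:

     extern unsigned char start_amd64_pop, end_amd64_pop;
     add_insns (&start_amd64_pop, &end_amd64_pop - &start_amd64_pop);
     __asm__ ("jmp end_amd64_pop\n"
	      "\t" "start_amd64_pop:" "\t" "pop %rax" "\n"
	      "\t" "end_amd64_pop:");

   The leading jmp keeps the emitted bytes from ever executing inside
   gdbserver itself; the two labels bracket the bytes so add_insns can
   copy them into the code being built at current_insn_ptr.  */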
#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}

static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}
static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}
static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function, being in the shared library, may be more
     than 2^31 bytes away from the compiled code pad, so a 32-bit
     relative call won't necessarily reach it.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
	 a register, so avoid it if possible.  Use %rdx, since it is
	 call-clobbered, we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%rdx */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%rdx */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}
static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}
static void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
	    "cmp %rax,(%rsp)\n\t"
	    "jne .Lamd64_eq_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_eq_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_ne_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ne_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
	    "cmp %rax,(%rsp)\n\t"
	    "jnl .Lamd64_lt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_lt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    ".Lamd64_ge_jump:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2223 static emit_ops amd64_emit_ops =
2225 amd64_emit_prologue,
2226 amd64_emit_epilogue,
2227 amd64_emit_add,
2228 amd64_emit_sub,
2229 amd64_emit_mul,
2230 amd64_emit_lsh,
2231 amd64_emit_rsh_signed,
2232 amd64_emit_rsh_unsigned,
2233 amd64_emit_ext,
2234 amd64_emit_log_not,
2235 amd64_emit_bit_and,
2236 amd64_emit_bit_or,
2237 amd64_emit_bit_xor,
2238 amd64_emit_bit_not,
2239 amd64_emit_equal,
2240 amd64_emit_less_signed,
2241 amd64_emit_less_unsigned,
2242 amd64_emit_ref,
2243 amd64_emit_if_goto,
2244 amd64_emit_goto,
2245 amd64_write_goto_address,
2246 amd64_emit_const,
2247 amd64_emit_call,
2248 amd64_emit_reg,
2249 amd64_emit_pop,
2250 amd64_emit_stack_flush,
2251 amd64_emit_zero_ext,
2252 amd64_emit_swap,
2253 amd64_emit_stack_adjust,
2254 amd64_emit_int_call_1,
2255 amd64_emit_void_call_2,
2256 amd64_emit_eq_goto,
2257 amd64_emit_ne_goto,
2258 amd64_emit_lt_goto,
2259 amd64_emit_le_goto,
2260 amd64_emit_gt_goto,
2261 amd64_emit_ge_goto
2264 #endif /* __x86_64__ */
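
/* For the 32-bit emitters below, the 64-bit top-of-stack value lives
   in the %ebx:%eax pair (high:low), and each pushed stack entry
   occupies two 4-byte words: the low word at (%esp) and the high word
   at 4(%esp).  Hence the add/adc pairing in i386_emit_add and the
   "lea 0x8(%esp),%esp" used to drop one entry.  */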
static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
              "push %ebp\n\t"
              "mov %esp,%ebp\n\t"
              "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
              "mov 12(%ebp),%ecx\n\t"
              "mov %eax,(%ecx)\n\t"
              "mov %ebx,0x4(%ecx)\n\t"
              "xor %eax,%eax\n\t"
              "pop %ebx\n\t"
              "pop %ebp\n\t"
              "ret");
}

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
              "add (%esp),%eax\n\t"
              "adc 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
              "subl %eax,(%esp)\n\t"
              "sbbl %ebx,4(%esp)\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t");
}

static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}
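
/* 64-bit multiply and shifts are not open-coded on i386.  Setting
   emit_error aborts compilation of the agent expression to native
   code; gdbserver then falls back to interpreting the bytecode
   instead.  */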
static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
                  "cbtw\n\t"
                  "cwtl\n\t"
                  "movl %eax,%ebx\n\t"
                  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
                  "cwtl\n\t"
                  "movl %eax,%ebx\n\t"
                  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
                  "movl %eax,%ebx\n\t"
                  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
              "or %ebx,%eax\n\t"
              "test %eax,%eax\n\t"
              "sete %cl\n\t"
              "xor %ebx,%ebx\n\t"
              "movzbl %cl,%eax");
}

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
              "and (%esp),%eax\n\t"
              "and 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
              "or (%esp),%eax\n\t"
              "or 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
              "xor (%esp),%eax\n\t"
              "xor 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
              "xor $0xffffffff,%eax\n\t"
              "xor $0xffffffff,%ebx\n\t");
}

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
              "cmpl %ebx,4(%esp)\n\t"
              "jne .Li386_equal_false\n\t"
              "cmpl %eax,(%esp)\n\t"
              "je .Li386_equal_true\n\t"
              ".Li386_equal_false:\n\t"
              "xor %eax,%eax\n\t"
              "jmp .Li386_equal_end\n\t"
              ".Li386_equal_true:\n\t"
              "mov $1,%eax\n\t"
              ".Li386_equal_end:\n\t"
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
              "cmpl %ebx,4(%esp)\n\t"
              "jl .Li386_less_signed_true\n\t"
              "jne .Li386_less_signed_false\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jl .Li386_less_signed_true\n\t"
              ".Li386_less_signed_false:\n\t"
              "xor %eax,%eax\n\t"
              "jmp .Li386_less_signed_end\n\t"
              ".Li386_less_signed_true:\n\t"
              "mov $1,%eax\n\t"
              ".Li386_less_signed_end:\n\t"
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
              "cmpl %ebx,4(%esp)\n\t"
              "jb .Li386_less_unsigned_true\n\t"
              "jne .Li386_less_unsigned_false\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jb .Li386_less_unsigned_true\n\t"
              ".Li386_less_unsigned_false:\n\t"
              "xor %eax,%eax\n\t"
              "jmp .Li386_less_unsigned_end\n\t"
              ".Li386_less_unsigned_true:\n\t"
              "mov $1,%eax\n\t"
              ".Li386_less_unsigned_end:\n\t"
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
                  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
                  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
                  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
                  "movl 4(%eax),%ebx\n\t"
                  "movl (%eax),%eax");
      break;
    }
}

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
              "mov %eax,%ecx\n\t"
              "or %ebx,%ecx\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              "cmpl $0,%ecx\n\t"
              /* Don't trust the assembler to choose the right jump */
              ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}
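
/* Offset 11 above counts the bytes preceding the jne's rel32
   displacement: mov %eax,%ecx (2) + or %ebx,%ecx (2) + pop %eax (1)
   + pop %ebx (1) + cmpl $0,%ecx (3) + the 0x0f 0x85 opcode (2).  */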
static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
              /* Don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}

static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}
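
/* The rel32 operand of the call emitted above (like that of the
   patched jumps) is relative to the address of the following
   instruction, hence the "- (buildaddr + 5)" adjustment for the
   5-byte call sequence.  */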
static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
              "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
              "mov %eax,4(%esp)\n\t"
              "mov 8(%ebp),%eax\n\t"
              "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
              "pop %eax\n\t"
              "pop %ebx");
}

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
              "push %ebx\n\t"
              "push %eax");
}

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
                  "and $0xff,%eax\n\t"
                  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
                  "and $0xffff,%eax\n\t"
                  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
                  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
              "mov %eax,%ecx\n\t"
              "mov %ebx,%edx\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              "push %edx\n\t"
              "push %ecx");
}

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
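
/* Each bytecode stack entry is two 4-byte words, so the adjustment
   above is N * 8.  Note it is emitted in the disp8 form of lea, so
   the scaled adjustment must fit in a signed byte.  */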
/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
              /* Reserve a bit of stack space.  */
              "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
              "mov %edx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}
/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
              /* Preserve %eax only; we don't have to worry about %ebx.  */
              "push %eax\n\t"
              /* Reserve a bit of stack space for arguments.  */
              "sub $0x10,%esp\n\t"
              /* Copy "top" to the second argument position.  (Note that
                 we can't assume the function won't scribble on its
                 arguments, so don't try to restore from this.)  */
              "mov %eax,4(%esp)\n\t"
              "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
              "lea 0x10(%esp),%esp\n\t"
              /* Restore original stack top.  */
              "pop %eax");
}
static void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
              /* Check low half first, more likely to be the decider.  */
              "cmpl %eax,(%esp)\n\t"
              "jne .Leq_fallthru\n\t"
              "cmpl %ebx,4(%esp)\n\t"
              "jne .Leq_fallthru\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Leq_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
              /* Check low half first, more likely to be the decider.  */
              "cmpl %eax,(%esp)\n\t"
              "jne .Lne_jump\n\t"
              "cmpl %ebx,4(%esp)\n\t"
              "je .Lne_fallthru\n\t"
              ".Lne_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lne_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
              "cmpl %ebx,4(%esp)\n\t"
              "jl .Llt_jump\n\t"
              "jne .Llt_fallthru\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jnl .Llt_fallthru\n\t"
              ".Llt_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Llt_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
              "cmpl %ebx,4(%esp)\n\t"
              "jle .Lle_jump\n\t"
              "jne .Lle_fallthru\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jnle .Lle_fallthru\n\t"
              ".Lle_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lle_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
              "cmpl %ebx,4(%esp)\n\t"
              "jg .Lgt_jump\n\t"
              "jne .Lgt_fallthru\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jng .Lgt_fallthru\n\t"
              ".Lgt_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lgt_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
              "cmpl %ebx,4(%esp)\n\t"
              "jge .Lge_jump\n\t"
              "jne .Lge_fallthru\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jnge .Lge_fallthru\n\t"
              ".Lge_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lge_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
static emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };


emit_ops *
x86_target::emit_ops ()
{
#ifdef __x86_64__
  if (is_64bit_tdesc (current_thread))
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}
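
/* Note the tdesc check is per-thread: a 64-bit gdbserver can be
   debugging a 32-bit inferior, in which case the i386 emitters, not
   the amd64 ones, match the inferior's ABI.  */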
/* Implementation of target ops method "sw_breakpoint_from_kind".  */

const gdb_byte *
x86_target::sw_breakpoint_from_kind (int kind, int *size)
{
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}

bool
x86_target::low_supports_range_stepping ()
{
  return true;
}
int
x86_target::get_ipa_tdesc_idx ()
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);
  const struct target_desc *tdesc = regcache->tdesc;

#ifdef __x86_64__
  return amd64_get_ipa_tdesc_idx (tdesc);
#endif

  if (tdesc == tdesc_i386_linux_no_xml.get ())
    return X86_TDESC_SSE;

  return i386_get_ipa_tdesc_idx (tdesc);
}
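
/* The index returned above is presumably shared with the in-process
   agent (IPA), letting gdbserver and the IPA agree on which target
   description, and hence which register layout, is in effect.  */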
/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_x86_target;

void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  tdesc_amd64_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_amd64_linux_no_xml.get (),
                           amd64_linux_read_description (X86_XSTATE_SSE_MASK,
                                                         false));
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif

  tdesc_i386_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_i386_linux_no_xml.get (),
                           i386_linux_read_description (X86_XSTATE_SSE_MASK));
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}