/* Target-dependent code for AMD64.

   Copyright (C) 2001-2023 Free Software Foundation, Inc.

   Contributed by Jiri Smid, SuSE Labs.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "language.h"
#include "opcode/i386.h"
#include "dis-asm.h"
#include "arch-utils.h"
#include "dummy-frame.h"
#include "frame.h"
#include "frame-base.h"
#include "frame-unwind.h"
#include "inferior.h"
#include "infrun.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "objfiles.h"
#include "regcache.h"
#include "regset.h"
#include "symfile.h"
#include "disasm.h"
#include "amd64-tdep.h"
#include "i387-tdep.h"
#include "gdbsupport/x86-xstate.h"
#include <algorithm>
#include "target-descriptions.h"
#include "arch/amd64.h"
#include "producer.h"
#include "ax.h"
#include "ax-gdb.h"
#include "gdbsupport/byte-vector.h"
#include "osabi.h"
#include "x86-tdep.h"
#include "amd64-ravenscar-thread.h"

/* Note that the AMD64 architecture was previously known as x86-64.
   The latter is (forever) engraved into the canonical system name as
   returned by config.guess, and used as the name for the AMD64 port
   of GNU/Linux.  The BSDs have renamed their ports to amd64; they
   don't like to shout.  For GDB we prefer the amd64_-prefix over the
   x86_64_-prefix since it's so much easier to type.  */

/* Register information.  */

static const char * const amd64_register_names[] =
{
  "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp",

  /* %r8 is indeed register number 8.  */
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
  "rip", "eflags", "cs", "ss", "ds", "es", "fs", "gs",

  /* %st0 is register number 24.  */
  "st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7",
  "fctrl", "fstat", "ftag", "fiseg", "fioff", "foseg", "fooff", "fop",

  /* %xmm0 is register number 40.  */
  "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
  "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
  "mxcsr",
};

static const char * const amd64_ymm_names[] =
{
  "ymm0", "ymm1", "ymm2", "ymm3",
  "ymm4", "ymm5", "ymm6", "ymm7",
  "ymm8", "ymm9", "ymm10", "ymm11",
  "ymm12", "ymm13", "ymm14", "ymm15"
};

static const char * const amd64_ymm_avx512_names[] =
{
  "ymm16", "ymm17", "ymm18", "ymm19",
  "ymm20", "ymm21", "ymm22", "ymm23",
  "ymm24", "ymm25", "ymm26", "ymm27",
  "ymm28", "ymm29", "ymm30", "ymm31"
};

static const char * const amd64_ymmh_names[] =
{
  "ymm0h", "ymm1h", "ymm2h", "ymm3h",
  "ymm4h", "ymm5h", "ymm6h", "ymm7h",
  "ymm8h", "ymm9h", "ymm10h", "ymm11h",
  "ymm12h", "ymm13h", "ymm14h", "ymm15h"
};

static const char * const amd64_ymmh_avx512_names[] =
{
  "ymm16h", "ymm17h", "ymm18h", "ymm19h",
  "ymm20h", "ymm21h", "ymm22h", "ymm23h",
  "ymm24h", "ymm25h", "ymm26h", "ymm27h",
  "ymm28h", "ymm29h", "ymm30h", "ymm31h"
};

static const char * const amd64_mpx_names[] =
{
  "bnd0raw", "bnd1raw", "bnd2raw", "bnd3raw", "bndcfgu", "bndstatus"
};

static const char * const amd64_k_names[] =
{
  "k0", "k1", "k2", "k3",
  "k4", "k5", "k6", "k7"
};

static const char * const amd64_zmmh_names[] =
{
  "zmm0h", "zmm1h", "zmm2h", "zmm3h",
  "zmm4h", "zmm5h", "zmm6h", "zmm7h",
  "zmm8h", "zmm9h", "zmm10h", "zmm11h",
  "zmm12h", "zmm13h", "zmm14h", "zmm15h",
  "zmm16h", "zmm17h", "zmm18h", "zmm19h",
  "zmm20h", "zmm21h", "zmm22h", "zmm23h",
  "zmm24h", "zmm25h", "zmm26h", "zmm27h",
  "zmm28h", "zmm29h", "zmm30h", "zmm31h"
};

static const char * const amd64_zmm_names[] =
{
  "zmm0", "zmm1", "zmm2", "zmm3",
  "zmm4", "zmm5", "zmm6", "zmm7",
  "zmm8", "zmm9", "zmm10", "zmm11",
  "zmm12", "zmm13", "zmm14", "zmm15",
  "zmm16", "zmm17", "zmm18", "zmm19",
  "zmm20", "zmm21", "zmm22", "zmm23",
  "zmm24", "zmm25", "zmm26", "zmm27",
  "zmm28", "zmm29", "zmm30", "zmm31"
};

static const char * const amd64_xmm_avx512_names[] = {
  "xmm16", "xmm17", "xmm18", "xmm19",
  "xmm20", "xmm21", "xmm22", "xmm23",
  "xmm24", "xmm25", "xmm26", "xmm27",
  "xmm28", "xmm29", "xmm30", "xmm31"
};

static const char * const amd64_pkeys_names[] = {
  "pkru"
};

/* DWARF Register Number Mapping as defined in the System V psABI,
   section 3.6.  */

static int amd64_dwarf_regmap[] =
{
  /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI.  */
  AMD64_RAX_REGNUM, AMD64_RDX_REGNUM,
  AMD64_RCX_REGNUM, AMD64_RBX_REGNUM,
  AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,

  /* Frame Pointer Register RBP.  */
  AMD64_RBP_REGNUM,

  /* Stack Pointer Register RSP.  */
  AMD64_RSP_REGNUM,

  /* Extended Integer Registers 8 - 15.  */
  AMD64_R8_REGNUM,		/* %r8 */
  AMD64_R9_REGNUM,		/* %r9 */
  AMD64_R10_REGNUM,		/* %r10 */
  AMD64_R11_REGNUM,		/* %r11 */
  AMD64_R12_REGNUM,		/* %r12 */
  AMD64_R13_REGNUM,		/* %r13 */
  AMD64_R14_REGNUM,		/* %r14 */
  AMD64_R15_REGNUM,		/* %r15 */

  /* Return Address RA.  Mapped to RIP.  */
  AMD64_RIP_REGNUM,

  /* SSE Registers 0 - 7.  */
  AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
  AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
  AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
  AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,

  /* Extended SSE Registers 8 - 15.  */
  AMD64_XMM0_REGNUM + 8, AMD64_XMM0_REGNUM + 9,
  AMD64_XMM0_REGNUM + 10, AMD64_XMM0_REGNUM + 11,
  AMD64_XMM0_REGNUM + 12, AMD64_XMM0_REGNUM + 13,
  AMD64_XMM0_REGNUM + 14, AMD64_XMM0_REGNUM + 15,

  /* Floating Point Registers 0-7.  */
  AMD64_ST0_REGNUM + 0, AMD64_ST0_REGNUM + 1,
  AMD64_ST0_REGNUM + 2, AMD64_ST0_REGNUM + 3,
  AMD64_ST0_REGNUM + 4, AMD64_ST0_REGNUM + 5,
  AMD64_ST0_REGNUM + 6, AMD64_ST0_REGNUM + 7,

  /* MMX Registers 0 - 7.
     We have to handle those registers specifically, as their register
     number within GDB depends on the target (or they may even not be
     available at all).  */
  -1, -1, -1, -1, -1, -1, -1, -1,

  /* Control and Status Flags Register.  */
  AMD64_EFLAGS_REGNUM,

  /* Selector Registers.  */
  AMD64_ES_REGNUM,
  AMD64_CS_REGNUM,
  AMD64_SS_REGNUM,
  AMD64_DS_REGNUM,
  AMD64_FS_REGNUM,
  AMD64_GS_REGNUM,
  -1,
  -1,

  /* Segment Base Address Registers.  */
  -1,
  -1,
  -1,
  -1,

  /* Special Selector Registers.  */
  -1,
  -1,

  /* Floating Point Control Registers.  */
  AMD64_MXCSR_REGNUM,
  AMD64_FCTRL_REGNUM,
  AMD64_FSTAT_REGNUM
};

static const int amd64_dwarf_regmap_len =
  (sizeof (amd64_dwarf_regmap) / sizeof (amd64_dwarf_regmap[0]));

/* Convert DWARF register number REG to the appropriate register
   number used by GDB.  */

static int
amd64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
  i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
  int ymm0_regnum = tdep->ymm0_regnum;
  int regnum = -1;

  if (reg >= 0 && reg < amd64_dwarf_regmap_len)
    regnum = amd64_dwarf_regmap[reg];

  if (ymm0_regnum >= 0
      && i386_xmm_regnum_p (gdbarch, regnum))
    regnum += ymm0_regnum - I387_XMM0_REGNUM (tdep);

  return regnum;
}
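
/* For illustration (editor's note, not part of the original source):
   DWARF register 17 is %xmm0 in the psABI numbering, so
   amd64_dwarf_reg_to_regnum (gdbarch, 17) yields AMD64_XMM0_REGNUM on
   a plain SSE target.  On a target description with AVX,
   tdep->ymm0_regnum is valid and the result is shifted to the
   corresponding %ymm0 pseudo register instead, since the XMM
   registers are then the low halves of the YMM registers.  */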

/* Map architectural register numbers to gdb register numbers.  */

static const int amd64_arch_regmap[16] =
{
  AMD64_RAX_REGNUM,	/* %rax */
  AMD64_RCX_REGNUM,	/* %rcx */
  AMD64_RDX_REGNUM,	/* %rdx */
  AMD64_RBX_REGNUM,	/* %rbx */
  AMD64_RSP_REGNUM,	/* %rsp */
  AMD64_RBP_REGNUM,	/* %rbp */
  AMD64_RSI_REGNUM,	/* %rsi */
  AMD64_RDI_REGNUM,	/* %rdi */
  AMD64_R8_REGNUM,	/* %r8 */
  AMD64_R9_REGNUM,	/* %r9 */
  AMD64_R10_REGNUM,	/* %r10 */
  AMD64_R11_REGNUM,	/* %r11 */
  AMD64_R12_REGNUM,	/* %r12 */
  AMD64_R13_REGNUM,	/* %r13 */
  AMD64_R14_REGNUM,	/* %r14 */
  AMD64_R15_REGNUM	/* %r15 */
};

static const int amd64_arch_regmap_len =
  (sizeof (amd64_arch_regmap) / sizeof (amd64_arch_regmap[0]));

/* Convert architectural register number REG to the appropriate register
   number used by GDB.  */

static int
amd64_arch_reg_to_regnum (int reg)
{
  gdb_assert (reg >= 0 && reg < amd64_arch_regmap_len);

  return amd64_arch_regmap[reg];
}

/* Register names for byte pseudo-registers.  */

static const char * const amd64_byte_names[] =
{
  "al", "bl", "cl", "dl", "sil", "dil", "bpl", "spl",
  "r8l", "r9l", "r10l", "r11l", "r12l", "r13l", "r14l", "r15l",
  "ah", "bh", "ch", "dh"
};

/* Number of lower byte registers.  */
#define AMD64_NUM_LOWER_BYTE_REGS 16

/* Register names for word pseudo-registers.  */

static const char * const amd64_word_names[] =
{
  "ax", "bx", "cx", "dx", "si", "di", "bp", "",
  "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w"
};

/* Register names for dword pseudo-registers.  */

static const char * const amd64_dword_names[] =
{
  "eax", "ebx", "ecx", "edx", "esi", "edi", "ebp", "esp",
  "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d",
  "eip"
};

/* Return the name of register REGNUM.  */

static const char *
amd64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
{
  i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
  if (i386_byte_regnum_p (gdbarch, regnum))
    return amd64_byte_names[regnum - tdep->al_regnum];
  else if (i386_zmm_regnum_p (gdbarch, regnum))
    return amd64_zmm_names[regnum - tdep->zmm0_regnum];
  else if (i386_ymm_regnum_p (gdbarch, regnum))
    return amd64_ymm_names[regnum - tdep->ymm0_regnum];
  else if (i386_ymm_avx512_regnum_p (gdbarch, regnum))
    return amd64_ymm_avx512_names[regnum - tdep->ymm16_regnum];
  else if (i386_word_regnum_p (gdbarch, regnum))
    return amd64_word_names[regnum - tdep->ax_regnum];
  else if (i386_dword_regnum_p (gdbarch, regnum))
    return amd64_dword_names[regnum - tdep->eax_regnum];
  else
    return i386_pseudo_register_name (gdbarch, regnum);
}

static value *
amd64_pseudo_register_read_value (gdbarch *gdbarch, frame_info_ptr next_frame,
				  int regnum)
{
  i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);

  if (i386_byte_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->al_regnum;

      /* Extract (always little endian).  */
      if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
	{
	  gpnum -= AMD64_NUM_LOWER_BYTE_REGS;

	  /* Special handling for AH, BH, CH, DH.  */
	  return pseudo_from_raw_part (next_frame, regnum, gpnum, 1);
	}
      else
	return pseudo_from_raw_part (next_frame, regnum, gpnum, 0);
    }
  else if (i386_dword_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->eax_regnum;

      return pseudo_from_raw_part (next_frame, regnum, gpnum, 0);
    }
  else
    return i386_pseudo_register_read_value (gdbarch, next_frame, regnum);
}

static void
amd64_pseudo_register_write (gdbarch *gdbarch, frame_info_ptr next_frame,
			     int regnum, gdb::array_view<const gdb_byte> buf)
{
  i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);

  if (i386_byte_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->al_regnum;

      if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
	{
	  gpnum -= AMD64_NUM_LOWER_BYTE_REGS;
	  pseudo_to_raw_part (next_frame, buf, gpnum, 1);
	}
      else
	pseudo_to_raw_part (next_frame, buf, gpnum, 0);
    }
  else if (i386_dword_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->eax_regnum;
      pseudo_to_raw_part (next_frame, buf, gpnum, 0);
    }
  else
    i386_pseudo_register_write (gdbarch, next_frame, regnum, buf);
}

/* Implement the 'ax_pseudo_register_collect' gdbarch method.  */

static int
amd64_ax_pseudo_register_collect (struct gdbarch *gdbarch,
				  struct agent_expr *ax, int regnum)
{
  i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);

  if (i386_byte_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->al_regnum;

      if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
	ax_reg_mask (ax, gpnum - AMD64_NUM_LOWER_BYTE_REGS);
      else
	ax_reg_mask (ax, gpnum);
      return 0;
    }
  else if (i386_dword_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->eax_regnum;

      ax_reg_mask (ax, gpnum);
      return 0;
    }
  else
    return i386_ax_pseudo_register_collect (gdbarch, ax, regnum);
}


/* Register classes as defined in the psABI.  */

enum amd64_reg_class
{
  AMD64_INTEGER,
  AMD64_SSE,
  AMD64_SSEUP,
  AMD64_X87,
  AMD64_X87UP,
  AMD64_COMPLEX_X87,
  AMD64_NO_CLASS,
  AMD64_MEMORY
};

/* Return the union class of CLASS1 and CLASS2.  See the psABI for
   details.  */

static enum amd64_reg_class
amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2)
{
  /* Rule (a): If both classes are equal, this is the resulting class.  */
  if (class1 == class2)
    return class1;

  /* Rule (b): If one of the classes is NO_CLASS, the resulting class
     is the other class.  */
  if (class1 == AMD64_NO_CLASS)
    return class2;
  if (class2 == AMD64_NO_CLASS)
    return class1;

  /* Rule (c): If one of the classes is MEMORY, the result is MEMORY.  */
  if (class1 == AMD64_MEMORY || class2 == AMD64_MEMORY)
    return AMD64_MEMORY;

  /* Rule (d): If one of the classes is INTEGER, the result is INTEGER.  */
  if (class1 == AMD64_INTEGER || class2 == AMD64_INTEGER)
    return AMD64_INTEGER;

  /* Rule (e): If one of the classes is X87, X87UP, COMPLEX_X87 class,
     MEMORY is used as class.  */
  if (class1 == AMD64_X87 || class1 == AMD64_X87UP
      || class1 == AMD64_COMPLEX_X87 || class2 == AMD64_X87
      || class2 == AMD64_X87UP || class2 == AMD64_COMPLEX_X87)
    return AMD64_MEMORY;

  /* Rule (f): Otherwise class SSE is used.  */
  return AMD64_SSE;
}
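
/* A few worked examples of the merge rules above (editor's note, not
   part of the original source):

     merge (AMD64_NO_CLASS, AMD64_SSE)  -> AMD64_SSE      (rule b)
     merge (AMD64_INTEGER, AMD64_SSE)   -> AMD64_INTEGER  (rule d)
     merge (AMD64_X87, AMD64_SSE)       -> AMD64_MEMORY   (rule e)

   This is what makes e.g. struct { int i; float f; } end up INTEGER:
   its single eightbyte merges INTEGER with SSE.  */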

static void amd64_classify (struct type *type, enum amd64_reg_class theclass[2]);

/* Return true if TYPE is a structure or union with unaligned fields.  */

static bool
amd64_has_unaligned_fields (struct type *type)
{
  if (type->code () == TYPE_CODE_STRUCT
      || type->code () == TYPE_CODE_UNION)
    {
      for (int i = 0; i < type->num_fields (); i++)
	{
	  struct type *subtype = check_typedef (type->field (i).type ());

	  /* Ignore static fields, empty fields (for example nested
	     empty structures), and bitfields (these are handled by
	     the caller).  */
	  if (type->field (i).is_static ()
	      || (type->field (i).bitsize () == 0
		  && subtype->length () == 0)
	      || type->field (i).is_packed ())
	    continue;

	  int bitpos = type->field (i).loc_bitpos ();

	  if (bitpos % 8 != 0)
	    return true;

	  int align = type_align (subtype);
	  if (align == 0)
	    error (_("could not determine alignment of type"));

	  int bytepos = bitpos / 8;
	  if (bytepos % align != 0)
	    return true;

	  if (amd64_has_unaligned_fields (subtype))
	    return true;
	}
    }

  return false;
}

/* Classify field I of TYPE starting at BITOFFSET according to the rules for
   structures and union types, and store the result in THECLASS.  */

static void
amd64_classify_aggregate_field (struct type *type, int i,
				enum amd64_reg_class theclass[2],
				unsigned int bitoffset)
{
  struct type *subtype = check_typedef (type->field (i).type ());
  enum amd64_reg_class subclass[2];
  int bitsize = type->field (i).bitsize ();

  if (bitsize == 0)
    bitsize = subtype->length () * 8;

  /* Ignore static fields, or empty fields, for example nested
     empty structures.  */
  if (type->field (i).is_static () || bitsize == 0)
    return;

  int bitpos = bitoffset + type->field (i).loc_bitpos ();
  int pos = bitpos / 64;
  int endpos = (bitpos + bitsize - 1) / 64;

  if (subtype->code () == TYPE_CODE_STRUCT
      || subtype->code () == TYPE_CODE_UNION)
    {
      /* Each field of an object is classified recursively.  */
      int j;
      for (j = 0; j < subtype->num_fields (); j++)
	amd64_classify_aggregate_field (subtype, j, theclass, bitpos);
      return;
    }

  gdb_assert (pos == 0 || pos == 1);

  amd64_classify (subtype, subclass);
  theclass[pos] = amd64_merge_classes (theclass[pos], subclass[0]);
  if (bitsize <= 64 && pos == 0 && endpos == 1)
    /* This is a bit of an odd case:  We have a field that would
       normally fit in one of the two eightbytes, except that
       it is placed in a way that this field straddles them.
       This has been seen with a structure containing an array.

       The ABI is a bit unclear in this case, but we assume that
       this field's class (stored in subclass[0]) must also be merged
       into class[1].  In other words, our field has a piece stored
       in the second eight-byte, and thus its class applies to
       the second eight-byte as well.

       In the case where the field length exceeds 8 bytes,
       it should not be necessary to merge the field class
       into class[1].  As LEN > 8, subclass[1] is necessarily
       different from AMD64_NO_CLASS.  If subclass[1] is equal
       to subclass[0], then the normal class[1]/subclass[1]
       merging will take care of everything.  For subclass[1]
       to be different from subclass[0], I can only see the case
       where we have a SSE/SSEUP or X87/X87UP pair, which both
       use up all 16 bytes of the aggregate, and are already
       handled just fine (because each portion sits on its own
       8-byte).  */
    theclass[1] = amd64_merge_classes (theclass[1], subclass[0]);
  if (pos == 0)
    theclass[1] = amd64_merge_classes (theclass[1], subclass[1]);
}

/* Classify TYPE according to the rules for aggregate (structures and
   arrays) and union types, and store the result in CLASS.  */

static void
amd64_classify_aggregate (struct type *type, enum amd64_reg_class theclass[2])
{
  /* 1. If the size of an object is larger than two times eight bytes, or
     it is a non-trivial C++ object, or it has unaligned fields, then it
     has class memory.

     It is important that the trivially_copyable check is before the
     unaligned fields check, as C++ classes with virtual base classes
     will have fields (for the virtual base classes) with non-constant
     loc_bitpos attributes, which will cause an assert to trigger within
     the unaligned field check.  As classes with virtual bases are not
     trivially copyable, checking that first avoids this problem.  */
  if (TYPE_HAS_DYNAMIC_LENGTH (type)
      || type->length () > 16
      || !language_pass_by_reference (type).trivially_copyable
      || amd64_has_unaligned_fields (type))
    {
      theclass[0] = theclass[1] = AMD64_MEMORY;
      return;
    }

  /* 2. Both eightbytes get initialized to class NO_CLASS.  */
  theclass[0] = theclass[1] = AMD64_NO_CLASS;

  /* 3. Each field of an object is classified recursively so that
     always two fields are considered.  The resulting class is
     calculated according to the classes of the fields in the
     eightbyte: */

  if (type->code () == TYPE_CODE_ARRAY)
    {
      struct type *subtype = check_typedef (type->target_type ());

      /* All fields in an array have the same type.  */
      amd64_classify (subtype, theclass);
      if (type->length () > 8 && theclass[1] == AMD64_NO_CLASS)
	theclass[1] = theclass[0];
    }
  else
    {
      int i;

      /* Structure or union.  */
      gdb_assert (type->code () == TYPE_CODE_STRUCT
		  || type->code () == TYPE_CODE_UNION);

      for (i = 0; i < type->num_fields (); i++)
	amd64_classify_aggregate_field (type, i, theclass, 0);
    }

  /* 4. Then a post merger cleanup is done:  */

  /* Rule (a): If one of the classes is MEMORY, the whole argument is
     passed in memory.  */
  if (theclass[0] == AMD64_MEMORY || theclass[1] == AMD64_MEMORY)
    theclass[0] = theclass[1] = AMD64_MEMORY;

  /* Rule (b): If SSEUP is not preceded by SSE, it is converted to
     SSE.  */
  if (theclass[0] == AMD64_SSEUP)
    theclass[0] = AMD64_SSE;
  if (theclass[1] == AMD64_SSEUP && theclass[0] != AMD64_SSE)
    theclass[1] = AMD64_SSE;
}
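
/* Illustrative examples of aggregate classification (editor's note,
   not part of the original source):

     struct { long l; double d; }  -> { INTEGER, SSE }
     double a[2]                   -> { SSE, SSE }
     struct { char c[17]; }        -> { MEMORY, MEMORY }  (> 16 bytes)

   In the first struct, the first eightbyte holds the long (INTEGER)
   and the second eightbyte holds the double (SSE).  */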

/* Classify TYPE, and store the result in CLASS.  */

static void
amd64_classify (struct type *type, enum amd64_reg_class theclass[2])
{
  enum type_code code = type->code ();
  int len = type->length ();

  theclass[0] = theclass[1] = AMD64_NO_CLASS;

  /* Arguments of types (signed and unsigned) _Bool, char, short, int,
     long, long long, and pointers are in the INTEGER class.  Similarly,
     range types, used by languages such as Ada, are also in the INTEGER
     class.  */
  if ((code == TYPE_CODE_INT || code == TYPE_CODE_ENUM
       || code == TYPE_CODE_BOOL || code == TYPE_CODE_RANGE
       || code == TYPE_CODE_CHAR
       || code == TYPE_CODE_PTR || TYPE_IS_REFERENCE (type))
      && (len == 1 || len == 2 || len == 4 || len == 8))
    theclass[0] = AMD64_INTEGER;

  /* Arguments of types _Float16, float, double, _Decimal32, _Decimal64 and
     __m64 are in class SSE.  */
  else if ((code == TYPE_CODE_FLT || code == TYPE_CODE_DECFLOAT)
	   && (len == 2 || len == 4 || len == 8))
    /* FIXME: __m64 .  */
    theclass[0] = AMD64_SSE;

  /* Arguments of types __float128, _Decimal128 and __m128 are split into
     two halves.  The least significant ones belong to class SSE, the most
     significant one to class SSEUP.  */
  else if (code == TYPE_CODE_DECFLOAT && len == 16)
    /* FIXME: __float128, __m128.  */
    theclass[0] = AMD64_SSE, theclass[1] = AMD64_SSEUP;

  /* The 64-bit mantissa of arguments of type long double belongs to
     class X87, the 16-bit exponent plus 6 bytes of padding belongs to
     class X87UP.  */
  else if (code == TYPE_CODE_FLT && len == 16)
    /* Class X87 and X87UP.  */
    theclass[0] = AMD64_X87, theclass[1] = AMD64_X87UP;

  /* Arguments of complex T - where T is one of the types _Float16, float or
     double - get treated as if they are implemented as:

     struct complexT {
       T real;
       T imag;
     };

  */
  else if (code == TYPE_CODE_COMPLEX && (len == 8 || len == 4))
    theclass[0] = AMD64_SSE;
  else if (code == TYPE_CODE_COMPLEX && len == 16)
    theclass[0] = theclass[1] = AMD64_SSE;

  /* A variable of type complex long double is classified as type
     COMPLEX_X87.  */
  else if (code == TYPE_CODE_COMPLEX && len == 32)
    theclass[0] = AMD64_COMPLEX_X87;

  /* Aggregates.  */
  else if (code == TYPE_CODE_ARRAY || code == TYPE_CODE_STRUCT
	   || code == TYPE_CODE_UNION)
    amd64_classify_aggregate (type, theclass);
}
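
/* Scalar classification at a glance (editor's note, not part of the
   original source):

     int, long, pointers         -> { INTEGER, NO_CLASS }
     float, double               -> { SSE, NO_CLASS }
     long double (16 bytes)      -> { X87, X87UP }
     double _Complex (16 bytes)  -> { SSE, SSE }
     long double _Complex        -> { COMPLEX_X87, NO_CLASS }  */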

static enum return_value_convention
amd64_return_value (struct gdbarch *gdbarch, struct value *function,
		    struct type *type, struct regcache *regcache,
		    struct value **read_value, const gdb_byte *writebuf)
{
  enum amd64_reg_class theclass[2];
  int len = type->length ();
  static int integer_regnum[] = { AMD64_RAX_REGNUM, AMD64_RDX_REGNUM };
  static int sse_regnum[] = { AMD64_XMM0_REGNUM, AMD64_XMM1_REGNUM };
  int integer_reg = 0;
  int sse_reg = 0;
  int i;

  gdb_assert (!(read_value && writebuf));

  /* 1. Classify the return type with the classification algorithm.  */
  amd64_classify (type, theclass);

  /* 2. If the type has class MEMORY, then the caller provides space
     for the return value and passes the address of this storage in
     %rdi as if it were the first argument to the function.  In effect,
     this address becomes a hidden first argument.

     On return %rax will contain the address that has been passed in
     by the caller in %rdi.  */
  if (theclass[0] == AMD64_MEMORY)
    {
      /* As indicated by the comment above, the ABI guarantees that we
	 can always find the return value just after the function has
	 returned.  */

      if (read_value != nullptr)
	{
	  ULONGEST addr;

	  regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
	  *read_value = value_at_non_lval (type, addr);
	}

      return RETURN_VALUE_ABI_RETURNS_ADDRESS;
    }

  gdb_byte *readbuf = nullptr;
  if (read_value != nullptr)
    {
      *read_value = value::allocate (type);
      readbuf = (*read_value)->contents_raw ().data ();
    }

  /* 8. If the class is COMPLEX_X87, the real part of the value is
     returned in %st0 and the imaginary part in %st1.  */
  if (theclass[0] == AMD64_COMPLEX_X87)
    {
      if (readbuf)
	{
	  regcache->raw_read (AMD64_ST0_REGNUM, readbuf);
	  regcache->raw_read (AMD64_ST1_REGNUM, readbuf + 16);
	}

      if (writebuf)
	{
	  i387_return_value (gdbarch, regcache);
	  regcache->raw_write (AMD64_ST0_REGNUM, writebuf);
	  regcache->raw_write (AMD64_ST1_REGNUM, writebuf + 16);

	  /* Fix up the tag word such that both %st(0) and %st(1) are
	     marked as valid.  */
	  regcache_raw_write_unsigned (regcache, AMD64_FTAG_REGNUM, 0xfff);
	}

      return RETURN_VALUE_REGISTER_CONVENTION;
    }

  gdb_assert (theclass[1] != AMD64_MEMORY);
  gdb_assert (len <= 16);

  for (i = 0; len > 0; i++, len -= 8)
    {
      int regnum = -1;
      int offset = 0;

      switch (theclass[i])
	{
	case AMD64_INTEGER:
	  /* 3. If the class is INTEGER, the next available register
	     of the sequence %rax, %rdx is used.  */
	  regnum = integer_regnum[integer_reg++];
	  break;

	case AMD64_SSE:
	  /* 4. If the class is SSE, the next available SSE register
	     of the sequence %xmm0, %xmm1 is used.  */
	  regnum = sse_regnum[sse_reg++];
	  break;

	case AMD64_SSEUP:
	  /* 5. If the class is SSEUP, the eightbyte is passed in the
	     upper half of the last used SSE register.  */
	  gdb_assert (sse_reg > 0);
	  regnum = sse_regnum[sse_reg - 1];
	  offset = 8;
	  break;

	case AMD64_X87:
	  /* 6. If the class is X87, the value is returned on the X87
	     stack in %st0 as 80-bit x87 number.  */
	  regnum = AMD64_ST0_REGNUM;
	  if (writebuf)
	    i387_return_value (gdbarch, regcache);
	  break;

	case AMD64_X87UP:
	  /* 7. If the class is X87UP, the value is returned together
	     with the previous X87 value in %st0.  */
	  gdb_assert (i > 0 && theclass[0] == AMD64_X87);
	  regnum = AMD64_ST0_REGNUM;
	  offset = 8;
	  len = 2;
	  break;

	case AMD64_NO_CLASS:
	  continue;

	default:
	  gdb_assert (!"Unexpected register class.");
	}

      gdb_assert (regnum != -1);

      if (readbuf)
	regcache->raw_read_part (regnum, offset, std::min (len, 8),
				 readbuf + i * 8);
      if (writebuf)
	regcache->raw_write_part (regnum, offset, std::min (len, 8),
				  writebuf + i * 8);
    }

  return RETURN_VALUE_REGISTER_CONVENTION;
}
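
/* For illustration (editor's note, not part of the original source):
   a function returning struct { long l; double d; } has class
   { INTEGER, SSE }, so the loop above reads the first eightbyte from
   %rax and the second from %xmm0.  A 17-byte struct instead
   classifies as MEMORY and takes the RETURN_VALUE_ABI_RETURNS_ADDRESS
   path, with %rax holding the address of the caller-provided
   buffer.  */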

static CORE_ADDR
amd64_push_arguments (struct regcache *regcache, int nargs, struct value **args,
		      CORE_ADDR sp, function_call_return_method return_method)
{
  static int integer_regnum[] =
  {
    AMD64_RDI_REGNUM,		/* %rdi */
    AMD64_RSI_REGNUM,		/* %rsi */
    AMD64_RDX_REGNUM,		/* %rdx */
    AMD64_RCX_REGNUM,		/* %rcx */
    AMD64_R8_REGNUM,		/* %r8 */
    AMD64_R9_REGNUM		/* %r9 */
  };
  static int sse_regnum[] =
  {
    /* %xmm0 ... %xmm7 */
    AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
    AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
    AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
    AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
  };
  struct value **stack_args = XALLOCAVEC (struct value *, nargs);
  int num_stack_args = 0;
  int num_elements = 0;
  int element = 0;
  int integer_reg = 0;
  int sse_reg = 0;
  int i;

  /* Reserve a register for the "hidden" argument.  */
  if (return_method == return_method_struct)
    integer_reg++;

  for (i = 0; i < nargs; i++)
    {
      struct type *type = args[i]->type ();
      int len = type->length ();
      enum amd64_reg_class theclass[2];
      int needed_integer_regs = 0;
      int needed_sse_regs = 0;
      int j;

      /* Classify argument.  */
      amd64_classify (type, theclass);

      /* Calculate the number of integer and SSE registers needed for
	 this argument.  */
      for (j = 0; j < 2; j++)
	{
	  if (theclass[j] == AMD64_INTEGER)
	    needed_integer_regs++;
	  else if (theclass[j] == AMD64_SSE)
	    needed_sse_regs++;
	}

      /* Check whether enough registers are available, and if the
	 argument should be passed in registers at all.  */
      if (integer_reg + needed_integer_regs > ARRAY_SIZE (integer_regnum)
	  || sse_reg + needed_sse_regs > ARRAY_SIZE (sse_regnum)
	  || (needed_integer_regs == 0 && needed_sse_regs == 0))
	{
	  /* The argument will be passed on the stack.  */
	  num_elements += ((len + 7) / 8);
	  stack_args[num_stack_args++] = args[i];
	}
      else
	{
	  /* The argument will be passed in registers.  */
	  const gdb_byte *valbuf = args[i]->contents ().data ();
	  gdb_byte buf[8];

	  gdb_assert (len <= 16);

	  for (j = 0; len > 0; j++, len -= 8)
	    {
	      int regnum = -1;
	      int offset = 0;

	      switch (theclass[j])
		{
		case AMD64_INTEGER:
		  regnum = integer_regnum[integer_reg++];
		  break;

		case AMD64_SSE:
		  regnum = sse_regnum[sse_reg++];
		  break;

		case AMD64_SSEUP:
		  gdb_assert (sse_reg > 0);
		  regnum = sse_regnum[sse_reg - 1];
		  offset = 8;
		  break;

		case AMD64_NO_CLASS:
		  continue;

		default:
		  gdb_assert (!"Unexpected register class.");
		}

	      gdb_assert (regnum != -1);
	      memset (buf, 0, sizeof buf);
	      memcpy (buf, valbuf + j * 8, std::min (len, 8));
	      regcache->raw_write_part (regnum, offset, 8, buf);
	    }
	}
    }

  /* Allocate space for the arguments on the stack.  */
  sp -= num_elements * 8;

  /* The psABI says that "The end of the input argument area shall be
     aligned on a 16 byte boundary."  */
  sp &= ~0xf;

  /* Write out the arguments to the stack.  */
  for (i = 0; i < num_stack_args; i++)
    {
      struct type *type = stack_args[i]->type ();
      const gdb_byte *valbuf = stack_args[i]->contents ().data ();
      int len = type->length ();

      write_memory (sp + element * 8, valbuf, len);
      element += ((len + 7) / 8);
    }

  /* The psABI says that "For calls that may call functions that use
     varargs or stdargs (prototype-less calls or calls to functions
     containing ellipsis (...) in the declaration) %al is used as
     hidden argument to specify the number of SSE registers used.  */
  regcache_raw_write_unsigned (regcache, AMD64_RAX_REGNUM, sse_reg);
  return sp;
}
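
/* For illustration (editor's note, not part of the original source):
   pushing the arguments for f (long a, double b, struct { char c[24]; } s)
   writes a to %rdi and b to %xmm0, spills s (class MEMORY) onto the
   16-byte aligned stack area, and leaves %al = 1, the number of SSE
   registers used.  */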

static CORE_ADDR
amd64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
		       struct regcache *regcache, CORE_ADDR bp_addr,
		       int nargs, struct value **args, CORE_ADDR sp,
		       function_call_return_method return_method,
		       CORE_ADDR struct_addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  gdb_byte buf[8];

  /* BND registers can be in arbitrary values at the moment of the
     inferior call.  This can cause boundary violations that are not
     due to a real bug or even desired by the user.  The best to be done
     is set the BND registers to allow access to the whole memory, INIT
     state, before pushing the inferior call.  */
  i387_reset_bnd_regs (gdbarch, regcache);

  /* Pass arguments.  */
  sp = amd64_push_arguments (regcache, nargs, args, sp, return_method);

  /* Pass "hidden" argument.  */
  if (return_method == return_method_struct)
    {
      store_unsigned_integer (buf, 8, byte_order, struct_addr);
      regcache->cooked_write (AMD64_RDI_REGNUM, buf);
    }

  /* Store return address.  */
  sp -= 8;
  store_unsigned_integer (buf, 8, byte_order, bp_addr);
  write_memory (sp, buf, 8);

  /* Finally, update the stack pointer...  */
  store_unsigned_integer (buf, 8, byte_order, sp);
  regcache->cooked_write (AMD64_RSP_REGNUM, buf);

  /* ...and fake a frame pointer.  */
  regcache->cooked_write (AMD64_RBP_REGNUM, buf);

  return sp + 16;
}

/* Displaced instruction handling.  */

/* A partially decoded instruction.
   This contains enough details for displaced stepping purposes.  */

struct amd64_insn
{
  /* The number of opcode bytes.  */
  int opcode_len;
  /* The offset of the REX/VEX instruction encoding prefix or -1 if
     not present.  */
  int enc_prefix_offset;
  /* The offset to the first opcode byte.  */
  int opcode_offset;
  /* The offset to the modrm byte or -1 if not present.  */
  int modrm_offset;

  /* The raw instruction.  */
  gdb_byte *raw_insn;
};

struct amd64_displaced_step_copy_insn_closure
  : public displaced_step_copy_insn_closure
{
  amd64_displaced_step_copy_insn_closure (int insn_buf_len)
    : insn_buf (insn_buf_len, 0)
  {
  }

  /* For rip-relative insns, saved copy of the reg we use instead of %rip.  */
  int tmp_used = 0;
  int tmp_regno;
  ULONGEST tmp_save;

  /* Details of the instruction.  */
  struct amd64_insn insn_details;

  /* The possibly modified insn.  */
  gdb::byte_vector insn_buf;
};

/* WARNING: Keep onebyte_has_modrm, twobyte_has_modrm in sync with
   ../opcodes/i386-dis.c (until libopcodes exports them, or an alternative,
   at which point delete these in favor of libopcodes' versions).  */

static const unsigned char onebyte_has_modrm[256] = {
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
  /*	   -------------------------------	  */
  /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
  /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
  /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
  /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
  /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
  /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
  /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
  /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
  /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
  /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
  /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
  /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
  /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
  /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
  /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
  /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1  /* f0 */
  /*	   -------------------------------	  */
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
};

static const unsigned char twobyte_has_modrm[256] = {
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
  /*	   -------------------------------	  */
  /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
  /* 10 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 1f */
  /* 20 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 2f */
  /* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */
  /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
  /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
  /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
  /* 70 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 7f */
  /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
  /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
  /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
  /* b0 */ 1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1, /* bf */
  /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
  /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
  /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
  /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0  /* ff */
  /*	   -------------------------------	  */
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
};

static int amd64_syscall_p (const struct amd64_insn *insn, int *lengthp);

static int
rex_prefix_p (gdb_byte pfx)
{
  return REX_PREFIX_P (pfx);
}

/* True if PFX is the start of the 2-byte VEX prefix.  */

static bool
vex2_prefix_p (gdb_byte pfx)
{
  return pfx == 0xc5;
}

/* True if PFX is the start of the 3-byte VEX prefix.  */

static bool
vex3_prefix_p (gdb_byte pfx)
{
  return pfx == 0xc4;
}

/* Skip the legacy instruction prefixes in INSN.
   We assume INSN is properly sentineled so we don't have to worry
   about falling off the end of the buffer.  */

static gdb_byte *
amd64_skip_prefixes (gdb_byte *insn)
{
  while (1)
    {
      switch (*insn)
	{
	case DATA_PREFIX_OPCODE:
	case ADDR_PREFIX_OPCODE:
	case CS_PREFIX_OPCODE:
	case DS_PREFIX_OPCODE:
	case ES_PREFIX_OPCODE:
	case FS_PREFIX_OPCODE:
	case GS_PREFIX_OPCODE:
	case SS_PREFIX_OPCODE:
	case LOCK_PREFIX_OPCODE:
	case REPE_PREFIX_OPCODE:
	case REPNE_PREFIX_OPCODE:
	  ++insn;
	  continue;
	default:
	  break;
	}
      break;
    }

  return insn;
}
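
/* For illustration (editor's note, not part of the original source):
   given the bytes f0 48 0f c1 08 (lock xadd %rcx,(%rax)), the loop
   above consumes the 0xf0 LOCK prefix and stops at 0x48, since a REX
   byte is not a legacy prefix; REX/VEX are handled separately by the
   caller.  */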

/* Return an integer register (other than RSP) that is unused as an input
   operand in INSN.
   In order to not require adding a rex prefix if the insn doesn't already
   have one, the result is restricted to RAX ... RDI, sans RSP.
   The register numbering of the result follows architecture ordering,
   e.g. RDI = 7.  */

static int
amd64_get_unused_input_int_reg (const struct amd64_insn *details)
{
  /* 1 bit for each reg */
  int used_regs_mask = 0;

  /* There can be at most 3 int regs used as inputs in an insn, and we have
     7 to choose from (RAX ... RDI, sans RSP).
     This allows us to take a conservative approach and keep things simple.
     E.g. By avoiding RAX, we don't have to specifically watch for opcodes
     that implicitly specify RAX.  */

  /* Avoid RAX.  */
  used_regs_mask |= 1 << EAX_REG_NUM;
  /* Similarly avoid RDX, implicit operand in divides.  */
  used_regs_mask |= 1 << EDX_REG_NUM;
  /* Avoid RSP.  */
  used_regs_mask |= 1 << ESP_REG_NUM;

  /* If the opcode is one byte long and there's no ModRM byte,
     assume the opcode specifies a register.  */
  if (details->opcode_len == 1 && details->modrm_offset == -1)
    used_regs_mask |= 1 << (details->raw_insn[details->opcode_offset] & 7);

  /* Mark used regs in the modrm/sib bytes.  */
  if (details->modrm_offset != -1)
    {
      int modrm = details->raw_insn[details->modrm_offset];
      int mod = MODRM_MOD_FIELD (modrm);
      int reg = MODRM_REG_FIELD (modrm);
      int rm = MODRM_RM_FIELD (modrm);
      int have_sib = mod != 3 && rm == 4;

      /* Assume the reg field of the modrm byte specifies a register.  */
      used_regs_mask |= 1 << reg;

      if (have_sib)
	{
	  int base = SIB_BASE_FIELD (details->raw_insn[details->modrm_offset + 1]);
	  int idx = SIB_INDEX_FIELD (details->raw_insn[details->modrm_offset + 1]);
	  used_regs_mask |= 1 << base;
	  used_regs_mask |= 1 << idx;
	}
      else
	{
	  used_regs_mask |= 1 << rm;
	}
    }

  gdb_assert (used_regs_mask < 256);
  gdb_assert (used_regs_mask != 255);

  /* Finally, find a free reg.  */
  {
    int i;

    for (i = 0; i < 8; ++i)
      {
	if (! (used_regs_mask & (1 << i)))
	  return i;
      }
  }

  /* We shouldn't get here.  */
  internal_error (_("unable to find free reg"));
}
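
/* Worked example (editor's note, not part of the original source):
   for 48 8b 3e (mov (%rsi),%rdi) the mask covers %rax, %rdx and %rsp
   (always), %rdi (ModRM reg field) and %rsi (ModRM r/m field), so the
   lowest free register is 1, i.e. %rcx in architecture ordering.  */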

/* Extract the details of INSN that we need.  */

static void
amd64_get_insn_details (gdb_byte *insn, struct amd64_insn *details)
{
  gdb_byte *start = insn;
  int need_modrm;

  details->raw_insn = insn;

  details->opcode_len = -1;
  details->enc_prefix_offset = -1;
  details->opcode_offset = -1;
  details->modrm_offset = -1;

  /* Skip legacy instruction prefixes.  */
  insn = amd64_skip_prefixes (insn);

  /* Skip REX/VEX instruction encoding prefixes.  */
  if (rex_prefix_p (*insn))
    {
      details->enc_prefix_offset = insn - start;
      ++insn;
    }
  else if (vex2_prefix_p (*insn))
    {
      /* Don't record the offset in this case because this prefix has
	 no REX.B equivalent.  */
      insn += 2;
    }
  else if (vex3_prefix_p (*insn))
    {
      details->enc_prefix_offset = insn - start;
      insn += 3;
    }

  details->opcode_offset = insn - start;

  if (*insn == TWO_BYTE_OPCODE_ESCAPE)
    {
      /* Two or three-byte opcode.  */
      ++insn;
      need_modrm = twobyte_has_modrm[*insn];

      /* Check for three-byte opcode.  */
      switch (*insn)
	{
	case 0x24:
	case 0x25:
	case 0x38:
	case 0x3a:
	case 0x7a:
	case 0x7b:
	  ++insn;
	  details->opcode_len = 3;
	  break;
	default:
	  details->opcode_len = 2;
	  break;
	}
    }
  else
    {
      /* One-byte opcode.  */
      need_modrm = onebyte_has_modrm[*insn];
      details->opcode_len = 1;
    }

  if (need_modrm)
    {
      ++insn;
      details->modrm_offset = insn - start;
    }
}
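
/* For illustration (editor's note, not part of the original source):
   decoding 48 8d 05 d5 03 00 00 (lea 0x3d5(%rip),%rax) yields
   enc_prefix_offset = 0 (the REX.W byte), opcode_offset = 1,
   opcode_len = 1 and modrm_offset = 2; onebyte_has_modrm[0x8d] is 1,
   which is why the ModRM byte gets recorded.  */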

/* Update %rip-relative addressing in INSN.

   %rip-relative addressing only uses a 32-bit displacement.
   32 bits is not enough to be guaranteed to cover the distance between where
   the real instruction is and where its copy is.
   Convert the insn to use base+disp addressing.
   We set base = pc + insn_length so we can leave disp unchanged.  */

static void
fixup_riprel (struct gdbarch *gdbarch,
	      amd64_displaced_step_copy_insn_closure *dsc,
	      CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
{
  const struct amd64_insn *insn_details = &dsc->insn_details;
  int modrm_offset = insn_details->modrm_offset;
  CORE_ADDR rip_base;
  int insn_length;
  int arch_tmp_regno, tmp_regno;
  ULONGEST orig_value;

  /* Compute the rip-relative address.  */
  insn_length = gdb_buffered_insn_length (gdbarch, dsc->insn_buf.data (),
					  dsc->insn_buf.size (), from);
  rip_base = from + insn_length;

  /* We need a register to hold the address.
     Pick one not used in the insn.
     NOTE: arch_tmp_regno uses architecture ordering, e.g. RDI = 7.  */
  arch_tmp_regno = amd64_get_unused_input_int_reg (insn_details);
  tmp_regno = amd64_arch_reg_to_regnum (arch_tmp_regno);

  /* Position of the not-B bit in the 3-byte VEX prefix (in byte 1).  */
  static constexpr gdb_byte VEX3_NOT_B = 0x20;

  /* REX.B should be unset (VEX.!B set) as we were using rip-relative
     addressing, but ensure it's unset (set for VEX) anyway, tmp_regno
     is not r8-r15.  */
  if (insn_details->enc_prefix_offset != -1)
    {
      gdb_byte *pfx = &dsc->insn_buf[insn_details->enc_prefix_offset];
      if (rex_prefix_p (pfx[0]))
	pfx[0] &= ~REX_B;
      else if (vex3_prefix_p (pfx[0]))
	pfx[1] |= VEX3_NOT_B;
      else
	gdb_assert_not_reached ("unhandled prefix");
    }

  regcache_cooked_read_unsigned (regs, tmp_regno, &orig_value);
  dsc->tmp_regno = tmp_regno;
  dsc->tmp_save = orig_value;
  dsc->tmp_used = 1;

  /* Convert the ModRM field to be base+disp.  */
  dsc->insn_buf[modrm_offset] &= ~0xc7;
  dsc->insn_buf[modrm_offset] |= 0x80 + arch_tmp_regno;

  regcache_cooked_write_unsigned (regs, tmp_regno, rip_base);

  displaced_debug_printf ("%%rip-relative addressing used.");
  displaced_debug_printf ("using temp reg %d, old value %s, new value %s",
			  dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save),
			  paddress (gdbarch, rip_base));
}
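
/* For illustration (editor's note, not part of the original source):
   for 48 8d 35 00 10 00 00 (lea 0x1000(%rip),%rsi) the unused
   register search picks %rcx, the ModRM byte 0x35 (mod=00, rm=101,
   i.e. rip-relative) is rewritten to 0xb1 (mod=10, rm=001), turning
   the copy into lea 0x1000(%rcx),%rsi, and %rcx is preloaded with
   FROM + insn_length so the unchanged displacement still resolves to
   the original target.  */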

static void
fixup_displaced_copy (struct gdbarch *gdbarch,
		      amd64_displaced_step_copy_insn_closure *dsc,
		      CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
{
  const struct amd64_insn *details = &dsc->insn_details;

  if (details->modrm_offset != -1)
    {
      gdb_byte modrm = details->raw_insn[details->modrm_offset];

      if ((modrm & 0xc7) == 0x05)
	{
	  /* The insn uses rip-relative addressing.
	     Deal with it.  */
	  fixup_riprel (gdbarch, dsc, from, to, regs);
	}
    }
}

displaced_step_copy_insn_closure_up
amd64_displaced_step_copy_insn (struct gdbarch *gdbarch,
				CORE_ADDR from, CORE_ADDR to,
				struct regcache *regs)
{
  int len = gdbarch_max_insn_length (gdbarch);
  /* Extra space for sentinels so fixup_{riprel,displaced_copy} don't have to
     continually watch for running off the end of the buffer.  */
  int fixup_sentinel_space = len;
  std::unique_ptr<amd64_displaced_step_copy_insn_closure> dsc
    (new amd64_displaced_step_copy_insn_closure (len + fixup_sentinel_space));
  gdb_byte *buf = &dsc->insn_buf[0];
  struct amd64_insn *details = &dsc->insn_details;

  read_memory (from, buf, len);

  /* Set up the sentinel space so we don't have to worry about running
     off the end of the buffer.  An excessive number of leading prefixes
     could otherwise cause this.  */
  memset (buf + len, 0, fixup_sentinel_space);

  amd64_get_insn_details (buf, details);

  /* GDB may get control back after the insn after the syscall.
     Presumably this is a kernel bug.
     If this is a syscall, make sure there's a nop afterwards.  */
  {
    int syscall_length;

    if (amd64_syscall_p (details, &syscall_length))
      buf[details->opcode_offset + syscall_length] = NOP_OPCODE;
  }

  /* Modify the insn to cope with the address where it will be executed from.
     In particular, handle any rip-relative addressing.  */
  fixup_displaced_copy (gdbarch, dsc.get (), from, to, regs);

  write_memory (to, buf, len);

  displaced_debug_printf ("copy %s->%s: %s",
			  paddress (gdbarch, from), paddress (gdbarch, to),
			  bytes_to_string (buf, len).c_str ());

  /* This is a work around for a problem with g++ 4.8.  */
  return displaced_step_copy_insn_closure_up (dsc.release ());
}

static int
amd64_absolute_jmp_p (const struct amd64_insn *details)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (insn[0] == 0xff)
    {
      /* jump near, absolute indirect (/4) */
      if ((insn[1] & 0x38) == 0x20)
	return 1;

      /* jump far, absolute indirect (/5) */
      if ((insn[1] & 0x38) == 0x28)
	return 1;
    }

  return 0;
}

/* Return non-zero if the instruction DETAILS is a jump, zero otherwise.  */

static int
amd64_jmp_p (const struct amd64_insn *details)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  /* jump short, relative.  */
  if (insn[0] == 0xeb)
    return 1;

  /* jump near, relative.  */
  if (insn[0] == 0xe9)
    return 1;

  return amd64_absolute_jmp_p (details);
}

static int
amd64_absolute_call_p (const struct amd64_insn *details)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (insn[0] == 0xff)
    {
      /* Call near, absolute indirect (/2) */
      if ((insn[1] & 0x38) == 0x10)
	return 1;

      /* Call far, absolute indirect (/3) */
      if ((insn[1] & 0x38) == 0x18)
	return 1;
    }

  return 0;
}

static int
amd64_ret_p (const struct amd64_insn *details)
{
  /* NOTE: gcc can emit "repz ; ret".  */
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  switch (insn[0])
    {
    case 0xc2: /* ret near, pop N bytes */
    case 0xc3: /* ret near */
    case 0xca: /* ret far, pop N bytes */
    case 0xcb: /* ret far */
    case 0xcf: /* iret */
      return 1;

    default:
      return 0;
    }
}

static int
amd64_call_p (const struct amd64_insn *details)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (amd64_absolute_call_p (details))
    return 1;

  /* call near, relative */
  if (insn[0] == 0xe8)
    return 1;

  return 0;
}

/* Return non-zero if INSN is a system call, and set *LENGTHP to its
   length in bytes.  Otherwise, return zero.  */

static int
amd64_syscall_p (const struct amd64_insn *details, int *lengthp)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (insn[0] == 0x0f && insn[1] == 0x05)
    {
      *lengthp = 2;
      return 1;
    }

  return 0;
}

/* Classify the instruction at ADDR using PRED.
   Throw an error if the memory can't be read.  */

static int
amd64_classify_insn_at (struct gdbarch *gdbarch, CORE_ADDR addr,
			int (*pred) (const struct amd64_insn *))
{
  struct amd64_insn details;

  gdb::byte_vector buf (gdbarch_max_insn_length (gdbarch));

  read_code (addr, buf.data (), buf.size ());
  amd64_get_insn_details (buf.data (), &details);

  int classification = pred (&details);

  return classification;
}

/* The gdbarch insn_is_call method.  */

static int
amd64_insn_is_call (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  return amd64_classify_insn_at (gdbarch, addr, amd64_call_p);
}

/* The gdbarch insn_is_ret method.  */

static int
amd64_insn_is_ret (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  return amd64_classify_insn_at (gdbarch, addr, amd64_ret_p);
}

/* The gdbarch insn_is_jump method.  */

static int
amd64_insn_is_jump (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  return amd64_classify_insn_at (gdbarch, addr, amd64_jmp_p);
}

/* Fix up the state of registers and memory after having single-stepped
   a displaced instruction.  */

void
amd64_displaced_step_fixup (struct gdbarch *gdbarch,
			    struct displaced_step_copy_insn_closure *dsc_,
			    CORE_ADDR from, CORE_ADDR to,
			    struct regcache *regs, bool completed_p)
{
  amd64_displaced_step_copy_insn_closure *dsc
    = (amd64_displaced_step_copy_insn_closure *) dsc_;
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  /* The offset we applied to the instruction's address.  */
  ULONGEST insn_offset = to - from;
  gdb_byte *insn = dsc->insn_buf.data ();
  const struct amd64_insn *insn_details = &dsc->insn_details;

  displaced_debug_printf ("fixup (%s, %s), insn = 0x%02x 0x%02x ...",
			  paddress (gdbarch, from), paddress (gdbarch, to),
			  insn[0], insn[1]);

  /* If we used a tmp reg, restore it.  */

  if (dsc->tmp_used)
    {
      displaced_debug_printf ("restoring reg %d to %s",
			      dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save));
      regcache_cooked_write_unsigned (regs, dsc->tmp_regno, dsc->tmp_save);
    }

  /* The list of issues to contend with here is taken from
     resume_execution in arch/x86/kernel/kprobes.c, Linux 2.6.28.
     Yay for Free Software!  */

  /* Relocate the %rip back to the program's instruction stream,
     if necessary.  */

  /* Except in the case of absolute or indirect jump or call
     instructions, or a return instruction, the new rip is relative to
     the displaced instruction; make it relative to the original insn.
     Well, signal handler returns don't need relocation either, but we use the
     value of %rip to recognize those; see below.  */
  if (!completed_p
      || (!amd64_absolute_jmp_p (insn_details)
	  && !amd64_absolute_call_p (insn_details)
	  && !amd64_ret_p (insn_details)))
    {
      int insn_len;

      CORE_ADDR pc = regcache_read_pc (regs);

      /* A signal trampoline system call changes the %rip, resuming
	 execution of the main program after the signal handler has
	 returned.  That makes them like 'return' instructions; we
	 shouldn't relocate %rip.

	 But most system calls don't, and we do need to relocate %rip.

	 Our heuristic for distinguishing these cases: if stepping
	 over the system call instruction left control directly after
	 the instruction, then we relocate --- control almost certainly
	 doesn't belong in the displaced copy.  Otherwise, we assume
	 the instruction has put control where it belongs, and leave
	 it unrelocated.  Goodness help us if there are PC-relative
	 system calls.  */
      if (amd64_syscall_p (insn_details, &insn_len)
	  /* GDB can get control back after the insn after the syscall.
	     Presumably this is a kernel bug.  Fixup ensures it's a nop, we
	     add one to the length for it.  */
	  && (pc < to || pc > (to + insn_len + 1)))
	displaced_debug_printf ("syscall changed %%rip; not relocating");
      else
	{
	  CORE_ADDR rip = pc - insn_offset;

	  /* If we just stepped over a breakpoint insn, we don't backup
	     the pc on purpose; this is to match behaviour without
	     stepping.  */

	  regcache_write_pc (regs, rip);

	  displaced_debug_printf ("relocated %%rip from %s to %s",
				  paddress (gdbarch, pc),
				  paddress (gdbarch, rip));
	}
    }

  /* If the instruction was PUSHFL, then the TF bit will be set in the
     pushed value, and should be cleared.  We'll leave this for later,
     since GDB already messes up the TF flag when stepping over a
     pushfl.  */

  /* If the instruction was a call, the return address now atop the
     stack is the address following the copied instruction.  We need
     to make it the address following the original instruction.  */
  if (completed_p && amd64_call_p (insn_details))
    {
      ULONGEST rsp;
      ULONGEST retaddr;
      const ULONGEST retaddr_len = 8;

      regcache_cooked_read_unsigned (regs, AMD64_RSP_REGNUM, &rsp);
      retaddr = read_memory_unsigned_integer (rsp, retaddr_len, byte_order);
      retaddr = (retaddr - insn_offset) & 0xffffffffffffffffULL;
      write_memory_unsigned_integer (rsp, retaddr_len, byte_order, retaddr);

      displaced_debug_printf ("relocated return addr at %s to %s",
			      paddress (gdbarch, rsp),
			      paddress (gdbarch, retaddr));
    }
}
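
/* For illustration (editor's note, not part of the original source):
   after stepping a 5-byte relative call that was copied from FROM to
   TO, %rip is TO + 5 + rel32 and the pushed return address is TO + 5.
   Subtracting insn_offset (TO - FROM) above rewrites these to
   FROM + 5 + rel32 (the real callee) and FROM + 5 (the real return
   site).  */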

/* If the instruction INSN uses RIP-relative addressing, return the
   offset into the raw INSN where the displacement to be adjusted is
   found.  Returns 0 if the instruction doesn't use RIP-relative
   addressing.  */

static int
rip_relative_offset (struct amd64_insn *insn)
{
  if (insn->modrm_offset != -1)
    {
      gdb_byte modrm = insn->raw_insn[insn->modrm_offset];

      if ((modrm & 0xc7) == 0x05)
	{
	  /* The displacement is found right after the ModRM byte.  */
	  return insn->modrm_offset + 1;
	}
    }

  return 0;
}

static void
append_insns (CORE_ADDR *to, ULONGEST len, const gdb_byte *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}

static void
amd64_relocate_instruction (struct gdbarch *gdbarch,
			    CORE_ADDR *to, CORE_ADDR oldloc)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = gdbarch_max_insn_length (gdbarch);
  /* Extra space for sentinels.  */
  int fixup_sentinel_space = len;
  gdb::byte_vector buf (len + fixup_sentinel_space);
  struct amd64_insn insn_details;
  int offset = 0;
  LONGEST rel32, newrel;
  gdb_byte *insn;
  int insn_length;

  read_memory (oldloc, buf.data (), len);

  /* Set up the sentinel space so we don't have to worry about running
     off the end of the buffer.  An excessive number of leading prefixes
     could otherwise cause this.  */
  memset (buf.data () + len, 0, fixup_sentinel_space);

  insn = buf.data ();
  amd64_get_insn_details (insn, &insn_details);

  insn_length = gdb_buffered_insn_length (gdbarch, insn, len, oldloc);

  /* Skip legacy instruction prefixes.  */
  insn = amd64_skip_prefixes (insn);

  /* Adjust calls with 32-bit relative addresses as push/jump, with
     the address pushed being the location where the original call in
     the user program would return to.  */
  if (insn[0] == 0xe8)
    {
      gdb_byte push_buf[32];
      CORE_ADDR ret_addr;
      int i = 0;

      /* Where "ret" in the original code will return to.  */
      ret_addr = oldloc + insn_length;

      /* If pushing an address higher than or equal to 0x80000000,
	 avoid 'pushq', as that sign extends its 32-bit operand, which
	 would be incorrect.  */
      if (ret_addr <= 0x7fffffff)
	{
	  push_buf[0] = 0x68; /* pushq $...  */
	  store_unsigned_integer (&push_buf[1], 4, byte_order, ret_addr);
	  i = 5;
	}
      else
	{
	  push_buf[i++] = 0x48; /* sub $0x8,%rsp */
	  push_buf[i++] = 0x83;
	  push_buf[i++] = 0xec;
	  push_buf[i++] = 0x08;

	  push_buf[i++] = 0xc7; /* movl $imm,(%rsp) */
	  push_buf[i++] = 0x04;
	  push_buf[i++] = 0x24;
	  store_unsigned_integer (&push_buf[i], 4, byte_order,
				  ret_addr & 0xffffffff);
	  i += 4;

	  push_buf[i++] = 0xc7; /* movl $imm,4(%rsp) */
	  push_buf[i++] = 0x44;
	  push_buf[i++] = 0x24;
	  push_buf[i++] = 0x04;
	  store_unsigned_integer (&push_buf[i], 4, byte_order,
				  ret_addr >> 32);
	  i += 4;
	}
      gdb_assert (i <= sizeof (push_buf));
      /* Push the push.  */
      append_insns (to, i, push_buf);

      /* Convert the relative call to a relative jump.  */
      insn[0] = 0xe9;

      /* Adjust the destination offset.  */
      rel32 = extract_signed_integer (insn + 1, 4, byte_order);
      newrel = (oldloc - *to) + rel32;
      store_signed_integer (insn + 1, 4, byte_order, newrel);

      displaced_debug_printf ("adjusted insn rel32=%s at %s to rel32=%s at %s",
			      hex_string (rel32), paddress (gdbarch, oldloc),
			      hex_string (newrel), paddress (gdbarch, *to));

      /* Write the adjusted jump into its displaced location.  */
      append_insns (to, 5, insn);
      return;
    }

  offset = rip_relative_offset (&insn_details);
  if (!offset)
    {
      /* Adjust jumps with 32-bit relative addresses.  Calls are
	 already handled above.  */
      if (insn[0] == 0xe9)
	offset = 1;
      /* Adjust conditional jumps.  */
      else if (insn[0] == 0x0f && (insn[1] & 0xf0) == 0x80)
	offset = 2;
    }

  if (offset)
    {
      rel32 = extract_signed_integer (insn + offset, 4, byte_order);
      newrel = (oldloc - *to) + rel32;
      store_signed_integer (insn + offset, 4, byte_order, newrel);
      displaced_debug_printf ("adjusted insn rel32=%s at %s to rel32=%s at %s",
			      hex_string (rel32), paddress (gdbarch, oldloc),
			      hex_string (newrel), paddress (gdbarch, *to));
    }

  /* Write the adjusted instruction into its displaced location.  */
  append_insns (to, insn_length, buf.data ());
}
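
/* For illustration (editor's note, not part of the original source):
   relocating e8 10 00 00 00 (call with rel32 = 0x10) from OLDLOC
   emits a push of OLDLOC + 5 at *TO, then a jmp whose rel32 is
   rebiased by (OLDLOC - *TO) so it still reaches the original callee;
   the pushed address makes the callee's ret resume at the original
   call site.  */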


/* The maximum number of saved registers.  This should include %rip.  */
#define AMD64_NUM_SAVED_REGS	AMD64_NUM_GREGS

struct amd64_frame_cache
{
  /* Base address.  */
  CORE_ADDR base;
  int base_p;
  CORE_ADDR sp_offset;
  CORE_ADDR pc;

  /* Saved registers.  */
  CORE_ADDR saved_regs[AMD64_NUM_SAVED_REGS];
  CORE_ADDR saved_sp;
  int saved_sp_reg;

  /* Do we have a frame?  */
  int frameless_p;
};

/* Initialize a frame cache.  */

static void
amd64_init_frame_cache (struct amd64_frame_cache *cache)
{
  int i;

  /* Base address.  */
  cache->base = 0;
  cache->base_p = 0;
  cache->sp_offset = -8;
  cache->pc = 0;

  /* Saved registers.  We initialize these to -1 since zero is a valid
     offset (that's where %rbp is supposed to be stored).
     The values start out as being offsets, and are later converted to
     addresses (at which point -1 is interpreted as an address, still meaning
     "invalid").  */
  for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
    cache->saved_regs[i] = -1;
  cache->saved_sp = 0;
  cache->saved_sp_reg = -1;

  /* Frameless until proven otherwise.  */
  cache->frameless_p = 1;
}

/* Allocate and initialize a frame cache.  */

static struct amd64_frame_cache *
amd64_alloc_frame_cache (void)
{
  struct amd64_frame_cache *cache;

  cache = FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache);
  amd64_init_frame_cache (cache);
  return cache;
}
1954 /* GCC 4.4 and later can put code in the prologue to realign the
1955 stack pointer. Check whether PC points to such code, and update
1956 CACHE accordingly. Return the first instruction after the code
1957 sequence or CURRENT_PC, whichever is smaller. If we don't
1958 recognize the code, return PC. */
1960 static CORE_ADDR
1961 amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
1962 struct amd64_frame_cache *cache)
1964 /* There are 2 code sequences to re-align stack before the frame
1965 gets set up:
1967 1. Use a caller-saved register:
1969 leaq 8(%rsp), %reg
1970 andq $-XXX, %rsp
1971 pushq -8(%reg)
1973 2. Use a callee-saved register:
1975 pushq %reg
1976 leaq 16(%rsp), %reg
1977 andq $-XXX, %rsp
1978 pushq -8(%reg)
1980 "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:
1982 0x48 0x83 0xe4 0xf0 andq $-16, %rsp
1983 0x48 0x81 0xe4 0x00 0xff 0xff 0xff andq $-256, %rsp
1986 gdb_byte buf[18];
1987 int reg, r;
1988 int offset, offset_and;
1990 if (target_read_code (pc, buf, sizeof buf))
1991 return pc;
1993 /* Check for the caller-saved register case. The first instruction
1994 has to be "leaq 8(%rsp), %reg". */
1995 if ((buf[0] & 0xfb) == 0x48
1996 && buf[1] == 0x8d
1997 && buf[3] == 0x24
1998 && buf[4] == 0x8)
2000 /* MOD must be binary 01 and R/M must be binary 100. */
2001 if ((buf[2] & 0xc7) != 0x44)
2002 return pc;
2004 /* REG has register number. */
2005 reg = (buf[2] >> 3) & 7;
2007 /* Check the REX.R bit. */
2008 if (buf[0] == 0x4c)
2009 reg += 8;
2011 offset = 5;
2013 else
2015 /* Check for the callee-saved register case. The first
2016 instruction has to be "pushq %reg". */
2017 reg = 0;
2018 if ((buf[0] & 0xf8) == 0x50)
2019 offset = 0;
2020 else if ((buf[0] & 0xf6) == 0x40
2021 && (buf[1] & 0xf8) == 0x50)
2023 /* Check the REX.B bit. */
2024 if ((buf[0] & 1) != 0)
2025 reg = 8;
2027 offset = 1;
2029 else
2030 return pc;
2032 /* Get register. */
2033 reg += buf[offset] & 0x7;
2035 offset++;
2037 /* The next instruction has to be "leaq 16(%rsp), %reg". */
2038 if ((buf[offset] & 0xfb) != 0x48
2039 || buf[offset + 1] != 0x8d
2040 || buf[offset + 3] != 0x24
2041 || buf[offset + 4] != 0x10)
2042 return pc;
2044 /* MOD must be binary 01 and R/M must be binary 100. */
2045 if ((buf[offset + 2] & 0xc7) != 0x44)
2046 return pc;
2048 /* REG has register number. */
2049 r = (buf[offset + 2] >> 3) & 7;
2051 /* Check the REX.R bit. */
2052 if (buf[offset] == 0x4c)
2053 r += 8;
2055 /* Registers in pushq and leaq have to be the same. */
2056 if (reg != r)
2057 return pc;
2059 offset += 5;
2062 /* Register can't be %rsp or %rbp. */
2063 if (reg == 4 || reg == 5)
2064 return pc;
2066 /* The next instruction has to be "andq $-XXX, %rsp". */
2067 if (buf[offset] != 0x48
2068 || buf[offset + 2] != 0xe4
2069 || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
2070 return pc;
2072 offset_and = offset;
2073 offset += buf[offset + 1] == 0x81 ? 7 : 4;
2075 /* The next instruction has to be "pushq -8(%reg)". */
2076 r = 0;
2077 if (buf[offset] == 0xff)
2078 offset++;
2079 else if ((buf[offset] & 0xf6) == 0x40
2080 && buf[offset + 1] == 0xff)
2082 /* Check the REX.B bit. */
2083 if ((buf[offset] & 0x1) != 0)
2084 r = 8;
2085 offset += 2;
2087 else
2088 return pc;
2090 /* 8-bit -8 is 0xf8. REG must be binary 110 and MOD must be binary
2091 01. */
2092 if (buf[offset + 1] != 0xf8
2093 || (buf[offset] & 0xf8) != 0x70)
2094 return pc;
2096 /* R/M has register. */
2097 r += buf[offset] & 7;
2099 /* Registers in leaq and pushq have to be the same. */
2100 if (reg != r)
2101 return pc;
2103 if (current_pc > pc + offset_and)
2104 cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);
2106 return std::min (pc + offset + 2, current_pc);
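/* One possible encoding of sequence 1, using %r10 as the scratch
   register (a sketch; the compiler may pick any register other than
   %rsp/%rbp):

	4c 8d 54 24 08		leaq   0x8(%rsp),%r10
	48 83 e4 f0		andq   $-16,%rsp
	41 ff 72 f8		pushq  -0x8(%r10)

   Here buf[0] is 0x4c, so the REX.R check turns REG 2 from the ModRM
   byte 0x54 into register 10, and the final return value is
   pc + 11 + 2 = pc + 13, the end of the three-instruction
   sequence.  */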
2109 /* Similar to amd64_analyze_stack_align for x32. */
2111 static CORE_ADDR
2112 amd64_x32_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
2113 struct amd64_frame_cache *cache)
2115 /* There are 2 code sequences to re-align stack before the frame
2116 gets set up:
2118 1. Use a caller-saved register:
2120 leaq 8(%rsp), %reg
2121 andq $-XXX, %rsp
2122 pushq -8(%reg)
2124 or
2126 [addr32] leal 8(%rsp), %reg
2127 andl $-XXX, %esp
2128 [addr32] pushq -8(%reg)
2130 2. Use a callee-saved register:
2132 pushq %reg
2133 leaq 16(%rsp), %reg
2134 andq $-XXX, %rsp
2135 pushq -8(%reg)
2137 or
2139 pushq %reg
2140 [addr32] leal 16(%rsp), %reg
2141 andl $-XXX, %esp
2142 [addr32] pushq -8(%reg)
2144 "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:
2146 0x48 0x83 0xe4 0xf0 andq $-16, %rsp
2147 0x48 0x81 0xe4 0x00 0xff 0xff 0xff andq $-256, %rsp
2149 "andl $-XXX, %esp" can be either 3 bytes or 6 bytes:
2151 0x83 0xe4 0xf0 andl $-16, %esp
2152 0x81 0xe4 0x00 0xff 0xff 0xff andl $-256, %esp
2155 gdb_byte buf[19];
2156 int reg, r;
2157 int offset, offset_and;
2159 if (target_read_memory (pc, buf, sizeof buf))
2160 return pc;
2162 /* Skip optional addr32 prefix. */
2163 offset = buf[0] == 0x67 ? 1 : 0;
2165 /* Check for the caller-saved register case. The first instruction
2166 has to be "leaq 8(%rsp), %reg" or "leal 8(%rsp), %reg". */
2167 if (((buf[offset] & 0xfb) == 0x48 || (buf[offset] & 0xfb) == 0x40)
2168 && buf[offset + 1] == 0x8d
2169 && buf[offset + 3] == 0x24
2170 && buf[offset + 4] == 0x8)
2172 /* MOD must be binary 01 and R/M must be binary 100. */
2173 if ((buf[offset + 2] & 0xc7) != 0x44)
2174 return pc;
2176 /* REG has register number. */
2177 reg = (buf[offset + 2] >> 3) & 7;
2179 /* Check the REX.R bit. */
2180 if ((buf[offset] & 0x4) != 0)
2181 reg += 8;
2183 offset += 5;
2185 else
2187 /* Check for the callee-saved register case. The first
2188 instruction has to be "pushq %reg". */
2189 reg = 0;
2190 if ((buf[offset] & 0xf6) == 0x40
2191 && (buf[offset + 1] & 0xf8) == 0x50)
2193 /* Check the REX.B bit. */
2194 if ((buf[offset] & 1) != 0)
2195 reg = 8;
2197 offset += 1;
2199 else if ((buf[offset] & 0xf8) != 0x50)
2200 return pc;
2202 /* Get register. */
2203 reg += buf[offset] & 0x7;
2205 offset++;
2207 /* Skip optional addr32 prefix. */
2208 if (buf[offset] == 0x67)
2209 offset++;
2211 /* The next instruction has to be "leaq 16(%rsp), %reg" or
2212 "leal 16(%rsp), %reg". */
2213 if (((buf[offset] & 0xfb) != 0x48 && (buf[offset] & 0xfb) != 0x40)
2214 || buf[offset + 1] != 0x8d
2215 || buf[offset + 3] != 0x24
2216 || buf[offset + 4] != 0x10)
2217 return pc;
2219 /* MOD must be binary 01 and R/M must be binary 100. */
2220 if ((buf[offset + 2] & 0xc7) != 0x44)
2221 return pc;
2223 /* REG has register number. */
2224 r = (buf[offset + 2] >> 3) & 7;
2226 /* Check the REX.R bit. */
2227 if ((buf[offset] & 0x4) != 0)
2228 r += 8;
2230 /* Registers in pushq and leaq have to be the same. */
2231 if (reg != r)
2232 return pc;
2234 offset += 5;
2237 /* Register can't be %rsp or %rbp. */
2238 if (reg == 4 || reg == 5)
2239 return pc;
2241 /* The next instruction may be "andq $-XXX, %rsp" or
2242 "andl $-XXX, %esp". */
2243 if (buf[offset] != 0x48)
2244 offset--;
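/* The "andl" form has no REX prefix; with OFFSET backed up by one,
   buf[offset + 1] and buf[offset + 2] below line up the same way for
   both the "andq" and "andl" encodings.  */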
2246 if (buf[offset + 2] != 0xe4
2247 || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
2248 return pc;
2250 offset_and = offset;
2251 offset += buf[offset + 1] == 0x81 ? 7 : 4;
2253 /* Skip optional addr32 prefix. */
2254 if (buf[offset] == 0x67)
2255 offset++;
2257 /* The next instruction has to be "pushq -8(%reg)". */
2258 r = 0;
2259 if (buf[offset] == 0xff)
2260 offset++;
2261 else if ((buf[offset] & 0xf6) == 0x40
2262 && buf[offset + 1] == 0xff)
2264 /* Check the REX.B bit. */
2265 if ((buf[offset] & 0x1) != 0)
2266 r = 8;
2267 offset += 2;
2269 else
2270 return pc;
2272 /* 8-bit -8 is 0xf8. REG must be binary 110 and MOD must be binary
2273 01. */
2274 if (buf[offset + 1] != 0xf8
2275 || (buf[offset] & 0xf8) != 0x70)
2276 return pc;
2278 /* R/M has register. */
2279 r += buf[offset] & 7;
2281 /* Registers in leaq and pushq have to be the same. */
2282 if (reg != r)
2283 return pc;
2285 if (current_pc > pc + offset_and)
2286 cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);
2288 return std::min (pc + offset + 2, current_pc);
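/* One possible encoding of the x32 variant of sequence 1, again
   using %r10 (a sketch):

	67 44 8d 54 24 08	[addr32] leal  0x8(%rsp),%r10d
	83 e4 f0		andl   $-16,%esp
	67 41 ff 72 f8		[addr32] pushq -0x8(%r10)

   The "andl" carries no REX byte, so the OFFSET adjustment above
   realigns the checks, and the function returns
   pc + 12 + 2 = pc + 14, the total length of the sequence.  */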
2291 /* Do a limited analysis of the prologue at PC and update CACHE
2292 accordingly. Bail out early if CURRENT_PC is reached. Return the
2293 address where the analysis stopped.
2295 We will handle only functions beginning with:
2297 pushq %rbp 0x55
2298 movq %rsp, %rbp 0x48 0x89 0xe5 (or 0x48 0x8b 0xec)
2300 or (for the X32 ABI):
2302 pushq %rbp 0x55
2303 movl %esp, %ebp 0x89 0xe5 (or 0x8b 0xec)
2305 An `endbr64` instruction may precede these sequences; it is
2306 skipped if present.
2308 Any function that doesn't start with one of these sequences will be
2309 assumed to have no prologue and thus no valid frame pointer in
2310 %rbp. */
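/* For instance, a CET-enabled GCC build would typically begin a
   function with (a sketch):

	f3 0f 1e fa		endbr64
	55			pushq  %rbp
	48 89 e5		movq   %rsp,%rbp

   for which the analysis below skips the endbr64, records the saved
   %rbp, clears frameless_p and returns PC + 8.  */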
2312 static CORE_ADDR
2313 amd64_analyze_prologue (struct gdbarch *gdbarch,
2314 CORE_ADDR pc, CORE_ADDR current_pc,
2315 struct amd64_frame_cache *cache)
2317 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2318 /* The `endbr64` instruction. */
2319 static const gdb_byte endbr64[4] = { 0xf3, 0x0f, 0x1e, 0xfa };
2320 /* There are two variations of movq %rsp, %rbp. */
2321 static const gdb_byte mov_rsp_rbp_1[3] = { 0x48, 0x89, 0xe5 };
2322 static const gdb_byte mov_rsp_rbp_2[3] = { 0x48, 0x8b, 0xec };
2323 /* Ditto for movl %esp, %ebp. */
2324 static const gdb_byte mov_esp_ebp_1[2] = { 0x89, 0xe5 };
2325 static const gdb_byte mov_esp_ebp_2[2] = { 0x8b, 0xec };
2327 gdb_byte buf[3];
2328 gdb_byte op;
2330 if (current_pc <= pc)
2331 return current_pc;
2333 if (gdbarch_ptr_bit (gdbarch) == 32)
2334 pc = amd64_x32_analyze_stack_align (pc, current_pc, cache);
2335 else
2336 pc = amd64_analyze_stack_align (pc, current_pc, cache);
2338 op = read_code_unsigned_integer (pc, 1, byte_order);
2340 /* Check for the `endbr64` instruction; skip it if found. */
2341 if (op == endbr64[0])
2343 read_code (pc + 1, buf, 3);
2345 if (memcmp (buf, &endbr64[1], 3) == 0)
2346 pc += 4;
2348 op = read_code_unsigned_integer (pc, 1, byte_order);
2351 if (current_pc <= pc)
2352 return current_pc;
2354 if (op == 0x55) /* pushq %rbp */
2356 /* Take into account that we've executed the `pushq %rbp' that
2357 starts this instruction sequence. */
2358 cache->saved_regs[AMD64_RBP_REGNUM] = 0;
2359 cache->sp_offset += 8;
2361 /* If that's all, return now. */
2362 if (current_pc <= pc + 1)
2363 return current_pc;
2365 read_code (pc + 1, buf, 3);
2367 /* Check for `movq %rsp, %rbp'. */
2368 if (memcmp (buf, mov_rsp_rbp_1, 3) == 0
2369 || memcmp (buf, mov_rsp_rbp_2, 3) == 0)
2371 /* OK, we actually have a frame. */
2372 cache->frameless_p = 0;
2373 return pc + 4;
2376 /* For X32, also check for `movl %esp, %ebp'. */
2377 if (gdbarch_ptr_bit (gdbarch) == 32)
2379 if (memcmp (buf, mov_esp_ebp_1, 2) == 0
2380 || memcmp (buf, mov_esp_ebp_2, 2) == 0)
2382 /* OK, we actually have a frame. */
2383 cache->frameless_p = 0;
2384 return pc + 3;
2388 return pc + 1;
2391 return pc;
2394 /* Work around false termination of the prologue - GCC PR debug/48827.
2396 START_PC is the first instruction of a function; PC is the most
2397 advanced prologue address determined so far. Return PC if there is nothing to do.
2399 84 c0 test %al,%al
2400 74 23 je after
2401 <-- here is 0 lines advance - the false prologue end marker.
2402 0f 29 85 70 ff ff ff movaps %xmm0,-0x90(%rbp)
2403 0f 29 4d 80 movaps %xmm1,-0x80(%rbp)
2404 0f 29 55 90 movaps %xmm2,-0x70(%rbp)
2405 0f 29 5d a0 movaps %xmm3,-0x60(%rbp)
2406 0f 29 65 b0 movaps %xmm4,-0x50(%rbp)
2407 0f 29 6d c0 movaps %xmm5,-0x40(%rbp)
2408 0f 29 75 d0 movaps %xmm6,-0x30(%rbp)
2409 0f 29 7d e0 movaps %xmm7,-0x20(%rbp)
2410 after: */
2412 static CORE_ADDR
2413 amd64_skip_xmm_prologue (CORE_ADDR pc, CORE_ADDR start_pc)
2415 struct symtab_and_line start_pc_sal, next_sal;
2416 gdb_byte buf[4 + 8 * 7];
2417 int offset, xmmreg;
2419 if (pc == start_pc)
2420 return pc;
2422 start_pc_sal = find_pc_sect_line (start_pc, NULL, 0);
2423 if (start_pc_sal.symtab == NULL
2424 || producer_is_gcc_ge_4 (start_pc_sal.symtab->compunit ()
2425 ->producer ()) < 6
2426 || start_pc_sal.pc != start_pc || pc >= start_pc_sal.end)
2427 return pc;
2429 next_sal = find_pc_sect_line (start_pc_sal.end, NULL, 0);
2430 if (next_sal.line != start_pc_sal.line)
2431 return pc;
2433 /* START_PC can be from overlaid memory; that case is ignored here. */
2434 if (target_read_code (next_sal.pc - 4, buf, sizeof (buf)) != 0)
2435 return pc;
2437 /* test %al,%al */
2438 if (buf[0] != 0x84 || buf[1] != 0xc0)
2439 return pc;
2440 /* je AFTER */
2441 if (buf[2] != 0x74)
2442 return pc;
2444 offset = 4;
2445 for (xmmreg = 0; xmmreg < 8; xmmreg++)
2448 /* 0x0f 0x29 0b??xxx101 movaps %xmm<xxx>,-0x??(%rbp) */
2448 if (buf[offset] != 0x0f || buf[offset + 1] != 0x29
2449 || (buf[offset + 2] & 0x3f) != (xmmreg << 3 | 0x5))
2450 return pc;
2452 /* 0b01?????? */
2453 if ((buf[offset + 2] & 0xc0) == 0x40)
2455 /* 8-bit displacement. */
2456 offset += 4;
2458 /* 0b10?????? */
2459 else if ((buf[offset + 2] & 0xc0) == 0x80)
2461 /* 32-bit displacement. */
2462 offset += 7;
2464 else
2465 return pc;
2468 /* je AFTER */
2469 if (offset - 4 != buf[3])
2470 return pc;
2472 return next_sal.end;
2475 /* Return PC of first real instruction. */
2477 static CORE_ADDR
2478 amd64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR start_pc)
2480 struct amd64_frame_cache cache;
2481 CORE_ADDR pc;
2482 CORE_ADDR func_addr;
2484 if (find_pc_partial_function (start_pc, NULL, &func_addr, NULL))
2486 CORE_ADDR post_prologue_pc
2487 = skip_prologue_using_sal (gdbarch, func_addr);
2488 struct compunit_symtab *cust = find_pc_compunit_symtab (func_addr);
2490 /* The LLVM backend (Clang/Flang) always emits a line note before the
2491 prologue and another one after. We trust clang and newer Intel
2492 compilers to emit usable line notes. */
2493 if (post_prologue_pc
2494 && (cust != NULL
2495 && cust->producer () != nullptr
2496 && (producer_is_llvm (cust->producer ())
2497 || producer_is_icc_ge_19 (cust->producer ()))))
2498 return std::max (start_pc, post_prologue_pc);
2501 amd64_init_frame_cache (&cache);
2502 pc = amd64_analyze_prologue (gdbarch, start_pc, 0xffffffffffffffffLL,
2503 &cache);
2504 if (cache.frameless_p)
2505 return start_pc;
2507 return amd64_skip_xmm_prologue (pc, start_pc);
2511 /* Normal frames. */
2513 static void
2514 amd64_frame_cache_1 (frame_info_ptr this_frame,
2515 struct amd64_frame_cache *cache)
2517 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2518 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2519 gdb_byte buf[8];
2520 int i;
2522 cache->pc = get_frame_func (this_frame);
2523 if (cache->pc != 0)
2524 amd64_analyze_prologue (gdbarch, cache->pc, get_frame_pc (this_frame),
2525 cache);
2527 if (cache->frameless_p)
2529 /* We didn't find a valid frame. If we're at the start of a
2530 function, or somewhere halfway through its prologue, the function's
2531 frame probably hasn't been fully set up yet. Try to
2532 reconstruct the base address for the stack frame by looking
2533 at the stack pointer. For truly "frameless" functions this
2534 might work too. */
2536 if (cache->saved_sp_reg != -1)
2538 /* Stack pointer has been saved. */
2539 get_frame_register (this_frame, cache->saved_sp_reg, buf);
2540 cache->saved_sp = extract_unsigned_integer (buf, 8, byte_order);
2542 /* We're halfway aligning the stack. */
2543 cache->base = ((cache->saved_sp - 8) & 0xfffffffffffffff0LL) - 8;
2544 cache->saved_regs[AMD64_RIP_REGNUM] = cache->saved_sp - 8;
2546 /* This will be added back below. */
2547 cache->saved_regs[AMD64_RIP_REGNUM] -= cache->base;
2549 else
2551 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2552 cache->base = extract_unsigned_integer (buf, 8, byte_order)
2553 + cache->sp_offset;
2556 else
2558 get_frame_register (this_frame, AMD64_RBP_REGNUM, buf);
2559 cache->base = extract_unsigned_integer (buf, 8, byte_order);
2562 /* Now that we have the base address for the stack frame we can
2563 calculate the value of %rsp in the calling frame. */
2564 cache->saved_sp = cache->base + 16;
2566 /* For normal frames, %rip is stored at 8(%rbp). If we don't have a
2567 frame we find it at the same offset from the reconstructed base
2568 address. If we're halfway aligning the stack, %rip is handled
2569 differently (see above). */
2570 if (!cache->frameless_p || cache->saved_sp_reg == -1)
2571 cache->saved_regs[AMD64_RIP_REGNUM] = 8;
2573 /* Adjust all the saved registers such that they contain addresses
2574 instead of offsets. */
2575 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
2576 if (cache->saved_regs[i] != -1)
2577 cache->saved_regs[i] += cache->base;
2579 cache->base_p = 1;
2582 static struct amd64_frame_cache *
2583 amd64_frame_cache (frame_info_ptr this_frame, void **this_cache)
2585 struct amd64_frame_cache *cache;
2587 if (*this_cache)
2588 return (struct amd64_frame_cache *) *this_cache;
2590 cache = amd64_alloc_frame_cache ();
2591 *this_cache = cache;
2595 amd64_frame_cache_1 (this_frame, cache);
2597 catch (const gdb_exception_error &ex)
2599 if (ex.error != NOT_AVAILABLE_ERROR)
2600 throw;
2603 return cache;
2606 static enum unwind_stop_reason
2607 amd64_frame_unwind_stop_reason (frame_info_ptr this_frame,
2608 void **this_cache)
2610 struct amd64_frame_cache *cache =
2611 amd64_frame_cache (this_frame, this_cache);
2613 if (!cache->base_p)
2614 return UNWIND_UNAVAILABLE;
2616 /* This marks the outermost frame. */
2617 if (cache->base == 0)
2618 return UNWIND_OUTERMOST;
2620 return UNWIND_NO_REASON;
2623 static void
2624 amd64_frame_this_id (frame_info_ptr this_frame, void **this_cache,
2625 struct frame_id *this_id)
2627 struct amd64_frame_cache *cache =
2628 amd64_frame_cache (this_frame, this_cache);
2630 if (!cache->base_p)
2631 (*this_id) = frame_id_build_unavailable_stack (cache->pc);
2632 else if (cache->base == 0)
2634 /* This marks the outermost frame. */
2635 return;
2637 else
2638 (*this_id) = frame_id_build (cache->base + 16, cache->pc);
2641 static struct value *
2642 amd64_frame_prev_register (frame_info_ptr this_frame, void **this_cache,
2643 int regnum)
2645 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2646 struct amd64_frame_cache *cache =
2647 amd64_frame_cache (this_frame, this_cache);
2649 gdb_assert (regnum >= 0);
2651 if (regnum == gdbarch_sp_regnum (gdbarch) && cache->saved_sp)
2652 return frame_unwind_got_constant (this_frame, regnum, cache->saved_sp);
2654 if (regnum < AMD64_NUM_SAVED_REGS && cache->saved_regs[regnum] != -1)
2655 return frame_unwind_got_memory (this_frame, regnum,
2656 cache->saved_regs[regnum]);
2658 return frame_unwind_got_register (this_frame, regnum, regnum);
2661 static const struct frame_unwind amd64_frame_unwind =
2663 "amd64 prologue",
2664 NORMAL_FRAME,
2665 amd64_frame_unwind_stop_reason,
2666 amd64_frame_this_id,
2667 amd64_frame_prev_register,
2668 NULL,
2669 default_frame_sniffer
2672 /* Generate a bytecode expression to get the value of the saved PC. */
2674 static void
2675 amd64_gen_return_address (struct gdbarch *gdbarch,
2676 struct agent_expr *ax, struct axs_value *value,
2677 CORE_ADDR scope)
2679 /* The following sequence assumes the traditional use of the base
2680 register. */
2681 ax_reg (ax, AMD64_RBP_REGNUM);
2682 ax_const_l (ax, 8);
2683 ax_simple (ax, aop_add);
2684 value->type = register_type (gdbarch, AMD64_RIP_REGNUM);
2685 value->kind = axs_lvalue_memory;
2689 /* Signal trampolines. */
2691 /* FIXME: kettenis/20030419: Perhaps we can unify the 32-bit and
2692 64-bit variants. This would require using identical frame caches
2693 on both platforms. */
2695 static struct amd64_frame_cache *
2696 amd64_sigtramp_frame_cache (frame_info_ptr this_frame, void **this_cache)
2698 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2699 i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
2700 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2701 struct amd64_frame_cache *cache;
2702 CORE_ADDR addr;
2703 gdb_byte buf[8];
2704 int i;
2706 if (*this_cache)
2707 return (struct amd64_frame_cache *) *this_cache;
2709 cache = amd64_alloc_frame_cache ();
2713 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2714 cache->base = extract_unsigned_integer (buf, 8, byte_order) - 8;
2716 addr = tdep->sigcontext_addr (this_frame);
2717 gdb_assert (tdep->sc_reg_offset);
2718 gdb_assert (tdep->sc_num_regs <= AMD64_NUM_SAVED_REGS);
2719 for (i = 0; i < tdep->sc_num_regs; i++)
2720 if (tdep->sc_reg_offset[i] != -1)
2721 cache->saved_regs[i] = addr + tdep->sc_reg_offset[i];
2723 cache->base_p = 1;
2725 catch (const gdb_exception_error &ex)
2727 if (ex.error != NOT_AVAILABLE_ERROR)
2728 throw;
2731 *this_cache = cache;
2732 return cache;
2735 static enum unwind_stop_reason
2736 amd64_sigtramp_frame_unwind_stop_reason (frame_info_ptr this_frame,
2737 void **this_cache)
2739 struct amd64_frame_cache *cache =
2740 amd64_sigtramp_frame_cache (this_frame, this_cache);
2742 if (!cache->base_p)
2743 return UNWIND_UNAVAILABLE;
2745 return UNWIND_NO_REASON;
2748 static void
2749 amd64_sigtramp_frame_this_id (frame_info_ptr this_frame,
2750 void **this_cache, struct frame_id *this_id)
2752 struct amd64_frame_cache *cache =
2753 amd64_sigtramp_frame_cache (this_frame, this_cache);
2755 if (!cache->base_p)
2756 (*this_id) = frame_id_build_unavailable_stack (get_frame_pc (this_frame));
2757 else if (cache->base == 0)
2759 /* This marks the outermost frame. */
2760 return;
2762 else
2763 (*this_id) = frame_id_build (cache->base + 16, get_frame_pc (this_frame));
2766 static struct value *
2767 amd64_sigtramp_frame_prev_register (frame_info_ptr this_frame,
2768 void **this_cache, int regnum)
2770 /* Make sure we've initialized the cache. */
2771 amd64_sigtramp_frame_cache (this_frame, this_cache);
2773 return amd64_frame_prev_register (this_frame, this_cache, regnum);
2776 static int
2777 amd64_sigtramp_frame_sniffer (const struct frame_unwind *self,
2778 frame_info_ptr this_frame,
2779 void **this_cache)
2781 gdbarch *arch = get_frame_arch (this_frame);
2782 i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (arch);
2784 /* We shouldn't even bother if we don't have a sigcontext_addr
2785 handler. */
2786 if (tdep->sigcontext_addr == NULL)
2787 return 0;
2789 if (tdep->sigtramp_p != NULL)
2791 if (tdep->sigtramp_p (this_frame))
2792 return 1;
2795 if (tdep->sigtramp_start != 0)
2797 CORE_ADDR pc = get_frame_pc (this_frame);
2799 gdb_assert (tdep->sigtramp_end != 0);
2800 if (pc >= tdep->sigtramp_start && pc < tdep->sigtramp_end)
2801 return 1;
2804 return 0;
2807 static const struct frame_unwind amd64_sigtramp_frame_unwind =
2809 "amd64 sigtramp",
2810 SIGTRAMP_FRAME,
2811 amd64_sigtramp_frame_unwind_stop_reason,
2812 amd64_sigtramp_frame_this_id,
2813 amd64_sigtramp_frame_prev_register,
2814 NULL,
2815 amd64_sigtramp_frame_sniffer
2819 static CORE_ADDR
2820 amd64_frame_base_address (frame_info_ptr this_frame, void **this_cache)
2822 struct amd64_frame_cache *cache =
2823 amd64_frame_cache (this_frame, this_cache);
2825 return cache->base;
2828 static const struct frame_base amd64_frame_base =
2830 &amd64_frame_unwind,
2831 amd64_frame_base_address,
2832 amd64_frame_base_address,
2833 amd64_frame_base_address
2836 /* Normal frames, but in a function epilogue. */
2838 /* Implement the stack_frame_destroyed_p gdbarch method.
2840 The epilogue is defined here as the 'ret' instruction, which will
2841 follow any instruction such as 'leave' or 'pop %rbp' that destroys
2842 the function's stack frame. */
2844 static int
2845 amd64_stack_frame_destroyed_p (struct gdbarch *gdbarch, CORE_ADDR pc)
2847 gdb_byte insn;
2849 if (target_read_memory (pc, &insn, 1))
2850 return 0; /* Can't read memory at pc. */
2852 if (insn != 0xc3) /* 'ret' instruction. */
2853 return 0;
2855 return 1;
2858 static int
2859 amd64_epilogue_frame_sniffer_1 (const struct frame_unwind *self,
2860 frame_info_ptr this_frame,
2861 void **this_prologue_cache, bool override_p)
2863 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2864 CORE_ADDR pc = get_frame_pc (this_frame);
2866 if (frame_relative_level (this_frame) != 0)
2867 /* We're not in the innermost frame, so assume we're not in an epilogue. */
2868 return 0;
2870 bool unwind_valid_p
2871 = compunit_epilogue_unwind_valid (find_pc_compunit_symtab (pc));
2872 if (override_p)
2874 if (unwind_valid_p)
2875 /* Don't override the symtab unwinders, skip
2876 "amd64 epilogue override". */
2877 return 0;
2879 else
2881 if (!unwind_valid_p)
2882 /* "amd64 epilogue override" unwinder already ran, skip
2883 "amd64 epilogue". */
2884 return 0;
2887 /* Check whether we're in an epilogue. */
2888 return amd64_stack_frame_destroyed_p (gdbarch, pc);
2891 static int
2892 amd64_epilogue_override_frame_sniffer (const struct frame_unwind *self,
2893 frame_info_ptr this_frame,
2894 void **this_prologue_cache)
2896 return amd64_epilogue_frame_sniffer_1 (self, this_frame, this_prologue_cache,
2897 true);
2900 static int
2901 amd64_epilogue_frame_sniffer (const struct frame_unwind *self,
2902 frame_info_ptr this_frame,
2903 void **this_prologue_cache)
2905 return amd64_epilogue_frame_sniffer_1 (self, this_frame, this_prologue_cache,
2906 false);
2909 static struct amd64_frame_cache *
2910 amd64_epilogue_frame_cache (frame_info_ptr this_frame, void **this_cache)
2912 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2913 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2914 struct amd64_frame_cache *cache;
2915 gdb_byte buf[8];
2917 if (*this_cache)
2918 return (struct amd64_frame_cache *) *this_cache;
2920 cache = amd64_alloc_frame_cache ();
2921 *this_cache = cache;
2925 /* Cache base will be %rsp plus cache->sp_offset (-8). */
2926 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2927 cache->base = extract_unsigned_integer (buf, 8,
2928 byte_order) + cache->sp_offset;
2930 /* Cache pc will be the frame func. */
2931 cache->pc = get_frame_func (this_frame);
2933 /* The previous value of %rsp is cache->base plus 16. */
2934 cache->saved_sp = cache->base + 16;
2936 /* The saved %rip will be at cache->base plus 8. */
2937 cache->saved_regs[AMD64_RIP_REGNUM] = cache->base + 8;
2939 cache->base_p = 1;
2941 catch (const gdb_exception_error &ex)
2943 if (ex.error != NOT_AVAILABLE_ERROR)
2944 throw;
2947 return cache;
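/* A quick check of the arithmetic above: at a "ret" instruction %rsp
   points at the saved %rip and sp_offset still has its initial value
   of -8, so base == %rsp - 8, the saved %rip sits at base + 8 ==
   %rsp, and the caller's %rsp is base + 16 == %rsp + 8, i.e. just
   past the popped return address.  */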
2950 static enum unwind_stop_reason
2951 amd64_epilogue_frame_unwind_stop_reason (frame_info_ptr this_frame,
2952 void **this_cache)
2954 struct amd64_frame_cache *cache
2955 = amd64_epilogue_frame_cache (this_frame, this_cache);
2957 if (!cache->base_p)
2958 return UNWIND_UNAVAILABLE;
2960 return UNWIND_NO_REASON;
2963 static void
2964 amd64_epilogue_frame_this_id (frame_info_ptr this_frame,
2965 void **this_cache,
2966 struct frame_id *this_id)
2968 struct amd64_frame_cache *cache = amd64_epilogue_frame_cache (this_frame,
2969 this_cache);
2971 if (!cache->base_p)
2972 (*this_id) = frame_id_build_unavailable_stack (cache->pc);
2973 else
2974 (*this_id) = frame_id_build (cache->base + 16, cache->pc);
2977 static const struct frame_unwind amd64_epilogue_override_frame_unwind =
2979 "amd64 epilogue override",
2980 NORMAL_FRAME,
2981 amd64_epilogue_frame_unwind_stop_reason,
2982 amd64_epilogue_frame_this_id,
2983 amd64_frame_prev_register,
2984 NULL,
2985 amd64_epilogue_override_frame_sniffer
2988 static const struct frame_unwind amd64_epilogue_frame_unwind =
2990 "amd64 epilogue",
2991 NORMAL_FRAME,
2992 amd64_epilogue_frame_unwind_stop_reason,
2993 amd64_epilogue_frame_this_id,
2994 amd64_frame_prev_register,
2995 NULL,
2996 amd64_epilogue_frame_sniffer
2999 static struct frame_id
3000 amd64_dummy_id (struct gdbarch *gdbarch, frame_info_ptr this_frame)
3002 CORE_ADDR fp;
3004 fp = get_frame_register_unsigned (this_frame, AMD64_RBP_REGNUM);
3006 return frame_id_build (fp + 16, get_frame_pc (this_frame));
3009 /* 16-byte align the SP per frame requirements. */
3011 static CORE_ADDR
3012 amd64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
3014 return sp & -(CORE_ADDR)16;
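/* For example, an incoming SP of 0x7fffffffe468 (hypothetical) is
   rounded down to 0x7fffffffe460; -(CORE_ADDR) 16 is simply the
   all-ones mask with the low four bits cleared.  */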
3018 /* Supply register REGNUM from the buffer specified by FPREGS and LEN
3019 in the floating-point register set REGSET to register cache
3020 REGCACHE. If REGNUM is -1, do this for all registers in REGSET. */
3022 static void
3023 amd64_supply_fpregset (const struct regset *regset, struct regcache *regcache,
3024 int regnum, const void *fpregs, size_t len)
3026 struct gdbarch *gdbarch = regcache->arch ();
3027 const i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
3029 gdb_assert (len >= tdep->sizeof_fpregset);
3030 amd64_supply_fxsave (regcache, regnum, fpregs);
3033 /* Collect register REGNUM from the register cache REGCACHE and store
3034 it in the buffer specified by FPREGS and LEN as described by the
3035 floating-point register set REGSET. If REGNUM is -1, do this for
3036 all registers in REGSET. */
3038 static void
3039 amd64_collect_fpregset (const struct regset *regset,
3040 const struct regcache *regcache,
3041 int regnum, void *fpregs, size_t len)
3043 struct gdbarch *gdbarch = regcache->arch ();
3044 const i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
3046 gdb_assert (len >= tdep->sizeof_fpregset);
3047 amd64_collect_fxsave (regcache, regnum, fpregs);
3050 const struct regset amd64_fpregset =
3052 NULL, amd64_supply_fpregset, amd64_collect_fpregset
3056 /* Figure out where the longjmp will land. Slurp the jmp_buf out of
3057 %rdi. We expect its value to be a pointer to the jmp_buf structure
3058 from which we extract the address that we will land at. This
3059 address is copied into PC. This routine returns non-zero on
3060 success. */
3062 static int
3063 amd64_get_longjmp_target (frame_info_ptr frame, CORE_ADDR *pc)
3065 gdb_byte buf[8];
3066 CORE_ADDR jb_addr;
3067 struct gdbarch *gdbarch = get_frame_arch (frame);
3068 i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
3069 int jb_pc_offset = tdep->jb_pc_offset;
3070 int len = builtin_type (gdbarch)->builtin_func_ptr->length ();
3072 /* If JB_PC_OFFSET is -1, we have no way to find out where the
3073 longjmp will land. */
3074 if (jb_pc_offset == -1)
3075 return 0;
3077 get_frame_register (frame, AMD64_RDI_REGNUM, buf);
3078 jb_addr = extract_typed_address
3079 (buf, builtin_type (gdbarch)->builtin_data_ptr);
3080 if (target_read_memory (jb_addr + jb_pc_offset, buf, len))
3081 return 0;
3083 *pc = extract_typed_address (buf, builtin_type (gdbarch)->builtin_func_ptr);
3085 return 1;
3088 static const int amd64_record_regmap[] =
3090 AMD64_RAX_REGNUM, AMD64_RCX_REGNUM, AMD64_RDX_REGNUM, AMD64_RBX_REGNUM,
3091 AMD64_RSP_REGNUM, AMD64_RBP_REGNUM, AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
3092 AMD64_R8_REGNUM, AMD64_R9_REGNUM, AMD64_R10_REGNUM, AMD64_R11_REGNUM,
3093 AMD64_R12_REGNUM, AMD64_R13_REGNUM, AMD64_R14_REGNUM, AMD64_R15_REGNUM,
3094 AMD64_RIP_REGNUM, AMD64_EFLAGS_REGNUM, AMD64_CS_REGNUM, AMD64_SS_REGNUM,
3095 AMD64_DS_REGNUM, AMD64_ES_REGNUM, AMD64_FS_REGNUM, AMD64_GS_REGNUM
3098 /* Implement the "in_indirect_branch_thunk" gdbarch function. */
3100 static bool
3101 amd64_in_indirect_branch_thunk (struct gdbarch *gdbarch, CORE_ADDR pc)
3103 return x86_in_indirect_branch_thunk (pc, amd64_register_names,
3104 AMD64_RAX_REGNUM,
3105 AMD64_RIP_REGNUM);
3108 void
3109 amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch,
3110 const target_desc *default_tdesc)
3112 i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
3113 const struct target_desc *tdesc = info.target_desc;
3114 static const char *const stap_integer_prefixes[] = { "$", NULL };
3115 static const char *const stap_register_prefixes[] = { "%", NULL };
3116 static const char *const stap_register_indirection_prefixes[] = { "(",
3117 NULL };
3118 static const char *const stap_register_indirection_suffixes[] = { ")",
3119 NULL };
3121 /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
3122 floating-point registers. */
3123 tdep->sizeof_fpregset = I387_SIZEOF_FXSAVE;
3124 tdep->fpregset = &amd64_fpregset;
3126 if (! tdesc_has_registers (tdesc))
3127 tdesc = default_tdesc;
3128 tdep->tdesc = tdesc;
3130 tdep->num_core_regs = AMD64_NUM_GREGS + I387_NUM_REGS;
3131 tdep->register_names = amd64_register_names;
3133 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx512") != NULL)
3135 tdep->zmmh_register_names = amd64_zmmh_names;
3136 tdep->k_register_names = amd64_k_names;
3137 tdep->xmm_avx512_register_names = amd64_xmm_avx512_names;
3138 tdep->ymm16h_register_names = amd64_ymmh_avx512_names;
3140 tdep->num_zmm_regs = 32;
3141 tdep->num_xmm_avx512_regs = 16;
3142 tdep->num_ymm_avx512_regs = 16;
3144 tdep->zmm0h_regnum = AMD64_ZMM0H_REGNUM;
3145 tdep->k0_regnum = AMD64_K0_REGNUM;
3146 tdep->xmm16_regnum = AMD64_XMM16_REGNUM;
3147 tdep->ymm16h_regnum = AMD64_YMM16H_REGNUM;
3150 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx") != NULL)
3152 tdep->ymmh_register_names = amd64_ymmh_names;
3153 tdep->num_ymm_regs = 16;
3154 tdep->ymm0h_regnum = AMD64_YMM0H_REGNUM;
3157 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.mpx") != NULL)
3159 tdep->mpx_register_names = amd64_mpx_names;
3160 tdep->bndcfgu_regnum = AMD64_BNDCFGU_REGNUM;
3161 tdep->bnd0r_regnum = AMD64_BND0R_REGNUM;
3164 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.segments") != NULL)
3166 tdep->fsbase_regnum = AMD64_FSBASE_REGNUM;
3169 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.pkeys") != NULL)
3171 tdep->pkeys_register_names = amd64_pkeys_names;
3172 tdep->pkru_regnum = AMD64_PKRU_REGNUM;
3173 tdep->num_pkeys_regs = 1;
3176 tdep->num_byte_regs = 20;
3177 tdep->num_word_regs = 16;
3178 tdep->num_dword_regs = 16;
3179 /* Avoid wiring in the MMX registers for now. */
3180 tdep->num_mmx_regs = 0;
3182 set_gdbarch_pseudo_register_read_value (gdbarch,
3183 amd64_pseudo_register_read_value);
3184 set_gdbarch_pseudo_register_write (gdbarch, amd64_pseudo_register_write);
3185 set_gdbarch_ax_pseudo_register_collect (gdbarch,
3186 amd64_ax_pseudo_register_collect);
3188 set_tdesc_pseudo_register_name (gdbarch, amd64_pseudo_register_name);
3190 /* AMD64 has an FPU and 16 SSE registers. */
3191 tdep->st0_regnum = AMD64_ST0_REGNUM;
3192 tdep->num_xmm_regs = 16;
3194 /* This is what all the fuss is about. */
3195 set_gdbarch_long_bit (gdbarch, 64);
3196 set_gdbarch_long_long_bit (gdbarch, 64);
3197 set_gdbarch_ptr_bit (gdbarch, 64);
3199 /* In contrast to the i386, on AMD64 a `long double' actually takes
3200 up 128 bits, even though it's still based on the i387 extended
3201 floating-point format which has only 80 significant bits. */
3202 set_gdbarch_long_double_bit (gdbarch, 128);
3204 set_gdbarch_num_regs (gdbarch, AMD64_NUM_REGS);
3206 /* Register numbers of various important registers. */
3207 set_gdbarch_sp_regnum (gdbarch, AMD64_RSP_REGNUM); /* %rsp */
3208 set_gdbarch_pc_regnum (gdbarch, AMD64_RIP_REGNUM); /* %rip */
3209 set_gdbarch_ps_regnum (gdbarch, AMD64_EFLAGS_REGNUM); /* %eflags */
3210 set_gdbarch_fp0_regnum (gdbarch, AMD64_ST0_REGNUM); /* %st(0) */
3212 /* The "default" register numbering scheme for AMD64 is referred to
3213 as the "DWARF Register Number Mapping" in the System V psABI.
3214 The preferred debugging format for all known AMD64 targets is
3215 actually DWARF2, and GCC doesn't seem to support DWARF (that is
3216 DWARF-1), but we provide the same mapping just in case. This
3217 mapping is also used for stabs, which GCC does support. */
3218 set_gdbarch_stab_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
3219 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
3221 /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
3222 be in use on any of the supported AMD64 targets. */
3224 /* Call dummy code. */
3225 set_gdbarch_push_dummy_call (gdbarch, amd64_push_dummy_call);
3226 set_gdbarch_frame_align (gdbarch, amd64_frame_align);
3227 set_gdbarch_frame_red_zone_size (gdbarch, 128);
3229 set_gdbarch_convert_register_p (gdbarch, i387_convert_register_p);
3230 set_gdbarch_register_to_value (gdbarch, i387_register_to_value);
3231 set_gdbarch_value_to_register (gdbarch, i387_value_to_register);
3233 set_gdbarch_return_value_as_value (gdbarch, amd64_return_value);
3235 set_gdbarch_skip_prologue (gdbarch, amd64_skip_prologue);
3237 tdep->record_regmap = amd64_record_regmap;
3239 set_gdbarch_dummy_id (gdbarch, amd64_dummy_id);
3241 /* Hook the function epilogue frame unwinder. This unwinder is
3242 appended to the list first, so that it supersedes the other
3243 unwinders in function epilogues. */
3244 frame_unwind_prepend_unwinder (gdbarch, &amd64_epilogue_override_frame_unwind);
3246 frame_unwind_append_unwinder (gdbarch, &amd64_epilogue_frame_unwind);
3248 /* Hook the prologue-based frame unwinders. */
3249 frame_unwind_append_unwinder (gdbarch, &amd64_sigtramp_frame_unwind);
3250 frame_unwind_append_unwinder (gdbarch, &amd64_frame_unwind);
3251 frame_base_set_default (gdbarch, &amd64_frame_base);
3253 set_gdbarch_get_longjmp_target (gdbarch, amd64_get_longjmp_target);
3255 set_gdbarch_relocate_instruction (gdbarch, amd64_relocate_instruction);
3257 set_gdbarch_gen_return_address (gdbarch, amd64_gen_return_address);
3259 /* SystemTap variables and functions. */
3260 set_gdbarch_stap_integer_prefixes (gdbarch, stap_integer_prefixes);
3261 set_gdbarch_stap_register_prefixes (gdbarch, stap_register_prefixes);
3262 set_gdbarch_stap_register_indirection_prefixes (gdbarch,
3263 stap_register_indirection_prefixes);
3264 set_gdbarch_stap_register_indirection_suffixes (gdbarch,
3265 stap_register_indirection_suffixes);
3266 set_gdbarch_stap_is_single_operand (gdbarch,
3267 i386_stap_is_single_operand);
3268 set_gdbarch_stap_parse_special_token (gdbarch,
3269 i386_stap_parse_special_token);
3270 set_gdbarch_insn_is_call (gdbarch, amd64_insn_is_call);
3271 set_gdbarch_insn_is_ret (gdbarch, amd64_insn_is_ret);
3272 set_gdbarch_insn_is_jump (gdbarch, amd64_insn_is_jump);
3274 set_gdbarch_in_indirect_branch_thunk (gdbarch,
3275 amd64_in_indirect_branch_thunk);
3277 register_amd64_ravenscar_ops (gdbarch);
3280 /* Initialize ARCH for x86-64, no osabi. */
3282 static void
3283 amd64_none_init_abi (gdbarch_info info, gdbarch *arch)
3285 amd64_init_abi (info, arch, amd64_target_description (X86_XSTATE_SSE_MASK,
3286 true));
3289 static struct type *
3290 amd64_x32_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
3292 i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
3294 switch (regnum - tdep->eax_regnum)
3296 case AMD64_RBP_REGNUM: /* %ebp */
3297 case AMD64_RSP_REGNUM: /* %esp */
3298 return builtin_type (gdbarch)->builtin_data_ptr;
3299 case AMD64_RIP_REGNUM: /* %eip */
3300 return builtin_type (gdbarch)->builtin_func_ptr;
3303 return i386_pseudo_register_type (gdbarch, regnum);
3306 void
3307 amd64_x32_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch,
3308 const target_desc *default_tdesc)
3310 i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
3312 amd64_init_abi (info, gdbarch, default_tdesc);
3314 tdep->num_dword_regs = 17;
3315 set_tdesc_pseudo_register_type (gdbarch, amd64_x32_pseudo_register_type);
3317 set_gdbarch_long_bit (gdbarch, 32);
3318 set_gdbarch_ptr_bit (gdbarch, 32);
3321 /* Initialize ARCH for x64-32, no osabi. */
3323 static void
3324 amd64_x32_none_init_abi (gdbarch_info info, gdbarch *arch)
3326 amd64_x32_init_abi (info, arch,
3327 amd64_target_description (X86_XSTATE_SSE_MASK, true));
3330 /* Return the target description for a specified XSAVE feature mask. */
3332 const struct target_desc *
3333 amd64_target_description (uint64_t xcr0, bool segments)
3335 static target_desc *amd64_tdescs \
3336 [2/*AVX*/][2/*MPX*/][2/*AVX512*/][2/*PKRU*/][2/*segments*/] = {};
3337 target_desc **tdesc;
3339 tdesc = &amd64_tdescs[(xcr0 & X86_XSTATE_AVX) ? 1 : 0]
3340 [(xcr0 & X86_XSTATE_MPX) ? 1 : 0]
3341 [(xcr0 & X86_XSTATE_AVX512) ? 1 : 0]
3342 [(xcr0 & X86_XSTATE_PKRU) ? 1 : 0]
3343 [segments ? 1 : 0];
3345 if (*tdesc == NULL)
3346 *tdesc = amd64_create_target_description (xcr0, false, false,
3347 segments);
3349 return *tdesc;
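/* For example, an XCR0 value with the X86_XSTATE_AVX bit set but no
   MPX, AVX512 or PKRU bits, together with SEGMENTS true, selects the
   amd64_tdescs[1][0][0][0][1] slot; each slot is created lazily on
   first use and reused afterwards.  */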
3352 void _initialize_amd64_tdep ();
3353 void
3354 _initialize_amd64_tdep ()
3356 gdbarch_register_osabi (bfd_arch_i386, bfd_mach_x86_64, GDB_OSABI_NONE,
3357 amd64_none_init_abi);
3358 gdbarch_register_osabi (bfd_arch_i386, bfd_mach_x64_32, GDB_OSABI_NONE,
3359 amd64_x32_none_init_abi);
3363 /* The 64-bit FXSAVE format differs from the 32-bit format in the
3364 sense that the instruction pointer and data pointer are simply
3365 64-bit offsets into the code segment and the data segment instead
3366 of a selector/offset pair. The functions below store the upper 32
3367 bits of these pointers (instead of just the 16 bits of the segment
3368 selector). */
3370 /* Fill register REGNUM in REGCACHE with the appropriate
3371 floating-point or SSE register value from *FXSAVE. If REGNUM is
3372 -1, do this for all registers. This function masks off any of the
3373 reserved bits in *FXSAVE. */
3375 void
3376 amd64_supply_fxsave (struct regcache *regcache, int regnum,
3377 const void *fxsave)
3379 struct gdbarch *gdbarch = regcache->arch ();
3380 i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
3382 i387_supply_fxsave (regcache, regnum, fxsave);
3384 if (fxsave
3385 && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3387 const gdb_byte *regs = (const gdb_byte *) fxsave;
3389 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3390 regcache->raw_supply (I387_FISEG_REGNUM (tdep), regs + 12);
3391 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3392 regcache->raw_supply (I387_FOSEG_REGNUM (tdep), regs + 20);
3396 /* Similar to amd64_supply_fxsave, but use XSAVE extended state. */
3398 void
3399 amd64_supply_xsave (struct regcache *regcache, int regnum,
3400 const void *xsave)
3402 struct gdbarch *gdbarch = regcache->arch ();
3403 i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
3405 i387_supply_xsave (regcache, regnum, xsave);
3407 if (xsave
3408 && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3410 const gdb_byte *regs = (const gdb_byte *) xsave;
3411 ULONGEST clear_bv;
3413 clear_bv = i387_xsave_get_clear_bv (gdbarch, xsave);
3415 /* If the FISEG and FOSEG registers have not been initialised yet
3416 (their CLEAR_BV bit is set) then their default values of zero will
3417 have already been set up by I387_SUPPLY_XSAVE. */
3418 if (!(clear_bv & X86_XSTATE_X87))
3420 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3421 regcache->raw_supply (I387_FISEG_REGNUM (tdep), regs + 12);
3422 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3423 regcache->raw_supply (I387_FOSEG_REGNUM (tdep), regs + 20);
3428 /* Fill register REGNUM (if it is a floating-point or SSE register) in
3429 *FXSAVE with the value from REGCACHE. If REGNUM is -1, do this for
3430 all registers. This function doesn't touch any of the reserved
3431 bits in *FXSAVE. */
3433 void
3434 amd64_collect_fxsave (const struct regcache *regcache, int regnum,
3435 void *fxsave)
3437 struct gdbarch *gdbarch = regcache->arch ();
3438 i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
3439 gdb_byte *regs = (gdb_byte *) fxsave;
3441 i387_collect_fxsave (regcache, regnum, fxsave);
3443 if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3445 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3446 regcache->raw_collect (I387_FISEG_REGNUM (tdep), regs + 12);
3447 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3448 regcache->raw_collect (I387_FOSEG_REGNUM (tdep), regs + 20);
3452 /* Similar to amd64_collect_fxsave, but use XSAVE extended state. */
3454 void
3455 amd64_collect_xsave (const struct regcache *regcache, int regnum,
3456 void *xsave, int gcore)
3458 struct gdbarch *gdbarch = regcache->arch ();
3459 i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
3460 gdb_byte *regs = (gdb_byte *) xsave;
3462 i387_collect_xsave (regcache, regnum, xsave, gcore);
3464 if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3466 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3467 regcache->raw_collect (I387_FISEG_REGNUM (tdep),
3468 regs + 12);
3469 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3470 regcache->raw_collect (I387_FOSEG_REGNUM (tdep),
3471 regs + 20);