/* Target-dependent code for AMD64.

   Copyright (C) 2001-2024 Free Software Foundation, Inc.

   Contributed by Jiri Smid, SuSE Labs.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

22 #include "language.h"
23 #include "opcode/i386.h"
24 #include "dis-asm.h"
25 #include "arch-utils.h"
26 #include "dummy-frame.h"
27 #include "frame.h"
28 #include "frame-base.h"
29 #include "frame-unwind.h"
30 #include "inferior.h"
31 #include "infrun.h"
32 #include "gdbcmd.h"
33 #include "gdbcore.h"
34 #include "objfiles.h"
35 #include "regcache.h"
36 #include "regset.h"
37 #include "symfile.h"
38 #include "disasm.h"
39 #include "amd64-tdep.h"
40 #include "i387-tdep.h"
41 #include "gdbsupport/x86-xstate.h"
42 #include <algorithm>
43 #include "target-descriptions.h"
44 #include "arch/amd64.h"
45 #include "producer.h"
46 #include "ax.h"
47 #include "ax-gdb.h"
48 #include "gdbsupport/byte-vector.h"
49 #include "osabi.h"
50 #include "x86-tdep.h"
51 #include "amd64-ravenscar-thread.h"
/* Note that the AMD64 architecture was previously known as x86-64.
   The latter is (forever) engraved into the canonical system name as
   returned by config.guess, and used as the name for the AMD64 port
   of GNU/Linux.  The BSDs have renamed their ports to amd64; they
   don't like to shout.  For GDB we prefer the amd64_-prefix over the
   x86_64_-prefix since it's so much easier to type.  */

/* Register information.  */

static const char * const amd64_register_names[] =
{
  "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp",

  /* %r8 is indeed register number 8.  */
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
  "rip", "eflags", "cs", "ss", "ds", "es", "fs", "gs",

  /* %st0 is register number 24.  */
  "st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7",
  "fctrl", "fstat", "ftag", "fiseg", "fioff", "foseg", "fooff", "fop",

  /* %xmm0 is register number 40.  */
  "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
  "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
  "mxcsr",
};

static const char * const amd64_ymm_names[] =
{
  "ymm0", "ymm1", "ymm2", "ymm3",
  "ymm4", "ymm5", "ymm6", "ymm7",
  "ymm8", "ymm9", "ymm10", "ymm11",
  "ymm12", "ymm13", "ymm14", "ymm15"
};

static const char * const amd64_ymm_avx512_names[] =
{
  "ymm16", "ymm17", "ymm18", "ymm19",
  "ymm20", "ymm21", "ymm22", "ymm23",
  "ymm24", "ymm25", "ymm26", "ymm27",
  "ymm28", "ymm29", "ymm30", "ymm31"
};

static const char * const amd64_ymmh_names[] =
{
  "ymm0h", "ymm1h", "ymm2h", "ymm3h",
  "ymm4h", "ymm5h", "ymm6h", "ymm7h",
  "ymm8h", "ymm9h", "ymm10h", "ymm11h",
  "ymm12h", "ymm13h", "ymm14h", "ymm15h"
};

static const char * const amd64_ymmh_avx512_names[] =
{
  "ymm16h", "ymm17h", "ymm18h", "ymm19h",
  "ymm20h", "ymm21h", "ymm22h", "ymm23h",
  "ymm24h", "ymm25h", "ymm26h", "ymm27h",
  "ymm28h", "ymm29h", "ymm30h", "ymm31h"
};

static const char * const amd64_mpx_names[] =
{
  "bnd0raw", "bnd1raw", "bnd2raw", "bnd3raw", "bndcfgu", "bndstatus"
};

static const char * const amd64_k_names[] =
{
  "k0", "k1", "k2", "k3",
  "k4", "k5", "k6", "k7"
};

static const char * const amd64_zmmh_names[] =
{
  "zmm0h", "zmm1h", "zmm2h", "zmm3h",
  "zmm4h", "zmm5h", "zmm6h", "zmm7h",
  "zmm8h", "zmm9h", "zmm10h", "zmm11h",
  "zmm12h", "zmm13h", "zmm14h", "zmm15h",
  "zmm16h", "zmm17h", "zmm18h", "zmm19h",
  "zmm20h", "zmm21h", "zmm22h", "zmm23h",
  "zmm24h", "zmm25h", "zmm26h", "zmm27h",
  "zmm28h", "zmm29h", "zmm30h", "zmm31h"
};

static const char * const amd64_zmm_names[] =
{
  "zmm0", "zmm1", "zmm2", "zmm3",
  "zmm4", "zmm5", "zmm6", "zmm7",
  "zmm8", "zmm9", "zmm10", "zmm11",
  "zmm12", "zmm13", "zmm14", "zmm15",
  "zmm16", "zmm17", "zmm18", "zmm19",
  "zmm20", "zmm21", "zmm22", "zmm23",
  "zmm24", "zmm25", "zmm26", "zmm27",
  "zmm28", "zmm29", "zmm30", "zmm31"
};

static const char * const amd64_xmm_avx512_names[] = {
  "xmm16", "xmm17", "xmm18", "xmm19",
  "xmm20", "xmm21", "xmm22", "xmm23",
  "xmm24", "xmm25", "xmm26", "xmm27",
  "xmm28", "xmm29", "xmm30", "xmm31"
};

static const char * const amd64_pkeys_names[] = {
  "pkru"
};

/* DWARF Register Number Mapping as defined in the System V psABI,
   section 3.6.  */

static int amd64_dwarf_regmap[] =
{
  /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI.  */
  AMD64_RAX_REGNUM, AMD64_RDX_REGNUM,
  AMD64_RCX_REGNUM, AMD64_RBX_REGNUM,
  AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,

  /* Frame Pointer Register RBP.  */
  AMD64_RBP_REGNUM,

  /* Stack Pointer Register RSP.  */
  AMD64_RSP_REGNUM,

  /* Extended Integer Registers 8 - 15.  */
  AMD64_R8_REGNUM,   /* %r8 */
  AMD64_R9_REGNUM,   /* %r9 */
  AMD64_R10_REGNUM,  /* %r10 */
  AMD64_R11_REGNUM,  /* %r11 */
  AMD64_R12_REGNUM,  /* %r12 */
  AMD64_R13_REGNUM,  /* %r13 */
  AMD64_R14_REGNUM,  /* %r14 */
  AMD64_R15_REGNUM,  /* %r15 */

  /* Return Address RA.  Mapped to RIP.  */
  AMD64_RIP_REGNUM,

  /* SSE Registers 0 - 7.  */
  AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
  AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
  AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
  AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,

  /* Extended SSE Registers 8 - 15.  */
  AMD64_XMM0_REGNUM + 8, AMD64_XMM0_REGNUM + 9,
  AMD64_XMM0_REGNUM + 10, AMD64_XMM0_REGNUM + 11,
  AMD64_XMM0_REGNUM + 12, AMD64_XMM0_REGNUM + 13,
  AMD64_XMM0_REGNUM + 14, AMD64_XMM0_REGNUM + 15,

  /* Floating Point Registers 0-7.  */
  AMD64_ST0_REGNUM + 0, AMD64_ST0_REGNUM + 1,
  AMD64_ST0_REGNUM + 2, AMD64_ST0_REGNUM + 3,
  AMD64_ST0_REGNUM + 4, AMD64_ST0_REGNUM + 5,
  AMD64_ST0_REGNUM + 6, AMD64_ST0_REGNUM + 7,

  /* MMX Registers 0 - 7.
     We have to handle those registers specifically, as their register
     number within GDB depends on the target (or they may even not be
     available at all).  */
  -1, -1, -1, -1, -1, -1, -1, -1,

  /* Control and Status Flags Register.  */
  AMD64_EFLAGS_REGNUM,

  /* Selector Registers.  */
  AMD64_ES_REGNUM,
  AMD64_CS_REGNUM,
  AMD64_SS_REGNUM,
  AMD64_DS_REGNUM,
  AMD64_FS_REGNUM,
  AMD64_GS_REGNUM,

  /* Reserved.  */
  -1, -1,

  /* Segment Base Address Registers.  */
  -1, -1, -1, -1,

  /* Special Selector Registers.  */
  -1, -1,

  /* Floating Point Control Registers.  */
  AMD64_MXCSR_REGNUM,
  AMD64_FCTRL_REGNUM,
  AMD64_FSTAT_REGNUM,

  /* XMM16-XMM31.  */
  AMD64_XMM16_REGNUM + 0, AMD64_XMM16_REGNUM + 1,
  AMD64_XMM16_REGNUM + 2, AMD64_XMM16_REGNUM + 3,
  AMD64_XMM16_REGNUM + 4, AMD64_XMM16_REGNUM + 5,
  AMD64_XMM16_REGNUM + 6, AMD64_XMM16_REGNUM + 7,
  AMD64_XMM16_REGNUM + 8, AMD64_XMM16_REGNUM + 9,
  AMD64_XMM16_REGNUM + 10, AMD64_XMM16_REGNUM + 11,
  AMD64_XMM16_REGNUM + 12, AMD64_XMM16_REGNUM + 13,
  AMD64_XMM16_REGNUM + 14, AMD64_XMM16_REGNUM + 15,

  /* Reserved.  */
  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,

  /* Mask Registers.  */
  AMD64_K0_REGNUM + 0, AMD64_K0_REGNUM + 1,
  AMD64_K0_REGNUM + 2, AMD64_K0_REGNUM + 3,
  AMD64_K0_REGNUM + 4, AMD64_K0_REGNUM + 5,
  AMD64_K0_REGNUM + 6, AMD64_K0_REGNUM + 7
};

static const int amd64_dwarf_regmap_len =
  (sizeof (amd64_dwarf_regmap) / sizeof (amd64_dwarf_regmap[0]));

/* Convert DWARF register number REG to the appropriate register
   number used by GDB.  */

static int
amd64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
  i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
  int ymm0_regnum = tdep->ymm0_regnum;
  int regnum = -1;

  if (reg >= 0 && reg < amd64_dwarf_regmap_len)
    regnum = amd64_dwarf_regmap[reg];

  if (ymm0_regnum >= 0 && i386_xmm_regnum_p (gdbarch, regnum))
    regnum += ymm0_regnum - I387_XMM0_REGNUM (tdep);

  return regnum;
}

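/* A worked example of the mapping above (illustrative only): DWARF
   register 4 is %rsi on AMD64, unlike 32-bit x86 where DWARF
   register 4 is %esp, and DWARF register 17 is %xmm0:

     amd64_dwarf_reg_to_regnum (gdbarch, 4)   => AMD64_RSI_REGNUM
     amd64_dwarf_reg_to_regnum (gdbarch, 17)  => regnum of %xmm0

   On a target description that includes AVX (ymm0_regnum >= 0), the
   second call instead yields the %ymm0 pseudo register, so a DWARF
   location expression naming register 17 resolves to the full
   %ymm0 rather than just its %xmm0 subset.  */
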
/* Map architectural register numbers to gdb register numbers.  */

static const int amd64_arch_regmap[16] =
{
  AMD64_RAX_REGNUM,  /* %rax */
  AMD64_RCX_REGNUM,  /* %rcx */
  AMD64_RDX_REGNUM,  /* %rdx */
  AMD64_RBX_REGNUM,  /* %rbx */
  AMD64_RSP_REGNUM,  /* %rsp */
  AMD64_RBP_REGNUM,  /* %rbp */
  AMD64_RSI_REGNUM,  /* %rsi */
  AMD64_RDI_REGNUM,  /* %rdi */
  AMD64_R8_REGNUM,   /* %r8 */
  AMD64_R9_REGNUM,   /* %r9 */
  AMD64_R10_REGNUM,  /* %r10 */
  AMD64_R11_REGNUM,  /* %r11 */
  AMD64_R12_REGNUM,  /* %r12 */
  AMD64_R13_REGNUM,  /* %r13 */
  AMD64_R14_REGNUM,  /* %r14 */
  AMD64_R15_REGNUM   /* %r15 */
};

static const int amd64_arch_regmap_len =
  (sizeof (amd64_arch_regmap) / sizeof (amd64_arch_regmap[0]));

/* Convert architectural register number REG to the appropriate register
   number used by GDB.  */

static int
amd64_arch_reg_to_regnum (int reg)
{
  gdb_assert (reg >= 0 && reg < amd64_arch_regmap_len);

  return amd64_arch_regmap[reg];
}

/* Register names for byte pseudo-registers.  */

static const char * const amd64_byte_names[] =
{
  "al", "bl", "cl", "dl", "sil", "dil", "bpl", "spl",
  "r8l", "r9l", "r10l", "r11l", "r12l", "r13l", "r14l", "r15l",
  "ah", "bh", "ch", "dh"
};

/* Number of lower byte registers.  */
#define AMD64_NUM_LOWER_BYTE_REGS 16

/* Register names for word pseudo-registers.  */

static const char * const amd64_word_names[] =
{
  "ax", "bx", "cx", "dx", "si", "di", "bp", "",
  "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w"
};

/* Register names for dword pseudo-registers.  */

static const char * const amd64_dword_names[] =
{
  "eax", "ebx", "ecx", "edx", "esi", "edi", "ebp", "esp",
  "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d",
  "eip"
};

/* Return the name of register REGNUM.  */

static const char *
amd64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
{
  i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
  if (i386_byte_regnum_p (gdbarch, regnum))
    return amd64_byte_names[regnum - tdep->al_regnum];
  else if (i386_zmm_regnum_p (gdbarch, regnum))
    return amd64_zmm_names[regnum - tdep->zmm0_regnum];
  else if (i386_ymm_regnum_p (gdbarch, regnum))
    return amd64_ymm_names[regnum - tdep->ymm0_regnum];
  else if (i386_ymm_avx512_regnum_p (gdbarch, regnum))
    return amd64_ymm_avx512_names[regnum - tdep->ymm16_regnum];
  else if (i386_word_regnum_p (gdbarch, regnum))
    return amd64_word_names[regnum - tdep->ax_regnum];
  else if (i386_dword_regnum_p (gdbarch, regnum))
    return amd64_dword_names[regnum - tdep->eax_regnum];
  else
    return i386_pseudo_register_name (gdbarch, regnum);
}

static value *
amd64_pseudo_register_read_value (gdbarch *gdbarch,
				  const frame_info_ptr &next_frame,
				  int regnum)
{
  i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);

  if (i386_byte_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->al_regnum;

      /* Extract (always little endian).  */
      if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
	{
	  gpnum -= AMD64_NUM_LOWER_BYTE_REGS;

	  /* Special handling for AH, BH, CH, DH.  */
	  return pseudo_from_raw_part (next_frame, regnum, gpnum, 1);
	}
      else
	return pseudo_from_raw_part (next_frame, regnum, gpnum, 0);
    }
  else if (i386_dword_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->eax_regnum;

      return pseudo_from_raw_part (next_frame, regnum, gpnum, 0);
    }
  else
    return i386_pseudo_register_read_value (gdbarch, next_frame, regnum);
}

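/* Illustration (assuming pseudo_from_raw_part extracts the pseudo
   register from the raw register at the given byte offset, as its
   use here implies): reading %ah yields byte 1 of %rax, while %al
   and the %eax dword are read from offset 0.  The high-byte
   registers %ah, %bh, %ch and %dh are the only pseudo registers
   handled here with a non-zero offset.  */
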
static void
amd64_pseudo_register_write (gdbarch *gdbarch, const frame_info_ptr &next_frame,
			     int regnum, gdb::array_view<const gdb_byte> buf)
{
  i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);

  if (i386_byte_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->al_regnum;

      if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
	{
	  gpnum -= AMD64_NUM_LOWER_BYTE_REGS;
	  pseudo_to_raw_part (next_frame, buf, gpnum, 1);
	}
      else
	pseudo_to_raw_part (next_frame, buf, gpnum, 0);
    }
  else if (i386_dword_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->eax_regnum;
      pseudo_to_raw_part (next_frame, buf, gpnum, 0);
    }
  else
    i386_pseudo_register_write (gdbarch, next_frame, regnum, buf);
}

/* Implement the 'ax_pseudo_register_collect' gdbarch method.  */

static int
amd64_ax_pseudo_register_collect (struct gdbarch *gdbarch,
				  struct agent_expr *ax, int regnum)
{
  i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);

  if (i386_byte_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->al_regnum;

      if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
	ax_reg_mask (ax, gpnum - AMD64_NUM_LOWER_BYTE_REGS);
      else
	ax_reg_mask (ax, gpnum);
      return 0;
    }
  else if (i386_dword_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->eax_regnum;

      ax_reg_mask (ax, gpnum);
      return 0;
    }
  else
    return i386_ax_pseudo_register_collect (gdbarch, ax, regnum);
}

/* Register classes as defined in the psABI.  */

enum amd64_reg_class
{
  AMD64_INTEGER,
  AMD64_SSE,
  AMD64_SSEUP,
  AMD64_X87,
  AMD64_X87UP,
  AMD64_COMPLEX_X87,
  AMD64_NO_CLASS,
  AMD64_MEMORY
};

/* Return the union class of CLASS1 and CLASS2.  See the psABI for
   details.  */

static enum amd64_reg_class
amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2)
{
  /* Rule (a): If both classes are equal, this is the resulting class.  */
  if (class1 == class2)
    return class1;

  /* Rule (b): If one of the classes is NO_CLASS, the resulting class
     is the other class.  */
  if (class1 == AMD64_NO_CLASS)
    return class2;
  if (class2 == AMD64_NO_CLASS)
    return class1;

  /* Rule (c): If one of the classes is MEMORY, the result is MEMORY.  */
  if (class1 == AMD64_MEMORY || class2 == AMD64_MEMORY)
    return AMD64_MEMORY;

  /* Rule (d): If one of the classes is INTEGER, the result is INTEGER.  */
  if (class1 == AMD64_INTEGER || class2 == AMD64_INTEGER)
    return AMD64_INTEGER;

  /* Rule (e): If one of the classes is X87, X87UP, COMPLEX_X87 class,
     MEMORY is used as class.  */
  if (class1 == AMD64_X87 || class1 == AMD64_X87UP
      || class1 == AMD64_COMPLEX_X87 || class2 == AMD64_X87
      || class2 == AMD64_X87UP || class2 == AMD64_COMPLEX_X87)
    return AMD64_MEMORY;

  /* Rule (f): Otherwise class SSE is used.  */
  return AMD64_SSE;
}

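/* Illustrative applications of the rules above:

     amd64_merge_classes (AMD64_NO_CLASS, AMD64_SSE)  => AMD64_SSE
     amd64_merge_classes (AMD64_INTEGER, AMD64_SSE)   => AMD64_INTEGER
     amd64_merge_classes (AMD64_SSE, AMD64_SSEUP)     => AMD64_SSE

   The second line is why a union of a long and a double is passed in
   an integer register: INTEGER wins the merge of the two field
   classes.  */
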
static void amd64_classify (struct type *type, enum amd64_reg_class theclass[2]);

/* Return true if TYPE is a structure or union with unaligned fields.  */

static bool
amd64_has_unaligned_fields (struct type *type)
{
  if (type->code () == TYPE_CODE_STRUCT
      || type->code () == TYPE_CODE_UNION)
    {
      for (int i = 0; i < type->num_fields (); i++)
	{
	  struct type *subtype = check_typedef (type->field (i).type ());

	  /* Ignore static fields, empty fields (for example nested
	     empty structures), and bitfields (these are handled by
	     the caller).  */
	  if (type->field (i).is_static ()
	      || (type->field (i).bitsize () == 0
		  && subtype->length () == 0)
	      || type->field (i).is_packed ())
	    continue;

	  int bitpos = type->field (i).loc_bitpos ();

	  if (bitpos % 8 != 0)
	    return true;

	  int align = type_align (subtype);
	  if (align == 0)
	    error (_("could not determine alignment of type"));

	  int bytepos = bitpos / 8;
	  if (bytepos % align != 0)
	    return true;

	  if (amd64_has_unaligned_fields (subtype))
	    return true;
	}
    }

  return false;
}

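/* For instance, given the (hypothetical) type

     struct __attribute__((packed)) { char c; int i; };

   field "i" lives at byte offset 1 but requires 4-byte alignment, so
   this function returns true and amd64_classify_aggregate below puts
   the whole struct in class MEMORY, i.e. it is passed on the
   stack.  */
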
/* Classify field I of TYPE starting at BITOFFSET according to the rules for
   structures and union types, and store the result in THECLASS.  */

static void
amd64_classify_aggregate_field (struct type *type, int i,
				enum amd64_reg_class theclass[2],
				unsigned int bitoffset)
{
  struct type *subtype = check_typedef (type->field (i).type ());
  enum amd64_reg_class subclass[2];
  int bitsize = type->field (i).bitsize ();

  if (bitsize == 0)
    bitsize = subtype->length () * 8;

  /* Ignore static fields, or empty fields, for example nested
     empty structures.  */
  if (type->field (i).is_static () || bitsize == 0)
    return;

  int bitpos = bitoffset + type->field (i).loc_bitpos ();
  int pos = bitpos / 64;
  int endpos = (bitpos + bitsize - 1) / 64;

  if (subtype->code () == TYPE_CODE_STRUCT
      || subtype->code () == TYPE_CODE_UNION)
    {
      /* Each field of an object is classified recursively.  */
      int j;
      for (j = 0; j < subtype->num_fields (); j++)
	amd64_classify_aggregate_field (subtype, j, theclass, bitpos);
      return;
    }

  gdb_assert (pos == 0 || pos == 1);

  amd64_classify (subtype, subclass);
  theclass[pos] = amd64_merge_classes (theclass[pos], subclass[0]);
  if (bitsize <= 64 && pos == 0 && endpos == 1)
    /* This is a bit of an odd case:  We have a field that would
       normally fit in one of the two eightbytes, except that
       it is placed in a way that this field straddles them.
       This has been seen with a structure containing an array.

       The ABI is a bit unclear in this case, but we assume that
       this field's class (stored in subclass[0]) must also be merged
       into class[1].  In other words, our field has a piece stored
       in the second eight-byte, and thus its class applies to
       the second eight-byte as well.

       In the case where the field length exceeds 8 bytes,
       it should not be necessary to merge the field class
       into class[1].  As LEN > 8, subclass[1] is necessarily
       different from AMD64_NO_CLASS.  If subclass[1] is equal
       to subclass[0], then the normal class[1]/subclass[1]
       merging will take care of everything.  For subclass[1]
       to be different from subclass[0], I can only see the case
       where we have a SSE/SSEUP or X87/X87UP pair, which both
       use up all 16 bytes of the aggregate, and are already
       handled just fine (because each portion sits on its own
       8-byte).  */
    theclass[1] = amd64_merge_classes (theclass[1], subclass[0]);
  if (pos == 0)
    theclass[1] = amd64_merge_classes (theclass[1], subclass[1]);
}

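/* The straddling case above can be triggered by, for example,

     struct { int a; int b[2]; };

   Field "b" is 8 bytes long but starts at byte offset 4, so it
   occupies bytes 4-11: POS is 0 while ENDPOS is 1.  Its INTEGER
   class is therefore merged into both eightbytes, and the whole
   struct classifies as {INTEGER, INTEGER}.  */
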
/* Classify TYPE according to the rules for aggregate (structures and
   arrays) and union types, and store the result in CLASS.  */

static void
amd64_classify_aggregate (struct type *type, enum amd64_reg_class theclass[2])
{
  /* 1. If the size of an object is larger than two times eight bytes, or
     it is a non-trivial C++ object, or it has unaligned fields, then it
     has class memory.

     It is important that the trivially_copyable check is before the
     unaligned fields check, as C++ classes with virtual base classes
     will have fields (for the virtual base classes) with non-constant
     loc_bitpos attributes, which will cause an assert to trigger within
     the unaligned field check.  As classes with virtual bases are not
     trivially copyable, checking that first avoids this problem.  */
  if (TYPE_HAS_DYNAMIC_LENGTH (type)
      || type->length () > 16
      || !language_pass_by_reference (type).trivially_copyable
      || amd64_has_unaligned_fields (type))
    {
      theclass[0] = theclass[1] = AMD64_MEMORY;
      return;
    }

  /* 2. Both eightbytes get initialized to class NO_CLASS.  */
  theclass[0] = theclass[1] = AMD64_NO_CLASS;

  /* 3. Each field of an object is classified recursively so that
     always two fields are considered.  The resulting class is
     calculated according to the classes of the fields in the
     eightbyte: */

  if (type->code () == TYPE_CODE_ARRAY)
    {
      struct type *subtype = check_typedef (type->target_type ());

      /* All fields in an array have the same type.  */
      amd64_classify (subtype, theclass);
      if (type->length () > 8 && theclass[1] == AMD64_NO_CLASS)
	theclass[1] = theclass[0];
    }
  else
    {
      int i;

      /* Structure or union.  */
      gdb_assert (type->code () == TYPE_CODE_STRUCT
		  || type->code () == TYPE_CODE_UNION);

      for (i = 0; i < type->num_fields (); i++)
	amd64_classify_aggregate_field (type, i, theclass, 0);
    }

  /* 4. Then a post merger cleanup is done:  */

  /* Rule (a): If one of the classes is MEMORY, the whole argument is
     passed in memory.  */
  if (theclass[0] == AMD64_MEMORY || theclass[1] == AMD64_MEMORY)
    theclass[0] = theclass[1] = AMD64_MEMORY;

  /* Rule (b): If SSEUP is not preceded by SSE, it is converted to
     SSE.  */
  if (theclass[0] == AMD64_SSEUP)
    theclass[0] = AMD64_SSE;
  if (theclass[1] == AMD64_SSEUP && theclass[0] != AMD64_SSE)
    theclass[1] = AMD64_SSE;
}

/* Classify TYPE, and store the result in CLASS.  */

static void
amd64_classify (struct type *type, enum amd64_reg_class theclass[2])
{
  enum type_code code = type->code ();
  int len = type->length ();

  theclass[0] = theclass[1] = AMD64_NO_CLASS;

  /* Arguments of types (signed and unsigned) _Bool, char, short, int,
     long, long long, and pointers are in the INTEGER class.  Similarly,
     range types, used by languages such as Ada, are also in the INTEGER
     class.  */
  if ((code == TYPE_CODE_INT || code == TYPE_CODE_ENUM
       || code == TYPE_CODE_BOOL || code == TYPE_CODE_RANGE
       || code == TYPE_CODE_CHAR
       || code == TYPE_CODE_PTR || TYPE_IS_REFERENCE (type))
      && (len == 1 || len == 2 || len == 4 || len == 8))
    theclass[0] = AMD64_INTEGER;

  /* Arguments of types _Float16, float, double, _Decimal32, _Decimal64 and
     __m64 are in class SSE.  */
  else if ((code == TYPE_CODE_FLT || code == TYPE_CODE_DECFLOAT)
	   && (len == 2 || len == 4 || len == 8))
    /* FIXME: __m64.  */
    theclass[0] = AMD64_SSE;

  /* Arguments of types __float128, _Decimal128 and __m128 are split into
     two halves.  The least significant ones belong to class SSE, the most
     significant one to class SSEUP.  */
  else if (code == TYPE_CODE_DECFLOAT && len == 16)
    /* FIXME: __float128, __m128.  */
    theclass[0] = AMD64_SSE, theclass[1] = AMD64_SSEUP;

  /* The 64-bit mantissa of arguments of type long double belongs to
     class X87, the 16-bit exponent plus 6 bytes of padding belongs to
     class X87UP.  */
  else if (code == TYPE_CODE_FLT && len == 16)
    /* Class X87 and X87UP.  */
    theclass[0] = AMD64_X87, theclass[1] = AMD64_X87UP;

  /* Arguments of complex T - where T is one of the types _Float16, float or
     double - get treated as if they are implemented as:

     struct complexT {
       T real;
       T imag;
     };

  */
  else if (code == TYPE_CODE_COMPLEX && (len == 8 || len == 4))
    theclass[0] = AMD64_SSE;
  else if (code == TYPE_CODE_COMPLEX && len == 16)
    theclass[0] = theclass[1] = AMD64_SSE;

  /* A variable of type complex long double is classified as type
     COMPLEX_X87.  */
  else if (code == TYPE_CODE_COMPLEX && len == 32)
    theclass[0] = AMD64_COMPLEX_X87;

  /* Aggregates.  */
  else if (code == TYPE_CODE_ARRAY || code == TYPE_CODE_STRUCT
	   || code == TYPE_CODE_UNION)
    amd64_classify_aggregate (type, theclass);
}

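/* A few illustrative classifications resulting from the above:

     int                           => {INTEGER, NO_CLASS}
     double                        => {SSE, NO_CLASS}
     long double                   => {X87, X87UP}
     _Complex double               => {SSE, SSE}
     struct { long l; double d; }  => {INTEGER, SSE}  */
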
static enum return_value_convention
amd64_return_value (struct gdbarch *gdbarch, struct value *function,
		    struct type *type, struct regcache *regcache,
		    struct value **read_value, const gdb_byte *writebuf)
{
  enum amd64_reg_class theclass[2];
  int len = type->length ();
  static int integer_regnum[] = { AMD64_RAX_REGNUM, AMD64_RDX_REGNUM };
  static int sse_regnum[] = { AMD64_XMM0_REGNUM, AMD64_XMM1_REGNUM };
  int integer_reg = 0;
  int sse_reg = 0;
  int i;

  gdb_assert (!(read_value && writebuf));

  /* 1. Classify the return type with the classification algorithm.  */
  amd64_classify (type, theclass);

  /* 2. If the type has class MEMORY, then the caller provides space
     for the return value and passes the address of this storage in
     %rdi as if it were the first argument to the function.  In effect,
     this address becomes a hidden first argument.

     On return %rax will contain the address that has been passed in
     by the caller in %rdi.  */
  if (theclass[0] == AMD64_MEMORY)
    {
      /* As indicated by the comment above, the ABI guarantees that we
	 can always find the return value just after the function has
	 returned.  */

      if (read_value != nullptr)
	{
	  ULONGEST addr;

	  regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
	  *read_value = value_at_non_lval (type, addr);
	}

      return RETURN_VALUE_ABI_RETURNS_ADDRESS;
    }

  gdb_byte *readbuf = nullptr;
  if (read_value != nullptr)
    {
      *read_value = value::allocate (type);
      readbuf = (*read_value)->contents_raw ().data ();
    }

  /* 8. If the class is COMPLEX_X87, the real part of the value is
     returned in %st0 and the imaginary part in %st1.  */
  if (theclass[0] == AMD64_COMPLEX_X87)
    {
      if (readbuf)
	{
	  regcache->raw_read (AMD64_ST0_REGNUM, readbuf);
	  regcache->raw_read (AMD64_ST1_REGNUM, readbuf + 16);
	}

      if (writebuf)
	{
	  i387_return_value (gdbarch, regcache);
	  regcache->raw_write (AMD64_ST0_REGNUM, writebuf);
	  regcache->raw_write (AMD64_ST1_REGNUM, writebuf + 16);

	  /* Fix up the tag word such that both %st(0) and %st(1) are
	     marked as valid.  */
	  regcache_raw_write_unsigned (regcache, AMD64_FTAG_REGNUM, 0xfff);
	}

      return RETURN_VALUE_REGISTER_CONVENTION;
    }

  gdb_assert (theclass[1] != AMD64_MEMORY);
  gdb_assert (len <= 16);

  for (i = 0; len > 0; i++, len -= 8)
    {
      int regnum = -1;
      int offset = 0;

      switch (theclass[i])
	{
	case AMD64_INTEGER:
	  /* 3. If the class is INTEGER, the next available register
	     of the sequence %rax, %rdx is used.  */
	  regnum = integer_regnum[integer_reg++];
	  break;

	case AMD64_SSE:
	  /* 4. If the class is SSE, the next available SSE register
	     of the sequence %xmm0, %xmm1 is used.  */
	  regnum = sse_regnum[sse_reg++];
	  break;

	case AMD64_SSEUP:
	  /* 5. If the class is SSEUP, the eightbyte is passed in the
	     upper half of the last used SSE register.  */
	  gdb_assert (sse_reg > 0);
	  regnum = sse_regnum[sse_reg - 1];
	  offset = 8;
	  break;

	case AMD64_X87:
	  /* 6. If the class is X87, the value is returned on the X87
	     stack in %st0 as 80-bit x87 number.  */
	  regnum = AMD64_ST0_REGNUM;
	  if (writebuf)
	    i387_return_value (gdbarch, regcache);
	  break;

	case AMD64_X87UP:
	  /* 7. If the class is X87UP, the value is returned together
	     with the previous X87 value in %st0.  */
	  gdb_assert (i > 0 && theclass[0] == AMD64_X87);
	  regnum = AMD64_ST0_REGNUM;
	  offset = 8;
	  len = 2;
	  break;

	case AMD64_NO_CLASS:
	  continue;

	default:
	  gdb_assert (!"Unexpected register class.");
	}

      gdb_assert (regnum != -1);

      if (readbuf)
	regcache->raw_read_part (regnum, offset, std::min (len, 8),
				 readbuf + i * 8);
      if (writebuf)
	regcache->raw_write_part (regnum, offset, std::min (len, 8),
				  writebuf + i * 8);
    }

  return RETURN_VALUE_REGISTER_CONVENTION;
}

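/* For example, a function returning the 16-byte

     struct { long l; double d; };

   classifies as {INTEGER, SSE}, so the loop above reads "l" from
   %rax and "d" from %xmm0, following the numbered psABI rules cited
   inline.  */
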
static CORE_ADDR
amd64_push_arguments (struct regcache *regcache, int nargs, struct value **args,
		      CORE_ADDR sp, function_call_return_method return_method)
{
  static int integer_regnum[] =
  {
    AMD64_RDI_REGNUM,  /* %rdi */
    AMD64_RSI_REGNUM,  /* %rsi */
    AMD64_RDX_REGNUM,  /* %rdx */
    AMD64_RCX_REGNUM,  /* %rcx */
    AMD64_R8_REGNUM,   /* %r8 */
    AMD64_R9_REGNUM    /* %r9 */
  };
  static int sse_regnum[] =
  {
    /* %xmm0 ... %xmm7 */
    AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
    AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
    AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
    AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
  };
  struct value **stack_args = XALLOCAVEC (struct value *, nargs);
  int num_stack_args = 0;
  int num_elements = 0;
  int element = 0;
  int integer_reg = 0;
  int sse_reg = 0;
  int i;

  /* Reserve a register for the "hidden" argument.  */
  if (return_method == return_method_struct)
    integer_reg++;

  for (i = 0; i < nargs; i++)
    {
      struct type *type = args[i]->type ();
      int len = type->length ();
      enum amd64_reg_class theclass[2];
      int needed_integer_regs = 0;
      int needed_sse_regs = 0;
      int j;

      /* Classify argument.  */
      amd64_classify (type, theclass);

      /* Calculate the number of integer and SSE registers needed for
	 this argument.  */
      for (j = 0; j < 2; j++)
	{
	  if (theclass[j] == AMD64_INTEGER)
	    needed_integer_regs++;
	  else if (theclass[j] == AMD64_SSE)
	    needed_sse_regs++;
	}

      /* Check whether enough registers are available, and if the
	 argument should be passed in registers at all.  */
      if (integer_reg + needed_integer_regs > ARRAY_SIZE (integer_regnum)
	  || sse_reg + needed_sse_regs > ARRAY_SIZE (sse_regnum)
	  || (needed_integer_regs == 0 && needed_sse_regs == 0))
	{
	  /* The argument will be passed on the stack.  */
	  num_elements += ((len + 7) / 8);
	  stack_args[num_stack_args++] = args[i];
	}
      else
	{
	  /* The argument will be passed in registers.  */
	  const gdb_byte *valbuf = args[i]->contents ().data ();
	  gdb_byte buf[8];

	  gdb_assert (len <= 16);

	  for (j = 0; len > 0; j++, len -= 8)
	    {
	      int regnum = -1;
	      int offset = 0;

	      switch (theclass[j])
		{
		case AMD64_INTEGER:
		  regnum = integer_regnum[integer_reg++];
		  break;

		case AMD64_SSE:
		  regnum = sse_regnum[sse_reg++];
		  break;

		case AMD64_SSEUP:
		  gdb_assert (sse_reg > 0);
		  regnum = sse_regnum[sse_reg - 1];
		  offset = 8;
		  break;

		case AMD64_NO_CLASS:
		  continue;

		default:
		  gdb_assert (!"Unexpected register class.");
		}

	      gdb_assert (regnum != -1);
	      memset (buf, 0, sizeof buf);
	      memcpy (buf, valbuf + j * 8, std::min (len, 8));
	      regcache->raw_write_part (regnum, offset, 8, buf);
	    }
	}
    }

  /* Allocate space for the arguments on the stack.  */
  sp -= num_elements * 8;

  /* The psABI says that "The end of the input argument area shall be
     aligned on a 16 byte boundary."  */
  sp &= ~0xf;

  /* Write out the arguments to the stack.  */
  for (i = 0; i < num_stack_args; i++)
    {
      struct type *type = stack_args[i]->type ();
      const gdb_byte *valbuf = stack_args[i]->contents ().data ();
      int len = type->length ();

      write_memory (sp + element * 8, valbuf, len);
      element += ((len + 7) / 8);
    }

  /* The psABI says that "For calls that may call functions that use
     varargs or stdargs (prototype-less calls or calls to functions
     containing ellipsis (...) in the declaration) %al is used as
     hidden argument to specify the number of SSE registers used."  */
  regcache_raw_write_unsigned (regcache, AMD64_RAX_REGNUM, sse_reg);
  return sp;
}

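/* For example, for a call to the (hypothetical)

     struct s24 { char c[24]; };
     void f (long a, double b, struct s24 s);

   "a" goes in %rdi, "b" in %xmm0, and "s" (three eightbytes, class
   MEMORY) is copied to the stack; %al is set to 1 because one SSE
   register was used.  */
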
static CORE_ADDR
amd64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
		       struct regcache *regcache, CORE_ADDR bp_addr,
		       int nargs, struct value **args, CORE_ADDR sp,
		       function_call_return_method return_method,
		       CORE_ADDR struct_addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  gdb_byte buf[8];

  /* The BND registers can contain arbitrary values at the moment of
     the inferior call.  This can cause boundary violations that are
     not due to a real bug or even desired by the user.  The best we
     can do is reset the BND registers to the INIT state (allowing
     access to the whole memory) before pushing the inferior call.  */
  i387_reset_bnd_regs (gdbarch, regcache);

  /* Pass arguments.  */
  sp = amd64_push_arguments (regcache, nargs, args, sp, return_method);

  /* Pass "hidden" argument.  */
  if (return_method == return_method_struct)
    {
      store_unsigned_integer (buf, 8, byte_order, struct_addr);
      regcache->cooked_write (AMD64_RDI_REGNUM, buf);
    }

  /* Store return address.  */
  sp -= 8;
  store_unsigned_integer (buf, 8, byte_order, bp_addr);
  write_memory (sp, buf, 8);

  /* Finally, update the stack pointer...  */
  store_unsigned_integer (buf, 8, byte_order, sp);
  regcache->cooked_write (AMD64_RSP_REGNUM, buf);

  /* ...and fake a frame pointer.  */
  regcache->cooked_write (AMD64_RBP_REGNUM, buf);

  return sp + 16;
}

/* Displaced instruction handling.  */

/* A partially decoded instruction.
   This contains enough details for displaced stepping purposes.  */

struct amd64_insn
{
  /* The number of opcode bytes.  */
  int opcode_len;
  /* The offset of the REX/VEX instruction encoding prefix or -1 if
     not present.  */
  int enc_prefix_offset;
  /* The offset to the first opcode byte.  */
  int opcode_offset;
  /* The offset to the modrm byte or -1 if not present.  */
  int modrm_offset;

  /* The raw instruction.  */
  gdb_byte *raw_insn;
};

struct amd64_displaced_step_copy_insn_closure
  : public displaced_step_copy_insn_closure
{
  amd64_displaced_step_copy_insn_closure (int insn_buf_len)
    : insn_buf (insn_buf_len, 0)
  {
  }

  /* For rip-relative insns, saved copy of the reg we use instead of %rip.  */
  int tmp_used = 0;
  int tmp_regno;
  ULONGEST tmp_save;

  /* Details of the instruction.  */
  struct amd64_insn insn_details;

  /* The possibly modified insn.  */
  gdb::byte_vector insn_buf;
};

/* WARNING: Keep onebyte_has_modrm, twobyte_has_modrm in sync with
   ../opcodes/i386-dis.c (until libopcodes exports them, or an alternative,
   at which point delete these in favor of libopcodes' versions).  */

static const unsigned char onebyte_has_modrm[256] = {
  /*       0 1 2 3 4 5 6 7 8 9 a b c d e f        */
  /*       -------------------------------        */
  /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
  /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
  /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
  /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
  /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
  /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
  /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
  /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
  /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
  /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
  /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
  /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
  /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
  /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
  /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
  /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1  /* f0 */
  /*       -------------------------------        */
  /*       0 1 2 3 4 5 6 7 8 9 a b c d e f        */
};

static const unsigned char twobyte_has_modrm[256] = {
  /*       0 1 2 3 4 5 6 7 8 9 a b c d e f        */
  /*       -------------------------------        */
  /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
  /* 10 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 1f */
  /* 20 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 2f */
  /* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */
  /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
  /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
  /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
  /* 70 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 7f */
  /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
  /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
  /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
  /* b0 */ 1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1, /* bf */
  /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
  /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
  /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
  /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0  /* ff */
  /*       -------------------------------        */
  /*       0 1 2 3 4 5 6 7 8 9 a b c d e f        */
};

static int amd64_syscall_p (const struct amd64_insn *insn, int *lengthp);

static int
rex_prefix_p (gdb_byte pfx)
{
  return REX_PREFIX_P (pfx);
}

/* True if PFX is the start of the 2-byte VEX prefix.  */

static bool
vex2_prefix_p (gdb_byte pfx)
{
  return pfx == 0xc5;
}

/* True if PFX is the start of the 3-byte VEX prefix.  */

static bool
vex3_prefix_p (gdb_byte pfx)
{
  return pfx == 0xc4;
}

/* Skip the legacy instruction prefixes in INSN.
   We assume INSN is properly sentineled so we don't have to worry
   about falling off the end of the buffer.  */

static gdb_byte *
amd64_skip_prefixes (gdb_byte *insn)
{
  while (1)
    {
      switch (*insn)
	{
	case DATA_PREFIX_OPCODE:
	case ADDR_PREFIX_OPCODE:
	case CS_PREFIX_OPCODE:
	case DS_PREFIX_OPCODE:
	case ES_PREFIX_OPCODE:
	case FS_PREFIX_OPCODE:
	case GS_PREFIX_OPCODE:
	case SS_PREFIX_OPCODE:
	case LOCK_PREFIX_OPCODE:
	case REPE_PREFIX_OPCODE:
	case REPNE_PREFIX_OPCODE:
	  ++insn;
	  continue;
	default:
	  break;
	}
      break;
    }

  return insn;
}

/* Return an integer register (other than RSP) that is unused as an input
   operand in INSN.
   In order to not require adding a rex prefix if the insn doesn't already
   have one, the result is restricted to RAX ... RDI, sans RSP.
   The register numbering of the result follows architecture ordering,
   e.g. RDI = 7.  */

static int
amd64_get_unused_input_int_reg (const struct amd64_insn *details)
{
  /* 1 bit for each reg */
  int used_regs_mask = 0;

  /* There can be at most 3 int regs used as inputs in an insn, and we have
     7 to choose from (RAX ... RDI, sans RSP).
     This allows us to take a conservative approach and keep things simple.
     E.g. by avoiding RAX, we don't have to specifically watch for opcodes
     that implicitly specify RAX.  */

  /* Avoid RAX.  */
  used_regs_mask |= 1 << EAX_REG_NUM;
  /* Similarly avoid RDX, implicit operand in divides.  */
  used_regs_mask |= 1 << EDX_REG_NUM;
  /* Avoid RSP.  */
  used_regs_mask |= 1 << ESP_REG_NUM;

  /* If the opcode is one byte long and there's no ModRM byte,
     assume the opcode specifies a register.  */
  if (details->opcode_len == 1 && details->modrm_offset == -1)
    used_regs_mask |= 1 << (details->raw_insn[details->opcode_offset] & 7);

  /* Mark used regs in the modrm/sib bytes.  */
  if (details->modrm_offset != -1)
    {
      int modrm = details->raw_insn[details->modrm_offset];
      int mod = MODRM_MOD_FIELD (modrm);
      int reg = MODRM_REG_FIELD (modrm);
      int rm = MODRM_RM_FIELD (modrm);
      int have_sib = mod != 3 && rm == 4;

      /* Assume the reg field of the modrm byte specifies a register.  */
      used_regs_mask |= 1 << reg;

      if (have_sib)
	{
	  int base = SIB_BASE_FIELD (details->raw_insn[details->modrm_offset + 1]);
	  int idx = SIB_INDEX_FIELD (details->raw_insn[details->modrm_offset + 1]);
	  used_regs_mask |= 1 << base;
	  used_regs_mask |= 1 << idx;
	}
      else
	{
	  used_regs_mask |= 1 << rm;
	}
    }

  gdb_assert (used_regs_mask < 256);
  gdb_assert (used_regs_mask != 255);

  /* Finally, find a free reg.  */
  {
    int i;

    for (i = 0; i < 8; ++i)
      {
	if (! (used_regs_mask & (1 << i)))
	  return i;
      }

    /* We shouldn't get here.  */
    internal_error (_("unable to find free reg"));
  }
}

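/* For example, for "mov (%rsi),%rdi" (48 8b 3e) the ModRM byte marks
   %rdi (reg field) and %rsi (r/m field) as used; %rax, %rdx and %rsp
   are always avoided, so the first free register in architecture
   order is %rcx and the function returns 1.  */
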
/* Extract the details of INSN that we need.  */

static void
amd64_get_insn_details (gdb_byte *insn, struct amd64_insn *details)
{
  gdb_byte *start = insn;
  int need_modrm;

  details->raw_insn = insn;

  details->opcode_len = -1;
  details->enc_prefix_offset = -1;
  details->opcode_offset = -1;
  details->modrm_offset = -1;

  /* Skip legacy instruction prefixes.  */
  insn = amd64_skip_prefixes (insn);

  /* Skip REX/VEX instruction encoding prefixes.  */
  if (rex_prefix_p (*insn))
    {
      details->enc_prefix_offset = insn - start;
      ++insn;
    }
  else if (vex2_prefix_p (*insn))
    {
      /* Don't record the offset in this case because this prefix has
	 no REX.B equivalent.  */
      insn += 2;
    }
  else if (vex3_prefix_p (*insn))
    {
      details->enc_prefix_offset = insn - start;
      insn += 3;
    }

  details->opcode_offset = insn - start;

  if (*insn == TWO_BYTE_OPCODE_ESCAPE)
    {
      /* Two or three-byte opcode.  */
      ++insn;
      need_modrm = twobyte_has_modrm[*insn];

      /* Check for three-byte opcode.  */
      switch (*insn)
	{
	case 0x24:
	case 0x25:
	case 0x38:
	case 0x3a:
	case 0x7a:
	case 0x7b:
	  ++insn;
	  details->opcode_len = 3;
	  break;
	default:
	  details->opcode_len = 2;
	  break;
	}
    }
  else
    {
      /* One-byte opcode.  */
      need_modrm = onebyte_has_modrm[*insn];
      details->opcode_len = 1;
    }

  if (need_modrm)
    {
      ++insn;
      details->modrm_offset = insn - start;
    }
}

/* Update %rip-relative addressing in INSN.

   %rip-relative addressing only uses a 32-bit displacement.
   32 bits is not enough to be guaranteed to cover the distance between where
   the real instruction is and where its copy is.
   Convert the insn to use base+disp addressing.
   We set base = pc + insn_length so we can leave disp unchanged.  */

static void
fixup_riprel (struct gdbarch *gdbarch,
	      amd64_displaced_step_copy_insn_closure *dsc,
	      CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
{
  const struct amd64_insn *insn_details = &dsc->insn_details;
  int modrm_offset = insn_details->modrm_offset;
  CORE_ADDR rip_base;
  int insn_length;
  int arch_tmp_regno, tmp_regno;
  ULONGEST orig_value;

  /* Compute the rip-relative address.  */
  insn_length = gdb_buffered_insn_length (gdbarch, dsc->insn_buf.data (),
					  dsc->insn_buf.size (), from);
  rip_base = from + insn_length;

  /* We need a register to hold the address.
     Pick one not used in the insn.
     NOTE: arch_tmp_regno uses architecture ordering, e.g. RDI = 7.  */
  arch_tmp_regno = amd64_get_unused_input_int_reg (insn_details);
  tmp_regno = amd64_arch_reg_to_regnum (arch_tmp_regno);

  /* Position of the not-B bit in the 3-byte VEX prefix (in byte 1).  */
  static constexpr gdb_byte VEX3_NOT_B = 0x20;

  /* REX.B should be unset (VEX.!B set) as we were using rip-relative
     addressing, but ensure it's unset (set for VEX) anyway, since
     tmp_regno is not r8-r15.  */
  if (insn_details->enc_prefix_offset != -1)
    {
      gdb_byte *pfx = &dsc->insn_buf[insn_details->enc_prefix_offset];
      if (rex_prefix_p (pfx[0]))
	pfx[0] &= ~REX_B;
      else if (vex3_prefix_p (pfx[0]))
	pfx[1] |= VEX3_NOT_B;
      else
	gdb_assert_not_reached ("unhandled prefix");
    }

  regcache_cooked_read_unsigned (regs, tmp_regno, &orig_value);
  dsc->tmp_regno = tmp_regno;
  dsc->tmp_save = orig_value;
  dsc->tmp_used = 1;

  /* Convert the ModRM field to be base+disp.  */
  dsc->insn_buf[modrm_offset] &= ~0xc7;
  dsc->insn_buf[modrm_offset] |= 0x80 + arch_tmp_regno;

  regcache_cooked_write_unsigned (regs, tmp_regno, rip_base);

  displaced_debug_printf ("%%rip-relative addressing used.");
  displaced_debug_printf ("using temp reg %d, old value %s, new value %s",
			  dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save),
			  paddress (gdbarch, rip_base));
}

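/* Worked example: displaced-stepping "mov 0x10(%rip),%eax"
   (8b 05 10 00 00 00) copied from FROM.  The insn is 6 bytes, so the
   rip-relative target is FROM + 6 + 0x10.  With %rcx picked as the
   scratch register, the ModRM byte 0x05 (mod=00, r/m=101, i.e.
   disp32(%rip)) is rewritten to 0x81 (mod=10, r/m=001, i.e.
   disp32(%rcx)), %rcx is set to FROM + 6, and the displacement 0x10
   is left unchanged; %rcx is restored from tmp_save after the
   step.  */
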
static void
fixup_displaced_copy (struct gdbarch *gdbarch,
		      amd64_displaced_step_copy_insn_closure *dsc,
		      CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
{
  const struct amd64_insn *details = &dsc->insn_details;

  if (details->modrm_offset != -1)
    {
      gdb_byte modrm = details->raw_insn[details->modrm_offset];

      if ((modrm & 0xc7) == 0x05)
	{
	  /* The insn uses rip-relative addressing.
	     Deal with it.  */
	  fixup_riprel (gdbarch, dsc, from, to, regs);
	}
    }
}

displaced_step_copy_insn_closure_up
amd64_displaced_step_copy_insn (struct gdbarch *gdbarch,
				CORE_ADDR from, CORE_ADDR to,
				struct regcache *regs)
{
  int len = gdbarch_max_insn_length (gdbarch);
  /* Extra space for sentinels so fixup_{riprel,displaced_copy} don't have to
     continually watch for running off the end of the buffer.  */
  int fixup_sentinel_space = len;
  std::unique_ptr<amd64_displaced_step_copy_insn_closure> dsc
    (new amd64_displaced_step_copy_insn_closure (len + fixup_sentinel_space));
  gdb_byte *buf = &dsc->insn_buf[0];
  struct amd64_insn *details = &dsc->insn_details;

  read_memory (from, buf, len);

  /* Set up the sentinel space so we don't have to worry about running
     off the end of the buffer.  An excessive number of leading prefixes
     could otherwise cause this.  */
  memset (buf + len, 0, fixup_sentinel_space);

  amd64_get_insn_details (buf, details);

  /* GDB may get control back after the insn after the syscall.
     Presumably this is a kernel bug.
     If this is a syscall, make sure there's a nop afterwards.  */
  {
    int syscall_length;

    if (amd64_syscall_p (details, &syscall_length))
      buf[details->opcode_offset + syscall_length] = NOP_OPCODE;
  }

  /* Modify the insn to cope with the address where it will be executed from.
     In particular, handle any rip-relative addressing.  */
  fixup_displaced_copy (gdbarch, dsc.get (), from, to, regs);

  write_memory (to, buf, len);

  displaced_debug_printf ("copy %s->%s: %s",
			  paddress (gdbarch, from), paddress (gdbarch, to),
			  bytes_to_string (buf, len).c_str ());

  /* This is a work around for a problem with g++ 4.8.  */
  return displaced_step_copy_insn_closure_up (dsc.release ());
}

static int
amd64_absolute_jmp_p (const struct amd64_insn *details)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (insn[0] == 0xff)
    {
      /* jump near, absolute indirect (/4) */
      if ((insn[1] & 0x38) == 0x20)
	return 1;

      /* jump far, absolute indirect (/5) */
      if ((insn[1] & 0x38) == 0x28)
	return 1;
    }

  return 0;
}

/* Return non-zero if the instruction DETAILS is a jump, zero otherwise.  */

static int
amd64_jmp_p (const struct amd64_insn *details)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  /* jump short, relative.  */
  if (insn[0] == 0xeb)
    return 1;

  /* jump near, relative.  */
  if (insn[0] == 0xe9)
    return 1;

  return amd64_absolute_jmp_p (details);
}

static int
amd64_absolute_call_p (const struct amd64_insn *details)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (insn[0] == 0xff)
    {
      /* Call near, absolute indirect (/2) */
      if ((insn[1] & 0x38) == 0x10)
	return 1;

      /* Call far, absolute indirect (/3) */
      if ((insn[1] & 0x38) == 0x18)
	return 1;
    }

  return 0;
}

static int
amd64_ret_p (const struct amd64_insn *details)
{
  /* NOTE: gcc can emit "repz ; ret".  */
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  switch (insn[0])
    {
    case 0xc2: /* ret near, pop N bytes */
    case 0xc3: /* ret near */
    case 0xca: /* ret far, pop N bytes */
    case 0xcb: /* ret far */
    case 0xcf: /* iret */
      return 1;

    default:
      return 0;
    }
}

static int
amd64_call_p (const struct amd64_insn *details)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (amd64_absolute_call_p (details))
    return 1;

  /* call near, relative */
  if (insn[0] == 0xe8)
    return 1;

  return 0;
}

/* Return non-zero if INSN is a system call, and set *LENGTHP to its
   length in bytes.  Otherwise, return zero.  */

static int
amd64_syscall_p (const struct amd64_insn *details, int *lengthp)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (insn[0] == 0x0f && insn[1] == 0x05)
    {
      *lengthp = 2;
      return 1;
    }

  return 0;
}

/* Classify the instruction at ADDR using PRED.
   Throw an error if the memory can't be read.  */

static int
amd64_classify_insn_at (struct gdbarch *gdbarch, CORE_ADDR addr,
			int (*pred) (const struct amd64_insn *))
{
  struct amd64_insn details;

  gdb::byte_vector buf (gdbarch_max_insn_length (gdbarch));

  read_code (addr, buf.data (), buf.size ());
  amd64_get_insn_details (buf.data (), &details);

  int classification = pred (&details);

  return classification;
}

/* The gdbarch insn_is_call method.  */

static int
amd64_insn_is_call (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  return amd64_classify_insn_at (gdbarch, addr, amd64_call_p);
}

/* The gdbarch insn_is_ret method.  */

static int
amd64_insn_is_ret (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  return amd64_classify_insn_at (gdbarch, addr, amd64_ret_p);
}

/* The gdbarch insn_is_jump method.  */

static int
amd64_insn_is_jump (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  return amd64_classify_insn_at (gdbarch, addr, amd64_jmp_p);
}

/* Fix up the state of registers and memory after having single-stepped
   a displaced instruction.  */

void
amd64_displaced_step_fixup (struct gdbarch *gdbarch,
			    struct displaced_step_copy_insn_closure *dsc_,
			    CORE_ADDR from, CORE_ADDR to,
			    struct regcache *regs, bool completed_p)
{
  amd64_displaced_step_copy_insn_closure *dsc
    = (amd64_displaced_step_copy_insn_closure *) dsc_;
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  /* The offset we applied to the instruction's address.  */
  ULONGEST insn_offset = to - from;
  gdb_byte *insn = dsc->insn_buf.data ();
  const struct amd64_insn *insn_details = &dsc->insn_details;

  displaced_debug_printf ("fixup (%s, %s), insn = 0x%02x 0x%02x ...",
			  paddress (gdbarch, from), paddress (gdbarch, to),
			  insn[0], insn[1]);

  /* If we used a tmp reg, restore it.  */

  if (dsc->tmp_used)
    {
      displaced_debug_printf ("restoring reg %d to %s",
			      dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save));
      regcache_cooked_write_unsigned (regs, dsc->tmp_regno, dsc->tmp_save);
    }

  /* The list of issues to contend with here is taken from
     resume_execution in arch/x86/kernel/kprobes.c, Linux 2.6.28.
     Yay for Free Software!  */

  /* Relocate the %rip back to the program's instruction stream,
     if necessary.  */

  /* Except in the case of absolute or indirect jump or call
     instructions, or a return instruction, the new rip is relative to
     the displaced instruction; make it relative to the original insn.
     Well, signal handler returns don't need relocation either, but we use the
     value of %rip to recognize those; see below.  */
  if (!completed_p
      || (!amd64_absolute_jmp_p (insn_details)
	  && !amd64_absolute_call_p (insn_details)
	  && !amd64_ret_p (insn_details)))
    {
      int insn_len;

      CORE_ADDR pc = regcache_read_pc (regs);

      /* A signal trampoline system call changes the %rip, resuming
	 execution of the main program after the signal handler has
	 returned.  That makes them like 'return' instructions; we
	 shouldn't relocate %rip.

	 But most system calls don't, and we do need to relocate %rip.

	 Our heuristic for distinguishing these cases: if stepping
	 over the system call instruction left control directly after
	 the instruction, then we relocate --- control almost certainly
	 doesn't belong in the displaced copy.  Otherwise, we assume
	 the instruction has put control where it belongs, and leave
	 it unrelocated.  Goodness help us if there are PC-relative
	 system calls.  */
      if (amd64_syscall_p (insn_details, &insn_len)
	  /* GDB can get control back after the insn after the syscall.
	     Presumably this is a kernel bug.  Fixup ensures it's a nop, we
	     add one to the length for it.  */
	  && (pc < to || pc > (to + insn_len + 1)))
	displaced_debug_printf ("syscall changed %%rip; not relocating");
      else
	{
	  CORE_ADDR rip = pc - insn_offset;

	  /* If we just stepped over a breakpoint insn, we don't backup
	     the pc on purpose; this is to match behaviour without
	     stepping.  */

	  regcache_write_pc (regs, rip);

	  displaced_debug_printf ("relocated %%rip from %s to %s",
				  paddress (gdbarch, pc),
				  paddress (gdbarch, rip));
	}
    }

  /* If the instruction was PUSHFL, then the TF bit will be set in the
     pushed value, and should be cleared.  We'll leave this for later,
     since GDB already messes up the TF flag when stepping over a
     pushfl.  */

  /* If the instruction was a call, the return address now atop the
     stack is the address following the copied instruction.  We need
     to make it the address following the original instruction.  */
  if (completed_p && amd64_call_p (insn_details))
    {
      ULONGEST rsp;
      ULONGEST retaddr;
      const ULONGEST retaddr_len = 8;

      regcache_cooked_read_unsigned (regs, AMD64_RSP_REGNUM, &rsp);
      retaddr = read_memory_unsigned_integer (rsp, retaddr_len, byte_order);
      retaddr = (retaddr - insn_offset) & 0xffffffffffffffffULL;
      write_memory_unsigned_integer (rsp, retaddr_len, byte_order, retaddr);

      displaced_debug_printf ("relocated return addr at %s to %s",
			      paddress (gdbarch, rsp),
			      paddress (gdbarch, retaddr));
    }
}

/* If the instruction INSN uses RIP-relative addressing, return the
   offset into the raw INSN where the displacement to be adjusted is
   found.  Returns 0 if the instruction doesn't use RIP-relative
   addressing.  */

static int
rip_relative_offset (struct amd64_insn *insn)
{
  if (insn->modrm_offset != -1)
    {
      gdb_byte modrm = insn->raw_insn[insn->modrm_offset];

      if ((modrm & 0xc7) == 0x05)
	{
	  /* The displacement is found right after the ModRM byte.  */
	  return insn->modrm_offset + 1;
	}
    }

  return 0;
}

1786 static void
1787 append_insns (CORE_ADDR *to, ULONGEST len, const gdb_byte *buf)
1789 target_write_memory (*to, buf, len);
1790 *to += len;
1793 static void
1794 amd64_relocate_instruction (struct gdbarch *gdbarch,
1795 CORE_ADDR *to, CORE_ADDR oldloc)
1797 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1798 int len = gdbarch_max_insn_length (gdbarch);
1799 /* Extra space for sentinels. */
1800 int fixup_sentinel_space = len;
1801 gdb::byte_vector buf (len + fixup_sentinel_space);
1802 struct amd64_insn insn_details;
1803 int offset = 0;
1804 LONGEST rel32, newrel;
1805 gdb_byte *insn;
1806 int insn_length;
1808 read_memory (oldloc, buf.data (), len);
1810 /* Set up the sentinel space so we don't have to worry about running
1811 off the end of the buffer. An excessive number of leading prefixes
1812 could otherwise cause this. */
1813 memset (buf.data () + len, 0, fixup_sentinel_space);
1815 insn = buf.data ();
1816 amd64_get_insn_details (insn, &insn_details);
1818 insn_length = gdb_buffered_insn_length (gdbarch, insn, len, oldloc);
1820 /* Skip legacy instruction prefixes. */
1821 insn = amd64_skip_prefixes (insn);
1823 /* Adjust calls with 32-bit relative addresses as push/jump, with
1824 the address pushed being the location where the original call in
1825 the user program would return to. */
1826 if (insn[0] == 0xe8)
1828 gdb_byte push_buf[32];
1829 CORE_ADDR ret_addr;
1830 int i = 0;
1832 /* Where "ret" in the original code will return to. */
1833 ret_addr = oldloc + insn_length;
1835 /* If pushing an address higher than or equal to 0x80000000,
1836 avoid 'pushq', as that sign extends its 32-bit operand, which
1837 would be incorrect. */
1838 if (ret_addr <= 0x7fffffff)
1840 push_buf[0] = 0x68; /* pushq $... */
1841 store_unsigned_integer (&push_buf[1], 4, byte_order, ret_addr);
1842 i = 5;
1844 else
1846 push_buf[i++] = 0x48; /* sub $0x8,%rsp */
1847 push_buf[i++] = 0x83;
1848 push_buf[i++] = 0xec;
1849 push_buf[i++] = 0x08;
1851 push_buf[i++] = 0xc7; /* movl $imm,(%rsp) */
1852 push_buf[i++] = 0x04;
1853 push_buf[i++] = 0x24;
1854 store_unsigned_integer (&push_buf[i], 4, byte_order,
1855 ret_addr & 0xffffffff);
1856 i += 4;
1858 push_buf[i++] = 0xc7; /* movl $imm,4(%rsp) */
1859 push_buf[i++] = 0x44;
1860 push_buf[i++] = 0x24;
1861 push_buf[i++] = 0x04;
1862 store_unsigned_integer (&push_buf[i], 4, byte_order,
1863 ret_addr >> 32);
1864 i += 4;
1866 gdb_assert (i <= sizeof (push_buf));
1867 /* Push the push. */
1868 append_insns (to, i, push_buf);
1870 /* Convert the relative call to a relative jump. */
1871 insn[0] = 0xe9;
1873 /* Adjust the destination offset. */
1874 rel32 = extract_signed_integer (insn + 1, 4, byte_order);
1875 newrel = (oldloc - *to) + rel32;
1876 store_signed_integer (insn + 1, 4, byte_order, newrel);
1878 displaced_debug_printf ("adjusted insn rel32=%s at %s to rel32=%s at %s",
1879 hex_string (rel32), paddress (gdbarch, oldloc),
1880 hex_string (newrel), paddress (gdbarch, *to));
1882 /* Write the adjusted jump into its displaced location. */
1883 append_insns (to, 5, insn);
1884 return;
1887 offset = rip_relative_offset (&insn_details);
1888 if (!offset)
1890 /* Adjust jumps with 32-bit relative addresses. Calls are
1891 already handled above. */
1892 if (insn[0] == 0xe9)
1893 offset = 1;
1894 /* Adjust conditional jumps. */
1895 else if (insn[0] == 0x0f && (insn[1] & 0xf0) == 0x80)
1896 offset = 2;
1899 if (offset)
1901 rel32 = extract_signed_integer (insn + offset, 4, byte_order);
1902 newrel = (oldloc - *to) + rel32;
1903 store_signed_integer (insn + offset, 4, byte_order, newrel);
1904 displaced_debug_printf ("adjusted insn rel32=%s at %s to rel32=%s at %s",
1905 hex_string (rel32), paddress (gdbarch, oldloc),
1906 hex_string (newrel), paddress (gdbarch, *to));
1909 /* Write the adjusted instruction into its displaced location. */
1910 append_insns (to, insn_length, buf.data ());
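/* A worked instance of the rel32 arithmetic above (illustrative
   numbers): a plain 5-byte "call" at OLDLOC with relative offset R
   targets OLDLOC + 5 + R.  The rewritten "jmp" is emitted at the
   updated *TO, and NEWREL = (OLDLOC - *TO) + R makes its target
   *TO + 5 + NEWREL = OLDLOC + 5 + R, i.e. the original callee, while
   the bytes pushed beforehand hold OLDLOC + INSN_LENGTH, the address
   the original call would have returned to.  */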
1914 /* The maximum number of saved registers. This should include %rip. */
1915 #define AMD64_NUM_SAVED_REGS AMD64_NUM_GREGS
1917 struct amd64_frame_cache
1919 /* Base address. */
1920 CORE_ADDR base;
1921 int base_p;
1922 CORE_ADDR sp_offset;
1923 CORE_ADDR pc;
1925 /* Saved registers. */
1926 CORE_ADDR saved_regs[AMD64_NUM_SAVED_REGS];
1927 CORE_ADDR saved_sp;
1928 int saved_sp_reg;
1930 /* Do we have a frame? */
1931 int frameless_p;
1934 /* Initialize a frame cache. */
1936 static void
1937 amd64_init_frame_cache (struct amd64_frame_cache *cache)
1939 int i;
1941 /* Base address. */
1942 cache->base = 0;
1943 cache->base_p = 0;
1944 cache->sp_offset = -8;
1945 cache->pc = 0;
1947 /* Saved registers. We initialize these to -1 since zero is a valid
1948 offset (that's where %rbp is supposed to be stored).
1949 The values start out as being offsets, and are later converted to
1950 addresses (at which point -1 is interpreted as an address, still meaning
1951 "invalid"). */
1952 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
1953 cache->saved_regs[i] = -1;
1954 cache->saved_sp = 0;
1955 cache->saved_sp_reg = -1;
1957 /* Frameless until proven otherwise. */
1958 cache->frameless_p = 1;
1961 /* Allocate and initialize a frame cache. */
1963 static struct amd64_frame_cache *
1964 amd64_alloc_frame_cache (void)
1966 struct amd64_frame_cache *cache;
1968 cache = FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache);
1969 amd64_init_frame_cache (cache);
1970 return cache;
1973 /* GCC 4.4 and later can put code in the prologue to realign the
1974 stack pointer. Check whether PC points to such code, and update
1975 CACHE accordingly. Return the first instruction after the code
1976 sequence or CURRENT_PC, whichever is smaller. If we don't
1977 recognize the code, return PC. */
1979 static CORE_ADDR
1980 amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
1981 struct amd64_frame_cache *cache)
1983 /* There are 2 code sequences to re-align stack before the frame
1984 gets set up:
1986 1. Use a caller-saved register to hold the saved stack pointer:
1988 leaq 8(%rsp), %reg
1989 andq $-XXX, %rsp
1990 pushq -8(%reg)
1992 2. Use a callee-saved register to hold the saved stack pointer:
1994 pushq %reg
1995 leaq 16(%rsp), %reg
1996 andq $-XXX, %rsp
1997 pushq -8(%reg)
1999 "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:
2001 0x48 0x83 0xe4 0xf0 andq $-16, %rsp
2002 0x48 0x81 0xe4 0x00 0xff 0xff 0xff andq $-256, %rsp
2005 gdb_byte buf[18];
2006 int reg, r;
2007 int offset, offset_and;
2009 if (target_read_code (pc, buf, sizeof buf))
2010 return pc;
2012 /* Check the caller-saved register variant. The first instruction has
2013 to be "leaq 8(%rsp), %reg". */
2014 if ((buf[0] & 0xfb) == 0x48
2015 && buf[1] == 0x8d
2016 && buf[3] == 0x24
2017 && buf[4] == 0x8)
2019 /* MOD must be binary 01 and R/M must be binary 100. */
2020 if ((buf[2] & 0xc7) != 0x44)
2021 return pc;
2023 /* REG has register number. */
2024 reg = (buf[2] >> 3) & 7;
2026 /* Check the REX.R bit. */
2027 if (buf[0] == 0x4c)
2028 reg += 8;
2030 offset = 5;
2032 else
2034 /* Check the callee-saved register variant. The first instruction
2035 has to be "pushq %reg". */
2036 reg = 0;
2037 if ((buf[0] & 0xf8) == 0x50)
2038 offset = 0;
2039 else if ((buf[0] & 0xf6) == 0x40
2040 && (buf[1] & 0xf8) == 0x50)
2042 /* Check the REX.B bit. */
2043 if ((buf[0] & 1) != 0)
2044 reg = 8;
2046 offset = 1;
2048 else
2049 return pc;
2051 /* Get register. */
2052 reg += buf[offset] & 0x7;
2054 offset++;
2056 /* The next instruction has to be "leaq 16(%rsp), %reg". */
2057 if ((buf[offset] & 0xfb) != 0x48
2058 || buf[offset + 1] != 0x8d
2059 || buf[offset + 3] != 0x24
2060 || buf[offset + 4] != 0x10)
2061 return pc;
2063 /* MOD must be binary 01 and R/M must be binary 100. */
2064 if ((buf[offset + 2] & 0xc7) != 0x44)
2065 return pc;
2067 /* REG has register number. */
2068 r = (buf[offset + 2] >> 3) & 7;
2070 /* Check the REX.R bit. */
2071 if (buf[offset] == 0x4c)
2072 r += 8;
2074 /* Registers in pushq and leaq have to be the same. */
2075 if (reg != r)
2076 return pc;
2078 offset += 5;
2081 /* Register can't be %rsp or %rbp. */
2082 if (reg == 4 || reg == 5)
2083 return pc;
2085 /* The next instruction has to be "andq $-XXX, %rsp". */
2086 if (buf[offset] != 0x48
2087 || buf[offset + 2] != 0xe4
2088 || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
2089 return pc;
2091 offset_and = offset;
2092 offset += buf[offset + 1] == 0x81 ? 7 : 4;
2094 /* The next instruction has to be "pushq -8(%reg)". */
2095 r = 0;
2096 if (buf[offset] == 0xff)
2097 offset++;
2098 else if ((buf[offset] & 0xf6) == 0x40
2099 && buf[offset + 1] == 0xff)
2101 /* Check the REX.B bit. */
2102 if ((buf[offset] & 0x1) != 0)
2103 r = 8;
2104 offset += 2;
2106 else
2107 return pc;
2109 /* 8-bit -8 is 0xf8. REG must be binary 110 and MOD must be binary
2110 01. */
2111 if (buf[offset + 1] != 0xf8
2112 || (buf[offset] & 0xf8) != 0x70)
2113 return pc;
2115 /* R/M has register. */
2116 r += buf[offset] & 7;
2118 /* Registers in leaq and pushq have to be the same. */
2119 if (reg != r)
2120 return pc;
2122 if (current_pc > pc + offset_and)
2123 cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);
2125 return std::min (pc + offset + 2, current_pc);
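/* One concrete instance of sequence 1 (an illustrative encoding; any
   caller-saved register other than %rsp/%rbp works the same way):

     4c 8d 54 24 08        leaq  0x8(%rsp),%r10
     48 83 e4 f0           andq  $-16,%rsp
     41 ff 72 f8           pushq -0x8(%r10)

   Here buf[2] = 0x54 gives reg = 2, the REX byte 0x4c adds 8 for
   %r10, and the pushq's ModRM byte 0x72 yields the matching r = 10,
   so %r10 is recorded as the saved-SP register.  */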
2128 /* Similar to amd64_analyze_stack_align for x32. */
2130 static CORE_ADDR
2131 amd64_x32_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
2132 struct amd64_frame_cache *cache)
2134 /* There are 2 code sequences to re-align stack before the frame
2135 gets set up:
2137 1. Use a caller-saved register to hold the saved stack pointer:
2139 leaq 8(%rsp), %reg
2140 andq $-XXX, %rsp
2141 pushq -8(%reg)
2143 or
2145 [addr32] leal 8(%rsp), %reg
2146 andl $-XXX, %esp
2147 [addr32] pushq -8(%reg)
2149 2. Use a callee-saved register to hold the saved stack pointer:
2151 pushq %reg
2152 leaq 16(%rsp), %reg
2153 andq $-XXX, %rsp
2154 pushq -8(%reg)
2156 or
2158 pushq %reg
2159 [addr32] leal 16(%rsp), %reg
2160 andl $-XXX, %esp
2161 [addr32] pushq -8(%reg)
2163 "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:
2165 0x48 0x83 0xe4 0xf0 andq $-16, %rsp
2166 0x48 0x81 0xe4 0x00 0xff 0xff 0xff andq $-256, %rsp
2168 "andl $-XXX, %esp" can be either 3 bytes or 6 bytes:
2170 0x83 0xe4 0xf0 andl $-16, %esp
2171 0x81 0xe4 0x00 0xff 0xff 0xff andl $-256, %esp
2174 gdb_byte buf[19];
2175 int reg, r;
2176 int offset, offset_and;
2178 if (target_read_memory (pc, buf, sizeof buf))
2179 return pc;
2181 /* Skip optional addr32 prefix. */
2182 offset = buf[0] == 0x67 ? 1 : 0;
2184 /* Check the caller-saved register variant. The first instruction has
2185 to be "leaq 8(%rsp), %reg" or "leal 8(%rsp), %reg". */
2186 if (((buf[offset] & 0xfb) == 0x48 || (buf[offset] & 0xfb) == 0x40)
2187 && buf[offset + 1] == 0x8d
2188 && buf[offset + 3] == 0x24
2189 && buf[offset + 4] == 0x8)
2191 /* MOD must be binary 01 and R/M must be binary 100. */
2192 if ((buf[offset + 2] & 0xc7) != 0x44)
2193 return pc;
2195 /* REG has register number. */
2196 reg = (buf[offset + 2] >> 3) & 7;
2198 /* Check the REX.R bit. */
2199 if ((buf[offset] & 0x4) != 0)
2200 reg += 8;
2202 offset += 5;
2204 else
2206 /* Check the callee-saved register variant. The first instruction
2207 has to be "pushq %reg". */
2208 reg = 0;
2209 if ((buf[offset] & 0xf6) == 0x40
2210 && (buf[offset + 1] & 0xf8) == 0x50)
2212 /* Check the REX.B bit. */
2213 if ((buf[offset] & 1) != 0)
2214 reg = 8;
2216 offset += 1;
2218 else if ((buf[offset] & 0xf8) != 0x50)
2219 return pc;
2221 /* Get register. */
2222 reg += buf[offset] & 0x7;
2224 offset++;
2226 /* Skip optional addr32 prefix. */
2227 if (buf[offset] == 0x67)
2228 offset++;
2230 /* The next instruction has to be "leaq 16(%rsp), %reg" or
2231 "leal 16(%rsp), %reg". */
2232 if (((buf[offset] & 0xfb) != 0x48 && (buf[offset] & 0xfb) != 0x40)
2233 || buf[offset + 1] != 0x8d
2234 || buf[offset + 3] != 0x24
2235 || buf[offset + 4] != 0x10)
2236 return pc;
2238 /* MOD must be binary 01 and R/M must be binary 100. */
2239 if ((buf[offset + 2] & 0xc7) != 0x44)
2240 return pc;
2242 /* REG has register number. */
2243 r = (buf[offset + 2] >> 3) & 7;
2245 /* Check the REX.R bit. */
2246 if ((buf[offset] & 0x4) != 0)
2247 r += 8;
2249 /* Registers in pushq and leaq have to be the same. */
2250 if (reg != r)
2251 return pc;
2253 offset += 5;
2256 /* Register can't be %rsp or %rbp. */
2257 if (reg == 4 || reg == 5)
2258 return pc;
2260 /* The next instruction may be "andq $-XXX, %rsp" or
2261 "andl $-XXX, %esp". */
2262 if (buf[offset] != 0x48)
2263 offset--;
2265 if (buf[offset + 2] != 0xe4
2266 || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
2267 return pc;
2269 offset_and = offset;
2270 offset += buf[offset + 1] == 0x81 ? 7 : 4;
2272 /* Skip optional addr32 prefix. */
2273 if (buf[offset] == 0x67)
2274 offset++;
2276 /* The next instruction has to be "pushq -8(%reg)". */
2277 r = 0;
2278 if (buf[offset] == 0xff)
2279 offset++;
2280 else if ((buf[offset] & 0xf6) == 0x40
2281 && buf[offset + 1] == 0xff)
2283 /* Check the REX.B bit. */
2284 if ((buf[offset] & 0x1) != 0)
2285 r = 8;
2286 offset += 2;
2288 else
2289 return pc;
2291 /* 8-bit -8 is 0xf8. REG must be binary 110 and MOD must be binary
2292 01. */
2293 if (buf[offset + 1] != 0xf8
2294 || (buf[offset] & 0xf8) != 0x70)
2295 return pc;
2297 /* R/M has register. */
2298 r += buf[offset] & 7;
2300 /* Registers in leaq and pushq have to be the same. */
2301 if (reg != r)
2302 return pc;
2304 if (current_pc > pc + offset_and)
2305 cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);
2307 return std::min (pc + offset + 2, current_pc);
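/* A note on the "offset--" compensation above (the reasoning spelled
   out, not upstream commentary): the 3- or 6-byte "andl $-XXX, %esp"
   forms carry no 0x48 REX.W byte, so when buf[offset] != 0x48 the
   scan backs up one byte.  buf[offset + 1] and buf[offset + 2] then
   again address the AND opcode (0x83 or 0x81) and its ModRM byte
   (0xe4), and the subsequent "offset += ... ? 7 : 4" still lands just
   past the instruction in both encodings.  */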
2310 /* Do a limited analysis of the prologue at PC and update CACHE
2311 accordingly. Bail out early if CURRENT_PC is reached. Return the
2312 address where the analysis stopped.
2314 We will handle only functions beginning with:
2316 pushq %rbp 0x55
2317 movq %rsp, %rbp 0x48 0x89 0xe5 (or 0x48 0x8b 0xec)
2319 or (for the X32 ABI):
2321 pushq %rbp 0x55
2322 movl %esp, %ebp 0x89 0xe5 (or 0x8b 0xec)
2324 An `endbr64` instruction may precede these sequences; it is skipped
2325 when present.
2327 Any function that doesn't start with one of these sequences will be
2328 assumed to have no prologue and thus no valid frame pointer in
2329 %rbp. */
2331 static CORE_ADDR
2332 amd64_analyze_prologue (struct gdbarch *gdbarch,
2333 CORE_ADDR pc, CORE_ADDR current_pc,
2334 struct amd64_frame_cache *cache)
2336 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2337 /* The `endbr64` instruction. */
2338 static const gdb_byte endbr64[4] = { 0xf3, 0x0f, 0x1e, 0xfa };
2339 /* There are two variations of movq %rsp, %rbp. */
2340 static const gdb_byte mov_rsp_rbp_1[3] = { 0x48, 0x89, 0xe5 };
2341 static const gdb_byte mov_rsp_rbp_2[3] = { 0x48, 0x8b, 0xec };
2342 /* Ditto for movl %esp, %ebp. */
2343 static const gdb_byte mov_esp_ebp_1[2] = { 0x89, 0xe5 };
2344 static const gdb_byte mov_esp_ebp_2[2] = { 0x8b, 0xec };
2346 gdb_byte buf[3];
2347 gdb_byte op;
2349 if (current_pc <= pc)
2350 return current_pc;
2352 if (gdbarch_ptr_bit (gdbarch) == 32)
2353 pc = amd64_x32_analyze_stack_align (pc, current_pc, cache);
2354 else
2355 pc = amd64_analyze_stack_align (pc, current_pc, cache);
2357 op = read_code_unsigned_integer (pc, 1, byte_order);
2359 /* Check for the `endbr64` instruction, skip it if found. */
2360 if (op == endbr64[0])
2362 read_code (pc + 1, buf, 3);
2364 if (memcmp (buf, &endbr64[1], 3) == 0)
2365 pc += 4;
2367 op = read_code_unsigned_integer (pc, 1, byte_order);
2370 if (current_pc <= pc)
2371 return current_pc;
2373 if (op == 0x55) /* pushq %rbp */
2375 /* Take into account that we've executed the `pushq %rbp' that
2376 starts this instruction sequence. */
2377 cache->saved_regs[AMD64_RBP_REGNUM] = 0;
2378 cache->sp_offset += 8;
2380 /* If that's all, return now. */
2381 if (current_pc <= pc + 1)
2382 return current_pc;
2384 read_code (pc + 1, buf, 3);
2386 /* Check for `movq %rsp, %rbp'. */
2387 if (memcmp (buf, mov_rsp_rbp_1, 3) == 0
2388 || memcmp (buf, mov_rsp_rbp_2, 3) == 0)
2390 /* OK, we actually have a frame. */
2391 cache->frameless_p = 0;
2392 return pc + 4;
2395 /* For X32, also check for `movl %esp, %ebp'. */
2396 if (gdbarch_ptr_bit (gdbarch) == 32)
2398 if (memcmp (buf, mov_esp_ebp_1, 2) == 0
2399 || memcmp (buf, mov_esp_ebp_2, 2) == 0)
2401 /* OK, we actually have a frame. */
2402 cache->frameless_p = 0;
2403 return pc + 3;
2407 return pc + 1;
2410 return pc;
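/* Example (an illustrative encoding): for a function beginning

     f3 0f 1e fa        endbr64
     55                 pushq %rbp
     48 89 e5           movq  %rsp,%rbp

   the analysis skips the endbr64, records the %rbp save at offset 0,
   advances sp_offset by 8, clears frameless_p, and returns PC + 8.  */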
2413 /* Work around false termination of prologue - GCC PR debug/48827.
2415 START_PC is the first instruction of a function; PC is the address to which
2416 prologue analysis has already advanced. The function returns PC if it has nothing to do.
2418 84 c0 test %al,%al
2419 74 23 je after
2420 <-- here is 0 lines advance - the false prologue end marker.
2421 0f 29 85 70 ff ff ff movaps %xmm0,-0x90(%rbp)
2422 0f 29 4d 80 movaps %xmm1,-0x80(%rbp)
2423 0f 29 55 90 movaps %xmm2,-0x70(%rbp)
2424 0f 29 5d a0 movaps %xmm3,-0x60(%rbp)
2425 0f 29 65 b0 movaps %xmm4,-0x50(%rbp)
2426 0f 29 6d c0 movaps %xmm5,-0x40(%rbp)
2427 0f 29 75 d0 movaps %xmm6,-0x30(%rbp)
2428 0f 29 7d e0 movaps %xmm7,-0x20(%rbp)
2429 after: */
2431 static CORE_ADDR
2432 amd64_skip_xmm_prologue (CORE_ADDR pc, CORE_ADDR start_pc)
2434 struct symtab_and_line start_pc_sal, next_sal;
2435 gdb_byte buf[4 + 8 * 7];
2436 int offset, xmmreg;
2438 if (pc == start_pc)
2439 return pc;
2441 start_pc_sal = find_pc_sect_line (start_pc, NULL, 0);
2442 if (start_pc_sal.symtab == NULL
2443 || producer_is_gcc_ge_4 (start_pc_sal.symtab->compunit ()
2444 ->producer ()) < 6
2445 || start_pc_sal.pc != start_pc || pc >= start_pc_sal.end)
2446 return pc;
2448 next_sal = find_pc_sect_line (start_pc_sal.end, NULL, 0);
2449 if (next_sal.line != start_pc_sal.line)
2450 return pc;
2452 /* START_PC can be from overlaid memory; it is ignored here. */
2453 if (target_read_code (next_sal.pc - 4, buf, sizeof (buf)) != 0)
2454 return pc;
2456 /* test %al,%al */
2457 if (buf[0] != 0x84 || buf[1] != 0xc0)
2458 return pc;
2459 /* je AFTER */
2460 if (buf[2] != 0x74)
2461 return pc;
2463 offset = 4;
2464 for (xmmreg = 0; xmmreg < 8; xmmreg++)
2466 /* 0x0f 0x29 0b??000101 movaps %xmmreg?,-0x??(%rbp) */
2467 if (buf[offset] != 0x0f || buf[offset + 1] != 0x29
2468 || (buf[offset + 2] & 0x3f) != (xmmreg << 3 | 0x5))
2469 return pc;
2471 /* 0b01?????? */
2472 if ((buf[offset + 2] & 0xc0) == 0x40)
2474 /* 8-bit displacement. */
2475 offset += 4;
2477 /* 0b10?????? */
2478 else if ((buf[offset + 2] & 0xc0) == 0x80)
2480 /* 32-bit displacement. */
2481 offset += 7;
2483 else
2484 return pc;
2487 /* je AFTER */
2488 if (offset - 4 != buf[3])
2489 return pc;
2491 return next_sal.end;
2494 /* Return PC of first real instruction. */
2496 static CORE_ADDR
2497 amd64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR start_pc)
2499 struct amd64_frame_cache cache;
2500 CORE_ADDR pc;
2501 CORE_ADDR func_addr;
2503 if (find_pc_partial_function (start_pc, NULL, &func_addr, NULL))
2505 CORE_ADDR post_prologue_pc
2506 = skip_prologue_using_sal (gdbarch, func_addr);
2507 struct compunit_symtab *cust = find_pc_compunit_symtab (func_addr);
2509 /* LLVM backend (Clang/Flang) always emits a line note before the
2510 prologue and another one after. We trust clang and newer Intel
2511 compilers to emit usable line notes. */
2512 if (post_prologue_pc
2513 && (cust != NULL
2514 && cust->producer () != nullptr
2515 && (producer_is_llvm (cust->producer ())
2516 || producer_is_icc_ge_19 (cust->producer ()))))
2517 return std::max (start_pc, post_prologue_pc);
2520 amd64_init_frame_cache (&cache);
2521 pc = amd64_analyze_prologue (gdbarch, start_pc, 0xffffffffffffffffLL,
2522 &cache);
2523 if (cache.frameless_p)
2524 return start_pc;
2526 return amd64_skip_xmm_prologue (pc, start_pc);
2530 /* Normal frames. */
2532 static void
2533 amd64_frame_cache_1 (const frame_info_ptr &this_frame,
2534 struct amd64_frame_cache *cache)
2536 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2537 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2538 gdb_byte buf[8];
2539 int i;
2541 cache->pc = get_frame_func (this_frame);
2542 if (cache->pc != 0)
2543 amd64_analyze_prologue (gdbarch, cache->pc, get_frame_pc (this_frame),
2544 cache);
2546 if (cache->frameless_p)
2548 /* We didn't find a valid frame. If we're at the start of a
2549 function, or somewhere halfway through its prologue, the function's
2550 frame probably hasn't been fully set up yet. Try to
2551 reconstruct the base address for the stack frame by looking
2552 at the stack pointer. For truly "frameless" functions this
2553 might work too. */
2555 if (cache->saved_sp_reg != -1)
2557 /* Stack pointer has been saved. */
2558 get_frame_register (this_frame, cache->saved_sp_reg, buf);
2559 cache->saved_sp = extract_unsigned_integer (buf, 8, byte_order);
2561 /* We're halfway aligning the stack. */
2562 cache->base = ((cache->saved_sp - 8) & 0xfffffffffffffff0LL) - 8;
2563 cache->saved_regs[AMD64_RIP_REGNUM] = cache->saved_sp - 8;
2565 /* This will be added back below. */
2566 cache->saved_regs[AMD64_RIP_REGNUM] -= cache->base;
2568 else
2570 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2571 cache->base = extract_unsigned_integer (buf, 8, byte_order)
2572 + cache->sp_offset;
2575 else
2577 get_frame_register (this_frame, AMD64_RBP_REGNUM, buf);
2578 cache->base = extract_unsigned_integer (buf, 8, byte_order);
2581 /* Now that we have the base address for the stack frame we can
2582 calculate the value of %rsp in the calling frame. */
2583 cache->saved_sp = cache->base + 16;
2585 /* For normal frames, %rip is stored at 8(%rbp). If we don't have a
2586 frame we find it at the same offset from the reconstructed base
2587 address. If we're halfway aligning the stack, %rip is handled
2588 differently (see above). */
2589 if (!cache->frameless_p || cache->saved_sp_reg == -1)
2590 cache->saved_regs[AMD64_RIP_REGNUM] = 8;
2592 /* Adjust all the saved registers such that they contain addresses
2593 instead of offsets. */
2594 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
2595 if (cache->saved_regs[i] != -1)
2596 cache->saved_regs[i] += cache->base;
2598 cache->base_p = 1;
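/* The resulting layout for a normal frame, as a sketch:

     cache->base + 16   %rsp in the calling frame (before the call)
     cache->base + 8    return address (the saved %rip)
     cache->base        saved %rbp (%rbp in this frame points here)

   which is why saved_sp is base + 16 and %rip lives at offset 8.  */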
2601 static struct amd64_frame_cache *
2602 amd64_frame_cache (const frame_info_ptr &this_frame, void **this_cache)
2604 struct amd64_frame_cache *cache;
2606 if (*this_cache)
2607 return (struct amd64_frame_cache *) *this_cache;
2609 cache = amd64_alloc_frame_cache ();
2610 *this_cache = cache;
2612 try
2614 amd64_frame_cache_1 (this_frame, cache);
2616 catch (const gdb_exception_error &ex)
2618 if (ex.error != NOT_AVAILABLE_ERROR)
2619 throw;
2622 return cache;
2625 static enum unwind_stop_reason
2626 amd64_frame_unwind_stop_reason (const frame_info_ptr &this_frame,
2627 void **this_cache)
2629 struct amd64_frame_cache *cache =
2630 amd64_frame_cache (this_frame, this_cache);
2632 if (!cache->base_p)
2633 return UNWIND_UNAVAILABLE;
2635 /* This marks the outermost frame. */
2636 if (cache->base == 0)
2637 return UNWIND_OUTERMOST;
2639 return UNWIND_NO_REASON;
2642 static void
2643 amd64_frame_this_id (const frame_info_ptr &this_frame, void **this_cache,
2644 struct frame_id *this_id)
2646 struct amd64_frame_cache *cache =
2647 amd64_frame_cache (this_frame, this_cache);
2649 if (!cache->base_p)
2650 (*this_id) = frame_id_build_unavailable_stack (cache->pc);
2651 else if (cache->base == 0)
2653 /* This marks the outermost frame. */
2654 return;
2656 else
2657 (*this_id) = frame_id_build (cache->base + 16, cache->pc);
2660 static struct value *
2661 amd64_frame_prev_register (const frame_info_ptr &this_frame, void **this_cache,
2662 int regnum)
2664 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2665 struct amd64_frame_cache *cache =
2666 amd64_frame_cache (this_frame, this_cache);
2668 gdb_assert (regnum >= 0);
2670 if (regnum == gdbarch_sp_regnum (gdbarch) && cache->saved_sp)
2671 return frame_unwind_got_constant (this_frame, regnum, cache->saved_sp);
2673 if (regnum < AMD64_NUM_SAVED_REGS && cache->saved_regs[regnum] != -1)
2674 return frame_unwind_got_memory (this_frame, regnum,
2675 cache->saved_regs[regnum]);
2677 return frame_unwind_got_register (this_frame, regnum, regnum);
2680 static const struct frame_unwind amd64_frame_unwind =
2682 "amd64 prologue",
2683 NORMAL_FRAME,
2684 amd64_frame_unwind_stop_reason,
2685 amd64_frame_this_id,
2686 amd64_frame_prev_register,
2687 NULL,
2688 default_frame_sniffer
2691 /* Generate a bytecode expression to get the value of the saved PC. */
2693 static void
2694 amd64_gen_return_address (struct gdbarch *gdbarch,
2695 struct agent_expr *ax, struct axs_value *value,
2696 CORE_ADDR scope)
2698 /* The following sequence assumes the traditional use of the base
2699 register. */
2700 ax_reg (ax, AMD64_RBP_REGNUM);
2701 ax_const_l (ax, 8);
2702 ax_simple (ax, aop_add);
2703 value->type = register_type (gdbarch, AMD64_RIP_REGNUM);
2704 value->kind = axs_lvalue_memory;
2708 /* Signal trampolines. */
2710 /* FIXME: kettenis/20030419: Perhaps we can unify the 32-bit and
2711 64-bit variants. This would require using identical frame caches
2712 on both platforms. */
2714 static struct amd64_frame_cache *
2715 amd64_sigtramp_frame_cache (const frame_info_ptr &this_frame, void **this_cache)
2717 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2718 i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
2719 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2720 struct amd64_frame_cache *cache;
2721 CORE_ADDR addr;
2722 gdb_byte buf[8];
2723 int i;
2725 if (*this_cache)
2726 return (struct amd64_frame_cache *) *this_cache;
2728 cache = amd64_alloc_frame_cache ();
2730 try
2732 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2733 cache->base = extract_unsigned_integer (buf, 8, byte_order) - 8;
2735 addr = tdep->sigcontext_addr (this_frame);
2736 gdb_assert (tdep->sc_reg_offset);
2737 gdb_assert (tdep->sc_num_regs <= AMD64_NUM_SAVED_REGS);
2738 for (i = 0; i < tdep->sc_num_regs; i++)
2739 if (tdep->sc_reg_offset[i] != -1)
2740 cache->saved_regs[i] = addr + tdep->sc_reg_offset[i];
2742 cache->base_p = 1;
2744 catch (const gdb_exception_error &ex)
2746 if (ex.error != NOT_AVAILABLE_ERROR)
2747 throw;
2750 *this_cache = cache;
2751 return cache;
2754 static enum unwind_stop_reason
2755 amd64_sigtramp_frame_unwind_stop_reason (const frame_info_ptr &this_frame,
2756 void **this_cache)
2758 struct amd64_frame_cache *cache =
2759 amd64_sigtramp_frame_cache (this_frame, this_cache);
2761 if (!cache->base_p)
2762 return UNWIND_UNAVAILABLE;
2764 return UNWIND_NO_REASON;
2767 static void
2768 amd64_sigtramp_frame_this_id (const frame_info_ptr &this_frame,
2769 void **this_cache, struct frame_id *this_id)
2771 struct amd64_frame_cache *cache =
2772 amd64_sigtramp_frame_cache (this_frame, this_cache);
2774 if (!cache->base_p)
2775 (*this_id) = frame_id_build_unavailable_stack (get_frame_pc (this_frame));
2776 else if (cache->base == 0)
2778 /* This marks the outermost frame. */
2779 return;
2781 else
2782 (*this_id) = frame_id_build (cache->base + 16, get_frame_pc (this_frame));
2785 static struct value *
2786 amd64_sigtramp_frame_prev_register (const frame_info_ptr &this_frame,
2787 void **this_cache, int regnum)
2789 /* Make sure we've initialized the cache. */
2790 amd64_sigtramp_frame_cache (this_frame, this_cache);
2792 return amd64_frame_prev_register (this_frame, this_cache, regnum);
2795 static int
2796 amd64_sigtramp_frame_sniffer (const struct frame_unwind *self,
2797 const frame_info_ptr &this_frame,
2798 void **this_cache)
2800 gdbarch *arch = get_frame_arch (this_frame);
2801 i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (arch);
2803 /* We shouldn't even bother if we don't have a sigcontext_addr
2804 handler. */
2805 if (tdep->sigcontext_addr == NULL)
2806 return 0;
2808 if (tdep->sigtramp_p != NULL)
2810 if (tdep->sigtramp_p (this_frame))
2811 return 1;
2814 if (tdep->sigtramp_start != 0)
2816 CORE_ADDR pc = get_frame_pc (this_frame);
2818 gdb_assert (tdep->sigtramp_end != 0);
2819 if (pc >= tdep->sigtramp_start && pc < tdep->sigtramp_end)
2820 return 1;
2823 return 0;
2826 static const struct frame_unwind amd64_sigtramp_frame_unwind =
2828 "amd64 sigtramp",
2829 SIGTRAMP_FRAME,
2830 amd64_sigtramp_frame_unwind_stop_reason,
2831 amd64_sigtramp_frame_this_id,
2832 amd64_sigtramp_frame_prev_register,
2833 NULL,
2834 amd64_sigtramp_frame_sniffer
2838 static CORE_ADDR
2839 amd64_frame_base_address (const frame_info_ptr &this_frame, void **this_cache)
2841 struct amd64_frame_cache *cache =
2842 amd64_frame_cache (this_frame, this_cache);
2844 return cache->base;
2847 static const struct frame_base amd64_frame_base =
2849 &amd64_frame_unwind,
2850 amd64_frame_base_address,
2851 amd64_frame_base_address,
2852 amd64_frame_base_address
2855 /* Implement core of the stack_frame_destroyed_p gdbarch method. */
2857 static int
2858 amd64_stack_frame_destroyed_p_1 (struct gdbarch *gdbarch, CORE_ADDR pc)
2860 gdb_byte insn;
2862 std::optional<CORE_ADDR> epilogue = find_epilogue_using_linetable (pc);
2864 /* PC is pointing at the next instruction to be executed. If it is
2865 equal to the epilogue start, it means we're right before it starts,
2866 so the stack is still valid. */
2867 if (epilogue)
2868 return pc > *epilogue;
2870 if (target_read_memory (pc, &insn, 1))
2871 return 0; /* Can't read memory at pc. */
2873 if (insn != 0xc3) /* 'ret' instruction. */
2874 return 0;
2876 return 1;
2879 /* Normal frames, but in a function epilogue. */
2881 /* Implement the stack_frame_destroyed_p gdbarch method.
2883 The epilogue is defined here as the 'ret' instruction, which will
2884 follow any instruction such as 'leave' or 'pop %ebp' that destroys
2885 the function's stack frame. */
2887 static int
2888 amd64_stack_frame_destroyed_p (struct gdbarch *gdbarch, CORE_ADDR pc)
2890 struct compunit_symtab *cust = find_pc_compunit_symtab (pc);
2892 if (cust != nullptr && cust->producer () != nullptr
2893 && producer_is_llvm (cust->producer ()))
2894 return amd64_stack_frame_destroyed_p_1 (gdbarch, pc);
2896 return 0;
2899 static int
2900 amd64_epilogue_frame_sniffer_1 (const struct frame_unwind *self,
2901 const frame_info_ptr &this_frame,
2902 void **this_prologue_cache, bool override_p)
2904 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2905 CORE_ADDR pc = get_frame_pc (this_frame);
2907 if (frame_relative_level (this_frame) != 0)
2908 /* We're not in the innermost frame, so assume we're not in an epilogue. */
2909 return 0;
2911 bool unwind_valid_p
2912 = compunit_epilogue_unwind_valid (find_pc_compunit_symtab (pc));
2913 if (override_p)
2915 if (unwind_valid_p)
2916 /* Don't override the symtab unwinders, skip
2917 "amd64 epilogue override". */
2918 return 0;
2920 else
2922 if (!unwind_valid_p)
2923 /* "amd64 epilogue override" unwinder already ran, skip
2924 "amd64 epilogue". */
2925 return 0;
2928 /* Check whether we're in an epilogue. */
2929 return amd64_stack_frame_destroyed_p_1 (gdbarch, pc);
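/* Selection summary for the two epilogue sniffers, as a sketch:

     epilogue unwind valid?  override_p  result
             yes               true      reject (trust symtab unwinders)
             yes               false     run the epilogue check below
             no                true      run the epilogue check below
             no                false     reject (override already ran)  */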
2932 static int
2933 amd64_epilogue_override_frame_sniffer (const struct frame_unwind *self,
2934 const frame_info_ptr &this_frame,
2935 void **this_prologue_cache)
2937 return amd64_epilogue_frame_sniffer_1 (self, this_frame, this_prologue_cache,
2938 true);
2941 static int
2942 amd64_epilogue_frame_sniffer (const struct frame_unwind *self,
2943 const frame_info_ptr &this_frame,
2944 void **this_prologue_cache)
2946 return amd64_epilogue_frame_sniffer_1 (self, this_frame, this_prologue_cache,
2947 false);
2950 static struct amd64_frame_cache *
2951 amd64_epilogue_frame_cache (const frame_info_ptr &this_frame, void **this_cache)
2953 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2954 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2955 struct amd64_frame_cache *cache;
2956 gdb_byte buf[8];
2958 if (*this_cache)
2959 return (struct amd64_frame_cache *) *this_cache;
2961 cache = amd64_alloc_frame_cache ();
2962 *this_cache = cache;
2964 try
2966 /* Cache base will be %rsp plus cache->sp_offset (-8). */
2967 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2968 cache->base = extract_unsigned_integer (buf, 8,
2969 byte_order) + cache->sp_offset;
2971 /* Cache pc will be the frame func. */
2972 cache->pc = get_frame_func (this_frame);
2974 /* The previous value of %rsp is cache->base plus 16. */
2975 cache->saved_sp = cache->base + 16;
2977 /* The saved %rip will be at cache->base plus 8. */
2978 cache->saved_regs[AMD64_RIP_REGNUM] = cache->base + 8;
2980 cache->base_p = 1;
2982 catch (const gdb_exception_error &ex)
2984 if (ex.error != NOT_AVAILABLE_ERROR)
2985 throw;
2988 return cache;
2991 static enum unwind_stop_reason
2992 amd64_epilogue_frame_unwind_stop_reason (const frame_info_ptr &this_frame,
2993 void **this_cache)
2995 struct amd64_frame_cache *cache
2996 = amd64_epilogue_frame_cache (this_frame, this_cache);
2998 if (!cache->base_p)
2999 return UNWIND_UNAVAILABLE;
3001 return UNWIND_NO_REASON;
3004 static void
3005 amd64_epilogue_frame_this_id (const frame_info_ptr &this_frame,
3006 void **this_cache,
3007 struct frame_id *this_id)
3009 struct amd64_frame_cache *cache = amd64_epilogue_frame_cache (this_frame,
3010 this_cache);
3012 if (!cache->base_p)
3013 (*this_id) = frame_id_build_unavailable_stack (cache->pc);
3014 else
3015 (*this_id) = frame_id_build (cache->base + 16, cache->pc);
3018 static const struct frame_unwind amd64_epilogue_override_frame_unwind =
3020 "amd64 epilogue override",
3021 NORMAL_FRAME,
3022 amd64_epilogue_frame_unwind_stop_reason,
3023 amd64_epilogue_frame_this_id,
3024 amd64_frame_prev_register,
3025 NULL,
3026 amd64_epilogue_override_frame_sniffer
3029 static const struct frame_unwind amd64_epilogue_frame_unwind =
3031 "amd64 epilogue",
3032 NORMAL_FRAME,
3033 amd64_epilogue_frame_unwind_stop_reason,
3034 amd64_epilogue_frame_this_id,
3035 amd64_frame_prev_register,
3036 NULL,
3037 amd64_epilogue_frame_sniffer
3040 static struct frame_id
3041 amd64_dummy_id (struct gdbarch *gdbarch, const frame_info_ptr &this_frame)
3043 CORE_ADDR fp;
3045 fp = get_frame_register_unsigned (this_frame, AMD64_RBP_REGNUM);
3047 return frame_id_build (fp + 16, get_frame_pc (this_frame));
3050 /* 16-byte align the SP per frame requirements. */
3052 static CORE_ADDR
3053 amd64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
3055 return sp & -(CORE_ADDR)16;
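/* E.g. sp = 0x7fffffffe528 aligns down to 0x7fffffffe520, while a
   value that is already 16-byte aligned passes through unchanged.  */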
3059 /* Supply register REGNUM from the buffer specified by FPREGS and LEN
3060 in the floating-point register set REGSET to register cache
3061 REGCACHE. If REGNUM is -1, do this for all registers in REGSET. */
3063 static void
3064 amd64_supply_fpregset (const struct regset *regset, struct regcache *regcache,
3065 int regnum, const void *fpregs, size_t len)
3067 struct gdbarch *gdbarch = regcache->arch ();
3068 const i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
3070 gdb_assert (len >= tdep->sizeof_fpregset);
3071 amd64_supply_fxsave (regcache, regnum, fpregs);
3074 /* Collect register REGNUM from the register cache REGCACHE and store
3075 it in the buffer specified by FPREGS and LEN as described by the
3076 floating-point register set REGSET. If REGNUM is -1, do this for
3077 all registers in REGSET. */
3079 static void
3080 amd64_collect_fpregset (const struct regset *regset,
3081 const struct regcache *regcache,
3082 int regnum, void *fpregs, size_t len)
3084 struct gdbarch *gdbarch = regcache->arch ();
3085 const i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
3087 gdb_assert (len >= tdep->sizeof_fpregset);
3088 amd64_collect_fxsave (regcache, regnum, fpregs);
3091 const struct regset amd64_fpregset =
3093 NULL, amd64_supply_fpregset, amd64_collect_fpregset
3097 /* Figure out where the longjmp will land. Slurp the jmp_buf out of
3098 %rdi. We expect its value to be a pointer to the jmp_buf structure
3099 from which we extract the address that we will land at. This
3100 address is copied into PC. This routine returns non-zero on
3101 success. */
3103 static int
3104 amd64_get_longjmp_target (const frame_info_ptr &frame, CORE_ADDR *pc)
3106 gdb_byte buf[8];
3107 CORE_ADDR jb_addr;
3108 struct gdbarch *gdbarch = get_frame_arch (frame);
3109 i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
3110 int jb_pc_offset = tdep->jb_pc_offset;
3111 int len = builtin_type (gdbarch)->builtin_func_ptr->length ();
3113 /* If JB_PC_OFFSET is -1, we have no way to find out where the
3114 longjmp will land. */
3115 if (jb_pc_offset == -1)
3116 return 0;
3118 get_frame_register (frame, AMD64_RDI_REGNUM, buf);
3119 jb_addr = extract_typed_address
3120 (buf, builtin_type (gdbarch)->builtin_data_ptr);
3121 if (target_read_memory (jb_addr + jb_pc_offset, buf, len))
3122 return 0;
3124 *pc = extract_typed_address (buf, builtin_type (gdbarch)->builtin_func_ptr);
3126 return 1;
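/* Illustration (the offset below is hypothetical; the real value is
   supplied by the OS-ABI code through tdep->jb_pc_offset): with %rdi
   holding JB_ADDR and jb_pc_offset = 56, the saved PC is read as a
   pointer-sized value from JB_ADDR + 56 and returned through *PC.  */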
3129 static const int amd64_record_regmap[] =
3131 AMD64_RAX_REGNUM, AMD64_RCX_REGNUM, AMD64_RDX_REGNUM, AMD64_RBX_REGNUM,
3132 AMD64_RSP_REGNUM, AMD64_RBP_REGNUM, AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
3133 AMD64_R8_REGNUM, AMD64_R9_REGNUM, AMD64_R10_REGNUM, AMD64_R11_REGNUM,
3134 AMD64_R12_REGNUM, AMD64_R13_REGNUM, AMD64_R14_REGNUM, AMD64_R15_REGNUM,
3135 AMD64_RIP_REGNUM, AMD64_EFLAGS_REGNUM, AMD64_CS_REGNUM, AMD64_SS_REGNUM,
3136 AMD64_DS_REGNUM, AMD64_ES_REGNUM, AMD64_FS_REGNUM, AMD64_GS_REGNUM
3139 /* Implement the "in_indirect_branch_thunk" gdbarch function. */
3141 static bool
3142 amd64_in_indirect_branch_thunk (struct gdbarch *gdbarch, CORE_ADDR pc)
3144 return x86_in_indirect_branch_thunk (pc, amd64_register_names,
3145 AMD64_RAX_REGNUM,
3146 AMD64_RIP_REGNUM);
3149 void
3150 amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch,
3151 const target_desc *default_tdesc)
3153 i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
3154 const struct target_desc *tdesc = info.target_desc;
3155 static const char *const stap_integer_prefixes[] = { "$", NULL };
3156 static const char *const stap_register_prefixes[] = { "%", NULL };
3157 static const char *const stap_register_indirection_prefixes[] = { "(",
3158 NULL };
3159 static const char *const stap_register_indirection_suffixes[] = { ")",
3160 NULL };
3162 /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
3163 floating-point registers. */
3164 tdep->sizeof_fpregset = I387_SIZEOF_FXSAVE;
3165 tdep->fpregset = &amd64_fpregset;
3167 if (! tdesc_has_registers (tdesc))
3168 tdesc = default_tdesc;
3169 tdep->tdesc = tdesc;
3171 tdep->num_core_regs = AMD64_NUM_GREGS + I387_NUM_REGS;
3172 tdep->register_names = amd64_register_names;
3174 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx512") != NULL)
3176 tdep->zmmh_register_names = amd64_zmmh_names;
3177 tdep->k_register_names = amd64_k_names;
3178 tdep->xmm_avx512_register_names = amd64_xmm_avx512_names;
3179 tdep->ymm16h_register_names = amd64_ymmh_avx512_names;
3181 tdep->num_zmm_regs = 32;
3182 tdep->num_xmm_avx512_regs = 16;
3183 tdep->num_ymm_avx512_regs = 16;
3185 tdep->zmm0h_regnum = AMD64_ZMM0H_REGNUM;
3186 tdep->k0_regnum = AMD64_K0_REGNUM;
3187 tdep->xmm16_regnum = AMD64_XMM16_REGNUM;
3188 tdep->ymm16h_regnum = AMD64_YMM16H_REGNUM;
3191 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx") != NULL)
3193 tdep->ymmh_register_names = amd64_ymmh_names;
3194 tdep->num_ymm_regs = 16;
3195 tdep->ymm0h_regnum = AMD64_YMM0H_REGNUM;
3198 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.mpx") != NULL)
3200 tdep->mpx_register_names = amd64_mpx_names;
3201 tdep->bndcfgu_regnum = AMD64_BNDCFGU_REGNUM;
3202 tdep->bnd0r_regnum = AMD64_BND0R_REGNUM;
3205 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.segments") != NULL)
3207 tdep->fsbase_regnum = AMD64_FSBASE_REGNUM;
3210 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.pkeys") != NULL)
3212 tdep->pkeys_register_names = amd64_pkeys_names;
3213 tdep->pkru_regnum = AMD64_PKRU_REGNUM;
3214 tdep->num_pkeys_regs = 1;
3217 tdep->num_byte_regs = 20;
3218 tdep->num_word_regs = 16;
3219 tdep->num_dword_regs = 16;
3220 /* Avoid wiring in the MMX registers for now. */
3221 tdep->num_mmx_regs = 0;
3223 set_gdbarch_pseudo_register_read_value (gdbarch,
3224 amd64_pseudo_register_read_value);
3225 set_gdbarch_pseudo_register_write (gdbarch, amd64_pseudo_register_write);
3226 set_gdbarch_ax_pseudo_register_collect (gdbarch,
3227 amd64_ax_pseudo_register_collect);
3229 set_tdesc_pseudo_register_name (gdbarch, amd64_pseudo_register_name);
3231 /* AMD64 has an FPU and 16 SSE registers. */
3232 tdep->st0_regnum = AMD64_ST0_REGNUM;
3233 tdep->num_xmm_regs = 16;
3235 /* This is what all the fuss is about. */
3236 set_gdbarch_long_bit (gdbarch, 64);
3237 set_gdbarch_long_long_bit (gdbarch, 64);
3238 set_gdbarch_ptr_bit (gdbarch, 64);
3240 /* In contrast to the i386, on AMD64 a `long double' actually takes
3241 up 128 bits, even though it's still based on the i387 extended
3242 floating-point format which has only 80 significant bits. */
3243 set_gdbarch_long_double_bit (gdbarch, 128);
3245 set_gdbarch_num_regs (gdbarch, AMD64_NUM_REGS);
3247 /* Register numbers of various important registers. */
3248 set_gdbarch_sp_regnum (gdbarch, AMD64_RSP_REGNUM); /* %rsp */
3249 set_gdbarch_pc_regnum (gdbarch, AMD64_RIP_REGNUM); /* %rip */
3250 set_gdbarch_ps_regnum (gdbarch, AMD64_EFLAGS_REGNUM); /* %eflags */
3251 set_gdbarch_fp0_regnum (gdbarch, AMD64_ST0_REGNUM); /* %st(0) */
3253 /* The "default" register numbering scheme for AMD64 is referred to
3254 as the "DWARF Register Number Mapping" in the System V psABI.
3255 The preferred debugging format for all known AMD64 targets is
3256 actually DWARF2, and GCC doesn't seem to support DWARF (that is
3257 DWARF-1), but we provide the same mapping just in case. This
3258 mapping is also used for stabs, which GCC does support. */
3259 set_gdbarch_stab_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
3260 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
3262 /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
3263 be in use on any of the supported AMD64 targets. */
3265 /* Call dummy code. */
3266 set_gdbarch_push_dummy_call (gdbarch, amd64_push_dummy_call);
3267 set_gdbarch_frame_align (gdbarch, amd64_frame_align);
3268 set_gdbarch_frame_red_zone_size (gdbarch, 128);
3270 set_gdbarch_convert_register_p (gdbarch, i387_convert_register_p);
3271 set_gdbarch_register_to_value (gdbarch, i387_register_to_value);
3272 set_gdbarch_value_to_register (gdbarch, i387_value_to_register);
3274 set_gdbarch_return_value_as_value (gdbarch, amd64_return_value);
3276 set_gdbarch_skip_prologue (gdbarch, amd64_skip_prologue);
3278 tdep->record_regmap = amd64_record_regmap;
3280 set_gdbarch_dummy_id (gdbarch, amd64_dummy_id);
3282 /* Hook the function epilogue frame unwinder. This unwinder is
3283 appended to the list first, so that it supersedes the other
3284 unwinders in function epilogues. */
3285 frame_unwind_prepend_unwinder (gdbarch, &amd64_epilogue_override_frame_unwind);
3287 frame_unwind_append_unwinder (gdbarch, &amd64_epilogue_frame_unwind);
3289 /* Hook the prologue-based frame unwinders. */
3290 frame_unwind_append_unwinder (gdbarch, &amd64_sigtramp_frame_unwind);
3291 frame_unwind_append_unwinder (gdbarch, &amd64_frame_unwind);
3292 frame_base_set_default (gdbarch, &amd64_frame_base);
3294 set_gdbarch_get_longjmp_target (gdbarch, amd64_get_longjmp_target);
3296 set_gdbarch_relocate_instruction (gdbarch, amd64_relocate_instruction);
3298 set_gdbarch_gen_return_address (gdbarch, amd64_gen_return_address);
3300 set_gdbarch_stack_frame_destroyed_p (gdbarch, amd64_stack_frame_destroyed_p);
3302 /* SystemTap variables and functions. */
3303 set_gdbarch_stap_integer_prefixes (gdbarch, stap_integer_prefixes);
3304 set_gdbarch_stap_register_prefixes (gdbarch, stap_register_prefixes);
3305 set_gdbarch_stap_register_indirection_prefixes (gdbarch,
3306 stap_register_indirection_prefixes);
3307 set_gdbarch_stap_register_indirection_suffixes (gdbarch,
3308 stap_register_indirection_suffixes);
3309 set_gdbarch_stap_is_single_operand (gdbarch,
3310 i386_stap_is_single_operand);
3311 set_gdbarch_stap_parse_special_token (gdbarch,
3312 i386_stap_parse_special_token);
3313 set_gdbarch_insn_is_call (gdbarch, amd64_insn_is_call);
3314 set_gdbarch_insn_is_ret (gdbarch, amd64_insn_is_ret);
3315 set_gdbarch_insn_is_jump (gdbarch, amd64_insn_is_jump);
3317 set_gdbarch_in_indirect_branch_thunk (gdbarch,
3318 amd64_in_indirect_branch_thunk);
3320 register_amd64_ravenscar_ops (gdbarch);
3323 /* Initialize ARCH for x86-64, no osabi. */
3325 static void
3326 amd64_none_init_abi (gdbarch_info info, gdbarch *arch)
3328 amd64_init_abi (info, arch, amd64_target_description (X86_XSTATE_SSE_MASK,
3329 true));
3332 static struct type *
3333 amd64_x32_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
3335 i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
3337 switch (regnum - tdep->eax_regnum)
3339 case AMD64_RBP_REGNUM: /* %ebp */
3340 case AMD64_RSP_REGNUM: /* %esp */
3341 return builtin_type (gdbarch)->builtin_data_ptr;
3342 case AMD64_RIP_REGNUM: /* %eip */
3343 return builtin_type (gdbarch)->builtin_func_ptr;
3346 return i386_pseudo_register_type (gdbarch, regnum);
3349 void
3350 amd64_x32_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch,
3351 const target_desc *default_tdesc)
3353 i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
3355 amd64_init_abi (info, gdbarch, default_tdesc);
3357 tdep->num_dword_regs = 17;
3358 set_tdesc_pseudo_register_type (gdbarch, amd64_x32_pseudo_register_type);
3360 set_gdbarch_long_bit (gdbarch, 32);
3361 set_gdbarch_ptr_bit (gdbarch, 32);
3364 /* Initialize ARCH for x64-32, no osabi. */
3366 static void
3367 amd64_x32_none_init_abi (gdbarch_info info, gdbarch *arch)
3369 amd64_x32_init_abi (info, arch,
3370 amd64_target_description (X86_XSTATE_SSE_MASK, true));
3373 /* Return the target description for a specified XSAVE feature mask. */
3375 const struct target_desc *
3376 amd64_target_description (uint64_t xcr0, bool segments)
3378 static target_desc *amd64_tdescs \
3379 [2/*AVX*/][2/*MPX*/][2/*AVX512*/][2/*PKRU*/][2/*segments*/] = {};
3380 target_desc **tdesc;
3382 tdesc = &amd64_tdescs[(xcr0 & X86_XSTATE_AVX) ? 1 : 0]
3383 [(xcr0 & X86_XSTATE_MPX) ? 1 : 0]
3384 [(xcr0 & X86_XSTATE_AVX512) ? 1 : 0]
3385 [(xcr0 & X86_XSTATE_PKRU) ? 1 : 0]
3386 [segments ? 1 : 0];
3388 if (*tdesc == NULL)
3389 *tdesc = amd64_create_target_description (xcr0, false, false,
3390 segments);
3392 return *tdesc;
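/* For instance, xcr0 = X86_XSTATE_AVX_MASK with SEGMENTS true selects
   and lazily fills amd64_tdescs[1][0][0][0][1]; later calls with the
   same arguments reuse that cached description.  */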
3395 void _initialize_amd64_tdep ();
3396 void
3397 _initialize_amd64_tdep ()
3399 gdbarch_register_osabi (bfd_arch_i386, bfd_mach_x86_64, GDB_OSABI_NONE,
3400 amd64_none_init_abi);
3401 gdbarch_register_osabi (bfd_arch_i386, bfd_mach_x64_32, GDB_OSABI_NONE,
3402 amd64_x32_none_init_abi);
3406 /* The 64-bit FXSAVE format differs from the 32-bit format in the
3407 sense that the instruction pointer and data pointer are simply
3408 64-bit offsets into the code segment and the data segment instead
3409 of a selector offset pair. The functions below store the upper 32
3410 bits of these pointers (instead of just the 16 bits of the segment
3411 selector). */
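/* Relevant FXSAVE area layout (per the Intel SDM, 64-bit format):
   bytes 8..15 hold the 64-bit FPU instruction pointer and bytes
   16..23 the 64-bit FPU operand pointer, so REGS + 12 and REGS + 20
   below pick up the upper 32 bits that the 32-bit selector:offset
   format lacks.  */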
3413 /* Fill register REGNUM in REGCACHE with the appropriate
3414 floating-point or SSE register value from *FXSAVE. If REGNUM is
3415 -1, do this for all registers. This function masks off any of the
3416 reserved bits in *FXSAVE. */
3418 void
3419 amd64_supply_fxsave (struct regcache *regcache, int regnum,
3420 const void *fxsave)
3422 struct gdbarch *gdbarch = regcache->arch ();
3423 i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
3425 i387_supply_fxsave (regcache, regnum, fxsave);
3427 if (fxsave
3428 && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3430 const gdb_byte *regs = (const gdb_byte *) fxsave;
3432 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3433 regcache->raw_supply (I387_FISEG_REGNUM (tdep), regs + 12);
3434 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3435 regcache->raw_supply (I387_FOSEG_REGNUM (tdep), regs + 20);
3439 /* Similar to amd64_supply_fxsave, but use XSAVE extended state. */
3441 void
3442 amd64_supply_xsave (struct regcache *regcache, int regnum,
3443 const void *xsave)
3445 struct gdbarch *gdbarch = regcache->arch ();
3446 i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
3448 i387_supply_xsave (regcache, regnum, xsave);
3450 if (xsave
3451 && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3453 const gdb_byte *regs = (const gdb_byte *) xsave;
3454 ULONGEST clear_bv;
3456 clear_bv = i387_xsave_get_clear_bv (gdbarch, xsave);
3458 /* If the FISEG and FOSEG registers have not been initialised yet
3459 (their CLEAR_BV bit is set) then their default values of zero will
3460 have already been set up by I387_SUPPLY_XSAVE. */
3461 if (!(clear_bv & X86_XSTATE_X87))
3463 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3464 regcache->raw_supply (I387_FISEG_REGNUM (tdep), regs + 12);
3465 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3466 regcache->raw_supply (I387_FOSEG_REGNUM (tdep), regs + 20);
3471 /* Fill register REGNUM (if it is a floating-point or SSE register) in
3472 *FXSAVE with the value from REGCACHE. If REGNUM is -1, do this for
3473 all registers. This function doesn't touch any of the reserved
3474 bits in *FXSAVE. */
3476 void
3477 amd64_collect_fxsave (const struct regcache *regcache, int regnum,
3478 void *fxsave)
3480 struct gdbarch *gdbarch = regcache->arch ();
3481 i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
3482 gdb_byte *regs = (gdb_byte *) fxsave;
3484 i387_collect_fxsave (regcache, regnum, fxsave);
3486 if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3488 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3489 regcache->raw_collect (I387_FISEG_REGNUM (tdep), regs + 12);
3490 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3491 regcache->raw_collect (I387_FOSEG_REGNUM (tdep), regs + 20);
3495 /* Similar to amd64_collect_fxsave, but use XSAVE extended state. */
3497 void
3498 amd64_collect_xsave (const struct regcache *regcache, int regnum,
3499 void *xsave, int gcore)
3501 struct gdbarch *gdbarch = regcache->arch ();
3502 i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
3503 gdb_byte *regs = (gdb_byte *) xsave;
3505 i387_collect_xsave (regcache, regnum, xsave, gcore);
3507 if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3509 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3510 regcache->raw_collect (I387_FISEG_REGNUM (tdep),
3511 regs + 12);
3512 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3513 regcache->raw_collect (I387_FOSEG_REGNUM (tdep),
3514 regs + 20);