/* Target-dependent code for AMD64.

   Copyright (C) 2001-2024 Free Software Foundation, Inc.

   Contributed by Jiri Smid, SuSE Labs.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "extract-store-integer.h"
#include "language.h"
#include "opcode/i386.h"
#include "dis-asm.h"
#include "arch-utils.h"
#include "dummy-frame.h"
#include "frame.h"
#include "frame-base.h"
#include "frame-unwind.h"
#include "inferior.h"
#include "infrun.h"
#include "cli/cli-cmds.h"
#include "gdbcore.h"
#include "objfiles.h"
#include "regcache.h"
#include "regset.h"
#include "symfile.h"
#include "disasm.h"
#include "amd64-tdep.h"
#include "i387-tdep.h"
#include "gdbsupport/x86-xstate.h"
#include <algorithm>
#include "target-descriptions.h"
#include "arch/amd64.h"
#include "producer.h"
#include "ax.h"
#include "ax-gdb.h"
#include "gdbsupport/byte-vector.h"
#include "osabi.h"
#include "x86-tdep.h"
#include "amd64-ravenscar-thread.h"
/* Note that the AMD64 architecture was previously known as x86-64.
   The latter is (forever) engraved into the canonical system name as
   returned by config.guess, and used as the name for the AMD64 port
   of GNU/Linux.  The BSD's have renamed their ports to amd64; they
   don't like to shout.  For GDB we prefer the amd64_-prefix over the
   x86_64_-prefix since it's so much easier to type.  */

/* Register information.  */
static const char * const amd64_register_names[] =
{
  "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp",

  /* %r8 is indeed register number 8.  */
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
  "rip", "eflags", "cs", "ss", "ds", "es", "fs", "gs",

  /* %st0 is register number 24.  */
  "st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7",
  "fctrl", "fstat", "ftag", "fiseg", "fioff", "foseg", "fooff", "fop",

  /* %xmm0 is register number 40.  */
  "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
  "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
  "mxcsr",
};

static const char * const amd64_ymm_names[] =
{
  "ymm0", "ymm1", "ymm2", "ymm3",
  "ymm4", "ymm5", "ymm6", "ymm7",
  "ymm8", "ymm9", "ymm10", "ymm11",
  "ymm12", "ymm13", "ymm14", "ymm15"
};

static const char * const amd64_ymm_avx512_names[] =
{
  "ymm16", "ymm17", "ymm18", "ymm19",
  "ymm20", "ymm21", "ymm22", "ymm23",
  "ymm24", "ymm25", "ymm26", "ymm27",
  "ymm28", "ymm29", "ymm30", "ymm31"
};

static const char * const amd64_ymmh_names[] =
{
  "ymm0h", "ymm1h", "ymm2h", "ymm3h",
  "ymm4h", "ymm5h", "ymm6h", "ymm7h",
  "ymm8h", "ymm9h", "ymm10h", "ymm11h",
  "ymm12h", "ymm13h", "ymm14h", "ymm15h"
};

static const char * const amd64_ymmh_avx512_names[] =
{
  "ymm16h", "ymm17h", "ymm18h", "ymm19h",
  "ymm20h", "ymm21h", "ymm22h", "ymm23h",
  "ymm24h", "ymm25h", "ymm26h", "ymm27h",
  "ymm28h", "ymm29h", "ymm30h", "ymm31h"
};

static const char * const amd64_mpx_names[] =
{
  "bnd0raw", "bnd1raw", "bnd2raw", "bnd3raw", "bndcfgu", "bndstatus"
};

static const char * const amd64_k_names[] =
{
  "k0", "k1", "k2", "k3",
  "k4", "k5", "k6", "k7"
};

static const char * const amd64_zmmh_names[] =
{
  "zmm0h", "zmm1h", "zmm2h", "zmm3h",
  "zmm4h", "zmm5h", "zmm6h", "zmm7h",
  "zmm8h", "zmm9h", "zmm10h", "zmm11h",
  "zmm12h", "zmm13h", "zmm14h", "zmm15h",
  "zmm16h", "zmm17h", "zmm18h", "zmm19h",
  "zmm20h", "zmm21h", "zmm22h", "zmm23h",
  "zmm24h", "zmm25h", "zmm26h", "zmm27h",
  "zmm28h", "zmm29h", "zmm30h", "zmm31h"
};

static const char * const amd64_zmm_names[] =
{
  "zmm0", "zmm1", "zmm2", "zmm3",
  "zmm4", "zmm5", "zmm6", "zmm7",
  "zmm8", "zmm9", "zmm10", "zmm11",
  "zmm12", "zmm13", "zmm14", "zmm15",
  "zmm16", "zmm17", "zmm18", "zmm19",
  "zmm20", "zmm21", "zmm22", "zmm23",
  "zmm24", "zmm25", "zmm26", "zmm27",
  "zmm28", "zmm29", "zmm30", "zmm31"
};

static const char * const amd64_xmm_avx512_names[] = {
  "xmm16", "xmm17", "xmm18", "xmm19",
  "xmm20", "xmm21", "xmm22", "xmm23",
  "xmm24", "xmm25", "xmm26", "xmm27",
  "xmm28", "xmm29", "xmm30", "xmm31"
};

static const char * const amd64_pkeys_names[] = {
  "pkru"
};
/* DWARF Register Number Mapping as defined in the System V psABI,
   section 3.6.  */

static int amd64_dwarf_regmap[] =
{
  /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI.  */
  AMD64_RAX_REGNUM, AMD64_RDX_REGNUM,
  AMD64_RCX_REGNUM, AMD64_RBX_REGNUM,
  AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,

  /* Frame Pointer Register RBP.  */
  AMD64_RBP_REGNUM,

  /* Stack Pointer Register RSP.  */
  AMD64_RSP_REGNUM,

  /* Extended Integer Registers 8 - 15.  */
  AMD64_R8_REGNUM,		/* %r8 */
  AMD64_R9_REGNUM,		/* %r9 */
  AMD64_R10_REGNUM,		/* %r10 */
  AMD64_R11_REGNUM,		/* %r11 */
  AMD64_R12_REGNUM,		/* %r12 */
  AMD64_R13_REGNUM,		/* %r13 */
  AMD64_R14_REGNUM,		/* %r14 */
  AMD64_R15_REGNUM,		/* %r15 */

  /* Return Address RA.  Mapped to RIP.  */
  AMD64_RIP_REGNUM,

  /* SSE Registers 0 - 7.  */
  AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
  AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
  AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
  AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,

  /* Extended SSE Registers 8 - 15.  */
  AMD64_XMM0_REGNUM + 8, AMD64_XMM0_REGNUM + 9,
  AMD64_XMM0_REGNUM + 10, AMD64_XMM0_REGNUM + 11,
  AMD64_XMM0_REGNUM + 12, AMD64_XMM0_REGNUM + 13,
  AMD64_XMM0_REGNUM + 14, AMD64_XMM0_REGNUM + 15,

  /* Floating Point Registers 0-7.  */
  AMD64_ST0_REGNUM + 0, AMD64_ST0_REGNUM + 1,
  AMD64_ST0_REGNUM + 2, AMD64_ST0_REGNUM + 3,
  AMD64_ST0_REGNUM + 4, AMD64_ST0_REGNUM + 5,
  AMD64_ST0_REGNUM + 6, AMD64_ST0_REGNUM + 7,

  /* MMX Registers 0 - 7.
     We have to handle those registers specifically, as their register
     number within GDB depends on the target (or they may even not be
     available at all).  */
  -1, -1, -1, -1, -1, -1, -1, -1,

  /* Control and Status Flags Register.  */
  AMD64_EFLAGS_REGNUM,

  /* Selector Registers.  */
  AMD64_ES_REGNUM,
  AMD64_CS_REGNUM,
  AMD64_SS_REGNUM,
  AMD64_DS_REGNUM,
  AMD64_FS_REGNUM,
  AMD64_GS_REGNUM,
  -1,
  -1,
  /* Segment Base Address Registers.  */
  -1,
  -1,
  -1,
  -1,

  /* Special Selector Registers.  */
  -1,
  -1,

  /* Floating Point Control Registers.  */
  AMD64_MXCSR_REGNUM,
  AMD64_FCTRL_REGNUM,
  AMD64_FSTAT_REGNUM,
  /* XMM16-XMM31.  */
  AMD64_XMM16_REGNUM + 0, AMD64_XMM16_REGNUM + 1,
  AMD64_XMM16_REGNUM + 2, AMD64_XMM16_REGNUM + 3,
  AMD64_XMM16_REGNUM + 4, AMD64_XMM16_REGNUM + 5,
  AMD64_XMM16_REGNUM + 6, AMD64_XMM16_REGNUM + 7,
  AMD64_XMM16_REGNUM + 8, AMD64_XMM16_REGNUM + 9,
  AMD64_XMM16_REGNUM + 10, AMD64_XMM16_REGNUM + 11,
  AMD64_XMM16_REGNUM + 12, AMD64_XMM16_REGNUM + 13,
  AMD64_XMM16_REGNUM + 14, AMD64_XMM16_REGNUM + 15,

  /* Reserved.  */
  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,

  /* Mask Registers.  */
  AMD64_K0_REGNUM + 0, AMD64_K0_REGNUM + 1,
  AMD64_K0_REGNUM + 2, AMD64_K0_REGNUM + 3,
  AMD64_K0_REGNUM + 4, AMD64_K0_REGNUM + 5,
  AMD64_K0_REGNUM + 6, AMD64_K0_REGNUM + 7
};

static const int amd64_dwarf_regmap_len =
  (sizeof (amd64_dwarf_regmap) / sizeof (amd64_dwarf_regmap[0]));
/* Convert DWARF register number REG to the appropriate register
   number used by GDB.  */

static int
amd64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
  i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
  int ymm0_regnum = tdep->ymm0_regnum;
  int regnum = -1;

  if (reg >= 0 && reg < amd64_dwarf_regmap_len)
    regnum = amd64_dwarf_regmap[reg];

  if (ymm0_regnum >= 0 && i386_xmm_regnum_p (gdbarch, regnum))
    regnum += ymm0_regnum - I387_XMM0_REGNUM (tdep);

  return regnum;
}
/* Map architectural register numbers to gdb register numbers.  */

static const int amd64_arch_regmap[16] =
{
  AMD64_RAX_REGNUM,	/* %rax */
  AMD64_RCX_REGNUM,	/* %rcx */
  AMD64_RDX_REGNUM,	/* %rdx */
  AMD64_RBX_REGNUM,	/* %rbx */
  AMD64_RSP_REGNUM,	/* %rsp */
  AMD64_RBP_REGNUM,	/* %rbp */
  AMD64_RSI_REGNUM,	/* %rsi */
  AMD64_RDI_REGNUM,	/* %rdi */
  AMD64_R8_REGNUM,	/* %r8 */
  AMD64_R9_REGNUM,	/* %r9 */
  AMD64_R10_REGNUM,	/* %r10 */
  AMD64_R11_REGNUM,	/* %r11 */
  AMD64_R12_REGNUM,	/* %r12 */
  AMD64_R13_REGNUM,	/* %r13 */
  AMD64_R14_REGNUM,	/* %r14 */
  AMD64_R15_REGNUM	/* %r15 */
};

static const int amd64_arch_regmap_len =
  (sizeof (amd64_arch_regmap) / sizeof (amd64_arch_regmap[0]));

/* Convert architectural register number REG to the appropriate register
   number used by GDB.  */

static int
amd64_arch_reg_to_regnum (int reg)
{
  gdb_assert (reg >= 0 && reg < amd64_arch_regmap_len);

  return amd64_arch_regmap[reg];
}
/* Register names for byte pseudo-registers.  */

static const char * const amd64_byte_names[] =
{
  "al", "bl", "cl", "dl", "sil", "dil", "bpl", "spl",
  "r8l", "r9l", "r10l", "r11l", "r12l", "r13l", "r14l", "r15l",
  "ah", "bh", "ch", "dh"
};

/* Number of lower byte registers.  */
#define AMD64_NUM_LOWER_BYTE_REGS 16

/* Register names for word pseudo-registers.  */

static const char * const amd64_word_names[] =
{
  "ax", "bx", "cx", "dx", "si", "di", "bp", "",
  "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w"
};

/* Register names for dword pseudo-registers.  */

static const char * const amd64_dword_names[] =
{
  "eax", "ebx", "ecx", "edx", "esi", "edi", "ebp", "esp",
  "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d",
  "eip"
};
/* Return the name of register REGNUM.  */

static const char *
amd64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
{
  i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
  if (i386_byte_regnum_p (gdbarch, regnum))
    return amd64_byte_names[regnum - tdep->al_regnum];
  else if (i386_zmm_regnum_p (gdbarch, regnum))
    return amd64_zmm_names[regnum - tdep->zmm0_regnum];
  else if (i386_ymm_regnum_p (gdbarch, regnum))
    return amd64_ymm_names[regnum - tdep->ymm0_regnum];
  else if (i386_ymm_avx512_regnum_p (gdbarch, regnum))
    return amd64_ymm_avx512_names[regnum - tdep->ymm16_regnum];
  else if (i386_word_regnum_p (gdbarch, regnum))
    return amd64_word_names[regnum - tdep->ax_regnum];
  else if (i386_dword_regnum_p (gdbarch, regnum))
    return amd64_dword_names[regnum - tdep->eax_regnum];
  else
    return i386_pseudo_register_name (gdbarch, regnum);
}
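/* Implement the 'pseudo_register_read_value' gdbarch method.  */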
static value *
amd64_pseudo_register_read_value (gdbarch *gdbarch,
				  const frame_info_ptr &next_frame,
				  int regnum)
{
  i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);

  if (i386_byte_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->al_regnum;

      /* Extract (always little endian).  */
      if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
	{
	  gpnum -= AMD64_NUM_LOWER_BYTE_REGS;

	  /* Special handling for AH, BH, CH, DH.  */
	  return pseudo_from_raw_part (next_frame, regnum, gpnum, 1);
	}
      else
	return pseudo_from_raw_part (next_frame, regnum, gpnum, 0);
    }
  else if (i386_dword_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->eax_regnum;

      return pseudo_from_raw_part (next_frame, regnum, gpnum, 0);
    }
  else
    return i386_pseudo_register_read_value (gdbarch, next_frame, regnum);
}
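/* Implement the 'pseudo_register_write' gdbarch method.  */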
static void
amd64_pseudo_register_write (gdbarch *gdbarch,
			     const frame_info_ptr &next_frame,
			     int regnum, gdb::array_view<const gdb_byte> buf)
{
  i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);

  if (i386_byte_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->al_regnum;

      if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
	{
	  gpnum -= AMD64_NUM_LOWER_BYTE_REGS;
	  pseudo_to_raw_part (next_frame, buf, gpnum, 1);
	}
      else
	pseudo_to_raw_part (next_frame, buf, gpnum, 0);
    }
  else if (i386_dword_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->eax_regnum;
      pseudo_to_raw_part (next_frame, buf, gpnum, 0);
    }
  else
    i386_pseudo_register_write (gdbarch, next_frame, regnum, buf);
}
/* Implement the 'ax_pseudo_register_collect' gdbarch method.  */

static int
amd64_ax_pseudo_register_collect (struct gdbarch *gdbarch,
				  struct agent_expr *ax, int regnum)
{
  i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);

  if (i386_byte_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->al_regnum;

      if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
	ax_reg_mask (ax, gpnum - AMD64_NUM_LOWER_BYTE_REGS);
      else
	ax_reg_mask (ax, gpnum);
      return 0;
    }
  else if (i386_dword_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->eax_regnum;

      ax_reg_mask (ax, gpnum);
      return 0;
    }
  else
    return i386_ax_pseudo_register_collect (gdbarch, ax, regnum);
}
/* Register classes as defined in the psABI.  */

enum amd64_reg_class
{
  AMD64_INTEGER,
  AMD64_SSE,
  AMD64_SSEUP,
  AMD64_X87,
  AMD64_X87UP,
  AMD64_COMPLEX_X87,
  AMD64_NO_CLASS,
  AMD64_MEMORY
};

/* Return the union class of CLASS1 and CLASS2.  See the psABI for
   details.  */

static enum amd64_reg_class
amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2)
{
  /* Rule (a): If both classes are equal, this is the resulting class.  */
  if (class1 == class2)
    return class1;

  /* Rule (b): If one of the classes is NO_CLASS, the resulting class
     is the other class.  */
  if (class1 == AMD64_NO_CLASS)
    return class2;
  if (class2 == AMD64_NO_CLASS)
    return class1;

  /* Rule (c): If one of the classes is MEMORY, the result is MEMORY.  */
  if (class1 == AMD64_MEMORY || class2 == AMD64_MEMORY)
    return AMD64_MEMORY;

  /* Rule (d): If one of the classes is INTEGER, the result is INTEGER.  */
  if (class1 == AMD64_INTEGER || class2 == AMD64_INTEGER)
    return AMD64_INTEGER;

  /* Rule (e): If one of the classes is X87, X87UP, COMPLEX_X87 class,
     MEMORY is used as class.  */
  if (class1 == AMD64_X87 || class1 == AMD64_X87UP
      || class1 == AMD64_COMPLEX_X87 || class2 == AMD64_X87
      || class2 == AMD64_X87UP || class2 == AMD64_COMPLEX_X87)
    return AMD64_MEMORY;

  /* Rule (f): Otherwise class SSE is used.  */
  return AMD64_SSE;
}
static void amd64_classify (struct type *type, enum amd64_reg_class theclass[2]);

/* Return true if TYPE is a structure or union with unaligned fields.  */

static bool
amd64_has_unaligned_fields (struct type *type)
{
  if (type->code () == TYPE_CODE_STRUCT
      || type->code () == TYPE_CODE_UNION)
    {
      for (int i = 0; i < type->num_fields (); i++)
	{
	  struct type *subtype = check_typedef (type->field (i).type ());

	  /* Ignore static fields, empty fields (for example nested
	     empty structures), and bitfields (these are handled by
	     the caller).  */
	  if (type->field (i).is_static ()
	      || (type->field (i).bitsize () == 0
		  && subtype->length () == 0)
	      || type->field (i).is_packed ())
	    continue;

	  int bitpos = type->field (i).loc_bitpos ();

	  if (bitpos % 8 != 0)
	    return true;

	  int align = type_align (subtype);
	  if (align == 0)
	    error (_("could not determine alignment of type"));

	  int bytepos = bitpos / 8;
	  if (bytepos % align != 0)
	    return true;

	  if (amd64_has_unaligned_fields (subtype))
	    return true;
	}
    }

  return false;
}
/* Classify field I of TYPE starting at BITOFFSET according to the rules for
   structures and union types, and store the result in THECLASS.  */

static void
amd64_classify_aggregate_field (struct type *type, int i,
				enum amd64_reg_class theclass[2],
				unsigned int bitoffset)
{
  struct type *subtype = check_typedef (type->field (i).type ());
  enum amd64_reg_class subclass[2];
  int bitsize = type->field (i).bitsize ();

  if (bitsize == 0)
    bitsize = subtype->length () * 8;

  /* Ignore static fields, or empty fields, for example nested
     empty structures.  */
  if (type->field (i).is_static () || bitsize == 0)
    return;

  int bitpos = bitoffset + type->field (i).loc_bitpos ();
  int pos = bitpos / 64;
  int endpos = (bitpos + bitsize - 1) / 64;

  if (subtype->code () == TYPE_CODE_STRUCT
      || subtype->code () == TYPE_CODE_UNION)
    {
      /* Each field of an object is classified recursively.  */
      int j;
      for (j = 0; j < subtype->num_fields (); j++)
	amd64_classify_aggregate_field (subtype, j, theclass, bitpos);
      return;
    }

  gdb_assert (pos == 0 || pos == 1);

  amd64_classify (subtype, subclass);
  theclass[pos] = amd64_merge_classes (theclass[pos], subclass[0]);
  if (bitsize <= 64 && pos == 0 && endpos == 1)
    /* This is a bit of an odd case:  We have a field that would
       normally fit in one of the two eightbytes, except that
       it is placed in a way that this field straddles them.
       This has been seen with a structure containing an array.

       The ABI is a bit unclear in this case, but we assume that
       this field's class (stored in subclass[0]) must also be merged
       into class[1].  In other words, our field has a piece stored
       in the second eight-byte, and thus its class applies to
       the second eight-byte as well.

       In the case where the field length exceeds 8 bytes,
       it should not be necessary to merge the field class
       into class[1].  As LEN > 8, subclass[1] is necessarily
       different from AMD64_NO_CLASS.  If subclass[1] is equal
       to subclass[0], then the normal class[1]/subclass[1]
       merging will take care of everything.  For subclass[1]
       to be different from subclass[0], I can only see the case
       where we have a SSE/SSEUP or X87/X87UP pair, which both
       use up all 16 bytes of the aggregate, and are already
       handled just fine (because each portion sits on its own
       8-byte).  */
    theclass[1] = amd64_merge_classes (theclass[1], subclass[0]);
  if (pos == 0)
    theclass[1] = amd64_merge_classes (theclass[1], subclass[1]);
}
/* Classify TYPE according to the rules for aggregate (structures and
   arrays) and union types, and store the result in CLASS.  */

static void
amd64_classify_aggregate (struct type *type, enum amd64_reg_class theclass[2])
{
  /* 1. If the size of an object is larger than two times eight bytes, or
     it is a non-trivial C++ object, or it has unaligned fields, then it
     has class memory.

     It is important that the trivially_copyable check is before the
     unaligned fields check, as C++ classes with virtual base classes
     will have fields (for the virtual base classes) with non-constant
     loc_bitpos attributes, which will cause an assert to trigger within
     the unaligned field check.  As classes with virtual bases are not
     trivially copyable, checking that first avoids this problem.  */
  if (TYPE_HAS_DYNAMIC_LENGTH (type)
      || type->length () > 16
      || !language_pass_by_reference (type).trivially_copyable
      || amd64_has_unaligned_fields (type))
    {
      theclass[0] = theclass[1] = AMD64_MEMORY;
      return;
    }

  /* 2. Both eightbytes get initialized to class NO_CLASS.  */
  theclass[0] = theclass[1] = AMD64_NO_CLASS;

  /* 3. Each field of an object is classified recursively so that
     always two fields are considered.  The resulting class is
     calculated according to the classes of the fields in the
     eightbyte: */

  if (type->code () == TYPE_CODE_ARRAY)
    {
      struct type *subtype = check_typedef (type->target_type ());

      /* All fields in an array have the same type.  */
      amd64_classify (subtype, theclass);
      if (type->length () > 8 && theclass[1] == AMD64_NO_CLASS)
	theclass[1] = theclass[0];
    }
  else
    {
      int i;

      /* Structure or union.  */
      gdb_assert (type->code () == TYPE_CODE_STRUCT
		  || type->code () == TYPE_CODE_UNION);

      for (i = 0; i < type->num_fields (); i++)
	amd64_classify_aggregate_field (type, i, theclass, 0);
    }

  /* 4. Then a post merger cleanup is done: */

  /* Rule (a): If one of the classes is MEMORY, the whole argument is
     passed in memory.  */
  if (theclass[0] == AMD64_MEMORY || theclass[1] == AMD64_MEMORY)
    theclass[0] = theclass[1] = AMD64_MEMORY;

  /* Rule (b): If SSEUP is not preceded by SSE, it is converted to
     SSE.  */
  if (theclass[0] == AMD64_SSEUP)
    theclass[0] = AMD64_SSE;
  if (theclass[1] == AMD64_SSEUP && theclass[0] != AMD64_SSE)
    theclass[1] = AMD64_SSE;
}
/* Classify TYPE, and store the result in CLASS.  */

static void
amd64_classify (struct type *type, enum amd64_reg_class theclass[2])
{
  enum type_code code = type->code ();
  int len = type->length ();

  theclass[0] = theclass[1] = AMD64_NO_CLASS;

  /* Arguments of types (signed and unsigned) _Bool, char, short, int,
     long, long long, and pointers are in the INTEGER class.  Similarly,
     range types, used by languages such as Ada, are also in the INTEGER
     class.  */
  if ((code == TYPE_CODE_INT || code == TYPE_CODE_ENUM
       || code == TYPE_CODE_BOOL || code == TYPE_CODE_RANGE
       || code == TYPE_CODE_CHAR
       || code == TYPE_CODE_PTR || TYPE_IS_REFERENCE (type))
      && (len == 1 || len == 2 || len == 4 || len == 8))
    theclass[0] = AMD64_INTEGER;

  /* Arguments of types _Float16, float, double, _Decimal32, _Decimal64 and
     __m64 are in class SSE.  */
  else if ((code == TYPE_CODE_FLT || code == TYPE_CODE_DECFLOAT)
	   && (len == 2 || len == 4 || len == 8))
    /* FIXME: __m64.  */
    theclass[0] = AMD64_SSE;

  /* Arguments of types __float128, _Decimal128 and __m128 are split into
     two halves.  The least significant ones belong to class SSE, the most
     significant one to class SSEUP.  */
  else if (code == TYPE_CODE_DECFLOAT && len == 16)
    /* FIXME: __float128, __m128.  */
    theclass[0] = AMD64_SSE, theclass[1] = AMD64_SSEUP;

  /* The 64-bit mantissa of arguments of type long double belongs to
     class X87, the 16-bit exponent plus 6 bytes of padding belongs to
     class X87UP.  */
  else if (code == TYPE_CODE_FLT && len == 16)
    /* Class X87 and X87UP.  */
    theclass[0] = AMD64_X87, theclass[1] = AMD64_X87UP;

  /* Arguments of complex T - where T is one of the types _Float16, float or
     double - get treated as if they are implemented as:

     struct complexT {
       T real;
       T imag;
     };

  */
  else if (code == TYPE_CODE_COMPLEX && (len == 8 || len == 4))
    theclass[0] = AMD64_SSE;
  else if (code == TYPE_CODE_COMPLEX && len == 16)
    theclass[0] = theclass[1] = AMD64_SSE;

  /* A variable of type complex long double is classified as type
     COMPLEX_X87.  */
  else if (code == TYPE_CODE_COMPLEX && len == 32)
    theclass[0] = AMD64_COMPLEX_X87;

  /* Aggregates.  */
  else if (code == TYPE_CODE_ARRAY || code == TYPE_CODE_STRUCT
	   || code == TYPE_CODE_UNION)
    amd64_classify_aggregate (type, theclass);
}
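/* Return how TYPE is returned from a function, following the psABI
   classification above.  If READ_VALUE is non-null, read the returned
   value into it; if WRITEBUF is non-null, write its contents into the
   return registers.  */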
static enum return_value_convention
amd64_return_value (struct gdbarch *gdbarch, struct value *function,
		    struct type *type, struct regcache *regcache,
		    struct value **read_value, const gdb_byte *writebuf)
{
  enum amd64_reg_class theclass[2];
  int len = type->length ();
  static int integer_regnum[] = { AMD64_RAX_REGNUM, AMD64_RDX_REGNUM };
  static int sse_regnum[] = { AMD64_XMM0_REGNUM, AMD64_XMM1_REGNUM };
  int integer_reg = 0;
  int sse_reg = 0;
  int i;

  gdb_assert (!(read_value && writebuf));

  /* 1. Classify the return type with the classification algorithm.  */
  amd64_classify (type, theclass);

  /* 2. If the type has class MEMORY, then the caller provides space
     for the return value and passes the address of this storage in
     %rdi as if it were the first argument to the function.  In effect,
     this address becomes a hidden first argument.

     On return %rax will contain the address that has been passed in
     by the caller in %rdi.  */
  if (theclass[0] == AMD64_MEMORY)
    {
      /* As indicated by the comment above, the ABI guarantees that we
	 can always find the return value just after the function has
	 returned.  */

      if (read_value != nullptr)
	{
	  ULONGEST addr;

	  regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
	  *read_value = value_at_non_lval (type, addr);
	}

      return RETURN_VALUE_ABI_RETURNS_ADDRESS;
    }

  gdb_byte *readbuf = nullptr;
  if (read_value != nullptr)
    {
      *read_value = value::allocate (type);
      readbuf = (*read_value)->contents_raw ().data ();
    }

  /* 8. If the class is COMPLEX_X87, the real part of the value is
     returned in %st0 and the imaginary part in %st1.  */
  if (theclass[0] == AMD64_COMPLEX_X87)
    {
      if (readbuf)
	{
	  regcache->raw_read (AMD64_ST0_REGNUM, readbuf);
	  regcache->raw_read (AMD64_ST1_REGNUM, readbuf + 16);
	}

      if (writebuf)
	{
	  i387_return_value (gdbarch, regcache);
	  regcache->raw_write (AMD64_ST0_REGNUM, writebuf);
	  regcache->raw_write (AMD64_ST1_REGNUM, writebuf + 16);

	  /* Fix up the tag word such that both %st(0) and %st(1) are
	     marked as valid.  */
	  regcache_raw_write_unsigned (regcache, AMD64_FTAG_REGNUM, 0xfff);
	}

      return RETURN_VALUE_REGISTER_CONVENTION;
    }

  gdb_assert (theclass[1] != AMD64_MEMORY);
  gdb_assert (len <= 16);

  for (i = 0; len > 0; i++, len -= 8)
    {
      int regnum = -1;
      int offset = 0;

      switch (theclass[i])
	{
	case AMD64_INTEGER:
	  /* 3. If the class is INTEGER, the next available register
	     of the sequence %rax, %rdx is used.  */
	  regnum = integer_regnum[integer_reg++];
	  break;

	case AMD64_SSE:
	  /* 4. If the class is SSE, the next available SSE register
	     of the sequence %xmm0, %xmm1 is used.  */
	  regnum = sse_regnum[sse_reg++];
	  break;

	case AMD64_SSEUP:
	  /* 5. If the class is SSEUP, the eightbyte is passed in the
	     upper half of the last used SSE register.  */
	  gdb_assert (sse_reg > 0);
	  regnum = sse_regnum[sse_reg - 1];
	  offset = 8;
	  break;

	case AMD64_X87:
	  /* 6. If the class is X87, the value is returned on the X87
	     stack in %st0 as 80-bit x87 number.  */
	  regnum = AMD64_ST0_REGNUM;
	  if (writebuf)
	    i387_return_value (gdbarch, regcache);
	  break;

	case AMD64_X87UP:
	  /* 7. If the class is X87UP, the value is returned together
	     with the previous X87 value in %st0.  */
	  gdb_assert (i > 0 && theclass[0] == AMD64_X87);
	  regnum = AMD64_ST0_REGNUM;
	  offset = 8;
	  len = 2;
	  break;

	case AMD64_NO_CLASS:
	  continue;

	default:
	  gdb_assert (!"Unexpected register class.");
	}

      gdb_assert (regnum != -1);

      if (readbuf)
	regcache->raw_read_part (regnum, offset, std::min (len, 8),
				 readbuf + i * 8);
      if (writebuf)
	regcache->raw_write_part (regnum, offset, std::min (len, 8),
				  writebuf + i * 8);
    }

  return RETURN_VALUE_REGISTER_CONVENTION;
}
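/* Push the NARGS arguments in ARGS into registers and onto the stack
   according to the psABI calling convention.  SP is the current stack
   pointer; RETURN_METHOD indicates whether %rdi is reserved for a
   hidden struct-return pointer.  Return the updated stack pointer.  */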
static CORE_ADDR
amd64_push_arguments (struct regcache *regcache, int nargs, struct value **args,
		      CORE_ADDR sp, function_call_return_method return_method)
{
  static int integer_regnum[] =
  {
    AMD64_RDI_REGNUM,		/* %rdi */
    AMD64_RSI_REGNUM,		/* %rsi */
    AMD64_RDX_REGNUM,		/* %rdx */
    AMD64_RCX_REGNUM,		/* %rcx */
    AMD64_R8_REGNUM,		/* %r8 */
    AMD64_R9_REGNUM		/* %r9 */
  };
  static int sse_regnum[] =
  {
    /* %xmm0 ... %xmm7 */
    AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
    AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
    AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
    AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
  };
  struct value **stack_args = XALLOCAVEC (struct value *, nargs);
  int num_stack_args = 0;
  int num_elements = 0;
  int element = 0;
  int integer_reg = 0;
  int sse_reg = 0;
  int i;

  /* Reserve a register for the "hidden" argument.  */
  if (return_method == return_method_struct)
    integer_reg++;

  for (i = 0; i < nargs; i++)
    {
      struct type *type = args[i]->type ();
      int len = type->length ();
      enum amd64_reg_class theclass[2];
      int needed_integer_regs = 0;
      int needed_sse_regs = 0;
      int j;

      /* Classify argument.  */
      amd64_classify (type, theclass);

      /* Calculate the number of integer and SSE registers needed for
	 this argument.  */
      for (j = 0; j < 2; j++)
	{
	  if (theclass[j] == AMD64_INTEGER)
	    needed_integer_regs++;
	  else if (theclass[j] == AMD64_SSE)
	    needed_sse_regs++;
	}

      /* Check whether enough registers are available, and if the
	 argument should be passed in registers at all.  */
      if (integer_reg + needed_integer_regs > ARRAY_SIZE (integer_regnum)
	  || sse_reg + needed_sse_regs > ARRAY_SIZE (sse_regnum)
	  || (needed_integer_regs == 0 && needed_sse_regs == 0))
	{
	  /* The argument will be passed on the stack.  */
	  num_elements += ((len + 7) / 8);
	  stack_args[num_stack_args++] = args[i];
	}
      else
	{
	  /* The argument will be passed in registers.  */
	  const gdb_byte *valbuf = args[i]->contents ().data ();
	  gdb_byte buf[8];

	  gdb_assert (len <= 16);

	  for (j = 0; len > 0; j++, len -= 8)
	    {
	      int regnum = -1;
	      int offset = 0;

	      switch (theclass[j])
		{
		case AMD64_INTEGER:
		  regnum = integer_regnum[integer_reg++];
		  break;

		case AMD64_SSE:
		  regnum = sse_regnum[sse_reg++];
		  break;

		case AMD64_SSEUP:
		  gdb_assert (sse_reg > 0);
		  regnum = sse_regnum[sse_reg - 1];
		  offset = 8;
		  break;

		case AMD64_NO_CLASS:
		  continue;

		default:
		  gdb_assert (!"Unexpected register class.");
		}

	      gdb_assert (regnum != -1);
	      memset (buf, 0, sizeof buf);
	      memcpy (buf, valbuf + j * 8, std::min (len, 8));
	      regcache->raw_write_part (regnum, offset, 8, buf);
	    }
	}
    }

  /* Allocate space for the arguments on the stack.  */
  sp -= num_elements * 8;

  /* The psABI says that "The end of the input argument area shall be
     aligned on a 16 byte boundary."  */
  sp &= ~0xf;

  /* Write out the arguments to the stack.  */
  for (i = 0; i < num_stack_args; i++)
    {
      struct type *type = stack_args[i]->type ();
      const gdb_byte *valbuf = stack_args[i]->contents ().data ();
      int len = type->length ();

      write_memory (sp + element * 8, valbuf, len);
      element += ((len + 7) / 8);
    }

  /* The psABI says that "For calls that may call functions that use
     varargs or stdargs (prototype-less calls or calls to functions
     containing ellipsis (...) in the declaration) %al is used as
     hidden argument to specify the number of SSE registers used".  */
  regcache_raw_write_unsigned (regcache, AMD64_RAX_REGNUM, sse_reg);
  return sp;
}
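/* Implement the 'push_dummy_call' gdbarch method.  */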
static CORE_ADDR
amd64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
		       struct regcache *regcache, CORE_ADDR bp_addr,
		       int nargs, struct value **args, CORE_ADDR sp,
		       function_call_return_method return_method,
		       CORE_ADDR struct_addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  gdb_byte buf[8];

  /* BND registers can be in arbitrary values at the moment of the
     inferior call.  This can cause boundary violations that are not
     due to a real bug or even desired by the user.  The best we can do
     is set the BND registers to allow access to the whole memory (the
     INIT state) before pushing the inferior call.  */
  i387_reset_bnd_regs (gdbarch, regcache);

  /* Pass arguments.  */
  sp = amd64_push_arguments (regcache, nargs, args, sp, return_method);

  /* Pass "hidden" argument.  */
  if (return_method == return_method_struct)
    {
      store_unsigned_integer (buf, 8, byte_order, struct_addr);
      regcache->cooked_write (AMD64_RDI_REGNUM, buf);
    }

  /* Store return address.  */
  sp -= 8;
  store_unsigned_integer (buf, 8, byte_order, bp_addr);
  write_memory (sp, buf, 8);

  /* Finally, update the stack pointer...  */
  store_unsigned_integer (buf, 8, byte_order, sp);
  regcache->cooked_write (AMD64_RSP_REGNUM, buf);

  /* ...and fake a frame pointer.  */
  regcache->cooked_write (AMD64_RBP_REGNUM, buf);

  return sp + 16;
}
/* Displaced instruction handling.  */

/* A partially decoded instruction.
   This contains enough details for displaced stepping purposes.  */

struct amd64_insn
{
  /* The number of opcode bytes.  */
  int opcode_len;
  /* The offset of the REX/VEX instruction encoding prefix or -1 if
     not present.  */
  int enc_prefix_offset;
  /* The offset to the first opcode byte.  */
  int opcode_offset;
  /* The offset to the modrm byte or -1 if not present.  */
  int modrm_offset;

  /* The raw instruction.  */
  gdb_byte *raw_insn;
};

struct amd64_displaced_step_copy_insn_closure
  : public displaced_step_copy_insn_closure
{
  amd64_displaced_step_copy_insn_closure (int insn_buf_len)
    : insn_buf (insn_buf_len, 0)
  {
  }

  /* For rip-relative insns, saved copy of the reg we use instead of %rip.  */
  int tmp_used = 0;
  int tmp_regno;
  ULONGEST tmp_save;

  /* Details of the instruction.  */
  struct amd64_insn insn_details;

  /* The possibly modified insn.  */
  gdb::byte_vector insn_buf;
};
/* WARNING: Keep onebyte_has_modrm, twobyte_has_modrm in sync with
   ../opcodes/i386-dis.c (until libopcodes exports them, or an alternative,
   at which point delete these in favor of libopcodes' versions).  */

static const unsigned char onebyte_has_modrm[256] = {
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
  /*	   -------------------------------	  */
  /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
  /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
  /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
  /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
  /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
  /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
  /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
  /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
  /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
  /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
  /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
  /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
  /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
  /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
  /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
  /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1  /* f0 */
  /*	   -------------------------------	  */
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
};

static const unsigned char twobyte_has_modrm[256] = {
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
  /*	   -------------------------------	  */
  /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
  /* 10 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 1f */
  /* 20 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 2f */
  /* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */
  /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
  /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
  /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
  /* 70 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 7f */
  /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
  /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
  /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
  /* b0 */ 1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1, /* bf */
  /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
  /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
  /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
  /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0  /* ff */
  /*	   -------------------------------	  */
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
};
static int amd64_syscall_p (const struct amd64_insn *insn, int *lengthp);
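/* True if PFX is a REX instruction-encoding prefix byte.  */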
static int
rex_prefix_p (gdb_byte pfx)
{
  return REX_PREFIX_P (pfx);
}

/* True if PFX is the start of the 2-byte VEX prefix.  */

static bool
vex2_prefix_p (gdb_byte pfx)
{
  return pfx == 0xc5;
}

/* True if PFX is the start of the 3-byte VEX prefix.  */

static bool
vex3_prefix_p (gdb_byte pfx)
{
  return pfx == 0xc4;
}

/* Skip the legacy instruction prefixes in INSN.
   We assume INSN is properly sentineled so we don't have to worry
   about falling off the end of the buffer.  */

static gdb_byte *
amd64_skip_prefixes (gdb_byte *insn)
{
  while (1)
    {
      switch (*insn)
	{
	case DATA_PREFIX_OPCODE:
	case ADDR_PREFIX_OPCODE:
	case CS_PREFIX_OPCODE:
	case DS_PREFIX_OPCODE:
	case ES_PREFIX_OPCODE:
	case FS_PREFIX_OPCODE:
	case GS_PREFIX_OPCODE:
	case SS_PREFIX_OPCODE:
	case LOCK_PREFIX_OPCODE:
	case REPE_PREFIX_OPCODE:
	case REPNE_PREFIX_OPCODE:
	  ++insn;
	  continue;
	default:
	  break;
	}
      break;
    }

  return insn;
}
/* Return an integer register (other than RSP) that is unused as an input
   operand in INSN.
   In order to not require adding a rex prefix if the insn doesn't already
   have one, the result is restricted to RAX ... RDI, sans RSP.
   The register numbering of the result follows architecture ordering,
   e.g. RDI = 7.  */

static int
amd64_get_unused_input_int_reg (const struct amd64_insn *details)
{
  /* 1 bit for each reg */
  int used_regs_mask = 0;

  /* There can be at most 3 int regs used as inputs in an insn, and we have
     7 to choose from (RAX ... RDI, sans RSP).
     This allows us to take a conservative approach and keep things simple.
     E.g. By avoiding RAX, we don't have to specifically watch for opcodes
     that implicitly specify RAX.  */

  /* Avoid RAX.  */
  used_regs_mask |= 1 << EAX_REG_NUM;
  /* Similarly avoid RDX, an implicit operand in divides.  */
  used_regs_mask |= 1 << EDX_REG_NUM;
  /* Avoid RSP.  */
  used_regs_mask |= 1 << ESP_REG_NUM;

  /* If the opcode is one byte long and there's no ModRM byte,
     assume the opcode specifies a register.  */
  if (details->opcode_len == 1 && details->modrm_offset == -1)
    used_regs_mask |= 1 << (details->raw_insn[details->opcode_offset] & 7);

  /* Mark used regs in the modrm/sib bytes.  */
  if (details->modrm_offset != -1)
    {
      int modrm = details->raw_insn[details->modrm_offset];
      int mod = MODRM_MOD_FIELD (modrm);
      int reg = MODRM_REG_FIELD (modrm);
      int rm = MODRM_RM_FIELD (modrm);
      int have_sib = mod != 3 && rm == 4;

      /* Assume the reg field of the modrm byte specifies a register.  */
      used_regs_mask |= 1 << reg;

      if (have_sib)
	{
	  int base = SIB_BASE_FIELD (details->raw_insn[details->modrm_offset + 1]);
	  int idx = SIB_INDEX_FIELD (details->raw_insn[details->modrm_offset + 1]);
	  used_regs_mask |= 1 << base;
	  used_regs_mask |= 1 << idx;
	}
      else
	{
	  used_regs_mask |= 1 << rm;
	}
    }

  gdb_assert (used_regs_mask < 256);
  gdb_assert (used_regs_mask != 255);

  /* Finally, find a free reg.  */
  {
    int i;

    for (i = 0; i < 8; ++i)
      {
	if (! (used_regs_mask & (1 << i)))
	  return i;
      }

    /* We shouldn't get here.  */
    internal_error (_("unable to find free reg"));
  }
}
/* Extract the details of INSN that we need.  */

static void
amd64_get_insn_details (gdb_byte *insn, struct amd64_insn *details)
{
  gdb_byte *start = insn;
  int need_modrm;

  details->raw_insn = insn;

  details->opcode_len = -1;
  details->enc_prefix_offset = -1;
  details->opcode_offset = -1;
  details->modrm_offset = -1;

  /* Skip legacy instruction prefixes.  */
  insn = amd64_skip_prefixes (insn);

  /* Skip REX/VEX instruction encoding prefixes.  */
  if (rex_prefix_p (*insn))
    {
      details->enc_prefix_offset = insn - start;
      ++insn;
    }
  else if (vex2_prefix_p (*insn))
    {
      /* Don't record the offset in this case because this prefix has
	 no REX.B equivalent.  */
      insn += 2;
    }
  else if (vex3_prefix_p (*insn))
    {
      details->enc_prefix_offset = insn - start;
      insn += 3;
    }

  details->opcode_offset = insn - start;

  if (*insn == TWO_BYTE_OPCODE_ESCAPE)
    {
      /* Two or three-byte opcode.  */
      ++insn;
      need_modrm = twobyte_has_modrm[*insn];

      /* Check for three-byte opcode.  */
      switch (*insn)
	{
	case 0x24:
	case 0x25:
	case 0x38:
	case 0x3a:
	case 0x7a:
	case 0x7b:
	  ++insn;
	  details->opcode_len = 3;
	  break;
	default:
	  details->opcode_len = 2;
	  break;
	}
    }
  else
    {
      /* One-byte opcode.  */
      need_modrm = onebyte_has_modrm[*insn];
      details->opcode_len = 1;
    }

  if (need_modrm)
    {
      ++insn;
      details->modrm_offset = insn - start;
    }
}
/* Update %rip-relative addressing in INSN.

   %rip-relative addressing only uses a 32-bit displacement.
   32 bits is not enough to be guaranteed to cover the distance between where
   the real instruction is and where its copy is.
   Convert the insn to use base+disp addressing.
   We set base = pc + insn_length so we can leave disp unchanged.  */

static void
fixup_riprel (struct gdbarch *gdbarch,
	      amd64_displaced_step_copy_insn_closure *dsc,
	      CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
{
  const struct amd64_insn *insn_details = &dsc->insn_details;
  int modrm_offset = insn_details->modrm_offset;
  CORE_ADDR rip_base;
  int insn_length;
  int arch_tmp_regno, tmp_regno;
  ULONGEST orig_value;

  /* Compute the rip-relative address.  */
  insn_length = gdb_buffered_insn_length (gdbarch, dsc->insn_buf.data (),
					  dsc->insn_buf.size (), from);
  rip_base = from + insn_length;

  /* We need a register to hold the address.
     Pick one not used in the insn.
     NOTE: arch_tmp_regno uses architecture ordering, e.g. RDI = 7.  */
  arch_tmp_regno = amd64_get_unused_input_int_reg (insn_details);
  tmp_regno = amd64_arch_reg_to_regnum (arch_tmp_regno);

  /* Position of the not-B bit in the 3-byte VEX prefix (in byte 1).  */
  static constexpr gdb_byte VEX3_NOT_B = 0x20;
  /* REX.B should already be unset (VEX.!B set), since the insn was
     using rip-relative addressing, but ensure it's unset (set for VEX)
     anyway: tmp_regno is not r8-r15.  */
  if (insn_details->enc_prefix_offset != -1)
    {
      gdb_byte *pfx = &dsc->insn_buf[insn_details->enc_prefix_offset];
      if (rex_prefix_p (pfx[0]))
	pfx[0] &= ~REX_B;
      else if (vex3_prefix_p (pfx[0]))
	pfx[1] |= VEX3_NOT_B;
      else
	gdb_assert_not_reached ("unhandled prefix");
    }

  regcache_cooked_read_unsigned (regs, tmp_regno, &orig_value);
  dsc->tmp_regno = tmp_regno;
  dsc->tmp_save = orig_value;
  dsc->tmp_used = 1;

  /* Convert the ModRM field to be base+disp.  */
  dsc->insn_buf[modrm_offset] &= ~0xc7;
  dsc->insn_buf[modrm_offset] |= 0x80 + arch_tmp_regno;

  regcache_cooked_write_unsigned (regs, tmp_regno, rip_base);

  displaced_debug_printf ("%%rip-relative addressing used.");
  displaced_debug_printf ("using temp reg %d, old value %s, new value %s",
			  dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save),
			  paddress (gdbarch, rip_base));
}
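/* Fix up the displaced copy of the instruction in DSC so it executes
   correctly from TO: if the instruction uses %rip-relative addressing,
   rewrite it via fixup_riprel.  */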
static void
fixup_displaced_copy (struct gdbarch *gdbarch,
		      amd64_displaced_step_copy_insn_closure *dsc,
		      CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
{
  const struct amd64_insn *details = &dsc->insn_details;

  if (details->modrm_offset != -1)
    {
      gdb_byte modrm = details->raw_insn[details->modrm_offset];

      if ((modrm & 0xc7) == 0x05)
	{
	  /* The insn uses rip-relative addressing.
	     Deal with it.  */
	  fixup_riprel (gdbarch, dsc, from, to, regs);
	}
    }
}
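/* Implement the 'displaced_step_copy_insn' gdbarch method.  */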
displaced_step_copy_insn_closure_up
amd64_displaced_step_copy_insn (struct gdbarch *gdbarch,
				CORE_ADDR from, CORE_ADDR to,
				struct regcache *regs)
{
  int len = gdbarch_max_insn_length (gdbarch);
  /* Extra space for sentinels so fixup_{riprel,displaced_copy} don't have to
     continually watch for running off the end of the buffer.  */
  int fixup_sentinel_space = len;
  std::unique_ptr<amd64_displaced_step_copy_insn_closure> dsc
    (new amd64_displaced_step_copy_insn_closure (len + fixup_sentinel_space));
  gdb_byte *buf = &dsc->insn_buf[0];
  struct amd64_insn *details = &dsc->insn_details;

  read_memory (from, buf, len);

  /* Set up the sentinel space so we don't have to worry about running
     off the end of the buffer.  An excessive number of leading prefixes
     could otherwise cause this.  */
  memset (buf + len, 0, fixup_sentinel_space);

  amd64_get_insn_details (buf, details);

  /* GDB may get control back after the insn after the syscall.
     Presumably this is a kernel bug.
     If this is a syscall, make sure there's a nop afterwards.  */
  {
    int syscall_length;

    if (amd64_syscall_p (details, &syscall_length))
      buf[details->opcode_offset + syscall_length] = NOP_OPCODE;
  }

  /* Modify the insn to cope with the address where it will be executed from.
     In particular, handle any rip-relative addressing.  */
  fixup_displaced_copy (gdbarch, dsc.get (), from, to, regs);

  write_memory (to, buf, len);

  displaced_debug_printf ("copy %s->%s: %s",
			  paddress (gdbarch, from), paddress (gdbarch, to),
			  bytes_to_string (buf, len).c_str ());

  /* This is a work around for a problem with g++ 4.8.  */
  return displaced_step_copy_insn_closure_up (dsc.release ());
}
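/* Return non-zero if the instruction DETAILS is an absolute indirect
   jump, zero otherwise.  */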
static int
amd64_absolute_jmp_p (const struct amd64_insn *details)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (insn[0] == 0xff)
    {
      /* jump near, absolute indirect (/4) */
      if ((insn[1] & 0x38) == 0x20)
	return 1;

      /* jump far, absolute indirect (/5) */
      if ((insn[1] & 0x38) == 0x28)
	return 1;
    }

  return 0;
}
/* Return non-zero if the instruction DETAILS is a jump, zero otherwise.  */

static int
amd64_jmp_p (const struct amd64_insn *details)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  /* jump short, relative.  */
  if (insn[0] == 0xeb)
    return 1;

  /* jump near, relative.  */
  if (insn[0] == 0xe9)
    return 1;

  return amd64_absolute_jmp_p (details);
}
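/* Return non-zero if the instruction DETAILS is an absolute indirect
   call, zero otherwise.  */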
static int
amd64_absolute_call_p (const struct amd64_insn *details)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (insn[0] == 0xff)
    {
      /* Call near, absolute indirect (/2) */
      if ((insn[1] & 0x38) == 0x10)
	return 1;

      /* Call far, absolute indirect (/3) */
      if ((insn[1] & 0x38) == 0x18)
	return 1;
    }

  return 0;
}
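/* Return non-zero if the instruction DETAILS is a return instruction,
   zero otherwise.  */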
static int
amd64_ret_p (const struct amd64_insn *details)
{
  /* NOTE: gcc can emit "repz ; ret".  */
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  switch (insn[0])
    {
    case 0xc2: /* ret near, pop N bytes */
    case 0xc3: /* ret near */
    case 0xca: /* ret far, pop N bytes */
    case 0xcb: /* ret far */
    case 0xcf: /* iret */
      return 1;

    default:
      return 0;
    }
}
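/* Return non-zero if the instruction DETAILS is a call, zero
   otherwise.  */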
static int
amd64_call_p (const struct amd64_insn *details)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (amd64_absolute_call_p (details))
    return 1;

  /* call near, relative */
  if (insn[0] == 0xe8)
    return 1;

  return 0;
}
/* Return non-zero if INSN is a system call, and set *LENGTHP to its
   length in bytes.  Otherwise, return zero.  */

static int
amd64_syscall_p (const struct amd64_insn *details, int *lengthp)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (insn[0] == 0x0f && insn[1] == 0x05)
    {
      *lengthp = 2;
      return 1;
    }

  return 0;
}
/* Classify the instruction at ADDR using PRED.
   Throw an error if the memory can't be read.  */

static int
amd64_classify_insn_at (struct gdbarch *gdbarch, CORE_ADDR addr,
			int (*pred) (const struct amd64_insn *))
{
  struct amd64_insn details;

  gdb::byte_vector buf (gdbarch_max_insn_length (gdbarch));

  read_code (addr, buf.data (), buf.size ());
  amd64_get_insn_details (buf.data (), &details);

  int classification = pred (&details);

  return classification;
}

/* The gdbarch insn_is_call method.  */

static int
amd64_insn_is_call (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  return amd64_classify_insn_at (gdbarch, addr, amd64_call_p);
}

/* The gdbarch insn_is_ret method.  */

static int
amd64_insn_is_ret (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  return amd64_classify_insn_at (gdbarch, addr, amd64_ret_p);
}

/* The gdbarch insn_is_jump method.  */

static int
amd64_insn_is_jump (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  return amd64_classify_insn_at (gdbarch, addr, amd64_jmp_p);
}
/* Fix up the state of registers and memory after having single-stepped
   a displaced instruction.  */

void
amd64_displaced_step_fixup (struct gdbarch *gdbarch,
			    struct displaced_step_copy_insn_closure *dsc_,
			    CORE_ADDR from, CORE_ADDR to,
			    struct regcache *regs, bool completed_p)
{
  amd64_displaced_step_copy_insn_closure *dsc
    = (amd64_displaced_step_copy_insn_closure *) dsc_;
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  /* The offset we applied to the instruction's address.  */
  ULONGEST insn_offset = to - from;
  gdb_byte *insn = dsc->insn_buf.data ();
  const struct amd64_insn *insn_details = &dsc->insn_details;

  displaced_debug_printf ("fixup (%s, %s), insn = 0x%02x 0x%02x ...",
			  paddress (gdbarch, from), paddress (gdbarch, to),
			  insn[0], insn[1]);

  /* If we used a tmp reg, restore it.  */

  if (dsc->tmp_used)
    {
      displaced_debug_printf ("restoring reg %d to %s",
			      dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save));
      regcache_cooked_write_unsigned (regs, dsc->tmp_regno, dsc->tmp_save);
    }

  /* The list of issues to contend with here is taken from
     resume_execution in arch/x86/kernel/kprobes.c, Linux 2.6.28.
     Yay for Free Software!  */

  /* Relocate the %rip back to the program's instruction stream,
     if necessary.  */

  /* Except in the case of absolute or indirect jump or call
     instructions, or a return instruction, the new rip is relative to
     the displaced instruction; make it relative to the original insn.
     Well, signal handler returns don't need relocation either, but we use the
     value of %rip to recognize those; see below.  */
  if (!completed_p
      || (!amd64_absolute_jmp_p (insn_details)
	  && !amd64_absolute_call_p (insn_details)
	  && !amd64_ret_p (insn_details)))
    {
      int insn_len;

      CORE_ADDR pc = regcache_read_pc (regs);
      /* A signal trampoline system call changes the %rip, resuming
	 execution of the main program after the signal handler has
	 returned.  That makes them like 'return' instructions; we
	 shouldn't relocate %rip.

	 But most system calls don't, and we do need to relocate %rip.

	 Our heuristic for distinguishing these cases: if stepping
	 over the system call instruction left control directly after
	 the instruction, then we relocate --- control almost certainly
	 doesn't belong in the displaced copy.  Otherwise, we assume
	 the instruction has put control where it belongs, and leave
	 it unrelocated.  Goodness help us if there are PC-relative
	 system calls.  */
      if (amd64_syscall_p (insn_details, &insn_len)
	  /* GDB can get control back after the insn after the syscall.
	     Presumably this is a kernel bug.  Fixup ensures it's a nop, we
	     add one to the length for it.  */
	  && (pc < to || pc > (to + insn_len + 1)))
	displaced_debug_printf ("syscall changed %%rip; not relocating");
      else
	{
	  CORE_ADDR rip = pc - insn_offset;

	  /* If we just stepped over a breakpoint insn, we don't backup
	     the pc on purpose; this is to match behaviour without
	     stepping.  */

	  regcache_write_pc (regs, rip);

	  displaced_debug_printf ("relocated %%rip from %s to %s",
				  paddress (gdbarch, pc),
				  paddress (gdbarch, rip));
	}
    }

  /* If the instruction was PUSHFL, then the TF bit will be set in the
     pushed value, and should be cleared.  We'll leave this for later,
     since GDB already messes up the TF flag when stepping over a
     pushfl.  */

  /* If the instruction was a call, the return address now atop the
     stack is the address following the copied instruction.  We need
     to make it the address following the original instruction.  */
  if (completed_p && amd64_call_p (insn_details))
    {
      ULONGEST rsp;
      ULONGEST retaddr;
      const ULONGEST retaddr_len = 8;

      regcache_cooked_read_unsigned (regs, AMD64_RSP_REGNUM, &rsp);
      retaddr = read_memory_unsigned_integer (rsp, retaddr_len, byte_order);
      retaddr = (retaddr - insn_offset) & 0xffffffffffffffffULL;
      write_memory_unsigned_integer (rsp, retaddr_len, byte_order, retaddr);

      displaced_debug_printf ("relocated return addr at %s to %s",
			      paddress (gdbarch, rsp),
			      paddress (gdbarch, retaddr));
    }
}
/* If the instruction INSN uses RIP-relative addressing, return the
   offset into the raw INSN where the displacement to be adjusted is
   found.  Returns 0 if the instruction doesn't use RIP-relative
   addressing.  */

static int
rip_relative_offset (struct amd64_insn *insn)
{
  if (insn->modrm_offset != -1)
    {
      gdb_byte modrm = insn->raw_insn[insn->modrm_offset];

      if ((modrm & 0xc7) == 0x05)
	{
	  /* The displacement is found right after the ModRM byte.  */
	  return insn->modrm_offset + 1;
	}
    }

  return 0;
}
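/* Write the LEN bytes in BUF to target memory at *TO, advancing *TO
   past the written bytes.  */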
static void
append_insns (CORE_ADDR *to, ULONGEST len, const gdb_byte *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}
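/* Copy the instruction at OLDLOC to *TO, adjusting 32-bit relative
   calls, jumps and rip-relative displacements so the copy remains
   equivalent, and advance *TO past the emitted code.  */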
static void
amd64_relocate_instruction (struct gdbarch *gdbarch,
			    CORE_ADDR *to, CORE_ADDR oldloc)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = gdbarch_max_insn_length (gdbarch);
  /* Extra space for sentinels.  */
  int fixup_sentinel_space = len;
  gdb::byte_vector buf (len + fixup_sentinel_space);
  struct amd64_insn insn_details;
  int offset = 0;
  LONGEST rel32, newrel;
  gdb_byte *insn;
  int insn_length;

  read_memory (oldloc, buf.data (), len);

  /* Set up the sentinel space so we don't have to worry about running
     off the end of the buffer.  An excessive number of leading prefixes
     could otherwise cause this.  */
  memset (buf.data () + len, 0, fixup_sentinel_space);

  insn = buf.data ();
  amd64_get_insn_details (insn, &insn_details);

  insn_length = gdb_buffered_insn_length (gdbarch, insn, len, oldloc);

  /* Skip legacy instruction prefixes.  */
  insn = amd64_skip_prefixes (insn);

  /* Adjust calls with 32-bit relative addresses as push/jump, with
     the address pushed being the location where the original call in
     the user program would return to.  */
  if (insn[0] == 0xe8)
    {
      gdb_byte push_buf[32];
      CORE_ADDR ret_addr;
      int i = 0;

      /* Where "ret" in the original code will return to.  */
      ret_addr = oldloc + insn_length;

      /* If pushing an address higher than or equal to 0x80000000,
	 avoid 'pushq', as that sign extends its 32-bit operand, which
	 would be incorrect.  */
      if (ret_addr <= 0x7fffffff)
	{
	  push_buf[0] = 0x68; /* pushq $...  */
	  store_unsigned_integer (&push_buf[1], 4, byte_order, ret_addr);
	  i = 5;
	}
      else
	{
	  push_buf[i++] = 0x48; /* sub $0x8,%rsp */
	  push_buf[i++] = 0x83;
	  push_buf[i++] = 0xec;
	  push_buf[i++] = 0x08;

	  push_buf[i++] = 0xc7; /* movl $imm,(%rsp) */
	  push_buf[i++] = 0x04;
	  push_buf[i++] = 0x24;
	  store_unsigned_integer (&push_buf[i], 4, byte_order,
				  ret_addr & 0xffffffff);
	  i += 4;

	  push_buf[i++] = 0xc7; /* movl $imm,4(%rsp) */
	  push_buf[i++] = 0x44;
	  push_buf[i++] = 0x24;
	  push_buf[i++] = 0x04;
	  store_unsigned_integer (&push_buf[i], 4, byte_order,
				  ret_addr >> 32);
	  i += 4;
	}

      gdb_assert (i <= sizeof (push_buf));
      /* Push the push.  */
      append_insns (to, i, push_buf);

      /* Convert the relative call to a relative jump.  */
      insn[0] = 0xe9;

      /* Adjust the destination offset.  */
      rel32 = extract_signed_integer (insn + 1, 4, byte_order);
      newrel = (oldloc - *to) + rel32;
      store_signed_integer (insn + 1, 4, byte_order, newrel);

      displaced_debug_printf ("adjusted insn rel32=%s at %s to rel32=%s at %s",
			      hex_string (rel32), paddress (gdbarch, oldloc),
			      hex_string (newrel), paddress (gdbarch, *to));

      /* Write the adjusted jump into its displaced location.  */
      append_insns (to, 5, insn);
      return;
    }

  offset = rip_relative_offset (&insn_details);
  if (!offset)
    {
      /* Adjust jumps with 32-bit relative addresses.  Calls are
	 already handled above.  */
      if (insn[0] == 0xe9)
	offset = 1;
      /* Adjust conditional jumps.  */
      else if (insn[0] == 0x0f && (insn[1] & 0xf0) == 0x80)
	offset = 2;
    }

  if (offset)
    {
      rel32 = extract_signed_integer (insn + offset, 4, byte_order);
      newrel = (oldloc - *to) + rel32;
      store_signed_integer (insn + offset, 4, byte_order, newrel);
      displaced_debug_printf ("adjusted insn rel32=%s at %s to rel32=%s at %s",
			      hex_string (rel32), paddress (gdbarch, oldloc),
			      hex_string (newrel), paddress (gdbarch, *to));
    }

  /* Write the adjusted instruction into its displaced location.  */
  append_insns (to, insn_length, buf.data ());
}
/* The maximum number of saved registers.  This should include %rip.  */
#define AMD64_NUM_SAVED_REGS	AMD64_NUM_GREGS

struct amd64_frame_cache
{
  /* Base address.  */
  CORE_ADDR base;
  int base_p;
  CORE_ADDR sp_offset;
  CORE_ADDR pc;

  /* Saved registers.  */
  CORE_ADDR saved_regs[AMD64_NUM_SAVED_REGS];
  CORE_ADDR saved_sp;
  int saved_sp_reg;

  /* Do we have a frame?  */
  int frameless_p;
};

/* Initialize a frame cache.  */

static void
amd64_init_frame_cache (struct amd64_frame_cache *cache)
{
  int i;

  /* Base address.  */
  cache->base = 0;
  cache->base_p = 0;
  cache->sp_offset = -8;
  cache->pc = 0;

  /* Saved registers.  We initialize these to -1 since zero is a valid
     offset (that's where %rbp is supposed to be stored).
     The values start out as being offsets, and are later converted to
     addresses (at which point -1 is interpreted as an address, still meaning
     "invalid").  */
  for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
    cache->saved_regs[i] = -1;
  cache->saved_sp = 0;
  cache->saved_sp_reg = -1;

  /* Frameless until proven otherwise.  */
  cache->frameless_p = 1;
}

/* Allocate and initialize a frame cache.  */

static struct amd64_frame_cache *
amd64_alloc_frame_cache (void)
{
  struct amd64_frame_cache *cache;

  cache = FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache);
  amd64_init_frame_cache (cache);
  return cache;
}

/* GCC 4.4 and later can put code in the prologue to realign the
   stack pointer.  Check whether PC points to such code, and update
   CACHE accordingly.  Return the first instruction after the code
   sequence or CURRENT_PC, whichever is smaller.  If we don't
   recognize the code, return PC.  */

static CORE_ADDR
amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
			   struct amd64_frame_cache *cache)
{
  /* There are 2 code sequences to re-align stack before the frame
     gets set up:

	1. Use a caller-saved register:

	leaq  8(%rsp), %reg
	andq  $-XXX, %rsp
	pushq -8(%reg)

	2. Use a callee-saved register:

	pushq %reg
	leaq  16(%rsp), %reg
	andq  $-XXX, %rsp
	pushq -8(%reg)

     "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:

	0x48 0x83 0xe4 0xf0			andq $-16, %rsp
	0x48 0x81 0xe4 0x00 0xff 0xff 0xff	andq $-256, %rsp
   */

  gdb_byte buf[18];
  int reg, r;
  int offset, offset_and;

  if (target_read_code (pc, buf, sizeof buf))
    return pc;

  /* Check caller-saved register.  The first instruction has
     to be "leaq 8(%rsp), %reg".  */
  if ((buf[0] & 0xfb) == 0x48
      && buf[1] == 0x8d
      && buf[3] == 0x24
      && buf[4] == 0x8)
    {
      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[2] & 0xc7) != 0x44)
	return pc;

      /* REG has register number.  */
      reg = (buf[2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if (buf[0] == 0x4c)
	reg += 8;

      offset = 5;
    }
  else
    {
      /* Check callee-saved register.  The first instruction
	 has to be "pushq %reg".  */
      reg = 0;
      if ((buf[0] & 0xf8) == 0x50)
	offset = 0;
      else if ((buf[0] & 0xf6) == 0x40
	       && (buf[1] & 0xf8) == 0x50)
	{
	  /* Check the REX.B bit.  */
	  if ((buf[0] & 1) != 0)
	    reg = 8;

	  offset = 1;
	}
      else
	return pc;

      /* Get register.  */
      reg += buf[offset] & 0x7;

      offset++;

      /* The next instruction has to be "leaq 16(%rsp), %reg".  */
      if ((buf[offset] & 0xfb) != 0x48
	  || buf[offset + 1] != 0x8d
	  || buf[offset + 3] != 0x24
	  || buf[offset + 4] != 0x10)
	return pc;

      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[offset + 2] & 0xc7) != 0x44)
	return pc;

      /* REG has register number.  */
      r = (buf[offset + 2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if (buf[offset] == 0x4c)
	r += 8;

      /* Registers in pushq and leaq have to be the same.  */
      if (reg != r)
	return pc;

      offset += 5;
    }

  /* The register can't be %rsp or %rbp.  */
  if (reg == 4 || reg == 5)
    return pc;

  /* The next instruction has to be "andq $-XXX, %rsp".  */
  if (buf[offset] != 0x48
      || buf[offset + 2] != 0xe4
      || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
    return pc;

  offset_and = offset;
  offset += buf[offset + 1] == 0x81 ? 7 : 4;

  /* The next instruction has to be "pushq -8(%reg)".  */
  r = 0;
  if (buf[offset] == 0xff)
    offset++;
  else if ((buf[offset] & 0xf6) == 0x40
	   && buf[offset + 1] == 0xff)
    {
      /* Check the REX.B bit.  */
      if ((buf[offset] & 0x1) != 0)
	r = 8;
      offset += 2;
    }
  else
    return pc;

  /* 8bit -8 is 0xf8.  REG must be binary 110 and MOD must be binary
     01.  */
  if (buf[offset + 1] != 0xf8
      || (buf[offset] & 0xf8) != 0x70)
    return pc;

  /* R/M has register.  */
  r += buf[offset] & 7;

  /* Registers in leaq and pushq have to be the same.  */
  if (reg != r)
    return pc;

  if (current_pc > pc + offset_and)
    cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);

  return std::min (pc + offset + 2, current_pc);
}

/* Similar to amd64_analyze_stack_align for x32.  */

static CORE_ADDR
amd64_x32_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
			       struct amd64_frame_cache *cache)
{
  /* There are 2 code sequences to re-align stack before the frame
     gets set up:

	1. Use a caller-saved register:

	leaq  8(%rsp), %reg
	andq  $-XXX, %rsp
	pushq -8(%reg)

	or

	[addr32] leal  8(%rsp), %reg
	andl  $-XXX, %esp
	[addr32] pushq -8(%reg)

	2. Use a callee-saved register:

	pushq %reg
	leaq  16(%rsp), %reg
	andq  $-XXX, %rsp
	pushq -8(%reg)

	or

	pushq %reg
	[addr32] leal  16(%rsp), %reg
	andl  $-XXX, %esp
	[addr32] pushq -8(%reg)

     "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:

	0x48 0x83 0xe4 0xf0			andq $-16, %rsp
	0x48 0x81 0xe4 0x00 0xff 0xff 0xff	andq $-256, %rsp

     "andl $-XXX, %esp" can be either 3 bytes or 6 bytes:

	0x83 0xe4 0xf0			andl $-16, %esp
	0x81 0xe4 0x00 0xff 0xff 0xff	andl $-256, %esp
   */

  gdb_byte buf[19];
  int reg, r;
  int offset, offset_and;

  if (target_read_memory (pc, buf, sizeof buf))
    return pc;

  /* Skip optional addr32 prefix.  */
  offset = buf[0] == 0x67 ? 1 : 0;

  /* Check caller-saved register.  The first instruction has
     to be "leaq 8(%rsp), %reg" or "leal 8(%rsp), %reg".  */
  if (((buf[offset] & 0xfb) == 0x48 || (buf[offset] & 0xfb) == 0x40)
      && buf[offset + 1] == 0x8d
      && buf[offset + 3] == 0x24
      && buf[offset + 4] == 0x8)
    {
      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[offset + 2] & 0xc7) != 0x44)
	return pc;

      /* REG has register number.  */
      reg = (buf[offset + 2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if ((buf[offset] & 0x4) != 0)
	reg += 8;

      offset += 5;
    }
  else
    {
      /* Check callee-saved register.  The first instruction
	 has to be "pushq %reg".  */
      reg = 0;
      if ((buf[offset] & 0xf6) == 0x40
	  && (buf[offset + 1] & 0xf8) == 0x50)
	{
	  /* Check the REX.B bit.  */
	  if ((buf[offset] & 1) != 0)
	    reg = 8;

	  offset += 1;
	}
      else if ((buf[offset] & 0xf8) != 0x50)
	return pc;

      /* Get register.  */
      reg += buf[offset] & 0x7;

      offset++;

      /* Skip optional addr32 prefix.  */
      if (buf[offset] == 0x67)
	offset++;

      /* The next instruction has to be "leaq 16(%rsp), %reg" or
	 "leal 16(%rsp), %reg".  */
      if (((buf[offset] & 0xfb) != 0x48 && (buf[offset] & 0xfb) != 0x40)
	  || buf[offset + 1] != 0x8d
	  || buf[offset + 3] != 0x24
	  || buf[offset + 4] != 0x10)
	return pc;

      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[offset + 2] & 0xc7) != 0x44)
	return pc;

      /* REG has register number.  */
      r = (buf[offset + 2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if ((buf[offset] & 0x4) != 0)
	r += 8;

      /* Registers in pushq and leaq have to be the same.  */
      if (reg != r)
	return pc;

      offset += 5;
    }

  /* The register can't be %rsp or %rbp.  */
  if (reg == 4 || reg == 5)
    return pc;

  /* The next instruction may be "andq $-XXX, %rsp" or
     "andl $-XXX, %esp".  */
  if (buf[offset] != 0x48)
    offset--;

  if (buf[offset + 2] != 0xe4
      || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
    return pc;

  offset_and = offset;
  offset += buf[offset + 1] == 0x81 ? 7 : 4;

  /* Skip optional addr32 prefix.  */
  if (buf[offset] == 0x67)
    offset++;

  /* The next instruction has to be "pushq -8(%reg)".  */
  r = 0;
  if (buf[offset] == 0xff)
    offset++;
  else if ((buf[offset] & 0xf6) == 0x40
	   && buf[offset + 1] == 0xff)
    {
      /* Check the REX.B bit.  */
      if ((buf[offset] & 0x1) != 0)
	r = 8;
      offset += 2;
    }
  else
    return pc;

  /* 8bit -8 is 0xf8.  REG must be binary 110 and MOD must be binary
     01.  */
  if (buf[offset + 1] != 0xf8
      || (buf[offset] & 0xf8) != 0x70)
    return pc;

  /* R/M has register.  */
  r += buf[offset] & 7;

  /* Registers in leaq and pushq have to be the same.  */
  if (reg != r)
    return pc;

  if (current_pc > pc + offset_and)
    cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);

  return std::min (pc + offset + 2, current_pc);
}

/* Do a limited analysis of the prologue at PC and update CACHE
   accordingly.  Bail out early if CURRENT_PC is reached.  Return the
   address where the analysis stopped.

   We will handle only functions beginning with:

      pushq %rbp        0x55
      movq %rsp, %rbp   0x48 0x89 0xe5 (or 0x48 0x8b 0xec)

   or (for the X32 ABI):

      pushq %rbp        0x55
      movl %esp, %ebp   0x89 0xe5 (or 0x8b 0xec)

   The `endbr64` instruction can be found before these sequences, and will be
   skipped if found.

   Any function that doesn't start with one of these sequences will be
   assumed to have no prologue and thus no valid frame pointer in
   %rbp.  */

static CORE_ADDR
amd64_analyze_prologue (struct gdbarch *gdbarch,
			CORE_ADDR pc, CORE_ADDR current_pc,
			struct amd64_frame_cache *cache)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  /* The `endbr64` instruction.  */
  static const gdb_byte endbr64[4] = { 0xf3, 0x0f, 0x1e, 0xfa };
  /* There are two variations of movq %rsp, %rbp.  */
  static const gdb_byte mov_rsp_rbp_1[3] = { 0x48, 0x89, 0xe5 };
  static const gdb_byte mov_rsp_rbp_2[3] = { 0x48, 0x8b, 0xec };
  /* Ditto for movl %esp, %ebp.  */
  static const gdb_byte mov_esp_ebp_1[2] = { 0x89, 0xe5 };
  static const gdb_byte mov_esp_ebp_2[2] = { 0x8b, 0xec };

  gdb_byte buf[3];
  gdb_byte op;

  if (current_pc <= pc)
    return current_pc;

  if (gdbarch_ptr_bit (gdbarch) == 32)
    pc = amd64_x32_analyze_stack_align (pc, current_pc, cache);
  else
    pc = amd64_analyze_stack_align (pc, current_pc, cache);

  op = read_code_unsigned_integer (pc, 1, byte_order);

  /* Check for the `endbr64` instruction, skip it if found.  */
  if (op == endbr64[0])
    {
      read_code (pc + 1, buf, 3);

      if (memcmp (buf, &endbr64[1], 3) == 0)
	pc += 4;

      op = read_code_unsigned_integer (pc, 1, byte_order);
    }

  if (current_pc <= pc)
    return current_pc;

  if (op == 0x55)		/* pushq %rbp */
    {
      /* Take into account that we've executed the `pushq %rbp' that
	 starts this instruction sequence.  */
      cache->saved_regs[AMD64_RBP_REGNUM] = 0;
      cache->sp_offset += 8;

      /* If that's all, return now.  */
      if (current_pc <= pc + 1)
	return current_pc;

      read_code (pc + 1, buf, 3);

      /* Check for `movq %rsp, %rbp'.  */
      if (memcmp (buf, mov_rsp_rbp_1, 3) == 0
	  || memcmp (buf, mov_rsp_rbp_2, 3) == 0)
	{
	  /* OK, we actually have a frame.  */
	  cache->frameless_p = 0;
	  return pc + 4;
	}

      /* For X32, also check for `movl %esp, %ebp'.  */
      if (gdbarch_ptr_bit (gdbarch) == 32)
	{
	  if (memcmp (buf, mov_esp_ebp_1, 2) == 0
	      || memcmp (buf, mov_esp_ebp_2, 2) == 0)
	    {
	      /* OK, we actually have a frame.  */
	      cache->frameless_p = 0;
	      return pc + 3;
	    }
	}

      return pc + 1;
    }

  return pc;
}

/* Work around false termination of prologue - GCC PR debug/48827.

   START_PC is the first instruction of a function; PC is the prologue
   end as already determined.  Return PC if there is nothing to do.

     84 c0                test   %al,%al
     74 23                je     after
     <-- here is 0 lines advance - the false prologue end marker.
     0f 29 85 70 ff ff ff movaps %xmm0,-0x90(%rbp)
     0f 29 4d 80          movaps %xmm1,-0x80(%rbp)
     0f 29 55 90          movaps %xmm2,-0x70(%rbp)
     0f 29 5d a0          movaps %xmm3,-0x60(%rbp)
     0f 29 65 b0          movaps %xmm4,-0x50(%rbp)
     0f 29 6d c0          movaps %xmm5,-0x40(%rbp)
     0f 29 75 d0          movaps %xmm6,-0x30(%rbp)
     0f 29 7d e0          movaps %xmm7,-0x20(%rbp)
   after:  */

static CORE_ADDR
amd64_skip_xmm_prologue (CORE_ADDR pc, CORE_ADDR start_pc)
{
  struct symtab_and_line start_pc_sal, next_sal;
  gdb_byte buf[4 + 8 * 7];
  int offset, xmmreg;

  if (pc == start_pc)
    return pc;

  start_pc_sal = find_pc_sect_line (start_pc, NULL, 0);
  if (start_pc_sal.symtab == NULL
      || producer_is_gcc_ge_4 (start_pc_sal.symtab->compunit ()
			       ->producer ()) < 6
      || start_pc_sal.pc != start_pc || pc >= start_pc_sal.end)
    return pc;

  next_sal = find_pc_sect_line (start_pc_sal.end, NULL, 0);
  if (next_sal.line != start_pc_sal.line)
    return pc;

  /* START_PC can be from overlaid memory, ignored here.  */
  if (target_read_code (next_sal.pc - 4, buf, sizeof (buf)) != 0)
    return pc;

  /* test %al,%al */
  if (buf[0] != 0x84 || buf[1] != 0xc0)
    return pc;
  /* je AFTER */
  if (buf[2] != 0x74)
    return pc;

  offset = 4;
  for (xmmreg = 0; xmmreg < 8; xmmreg++)
    {
      /* 0x0f 0x29 0b??000101 movaps %xmmreg?,-0x??(%rbp) */
      if (buf[offset] != 0x0f || buf[offset + 1] != 0x29
	  || (buf[offset + 2] & 0x3f) != (xmmreg << 3 | 0x5))
	return pc;

      /* 0b01?????? */
      if ((buf[offset + 2] & 0xc0) == 0x40)
	{
	  /* 8-bit displacement.  */
	  offset += 4;
	}
      /* 0b10?????? */
      else if ((buf[offset + 2] & 0xc0) == 0x80)
	{
	  /* 32-bit displacement.  */
	  offset += 7;
	}
      else
	return pc;
    }

  /* je AFTER */
  if (offset - 4 != buf[3])
    return pc;

  return next_sal.end;
}

/* Return PC of first real instruction.  */

static CORE_ADDR
amd64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR start_pc)
{
  struct amd64_frame_cache cache;
  CORE_ADDR pc;
  CORE_ADDR func_addr;

  if (find_pc_partial_function (start_pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
	= skip_prologue_using_sal (gdbarch, func_addr);
      struct compunit_symtab *cust = find_pc_compunit_symtab (func_addr);

      /* LLVM backend (Clang/Flang) always emits a line note before the
	 prologue and another one after.  We trust clang and newer Intel
	 compilers to emit usable line notes.  */
      if (post_prologue_pc
	  && (cust != NULL
	      && cust->producer () != nullptr
	      && (producer_is_llvm (cust->producer ())
		  || producer_is_icc_ge_19 (cust->producer ()))))
	return std::max (start_pc, post_prologue_pc);
    }

  amd64_init_frame_cache (&cache);
  pc = amd64_analyze_prologue (gdbarch, start_pc, 0xffffffffffffffffLL,
			       &cache);
  if (cache.frameless_p)
    return start_pc;

  return amd64_skip_xmm_prologue (pc, start_pc);
}

/* Normal frames.  */

static void
amd64_frame_cache_1 (const frame_info_ptr &this_frame,
		     struct amd64_frame_cache *cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  gdb_byte buf[8];
  int i;

  cache->pc = get_frame_func (this_frame);
  if (cache->pc != 0)
    amd64_analyze_prologue (gdbarch, cache->pc, get_frame_pc (this_frame),
			    cache);

  if (cache->frameless_p)
    {
      /* We didn't find a valid frame.  If we're at the start of a
	 function, or somewhere halfway through its prologue, the
	 function's frame probably hasn't been fully set up yet.  Try to
	 reconstruct the base address for the stack frame by looking
	 at the stack pointer.  For truly "frameless" functions this
	 might work too.  */

      if (cache->saved_sp_reg != -1)
	{
	  /* Stack pointer has been saved.  */
	  get_frame_register (this_frame, cache->saved_sp_reg, buf);
	  cache->saved_sp = extract_unsigned_integer (buf, 8, byte_order);

	  /* We're halfway aligning the stack.  */
	  cache->base = ((cache->saved_sp - 8) & 0xfffffffffffffff0LL) - 8;
	  cache->saved_regs[AMD64_RIP_REGNUM] = cache->saved_sp - 8;

	  /* This will be added back below.  */
	  cache->saved_regs[AMD64_RIP_REGNUM] -= cache->base;
	}
      else
	{
	  get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
	  cache->base = extract_unsigned_integer (buf, 8, byte_order)
			+ cache->sp_offset;
	}
    }
  else
    {
      get_frame_register (this_frame, AMD64_RBP_REGNUM, buf);
      cache->base = extract_unsigned_integer (buf, 8, byte_order);
    }

  /* Now that we have the base address for the stack frame we can
     calculate the value of %rsp in the calling frame.  */
  cache->saved_sp = cache->base + 16;

  /* For normal frames, %rip is stored at 8(%rbp).  If we don't have a
     frame we find it at the same offset from the reconstructed base
     address.  If we're halfway aligning the stack, %rip is handled
     differently (see above).  */
  if (!cache->frameless_p || cache->saved_sp_reg == -1)
    cache->saved_regs[AMD64_RIP_REGNUM] = 8;

  /* Adjust all the saved registers such that they contain addresses
     instead of offsets.  */
  for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
    if (cache->saved_regs[i] != -1)
      cache->saved_regs[i] += cache->base;

  cache->base_p = 1;
}

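/* Allocate and fill in the frame cache for THIS_FRAME, caching the
   result in *THIS_CACHE.  NOT_AVAILABLE_ERROR is swallowed so that a
   partially available frame still yields a (possibly incomplete)
   cache.  */
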
static struct amd64_frame_cache *
amd64_frame_cache (const frame_info_ptr &this_frame, void **this_cache)
{
  struct amd64_frame_cache *cache;

  if (*this_cache)
    return (struct amd64_frame_cache *) *this_cache;

  cache = amd64_alloc_frame_cache ();
  *this_cache = cache;

  try
    {
      amd64_frame_cache_1 (this_frame, cache);
    }
  catch (const gdb_exception_error &ex)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw;
    }

  return cache;
}

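/* Implement the stop_reason method of the normal frame unwinder.  */
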
static enum unwind_stop_reason
amd64_frame_unwind_stop_reason (const frame_info_ptr &this_frame,
				void **this_cache)
{
  struct amd64_frame_cache *cache =
    amd64_frame_cache (this_frame, this_cache);

  if (!cache->base_p)
    return UNWIND_UNAVAILABLE;

  /* This marks the outermost frame.  */
  if (cache->base == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}

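/* Implement the this_id method of the normal frame unwinder.  */
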
static void
amd64_frame_this_id (const frame_info_ptr &this_frame, void **this_cache,
		     struct frame_id *this_id)
{
  struct amd64_frame_cache *cache =
    amd64_frame_cache (this_frame, this_cache);

  if (!cache->base_p)
    (*this_id) = frame_id_build_unavailable_stack (cache->pc);
  else if (cache->base == 0)
    {
      /* This marks the outermost frame.  */
      return;
    }
  else
    (*this_id) = frame_id_build (cache->base + 16, cache->pc);
}

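/* Implement the prev_register method of the normal frame unwinder.  */
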
static struct value *
amd64_frame_prev_register (const frame_info_ptr &this_frame, void **this_cache,
			   int regnum)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  struct amd64_frame_cache *cache =
    amd64_frame_cache (this_frame, this_cache);

  gdb_assert (regnum >= 0);

  if (regnum == gdbarch_sp_regnum (gdbarch) && cache->saved_sp)
    return frame_unwind_got_constant (this_frame, regnum, cache->saved_sp);

  if (regnum < AMD64_NUM_SAVED_REGS && cache->saved_regs[regnum] != -1)
    return frame_unwind_got_memory (this_frame, regnum,
				    cache->saved_regs[regnum]);

  return frame_unwind_got_register (this_frame, regnum, regnum);
}

static const struct frame_unwind amd64_frame_unwind =
{
  "amd64 prologue",
  NORMAL_FRAME,
  amd64_frame_unwind_stop_reason,
  amd64_frame_this_id,
  amd64_frame_prev_register,
  NULL,
  default_frame_sniffer
};

/* Generate a bytecode expression to get the value of the saved PC.  */

static void
amd64_gen_return_address (struct gdbarch *gdbarch,
			  struct agent_expr *ax, struct axs_value *value,
			  CORE_ADDR scope)
{
  /* The following sequence assumes the traditional use of the base
     register.  */
  ax_reg (ax, AMD64_RBP_REGNUM);
  ax_const_l (ax, 8);
  ax_simple (ax, aop_add);
  value->type = register_type (gdbarch, AMD64_RIP_REGNUM);
  value->kind = axs_lvalue_memory;
}

/* Signal trampolines.  */

/* FIXME: kettenis/20030419: Perhaps, we can unify the 32-bit and
   64-bit variants.  This would require using identical frame caches
   on both platforms.  */

static struct amd64_frame_cache *
amd64_sigtramp_frame_cache (const frame_info_ptr &this_frame, void **this_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct amd64_frame_cache *cache;
  CORE_ADDR addr;
  gdb_byte buf[8];
  int i;

  if (*this_cache)
    return (struct amd64_frame_cache *) *this_cache;

  cache = amd64_alloc_frame_cache ();

  try
    {
      get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
      cache->base = extract_unsigned_integer (buf, 8, byte_order) - 8;

      addr = tdep->sigcontext_addr (this_frame);
      gdb_assert (tdep->sc_reg_offset);
      gdb_assert (tdep->sc_num_regs <= AMD64_NUM_SAVED_REGS);
      for (i = 0; i < tdep->sc_num_regs; i++)
	if (tdep->sc_reg_offset[i] != -1)
	  cache->saved_regs[i] = addr + tdep->sc_reg_offset[i];

      cache->base_p = 1;
    }
  catch (const gdb_exception_error &ex)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw;
    }

  *this_cache = cache;
  return cache;
}

static enum unwind_stop_reason
amd64_sigtramp_frame_unwind_stop_reason (const frame_info_ptr &this_frame,
					 void **this_cache)
{
  struct amd64_frame_cache *cache =
    amd64_sigtramp_frame_cache (this_frame, this_cache);

  if (!cache->base_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

static void
amd64_sigtramp_frame_this_id (const frame_info_ptr &this_frame,
			      void **this_cache, struct frame_id *this_id)
{
  struct amd64_frame_cache *cache =
    amd64_sigtramp_frame_cache (this_frame, this_cache);

  if (!cache->base_p)
    (*this_id) = frame_id_build_unavailable_stack (get_frame_pc (this_frame));
  else if (cache->base == 0)
    {
      /* This marks the outermost frame.  */
      return;
    }
  else
    (*this_id) = frame_id_build (cache->base + 16, get_frame_pc (this_frame));
}

static struct value *
amd64_sigtramp_frame_prev_register (const frame_info_ptr &this_frame,
				    void **this_cache, int regnum)
{
  /* Make sure we've initialized the cache.  */
  amd64_sigtramp_frame_cache (this_frame, this_cache);

  return amd64_frame_prev_register (this_frame, this_cache, regnum);
}

static int
amd64_sigtramp_frame_sniffer (const struct frame_unwind *self,
			      const frame_info_ptr &this_frame,
			      void **this_cache)
{
  gdbarch *arch = get_frame_arch (this_frame);
  i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (arch);

  /* We shouldn't even bother if we don't have a sigcontext_addr
     handler.  */
  if (tdep->sigcontext_addr == NULL)
    return 0;

  if (tdep->sigtramp_p != NULL)
    {
      if (tdep->sigtramp_p (this_frame))
	return 1;
    }

  if (tdep->sigtramp_start != 0)
    {
      CORE_ADDR pc = get_frame_pc (this_frame);

      gdb_assert (tdep->sigtramp_end != 0);
      if (pc >= tdep->sigtramp_start && pc < tdep->sigtramp_end)
	return 1;
    }

  return 0;
}

static const struct frame_unwind amd64_sigtramp_frame_unwind =
{
  "amd64 sigtramp",
  SIGTRAMP_FRAME,
  amd64_sigtramp_frame_unwind_stop_reason,
  amd64_sigtramp_frame_this_id,
  amd64_sigtramp_frame_prev_register,
  NULL,
  amd64_sigtramp_frame_sniffer
};

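/* Return the frame base address cached for THIS_FRAME; used for all
   three base addresses of amd64_frame_base below.  */
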
static CORE_ADDR
amd64_frame_base_address (const frame_info_ptr &this_frame, void **this_cache)
{
  struct amd64_frame_cache *cache =
    amd64_frame_cache (this_frame, this_cache);

  return cache->base;
}

static const struct frame_base amd64_frame_base =
{
  &amd64_frame_unwind,
  amd64_frame_base_address,
  amd64_frame_base_address,
  amd64_frame_base_address
};

/* Implement core of the stack_frame_destroyed_p gdbarch method.  */

static int
amd64_stack_frame_destroyed_p_1 (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  gdb_byte insn;

  std::optional<CORE_ADDR> epilogue = find_epilogue_using_linetable (pc);

  /* PC is pointing at the next instruction to be executed.  If it is
     equal to the epilogue start, it means we're right before it starts,
     so the stack is still valid.  */
  if (epilogue)
    return pc > epilogue;

  if (target_read_memory (pc, &insn, 1))
    return 0;	/* Can't read memory at pc.  */

  if (insn != 0xc3)	/* 'ret' instruction.  */
    return 0;

  return 1;
}

/* Normal frames, but in a function epilogue.  */

/* Implement the stack_frame_destroyed_p gdbarch method.

   The epilogue is defined here as the 'ret' instruction, which will
   follow any instruction such as 'leave' or 'pop %rbp' that destroys
   the function's stack frame.  */

static int
amd64_stack_frame_destroyed_p (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  struct compunit_symtab *cust = find_pc_compunit_symtab (pc);

  if (cust != nullptr && cust->producer () != nullptr
      && producer_is_llvm (cust->producer ()))
    return amd64_stack_frame_destroyed_p_1 (gdbarch, pc);

  return 0;
}

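/* Shared worker for the two epilogue frame sniffers below.  OVERRIDE_P
   distinguishes the "amd64 epilogue override" unwinder, which runs when
   the compiler's epilogue unwind info can't be trusted, from the plain
   "amd64 epilogue" unwinder, which handles the remaining case.  */
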
static int
amd64_epilogue_frame_sniffer_1 (const struct frame_unwind *self,
				const frame_info_ptr &this_frame,
				void **this_prologue_cache, bool override_p)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  CORE_ADDR pc = get_frame_pc (this_frame);

  if (frame_relative_level (this_frame) != 0)
    /* We're not in the inner frame, so assume we're not in an epilogue.  */
    return 0;

  bool unwind_valid_p
    = compunit_epilogue_unwind_valid (find_pc_compunit_symtab (pc));
  if (override_p)
    {
      if (unwind_valid_p)
	/* Don't override the symtab unwinders, skip
	   "amd64 epilogue override".  */
	return 0;
    }
  else
    {
      if (!unwind_valid_p)
	/* "amd64 epilogue override" unwinder already ran, skip
	   "amd64 epilogue".  */
	return 0;
    }

  /* Check whether we're in an epilogue.  */
  return amd64_stack_frame_destroyed_p_1 (gdbarch, pc);
}

static int
amd64_epilogue_override_frame_sniffer (const struct frame_unwind *self,
				       const frame_info_ptr &this_frame,
				       void **this_prologue_cache)
{
  return amd64_epilogue_frame_sniffer_1 (self, this_frame, this_prologue_cache,
					 true);
}

static int
amd64_epilogue_frame_sniffer (const struct frame_unwind *self,
			      const frame_info_ptr &this_frame,
			      void **this_prologue_cache)
{
  return amd64_epilogue_frame_sniffer_1 (self, this_frame, this_prologue_cache,
					 false);
}

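/* Build the frame cache for a frame caught in its epilogue, where the
   frame pointer has already been popped and the return address sits at
   the top of the stack.  */
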
static struct amd64_frame_cache *
amd64_epilogue_frame_cache (const frame_info_ptr &this_frame, void **this_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct amd64_frame_cache *cache;
  gdb_byte buf[8];

  if (*this_cache)
    return (struct amd64_frame_cache *) *this_cache;

  cache = amd64_alloc_frame_cache ();
  *this_cache = cache;

  try
    {
      /* Cache base will be %rsp plus cache->sp_offset (-8).  */
      get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
      cache->base = extract_unsigned_integer (buf, 8,
					      byte_order) + cache->sp_offset;

      /* Cache pc will be the frame func.  */
      cache->pc = get_frame_func (this_frame);

      /* The previous value of %rsp is cache->base plus 16.  */
      cache->saved_sp = cache->base + 16;

      /* The saved %rip will be at cache->base plus 8.  */
      cache->saved_regs[AMD64_RIP_REGNUM] = cache->base + 8;

      cache->base_p = 1;
    }
  catch (const gdb_exception_error &ex)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw;
    }

  return cache;
}

static enum unwind_stop_reason
amd64_epilogue_frame_unwind_stop_reason (const frame_info_ptr &this_frame,
					 void **this_cache)
{
  struct amd64_frame_cache *cache
    = amd64_epilogue_frame_cache (this_frame, this_cache);

  if (!cache->base_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

static void
amd64_epilogue_frame_this_id (const frame_info_ptr &this_frame,
			      void **this_cache,
			      struct frame_id *this_id)
{
  struct amd64_frame_cache *cache = amd64_epilogue_frame_cache (this_frame,
								this_cache);

  if (!cache->base_p)
    (*this_id) = frame_id_build_unavailable_stack (cache->pc);
  else
    (*this_id) = frame_id_build (cache->base + 16, cache->pc);
}

static const struct frame_unwind amd64_epilogue_override_frame_unwind =
{
  "amd64 epilogue override",
  NORMAL_FRAME,
  amd64_epilogue_frame_unwind_stop_reason,
  amd64_epilogue_frame_this_id,
  amd64_frame_prev_register,
  NULL,
  amd64_epilogue_override_frame_sniffer
};

static const struct frame_unwind amd64_epilogue_frame_unwind =
{
  "amd64 epilogue",
  NORMAL_FRAME,
  amd64_epilogue_frame_unwind_stop_reason,
  amd64_epilogue_frame_this_id,
  amd64_frame_prev_register,
  NULL,
  amd64_epilogue_frame_sniffer
};

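/* Implement the dummy_id gdbarch method, building the frame ID of a
   dummy frame created for an inferior function call.  */
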
static struct frame_id
amd64_dummy_id (struct gdbarch *gdbarch, const frame_info_ptr &this_frame)
{
  CORE_ADDR fp;

  fp = get_frame_register_unsigned (this_frame, AMD64_RBP_REGNUM);

  return frame_id_build (fp + 16, get_frame_pc (this_frame));
}

/* 16 byte align the SP per frame requirements.  */

static CORE_ADDR
amd64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
  return sp & -(CORE_ADDR) 16;
}

/* Supply register REGNUM from the buffer specified by FPREGS and LEN
   in the floating-point register set REGSET to register cache
   REGCACHE.  If REGNUM is -1, do this for all registers in REGSET.  */

static void
amd64_supply_fpregset (const struct regset *regset, struct regcache *regcache,
		       int regnum, const void *fpregs, size_t len)
{
  struct gdbarch *gdbarch = regcache->arch ();
  const i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);

  gdb_assert (len >= tdep->sizeof_fpregset);
  amd64_supply_fxsave (regcache, regnum, fpregs);
}

/* Collect register REGNUM from the register cache REGCACHE and store
   it in the buffer specified by FPREGS and LEN as described by the
   floating-point register set REGSET.  If REGNUM is -1, do this for
   all registers in REGSET.  */

static void
amd64_collect_fpregset (const struct regset *regset,
			const struct regcache *regcache,
			int regnum, void *fpregs, size_t len)
{
  struct gdbarch *gdbarch = regcache->arch ();
  const i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);

  gdb_assert (len >= tdep->sizeof_fpregset);
  amd64_collect_fxsave (regcache, regnum, fpregs);
}

const struct regset amd64_fpregset =
  {
    NULL, amd64_supply_fpregset, amd64_collect_fpregset
  };

/* Figure out where the longjmp will land.  Slurp the jmp_buf out of
   %rdi.  We expect its value to be a pointer to the jmp_buf structure
   from which we extract the address that we will land at.  This
   address is copied into PC.  This routine returns non-zero on
   success.  */

static int
amd64_get_longjmp_target (const frame_info_ptr &frame, CORE_ADDR *pc)
{
  gdb_byte buf[8];
  CORE_ADDR jb_addr;
  struct gdbarch *gdbarch = get_frame_arch (frame);
  i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
  int jb_pc_offset = tdep->jb_pc_offset;
  int len = builtin_type (gdbarch)->builtin_func_ptr->length ();

  /* If JB_PC_OFFSET is -1, we have no way to find out where the
     longjmp will land.  */
  if (jb_pc_offset == -1)
    return 0;

  get_frame_register (frame, AMD64_RDI_REGNUM, buf);
  jb_addr = extract_typed_address
	      (buf, builtin_type (gdbarch)->builtin_data_ptr);
  if (target_read_memory (jb_addr + jb_pc_offset, buf, len))
    return 0;

  *pc = extract_typed_address (buf, builtin_type (gdbarch)->builtin_func_ptr);

  return 1;
}

static const int amd64_record_regmap[] =
{
  AMD64_RAX_REGNUM, AMD64_RCX_REGNUM, AMD64_RDX_REGNUM, AMD64_RBX_REGNUM,
  AMD64_RSP_REGNUM, AMD64_RBP_REGNUM, AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
  AMD64_R8_REGNUM, AMD64_R9_REGNUM, AMD64_R10_REGNUM, AMD64_R11_REGNUM,
  AMD64_R12_REGNUM, AMD64_R13_REGNUM, AMD64_R14_REGNUM, AMD64_R15_REGNUM,
  AMD64_RIP_REGNUM, AMD64_EFLAGS_REGNUM, AMD64_CS_REGNUM, AMD64_SS_REGNUM,
  AMD64_DS_REGNUM, AMD64_ES_REGNUM, AMD64_FS_REGNUM, AMD64_GS_REGNUM
};

/* Implement the "in_indirect_branch_thunk" gdbarch function.  */

static bool
amd64_in_indirect_branch_thunk (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  return x86_in_indirect_branch_thunk (pc, amd64_register_names,
				       AMD64_RAX_REGNUM,
				       AMD64_RIP_REGNUM);
}

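/* Common initialization for the AMD64 gdbarch, shared by all OS ABI
   variants.  DEFAULT_TDESC is used when INFO carries no target
   description with registers.  */
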
void
amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch,
		const target_desc *default_tdesc)
{
  i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
  const struct target_desc *tdesc = info.target_desc;
  static const char *const stap_integer_prefixes[] = { "$", NULL };
  static const char *const stap_register_prefixes[] = { "%", NULL };
  static const char *const stap_register_indirection_prefixes[] = { "(",
								    NULL };
  static const char *const stap_register_indirection_suffixes[] = { ")",
								    NULL };

  /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
     floating-point registers.  */
  tdep->sizeof_fpregset = I387_SIZEOF_FXSAVE;
  tdep->fpregset = &amd64_fpregset;

  if (! tdesc_has_registers (tdesc))
    tdesc = default_tdesc;
  tdep->tdesc = tdesc;

  tdep->num_core_regs = AMD64_NUM_GREGS + I387_NUM_REGS;
  tdep->register_names = amd64_register_names;

  if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx512") != NULL)
    {
      tdep->zmmh_register_names = amd64_zmmh_names;
      tdep->k_register_names = amd64_k_names;
      tdep->xmm_avx512_register_names = amd64_xmm_avx512_names;
      tdep->ymm16h_register_names = amd64_ymmh_avx512_names;

      tdep->num_zmm_regs = 32;
      tdep->num_xmm_avx512_regs = 16;
      tdep->num_ymm_avx512_regs = 16;

      tdep->zmm0h_regnum = AMD64_ZMM0H_REGNUM;
      tdep->k0_regnum = AMD64_K0_REGNUM;
      tdep->xmm16_regnum = AMD64_XMM16_REGNUM;
      tdep->ymm16h_regnum = AMD64_YMM16H_REGNUM;
    }

  if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx") != NULL)
    {
      tdep->ymmh_register_names = amd64_ymmh_names;
      tdep->num_ymm_regs = 16;
      tdep->ymm0h_regnum = AMD64_YMM0H_REGNUM;
    }

  if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.mpx") != NULL)
    {
      tdep->mpx_register_names = amd64_mpx_names;
      tdep->bndcfgu_regnum = AMD64_BNDCFGU_REGNUM;
      tdep->bnd0r_regnum = AMD64_BND0R_REGNUM;
    }

  if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.segments") != NULL)
    {
      tdep->fsbase_regnum = AMD64_FSBASE_REGNUM;
    }

  if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.pkeys") != NULL)
    {
      tdep->pkeys_register_names = amd64_pkeys_names;
      tdep->pkru_regnum = AMD64_PKRU_REGNUM;
      tdep->num_pkeys_regs = 1;
    }

  tdep->num_byte_regs = 20;
  tdep->num_word_regs = 16;
  tdep->num_dword_regs = 16;
  /* Avoid wiring in the MMX registers for now.  */
  tdep->num_mmx_regs = 0;

  set_gdbarch_pseudo_register_read_value (gdbarch,
					  amd64_pseudo_register_read_value);
  set_gdbarch_pseudo_register_write (gdbarch, amd64_pseudo_register_write);
  set_gdbarch_ax_pseudo_register_collect (gdbarch,
					  amd64_ax_pseudo_register_collect);

  set_tdesc_pseudo_register_name (gdbarch, amd64_pseudo_register_name);

  /* AMD64 has an FPU and 16 SSE registers.  */
  tdep->st0_regnum = AMD64_ST0_REGNUM;
  tdep->num_xmm_regs = 16;

  /* This is what all the fuss is about.  */
  set_gdbarch_long_bit (gdbarch, 64);
  set_gdbarch_long_long_bit (gdbarch, 64);
  set_gdbarch_ptr_bit (gdbarch, 64);

  /* In contrast to the i386, on AMD64 a `long double' actually takes
     up 128 bits, even though it's still based on the i387 extended
     floating-point format which has only 80 significant bits.  */
  set_gdbarch_long_double_bit (gdbarch, 128);

  set_gdbarch_num_regs (gdbarch, AMD64_NUM_REGS);

  /* Register numbers of various important registers.  */
  set_gdbarch_sp_regnum (gdbarch, AMD64_RSP_REGNUM); /* %rsp */
  set_gdbarch_pc_regnum (gdbarch, AMD64_RIP_REGNUM); /* %rip */
  set_gdbarch_ps_regnum (gdbarch, AMD64_EFLAGS_REGNUM); /* %eflags */
  set_gdbarch_fp0_regnum (gdbarch, AMD64_ST0_REGNUM); /* %st(0) */

  /* The "default" register numbering scheme for AMD64 is referred to
     as the "DWARF Register Number Mapping" in the System V psABI.
     The preferred debugging format for all known AMD64 targets is
     actually DWARF2, and GCC doesn't seem to support DWARF (that is
     DWARF-1), but we provide the same mapping just in case.  This
     mapping is also used for stabs, which GCC does support.  */
  set_gdbarch_stab_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
  set_gdbarch_dwarf2_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);

  /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
     be in use on any of the supported AMD64 targets.  */

  /* Call dummy code.  */
  set_gdbarch_push_dummy_call (gdbarch, amd64_push_dummy_call);
  set_gdbarch_frame_align (gdbarch, amd64_frame_align);
  set_gdbarch_frame_red_zone_size (gdbarch, 128);

  set_gdbarch_convert_register_p (gdbarch, i387_convert_register_p);
  set_gdbarch_register_to_value (gdbarch, i387_register_to_value);
  set_gdbarch_value_to_register (gdbarch, i387_value_to_register);

  set_gdbarch_return_value_as_value (gdbarch, amd64_return_value);

  set_gdbarch_skip_prologue (gdbarch, amd64_skip_prologue);

  tdep->record_regmap = amd64_record_regmap;

  set_gdbarch_dummy_id (gdbarch, amd64_dummy_id);

  /* Hook the function epilogue frame unwinders.  The "override"
     unwinder is prepended, so that it supersedes the other unwinders
     in function epilogues when the compiler's epilogue unwind info
     can't be trusted; the plain epilogue unwinder is appended and
     handles the remaining case.  */
  frame_unwind_prepend_unwinder (gdbarch, &amd64_epilogue_override_frame_unwind);

  frame_unwind_append_unwinder (gdbarch, &amd64_epilogue_frame_unwind);

  /* Hook the prologue-based frame unwinders.  */
  frame_unwind_append_unwinder (gdbarch, &amd64_sigtramp_frame_unwind);
  frame_unwind_append_unwinder (gdbarch, &amd64_frame_unwind);
  frame_base_set_default (gdbarch, &amd64_frame_base);

  set_gdbarch_get_longjmp_target (gdbarch, amd64_get_longjmp_target);

  set_gdbarch_relocate_instruction (gdbarch, amd64_relocate_instruction);

  set_gdbarch_gen_return_address (gdbarch, amd64_gen_return_address);

  set_gdbarch_stack_frame_destroyed_p (gdbarch, amd64_stack_frame_destroyed_p);

  /* SystemTap variables and functions.  */
  set_gdbarch_stap_integer_prefixes (gdbarch, stap_integer_prefixes);
  set_gdbarch_stap_register_prefixes (gdbarch, stap_register_prefixes);
  set_gdbarch_stap_register_indirection_prefixes (gdbarch,
					  stap_register_indirection_prefixes);
  set_gdbarch_stap_register_indirection_suffixes (gdbarch,
					  stap_register_indirection_suffixes);
  set_gdbarch_stap_is_single_operand (gdbarch,
				      i386_stap_is_single_operand);
  set_gdbarch_stap_parse_special_token (gdbarch,
					i386_stap_parse_special_token);
  set_gdbarch_insn_is_call (gdbarch, amd64_insn_is_call);
  set_gdbarch_insn_is_ret (gdbarch, amd64_insn_is_ret);
  set_gdbarch_insn_is_jump (gdbarch, amd64_insn_is_jump);

  set_gdbarch_in_indirect_branch_thunk (gdbarch,
					amd64_in_indirect_branch_thunk);

  register_amd64_ravenscar_ops (gdbarch);
}

/* Initialize ARCH for x86-64, no osabi.  */

static void
amd64_none_init_abi (gdbarch_info info, gdbarch *arch)
{
  amd64_init_abi (info, arch, amd64_target_description (X86_XSTATE_SSE_MASK,
							true));
}

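/* Return the type of an x32 pseudo register: on x32 %ebp and %esp hold
   data pointers and %eip holds a code pointer; all other registers get
   the common i386 treatment.  */
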
static struct type *
amd64_x32_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
{
  i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);

  switch (regnum - tdep->eax_regnum)
    {
    case AMD64_RBP_REGNUM:	/* %ebp */
    case AMD64_RSP_REGNUM:	/* %esp */
      return builtin_type (gdbarch)->builtin_data_ptr;
    case AMD64_RIP_REGNUM:	/* %eip */
      return builtin_type (gdbarch)->builtin_func_ptr;
    }

  return i386_pseudo_register_type (gdbarch, regnum);
}

void
amd64_x32_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch,
		    const target_desc *default_tdesc)
{
  i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);

  amd64_init_abi (info, gdbarch, default_tdesc);

  tdep->num_dword_regs = 17;
  set_tdesc_pseudo_register_type (gdbarch, amd64_x32_pseudo_register_type);

  set_gdbarch_long_bit (gdbarch, 32);
  set_gdbarch_ptr_bit (gdbarch, 32);
}

/* Initialize ARCH for x64-32, no osabi.  */

static void
amd64_x32_none_init_abi (gdbarch_info info, gdbarch *arch)
{
  amd64_x32_init_abi (info, arch,
		      amd64_target_description (X86_XSTATE_SSE_MASK, true));
}

/* Return the target description for a specified XSAVE feature mask.  */

const struct target_desc *
amd64_target_description (uint64_t xcr0, bool segments)
{
  static target_desc *amd64_tdescs \
    [2/*AVX*/][2/*MPX*/][2/*AVX512*/][2/*PKRU*/][2/*segments*/] = {};
  target_desc **tdesc;

  tdesc = &amd64_tdescs[(xcr0 & X86_XSTATE_AVX) ? 1 : 0]
    [(xcr0 & X86_XSTATE_MPX) ? 1 : 0]
    [(xcr0 & X86_XSTATE_AVX512) ? 1 : 0]
    [(xcr0 & X86_XSTATE_PKRU) ? 1 : 0]
    [segments ? 1 : 0];

  if (*tdesc == NULL)
    *tdesc = amd64_create_target_description (xcr0, false, false,
					      segments);

  return *tdesc;
}

void _initialize_amd64_tdep ();
void
_initialize_amd64_tdep ()
{
  gdbarch_register_osabi (bfd_arch_i386, bfd_mach_x86_64, GDB_OSABI_NONE,
			  amd64_none_init_abi);
  gdbarch_register_osabi (bfd_arch_i386, bfd_mach_x64_32, GDB_OSABI_NONE,
			  amd64_x32_none_init_abi);
}

/* The 64-bit FXSAVE format differs from the 32-bit format in the
   sense that the instruction pointer and data pointer are simply
   64-bit offsets into the code segment and the data segment instead
   of a selector offset pair.  The functions below store the upper 32
   bits of these pointers (instead of just the 16-bits of the segment
   selector).  */

/* Fill register REGNUM in REGCACHE with the appropriate
   floating-point or SSE register value from *FXSAVE.  If REGNUM is
   -1, do this for all registers.  This function masks off any of the
   reserved bits in *FXSAVE.  */

void
amd64_supply_fxsave (struct regcache *regcache, int regnum,
		     const void *fxsave)
{
  struct gdbarch *gdbarch = regcache->arch ();
  i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);

  i387_supply_fxsave (regcache, regnum, fxsave);

  if (fxsave
      && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
    {
      const gdb_byte *regs = (const gdb_byte *) fxsave;

      if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
	regcache->raw_supply (I387_FISEG_REGNUM (tdep), regs + 12);
      if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
	regcache->raw_supply (I387_FOSEG_REGNUM (tdep), regs + 20);
    }
}

/* Similar to amd64_supply_fxsave, but use XSAVE extended state.  */

void
amd64_supply_xsave (struct regcache *regcache, int regnum,
		    const void *xsave)
{
  struct gdbarch *gdbarch = regcache->arch ();
  i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);

  i387_supply_xsave (regcache, regnum, xsave);

  if (xsave
      && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
    {
      const gdb_byte *regs = (const gdb_byte *) xsave;
      ULONGEST clear_bv;

      clear_bv = i387_xsave_get_clear_bv (gdbarch, xsave);

      /* If the FISEG and FOSEG registers have not been initialised yet
	 (their CLEAR_BV bit is set) then their default values of zero
	 will have already been set up by I387_SUPPLY_XSAVE.  */
      if (!(clear_bv & X86_XSTATE_X87))
	{
	  if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
	    regcache->raw_supply (I387_FISEG_REGNUM (tdep), regs + 12);
	  if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
	    regcache->raw_supply (I387_FOSEG_REGNUM (tdep), regs + 20);
	}
    }
}

/* Fill register REGNUM (if it is a floating-point or SSE register) in
   *FXSAVE with the value from REGCACHE.  If REGNUM is -1, do this for
   all registers.  This function doesn't touch any of the reserved
   bits in *FXSAVE.  */

void
amd64_collect_fxsave (const struct regcache *regcache, int regnum,
		      void *fxsave)
{
  struct gdbarch *gdbarch = regcache->arch ();
  i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
  gdb_byte *regs = (gdb_byte *) fxsave;

  i387_collect_fxsave (regcache, regnum, fxsave);

  if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
    {
      if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
	regcache->raw_collect (I387_FISEG_REGNUM (tdep), regs + 12);
      if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
	regcache->raw_collect (I387_FOSEG_REGNUM (tdep), regs + 20);
    }
}

/* Similar to amd64_collect_fxsave, but use XSAVE extended state.  */

void
amd64_collect_xsave (const struct regcache *regcache, int regnum,
		     void *xsave, int gcore)
{
  struct gdbarch *gdbarch = regcache->arch ();
  i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
  gdb_byte *regs = (gdb_byte *) xsave;

  i387_collect_xsave (regcache, regnum, xsave, gcore);

  if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
    {
      if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
	regcache->raw_collect (I387_FISEG_REGNUM (tdep),
			       regs + 12);
      if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
	regcache->raw_collect (I387_FOSEG_REGNUM (tdep),
			       regs + 20);
    }
}