gdb/amd64-tdep.c

/* Target-dependent code for AMD64.

   Copyright (C) 2001-2023 Free Software Foundation, Inc.

   Contributed by Jiri Smid, SuSE Labs.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "language.h"
#include "opcode/i386.h"
#include "dis-asm.h"
#include "arch-utils.h"
#include "dummy-frame.h"
#include "frame.h"
#include "frame-base.h"
#include "frame-unwind.h"
#include "inferior.h"
#include "infrun.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "objfiles.h"
#include "regcache.h"
#include "regset.h"
#include "symfile.h"
#include "disasm.h"
#include "amd64-tdep.h"
#include "i387-tdep.h"
#include "gdbsupport/x86-xstate.h"
#include <algorithm>
#include "target-descriptions.h"
#include "arch/amd64.h"
#include "producer.h"
#include "ax.h"
#include "ax-gdb.h"
#include "gdbsupport/byte-vector.h"
#include "osabi.h"
#include "x86-tdep.h"
#include "amd64-ravenscar-thread.h"

/* Note that the AMD64 architecture was previously known as x86-64.
   The latter is (forever) engraved into the canonical system name as
   returned by config.guess, and used as the name for the AMD64 port
   of GNU/Linux.  The BSDs have renamed their ports to amd64; they
   don't like to shout.  For GDB we prefer the amd64_-prefix over the
   x86_64_-prefix since it's so much easier to type.  */

/* Register information.  */

static const char * const amd64_register_names[] =
{
  "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp",

  /* %r8 is indeed register number 8.  */
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
  "rip", "eflags", "cs", "ss", "ds", "es", "fs", "gs",

  /* %st0 is register number 24.  */
  "st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7",
  "fctrl", "fstat", "ftag", "fiseg", "fioff", "foseg", "fooff", "fop",

  /* %xmm0 is register number 40.  */
  "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
  "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
  "mxcsr",
};

static const char * const amd64_ymm_names[] =
{
  "ymm0", "ymm1", "ymm2", "ymm3",
  "ymm4", "ymm5", "ymm6", "ymm7",
  "ymm8", "ymm9", "ymm10", "ymm11",
  "ymm12", "ymm13", "ymm14", "ymm15"
};

static const char * const amd64_ymm_avx512_names[] =
{
  "ymm16", "ymm17", "ymm18", "ymm19",
  "ymm20", "ymm21", "ymm22", "ymm23",
  "ymm24", "ymm25", "ymm26", "ymm27",
  "ymm28", "ymm29", "ymm30", "ymm31"
};

static const char * const amd64_ymmh_names[] =
{
  "ymm0h", "ymm1h", "ymm2h", "ymm3h",
  "ymm4h", "ymm5h", "ymm6h", "ymm7h",
  "ymm8h", "ymm9h", "ymm10h", "ymm11h",
  "ymm12h", "ymm13h", "ymm14h", "ymm15h"
};

static const char * const amd64_ymmh_avx512_names[] =
{
  "ymm16h", "ymm17h", "ymm18h", "ymm19h",
  "ymm20h", "ymm21h", "ymm22h", "ymm23h",
  "ymm24h", "ymm25h", "ymm26h", "ymm27h",
  "ymm28h", "ymm29h", "ymm30h", "ymm31h"
};

static const char * const amd64_mpx_names[] =
{
  "bnd0raw", "bnd1raw", "bnd2raw", "bnd3raw", "bndcfgu", "bndstatus"
};

static const char * const amd64_k_names[] =
{
  "k0", "k1", "k2", "k3",
  "k4", "k5", "k6", "k7"
};

static const char * const amd64_zmmh_names[] =
{
  "zmm0h", "zmm1h", "zmm2h", "zmm3h",
  "zmm4h", "zmm5h", "zmm6h", "zmm7h",
  "zmm8h", "zmm9h", "zmm10h", "zmm11h",
  "zmm12h", "zmm13h", "zmm14h", "zmm15h",
  "zmm16h", "zmm17h", "zmm18h", "zmm19h",
  "zmm20h", "zmm21h", "zmm22h", "zmm23h",
  "zmm24h", "zmm25h", "zmm26h", "zmm27h",
  "zmm28h", "zmm29h", "zmm30h", "zmm31h"
};

static const char * const amd64_zmm_names[] =
{
  "zmm0", "zmm1", "zmm2", "zmm3",
  "zmm4", "zmm5", "zmm6", "zmm7",
  "zmm8", "zmm9", "zmm10", "zmm11",
  "zmm12", "zmm13", "zmm14", "zmm15",
  "zmm16", "zmm17", "zmm18", "zmm19",
  "zmm20", "zmm21", "zmm22", "zmm23",
  "zmm24", "zmm25", "zmm26", "zmm27",
  "zmm28", "zmm29", "zmm30", "zmm31"
};

static const char * const amd64_xmm_avx512_names[] = {
  "xmm16", "xmm17", "xmm18", "xmm19",
  "xmm20", "xmm21", "xmm22", "xmm23",
  "xmm24", "xmm25", "xmm26", "xmm27",
  "xmm28", "xmm29", "xmm30", "xmm31"
};

static const char * const amd64_pkeys_names[] = {
  "pkru"
};

/* DWARF Register Number Mapping as defined in the System V psABI,
   section 3.6.  */

static int amd64_dwarf_regmap[] =
{
  /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI.  */
  AMD64_RAX_REGNUM, AMD64_RDX_REGNUM,
  AMD64_RCX_REGNUM, AMD64_RBX_REGNUM,
  AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,

  /* Frame Pointer Register RBP.  */
  AMD64_RBP_REGNUM,

  /* Stack Pointer Register RSP.  */
  AMD64_RSP_REGNUM,

  /* Extended Integer Registers 8 - 15.  */
  AMD64_R8_REGNUM,		/* %r8 */
  AMD64_R9_REGNUM,		/* %r9 */
  AMD64_R10_REGNUM,		/* %r10 */
  AMD64_R11_REGNUM,		/* %r11 */
  AMD64_R12_REGNUM,		/* %r12 */
  AMD64_R13_REGNUM,		/* %r13 */
  AMD64_R14_REGNUM,		/* %r14 */
  AMD64_R15_REGNUM,		/* %r15 */

  /* Return Address RA.  Mapped to RIP.  */
  AMD64_RIP_REGNUM,

  /* SSE Registers 0 - 7.  */
  AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
  AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
  AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
  AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,

  /* Extended SSE Registers 8 - 15.  */
  AMD64_XMM0_REGNUM + 8, AMD64_XMM0_REGNUM + 9,
  AMD64_XMM0_REGNUM + 10, AMD64_XMM0_REGNUM + 11,
  AMD64_XMM0_REGNUM + 12, AMD64_XMM0_REGNUM + 13,
  AMD64_XMM0_REGNUM + 14, AMD64_XMM0_REGNUM + 15,

  /* Floating Point Registers 0-7.  */
  AMD64_ST0_REGNUM + 0, AMD64_ST0_REGNUM + 1,
  AMD64_ST0_REGNUM + 2, AMD64_ST0_REGNUM + 3,
  AMD64_ST0_REGNUM + 4, AMD64_ST0_REGNUM + 5,
  AMD64_ST0_REGNUM + 6, AMD64_ST0_REGNUM + 7,

  /* MMX Registers 0 - 7.
     We have to handle those registers specifically, as their register
     number within GDB depends on the target (or they may even not be
     available at all).  */
  -1, -1, -1, -1, -1, -1, -1, -1,

  /* Control and Status Flags Register.  */
  AMD64_EFLAGS_REGNUM,

  /* Selector Registers.  */
  AMD64_ES_REGNUM,
  AMD64_CS_REGNUM,
  AMD64_SS_REGNUM,
  AMD64_DS_REGNUM,
  AMD64_FS_REGNUM,
  AMD64_GS_REGNUM,
  -1,
  -1,

  /* Segment Base Address Registers.  */
  -1,
  -1,
  -1,
  -1,

  /* Special Selector Registers.  */
  -1,
  -1,

  /* Floating Point Control Registers.  */
  AMD64_MXCSR_REGNUM,
  AMD64_FCTRL_REGNUM,
  AMD64_FSTAT_REGNUM
};

static const int amd64_dwarf_regmap_len =
  (sizeof (amd64_dwarf_regmap) / sizeof (amd64_dwarf_regmap[0]));

/* Convert DWARF register number REG to the appropriate register
   number used by GDB.  */

static int
amd64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
  i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
  int ymm0_regnum = tdep->ymm0_regnum;
  int regnum = -1;

  if (reg >= 0 && reg < amd64_dwarf_regmap_len)
    regnum = amd64_dwarf_regmap[reg];

  if (ymm0_regnum >= 0
      && i386_xmm_regnum_p (gdbarch, regnum))
    regnum += ymm0_regnum - I387_XMM0_REGNUM (tdep);

  return regnum;
}

/* Map architectural register numbers to gdb register numbers.  */

static const int amd64_arch_regmap[16] =
{
  AMD64_RAX_REGNUM,	/* %rax */
  AMD64_RCX_REGNUM,	/* %rcx */
  AMD64_RDX_REGNUM,	/* %rdx */
  AMD64_RBX_REGNUM,	/* %rbx */
  AMD64_RSP_REGNUM,	/* %rsp */
  AMD64_RBP_REGNUM,	/* %rbp */
  AMD64_RSI_REGNUM,	/* %rsi */
  AMD64_RDI_REGNUM,	/* %rdi */
  AMD64_R8_REGNUM,	/* %r8 */
  AMD64_R9_REGNUM,	/* %r9 */
  AMD64_R10_REGNUM,	/* %r10 */
  AMD64_R11_REGNUM,	/* %r11 */
  AMD64_R12_REGNUM,	/* %r12 */
  AMD64_R13_REGNUM,	/* %r13 */
  AMD64_R14_REGNUM,	/* %r14 */
  AMD64_R15_REGNUM	/* %r15 */
};

static const int amd64_arch_regmap_len =
  (sizeof (amd64_arch_regmap) / sizeof (amd64_arch_regmap[0]));

/* Convert architectural register number REG to the appropriate register
   number used by GDB.  */

static int
amd64_arch_reg_to_regnum (int reg)
{
  gdb_assert (reg >= 0 && reg < amd64_arch_regmap_len);

  return amd64_arch_regmap[reg];
}

/* Register names for byte pseudo-registers.  */

static const char * const amd64_byte_names[] =
{
  "al", "bl", "cl", "dl", "sil", "dil", "bpl", "spl",
  "r8l", "r9l", "r10l", "r11l", "r12l", "r13l", "r14l", "r15l",
  "ah", "bh", "ch", "dh"
};

/* Number of lower byte registers.  */
#define AMD64_NUM_LOWER_BYTE_REGS 16

/* Register names for word pseudo-registers.  */

static const char * const amd64_word_names[] =
{
  "ax", "bx", "cx", "dx", "si", "di", "bp", "",
  "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w"
};

/* Register names for dword pseudo-registers.  */

static const char * const amd64_dword_names[] =
{
  "eax", "ebx", "ecx", "edx", "esi", "edi", "ebp", "esp",
  "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d",
  "eip"
};

/* Return the name of register REGNUM.  */

static const char *
amd64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
{
  i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
  if (i386_byte_regnum_p (gdbarch, regnum))
    return amd64_byte_names[regnum - tdep->al_regnum];
  else if (i386_zmm_regnum_p (gdbarch, regnum))
    return amd64_zmm_names[regnum - tdep->zmm0_regnum];
  else if (i386_ymm_regnum_p (gdbarch, regnum))
    return amd64_ymm_names[regnum - tdep->ymm0_regnum];
  else if (i386_ymm_avx512_regnum_p (gdbarch, regnum))
    return amd64_ymm_avx512_names[regnum - tdep->ymm16_regnum];
  else if (i386_word_regnum_p (gdbarch, regnum))
    return amd64_word_names[regnum - tdep->ax_regnum];
  else if (i386_dword_regnum_p (gdbarch, regnum))
    return amd64_dword_names[regnum - tdep->eax_regnum];
  else
    return i386_pseudo_register_name (gdbarch, regnum);
}
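
/* Implement the 'pseudo_register_read_value' gdbarch method: read
   pseudo register REGNUM from REGCACHE and return it as a value.  */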

static struct value *
amd64_pseudo_register_read_value (struct gdbarch *gdbarch,
				  readable_regcache *regcache,
				  int regnum)
{
  i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);

  value *result_value = value::allocate (register_type (gdbarch, regnum));
  result_value->set_lval (lval_register);
  VALUE_REGNUM (result_value) = regnum;
  gdb_byte *buf = result_value->contents_raw ().data ();

  if (i386_byte_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->al_regnum;

      /* Extract (always little endian).  */
      if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
	{
	  gpnum -= AMD64_NUM_LOWER_BYTE_REGS;
	  gdb_byte raw_buf[register_size (gdbarch, gpnum)];

	  /* Special handling for AH, BH, CH, DH.  */
	  register_status status = regcache->raw_read (gpnum, raw_buf);
	  if (status == REG_VALID)
	    memcpy (buf, raw_buf + 1, 1);
	  else
	    result_value->mark_bytes_unavailable (0,
						  result_value->type ()->length ());
	}
      else
	{
	  gdb_byte raw_buf[register_size (gdbarch, gpnum)];
	  register_status status = regcache->raw_read (gpnum, raw_buf);
	  if (status == REG_VALID)
	    memcpy (buf, raw_buf, 1);
	  else
	    result_value->mark_bytes_unavailable (0,
						  result_value->type ()->length ());
	}
    }
  else if (i386_dword_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->eax_regnum;
      gdb_byte raw_buf[register_size (gdbarch, gpnum)];

      /* Extract (always little endian).  */
      register_status status = regcache->raw_read (gpnum, raw_buf);
      if (status == REG_VALID)
	memcpy (buf, raw_buf, 4);
      else
	result_value->mark_bytes_unavailable (0,
					      result_value->type ()->length ());
    }
  else
    i386_pseudo_register_read_into_value (gdbarch, regcache, regnum,
					  result_value);

  return result_value;
}
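
/* Write the contents of BUF to pseudo register REGNUM in REGCACHE,
   using read-modify-write on the underlying raw register where the
   pseudo register covers only part of it.  */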

static void
amd64_pseudo_register_write (struct gdbarch *gdbarch,
			     struct regcache *regcache,
			     int regnum, const gdb_byte *buf)
{
  i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);

  if (i386_byte_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->al_regnum;

      if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
	{
	  gpnum -= AMD64_NUM_LOWER_BYTE_REGS;
	  gdb_byte raw_buf[register_size (gdbarch, gpnum)];

	  /* Read ... AH, BH, CH, DH.  */
	  regcache->raw_read (gpnum, raw_buf);
	  /* ... Modify ... (always little endian).  */
	  memcpy (raw_buf + 1, buf, 1);
	  /* ... Write.  */
	  regcache->raw_write (gpnum, raw_buf);
	}
      else
	{
	  gdb_byte raw_buf[register_size (gdbarch, gpnum)];

	  /* Read ...  */
	  regcache->raw_read (gpnum, raw_buf);
	  /* ... Modify ... (always little endian).  */
	  memcpy (raw_buf, buf, 1);
	  /* ... Write.  */
	  regcache->raw_write (gpnum, raw_buf);
	}
    }
  else if (i386_dword_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->eax_regnum;
      gdb_byte raw_buf[register_size (gdbarch, gpnum)];

      /* Read ...  */
      regcache->raw_read (gpnum, raw_buf);
      /* ... Modify ... (always little endian).  */
      memcpy (raw_buf, buf, 4);
      /* ... Write.  */
      regcache->raw_write (gpnum, raw_buf);
    }
  else
    i386_pseudo_register_write (gdbarch, regcache, regnum, buf);
}

/* Implement the 'ax_pseudo_register_collect' gdbarch method.  */

static int
amd64_ax_pseudo_register_collect (struct gdbarch *gdbarch,
				  struct agent_expr *ax, int regnum)
{
  i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);

  if (i386_byte_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->al_regnum;

      if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
	ax_reg_mask (ax, gpnum - AMD64_NUM_LOWER_BYTE_REGS);
      else
	ax_reg_mask (ax, gpnum);
      return 0;
    }
  else if (i386_dword_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->eax_regnum;

      ax_reg_mask (ax, gpnum);
      return 0;
    }
  else
    return i386_ax_pseudo_register_collect (gdbarch, ax, regnum);
}

/* Register classes as defined in the psABI.  */

enum amd64_reg_class
{
  AMD64_INTEGER,
  AMD64_SSE,
  AMD64_SSEUP,
  AMD64_X87,
  AMD64_X87UP,
  AMD64_COMPLEX_X87,
  AMD64_NO_CLASS,
  AMD64_MEMORY
};

/* Return the union class of CLASS1 and CLASS2.  See the psABI for
   details.  */

static enum amd64_reg_class
amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2)
{
  /* Rule (a): If both classes are equal, this is the resulting class.  */
  if (class1 == class2)
    return class1;

  /* Rule (b): If one of the classes is NO_CLASS, the resulting class
     is the other class.  */
  if (class1 == AMD64_NO_CLASS)
    return class2;
  if (class2 == AMD64_NO_CLASS)
    return class1;

  /* Rule (c): If one of the classes is MEMORY, the result is MEMORY.  */
  if (class1 == AMD64_MEMORY || class2 == AMD64_MEMORY)
    return AMD64_MEMORY;

  /* Rule (d): If one of the classes is INTEGER, the result is INTEGER.  */
  if (class1 == AMD64_INTEGER || class2 == AMD64_INTEGER)
    return AMD64_INTEGER;

  /* Rule (e): If one of the classes is X87, X87UP, COMPLEX_X87 class,
     MEMORY is used as class.  */
  if (class1 == AMD64_X87 || class1 == AMD64_X87UP
      || class1 == AMD64_COMPLEX_X87 || class2 == AMD64_X87
      || class2 == AMD64_X87UP || class2 == AMD64_COMPLEX_X87)
    return AMD64_MEMORY;

  /* Rule (f): Otherwise class SSE is used.  */
  return AMD64_SSE;
}

static void amd64_classify (struct type *type, enum amd64_reg_class theclass[2]);

/* Return true if TYPE is a structure or union with unaligned fields.  */

static bool
amd64_has_unaligned_fields (struct type *type)
{
  if (type->code () == TYPE_CODE_STRUCT
      || type->code () == TYPE_CODE_UNION)
    {
      for (int i = 0; i < type->num_fields (); i++)
	{
	  struct type *subtype = check_typedef (type->field (i).type ());

	  /* Ignore static fields, empty fields (for example nested
	     empty structures), and bitfields (these are handled by
	     the caller).  */
	  if (type->field (i).is_static ()
	      || (TYPE_FIELD_BITSIZE (type, i) == 0
		  && subtype->length () == 0)
	      || TYPE_FIELD_PACKED (type, i))
	    continue;

	  int bitpos = type->field (i).loc_bitpos ();

	  if (bitpos % 8 != 0)
	    return true;

	  int align = type_align (subtype);
	  if (align == 0)
	    error (_("could not determine alignment of type"));

	  int bytepos = bitpos / 8;
	  if (bytepos % align != 0)
	    return true;

	  if (amd64_has_unaligned_fields (subtype))
	    return true;
	}
    }

  return false;
}

/* Classify field I of TYPE starting at BITOFFSET according to the rules for
   structures and union types, and store the result in THECLASS.  */

static void
amd64_classify_aggregate_field (struct type *type, int i,
				enum amd64_reg_class theclass[2],
				unsigned int bitoffset)
{
  struct type *subtype = check_typedef (type->field (i).type ());
  enum amd64_reg_class subclass[2];
  int bitsize = TYPE_FIELD_BITSIZE (type, i);

  if (bitsize == 0)
    bitsize = subtype->length () * 8;

  /* Ignore static fields, or empty fields, for example nested
     empty structures.  */
  if (type->field (i).is_static () || bitsize == 0)
    return;

  int bitpos = bitoffset + type->field (i).loc_bitpos ();
  int pos = bitpos / 64;
  int endpos = (bitpos + bitsize - 1) / 64;

  if (subtype->code () == TYPE_CODE_STRUCT
      || subtype->code () == TYPE_CODE_UNION)
    {
      /* Each field of an object is classified recursively.  */
      int j;
      for (j = 0; j < subtype->num_fields (); j++)
	amd64_classify_aggregate_field (subtype, j, theclass, bitpos);
      return;
    }

  gdb_assert (pos == 0 || pos == 1);

  amd64_classify (subtype, subclass);
  theclass[pos] = amd64_merge_classes (theclass[pos], subclass[0]);
  if (bitsize <= 64 && pos == 0 && endpos == 1)
    /* This is a bit of an odd case:  We have a field that would
       normally fit in one of the two eightbytes, except that
       it is placed in a way that this field straddles them.
       This has been seen with a structure containing an array.

       The ABI is a bit unclear in this case, but we assume that
       this field's class (stored in subclass[0]) must also be merged
       into class[1].  In other words, our field has a piece stored
       in the second eight-byte, and thus its class applies to
       the second eight-byte as well.

       In the case where the field length exceeds 8 bytes,
       it should not be necessary to merge the field class
       into class[1].  As LEN > 8, subclass[1] is necessarily
       different from AMD64_NO_CLASS.  If subclass[1] is equal
       to subclass[0], then the normal class[1]/subclass[1]
       merging will take care of everything.  For subclass[1]
       to be different from subclass[0], I can only see the case
       where we have a SSE/SSEUP or X87/X87UP pair, which both
       use up all 16 bytes of the aggregate, and are already
       handled just fine (because each portion sits on its own
       8-byte).  */
    theclass[1] = amd64_merge_classes (theclass[1], subclass[0]);
  if (pos == 0)
    theclass[1] = amd64_merge_classes (theclass[1], subclass[1]);
}

/* Classify TYPE according to the rules for aggregate (structures and
   arrays) and union types, and store the result in THECLASS.  */

static void
amd64_classify_aggregate (struct type *type, enum amd64_reg_class theclass[2])
{
  /* 1. If the size of an object is larger than two times eight bytes, or
     it is a non-trivial C++ object, or it has unaligned fields, then it
     has class memory.

     It is important that the trivially_copyable check is before the
     unaligned fields check, as C++ classes with virtual base classes
     will have fields (for the virtual base classes) with non-constant
     loc_bitpos attributes, which will cause an assert to trigger within
     the unaligned field check.  As classes with virtual bases are not
     trivially copyable, checking that first avoids this problem.  */
  if (TYPE_HAS_DYNAMIC_LENGTH (type)
      || type->length () > 16
      || !language_pass_by_reference (type).trivially_copyable
      || amd64_has_unaligned_fields (type))
    {
      theclass[0] = theclass[1] = AMD64_MEMORY;
      return;
    }

  /* 2. Both eightbytes get initialized to class NO_CLASS.  */
  theclass[0] = theclass[1] = AMD64_NO_CLASS;

  /* 3. Each field of an object is classified recursively so that
     always two fields are considered.  The resulting class is
     calculated according to the classes of the fields in the
     eightbyte: */

  if (type->code () == TYPE_CODE_ARRAY)
    {
      struct type *subtype = check_typedef (type->target_type ());

      /* All fields in an array have the same type.  */
      amd64_classify (subtype, theclass);
      if (type->length () > 8 && theclass[1] == AMD64_NO_CLASS)
	theclass[1] = theclass[0];
    }
  else
    {
      int i;

      /* Structure or union.  */
      gdb_assert (type->code () == TYPE_CODE_STRUCT
		  || type->code () == TYPE_CODE_UNION);

      for (i = 0; i < type->num_fields (); i++)
	amd64_classify_aggregate_field (type, i, theclass, 0);
    }

  /* 4. Then a post merger cleanup is done: */

  /* Rule (a): If one of the classes is MEMORY, the whole argument is
     passed in memory.  */
  if (theclass[0] == AMD64_MEMORY || theclass[1] == AMD64_MEMORY)
    theclass[0] = theclass[1] = AMD64_MEMORY;

  /* Rule (b): If SSEUP is not preceded by SSE, it is converted to
     SSE.  */
  if (theclass[0] == AMD64_SSEUP)
    theclass[0] = AMD64_SSE;
  if (theclass[1] == AMD64_SSEUP && theclass[0] != AMD64_SSE)
    theclass[1] = AMD64_SSE;
}

/* Classify TYPE, and store the result in THECLASS.  */

static void
amd64_classify (struct type *type, enum amd64_reg_class theclass[2])
{
  enum type_code code = type->code ();
  int len = type->length ();

  theclass[0] = theclass[1] = AMD64_NO_CLASS;

  /* Arguments of types (signed and unsigned) _Bool, char, short, int,
     long, long long, and pointers are in the INTEGER class.  Similarly,
     range types, used by languages such as Ada, are also in the INTEGER
     class.  */
  if ((code == TYPE_CODE_INT || code == TYPE_CODE_ENUM
       || code == TYPE_CODE_BOOL || code == TYPE_CODE_RANGE
       || code == TYPE_CODE_CHAR
       || code == TYPE_CODE_PTR || TYPE_IS_REFERENCE (type))
      && (len == 1 || len == 2 || len == 4 || len == 8))
    theclass[0] = AMD64_INTEGER;

  /* Arguments of types _Float16, float, double, _Decimal32, _Decimal64 and
     __m64 are in class SSE.  */
  else if ((code == TYPE_CODE_FLT || code == TYPE_CODE_DECFLOAT)
	   && (len == 2 || len == 4 || len == 8))
    /* FIXME: __m64.  */
    theclass[0] = AMD64_SSE;

  /* Arguments of types __float128, _Decimal128 and __m128 are split into
     two halves.  The least significant ones belong to class SSE, the most
     significant one to class SSEUP.  */
  else if (code == TYPE_CODE_DECFLOAT && len == 16)
    /* FIXME: __float128, __m128.  */
    theclass[0] = AMD64_SSE, theclass[1] = AMD64_SSEUP;

  /* The 64-bit mantissa of arguments of type long double belongs to
     class X87, the 16-bit exponent plus 6 bytes of padding belongs to
     class X87UP.  */
  else if (code == TYPE_CODE_FLT && len == 16)
    /* Class X87 and X87UP.  */
    theclass[0] = AMD64_X87, theclass[1] = AMD64_X87UP;

  /* Arguments of complex T - where T is one of the types _Float16, float or
     double - get treated as if they are implemented as:

     struct complexT {
       T real;
       T imag;
     };

  */
  else if (code == TYPE_CODE_COMPLEX && (len == 8 || len == 4))
    theclass[0] = AMD64_SSE;
  else if (code == TYPE_CODE_COMPLEX && len == 16)
    theclass[0] = theclass[1] = AMD64_SSE;

  /* A variable of type complex long double is classified as type
     COMPLEX_X87.  */
  else if (code == TYPE_CODE_COMPLEX && len == 32)
    theclass[0] = AMD64_COMPLEX_X87;

  /* Aggregates.  */
  else if (code == TYPE_CODE_ARRAY || code == TYPE_CODE_STRUCT
	   || code == TYPE_CODE_UNION)
    amd64_classify_aggregate (type, theclass);
}
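
/* Read or write the return value of a function with type TYPE,
   classifying it with the psABI rules above.  Exactly one of
   READ_VALUE and WRITEBUF may be non-NULL.  */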

static enum return_value_convention
amd64_return_value (struct gdbarch *gdbarch, struct value *function,
		    struct type *type, struct regcache *regcache,
		    struct value **read_value, const gdb_byte *writebuf)
{
  enum amd64_reg_class theclass[2];
  int len = type->length ();
  static int integer_regnum[] = { AMD64_RAX_REGNUM, AMD64_RDX_REGNUM };
  static int sse_regnum[] = { AMD64_XMM0_REGNUM, AMD64_XMM1_REGNUM };
  int integer_reg = 0;
  int sse_reg = 0;
  int i;

  gdb_assert (!(read_value && writebuf));

  /* 1. Classify the return type with the classification algorithm.  */
  amd64_classify (type, theclass);

  /* 2. If the type has class MEMORY, then the caller provides space
     for the return value and passes the address of this storage in
     %rdi as if it were the first argument to the function.  In effect,
     this address becomes a hidden first argument.

     On return %rax will contain the address that has been passed in
     by the caller in %rdi.  */
  if (theclass[0] == AMD64_MEMORY)
    {
      /* As indicated by the comment above, the ABI guarantees that we
	 can always find the return value just after the function has
	 returned.  */

      if (read_value != nullptr)
	{
	  ULONGEST addr;

	  regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
	  *read_value = value_at_non_lval (type, addr);
	}

      return RETURN_VALUE_ABI_RETURNS_ADDRESS;
    }

  gdb_byte *readbuf = nullptr;
  if (read_value != nullptr)
    {
      *read_value = value::allocate (type);
      readbuf = (*read_value)->contents_raw ().data ();
    }

  /* 8. If the class is COMPLEX_X87, the real part of the value is
     returned in %st0 and the imaginary part in %st1.  */
  if (theclass[0] == AMD64_COMPLEX_X87)
    {
      if (readbuf)
	{
	  regcache->raw_read (AMD64_ST0_REGNUM, readbuf);
	  regcache->raw_read (AMD64_ST1_REGNUM, readbuf + 16);
	}

      if (writebuf)
	{
	  i387_return_value (gdbarch, regcache);
	  regcache->raw_write (AMD64_ST0_REGNUM, writebuf);
	  regcache->raw_write (AMD64_ST1_REGNUM, writebuf + 16);

	  /* Fix up the tag word such that both %st(0) and %st(1) are
	     marked as valid.  */
	  regcache_raw_write_unsigned (regcache, AMD64_FTAG_REGNUM, 0xfff);
	}

      return RETURN_VALUE_REGISTER_CONVENTION;
    }

  gdb_assert (theclass[1] != AMD64_MEMORY);
  gdb_assert (len <= 16);

  for (i = 0; len > 0; i++, len -= 8)
    {
      int regnum = -1;
      int offset = 0;

      switch (theclass[i])
	{
	case AMD64_INTEGER:
	  /* 3. If the class is INTEGER, the next available register
	     of the sequence %rax, %rdx is used.  */
	  regnum = integer_regnum[integer_reg++];
	  break;

	case AMD64_SSE:
	  /* 4. If the class is SSE, the next available SSE register
	     of the sequence %xmm0, %xmm1 is used.  */
	  regnum = sse_regnum[sse_reg++];
	  break;

	case AMD64_SSEUP:
	  /* 5. If the class is SSEUP, the eightbyte is passed in the
	     upper half of the last used SSE register.  */
	  gdb_assert (sse_reg > 0);
	  regnum = sse_regnum[sse_reg - 1];
	  offset = 8;
	  break;

	case AMD64_X87:
	  /* 6. If the class is X87, the value is returned on the X87
	     stack in %st0 as 80-bit x87 number.  */
	  regnum = AMD64_ST0_REGNUM;
	  if (writebuf)
	    i387_return_value (gdbarch, regcache);
	  break;

	case AMD64_X87UP:
	  /* 7. If the class is X87UP, the value is returned together
	     with the previous X87 value in %st0.  */
	  gdb_assert (i > 0 && theclass[0] == AMD64_X87);
	  regnum = AMD64_ST0_REGNUM;
	  offset = 8;
	  len = 2;
	  break;

	case AMD64_NO_CLASS:
	  continue;

	default:
	  gdb_assert (!"Unexpected register class.");
	}

      gdb_assert (regnum != -1);

      if (readbuf)
	regcache->raw_read_part (regnum, offset, std::min (len, 8),
				 readbuf + i * 8);
      if (writebuf)
	regcache->raw_write_part (regnum, offset, std::min (len, 8),
				  writebuf + i * 8);
    }

  return RETURN_VALUE_REGISTER_CONVENTION;
}
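
/* Push the NARGS arguments in ARGS into registers and onto the stack
   at SP, following the psABI parameter-passing rules.  Return the
   adjusted stack pointer.  */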

static CORE_ADDR
amd64_push_arguments (struct regcache *regcache, int nargs, struct value **args,
		      CORE_ADDR sp, function_call_return_method return_method)
{
  static int integer_regnum[] =
  {
    AMD64_RDI_REGNUM,		/* %rdi */
    AMD64_RSI_REGNUM,		/* %rsi */
    AMD64_RDX_REGNUM,		/* %rdx */
    AMD64_RCX_REGNUM,		/* %rcx */
    AMD64_R8_REGNUM,		/* %r8 */
    AMD64_R9_REGNUM		/* %r9 */
  };
  static int sse_regnum[] =
  {
    /* %xmm0 ... %xmm7 */
    AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
    AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
    AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
    AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
  };
  struct value **stack_args = XALLOCAVEC (struct value *, nargs);
  int num_stack_args = 0;
  int num_elements = 0;
  int element = 0;
  int integer_reg = 0;
  int sse_reg = 0;
  int i;

  /* Reserve a register for the "hidden" argument.  */
  if (return_method == return_method_struct)
    integer_reg++;

  for (i = 0; i < nargs; i++)
    {
      struct type *type = args[i]->type ();
      int len = type->length ();
      enum amd64_reg_class theclass[2];
      int needed_integer_regs = 0;
      int needed_sse_regs = 0;
      int j;

      /* Classify argument.  */
      amd64_classify (type, theclass);

      /* Calculate the number of integer and SSE registers needed for
	 this argument.  */
      for (j = 0; j < 2; j++)
	{
	  if (theclass[j] == AMD64_INTEGER)
	    needed_integer_regs++;
	  else if (theclass[j] == AMD64_SSE)
	    needed_sse_regs++;
	}

      /* Check whether enough registers are available, and if the
	 argument should be passed in registers at all.  */
      if (integer_reg + needed_integer_regs > ARRAY_SIZE (integer_regnum)
	  || sse_reg + needed_sse_regs > ARRAY_SIZE (sse_regnum)
	  || (needed_integer_regs == 0 && needed_sse_regs == 0))
	{
	  /* The argument will be passed on the stack.  */
	  num_elements += ((len + 7) / 8);
	  stack_args[num_stack_args++] = args[i];
	}
      else
	{
	  /* The argument will be passed in registers.  */
	  const gdb_byte *valbuf = args[i]->contents ().data ();
	  gdb_byte buf[8];

	  gdb_assert (len <= 16);

	  for (j = 0; len > 0; j++, len -= 8)
	    {
	      int regnum = -1;
	      int offset = 0;

	      switch (theclass[j])
		{
		case AMD64_INTEGER:
		  regnum = integer_regnum[integer_reg++];
		  break;

		case AMD64_SSE:
		  regnum = sse_regnum[sse_reg++];
		  break;

		case AMD64_SSEUP:
		  gdb_assert (sse_reg > 0);
		  regnum = sse_regnum[sse_reg - 1];
		  offset = 8;
		  break;

		case AMD64_NO_CLASS:
		  continue;

		default:
		  gdb_assert (!"Unexpected register class.");
		}

	      gdb_assert (regnum != -1);
	      memset (buf, 0, sizeof buf);
	      memcpy (buf, valbuf + j * 8, std::min (len, 8));
	      regcache->raw_write_part (regnum, offset, 8, buf);
	    }
	}
    }

  /* Allocate space for the arguments on the stack.  */
  sp -= num_elements * 8;

  /* The psABI says that "The end of the input argument area shall be
     aligned on a 16 byte boundary."  */
  sp &= ~0xf;

  /* Write out the arguments to the stack.  */
  for (i = 0; i < num_stack_args; i++)
    {
      struct type *type = stack_args[i]->type ();
      const gdb_byte *valbuf = stack_args[i]->contents ().data ();
      int len = type->length ();

      write_memory (sp + element * 8, valbuf, len);
      element += ((len + 7) / 8);
    }

  /* The psABI says that "For calls that may call functions that use
     varargs or stdargs (prototype-less calls or calls to functions
     containing ellipsis (...) in the declaration) %al is used as
     hidden argument to specify the number of SSE registers used.  */
  regcache_raw_write_unsigned (regcache, AMD64_RAX_REGNUM, sse_reg);
  return sp;
}
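
/* Implement the 'push_dummy_call' gdbarch method.  */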

static CORE_ADDR
amd64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
		       struct regcache *regcache, CORE_ADDR bp_addr,
		       int nargs, struct value **args, CORE_ADDR sp,
		       function_call_return_method return_method,
		       CORE_ADDR struct_addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  gdb_byte buf[8];

  /* BND registers can be in arbitrary values at the moment of the
     inferior call.  This can cause boundary violations that are not
     due to a real bug or even desired by the user.  The best to be done
     is set the BND registers to allow access to the whole memory, INIT
     state, before pushing the inferior call.  */
  i387_reset_bnd_regs (gdbarch, regcache);

  /* Pass arguments.  */
  sp = amd64_push_arguments (regcache, nargs, args, sp, return_method);

  /* Pass "hidden" argument.  */
  if (return_method == return_method_struct)
    {
      store_unsigned_integer (buf, 8, byte_order, struct_addr);
      regcache->cooked_write (AMD64_RDI_REGNUM, buf);
    }

  /* Store return address.  */
  sp -= 8;
  store_unsigned_integer (buf, 8, byte_order, bp_addr);
  write_memory (sp, buf, 8);

  /* Finally, update the stack pointer...  */
  store_unsigned_integer (buf, 8, byte_order, sp);
  regcache->cooked_write (AMD64_RSP_REGNUM, buf);

  /* ...and fake a frame pointer.  */
  regcache->cooked_write (AMD64_RBP_REGNUM, buf);

  return sp + 16;
}

/* Displaced instruction handling.  */

/* A partially decoded instruction.
   This contains enough details for displaced stepping purposes.  */

struct amd64_insn
{
  /* The number of opcode bytes.  */
  int opcode_len;
  /* The offset of the REX/VEX instruction encoding prefix or -1 if
     not present.  */
  int enc_prefix_offset;
  /* The offset to the first opcode byte.  */
  int opcode_offset;
  /* The offset to the modrm byte or -1 if not present.  */
  int modrm_offset;

  /* The raw instruction.  */
  gdb_byte *raw_insn;
};
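
/* Closure for displaced stepping: holds the (possibly modified) copy
   of the instruction plus the state needed to undo the modifications
   after the single-step completes.  */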

struct amd64_displaced_step_copy_insn_closure
  : public displaced_step_copy_insn_closure
{
  amd64_displaced_step_copy_insn_closure (int insn_buf_len)
    : insn_buf (insn_buf_len, 0)
  {}

  /* For rip-relative insns, saved copy of the reg we use instead of %rip.  */
  int tmp_used = 0;
  int tmp_regno;
  ULONGEST tmp_save;

  /* Details of the instruction.  */
  struct amd64_insn insn_details;

  /* The possibly modified insn.  */
  gdb::byte_vector insn_buf;
};

/* WARNING: Keep onebyte_has_modrm, twobyte_has_modrm in sync with
   ../opcodes/i386-dis.c (until libopcodes exports them, or an alternative,
   at which point delete these in favor of libopcodes' versions).  */

static const unsigned char onebyte_has_modrm[256] = {
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
  /*	   -------------------------------	  */
  /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
  /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
  /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
  /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
  /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
  /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
  /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
  /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
  /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
  /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
  /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
  /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
  /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
  /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
  /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
  /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1  /* f0 */
  /*	   -------------------------------	  */
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
};

static const unsigned char twobyte_has_modrm[256] = {
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
  /*	   -------------------------------	  */
  /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
  /* 10 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 1f */
  /* 20 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 2f */
  /* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */
  /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
  /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
  /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
  /* 70 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 7f */
  /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
  /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
  /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
  /* b0 */ 1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1, /* bf */
  /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
  /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
  /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
  /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0  /* ff */
  /*	   -------------------------------	  */
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
};

static int amd64_syscall_p (const struct amd64_insn *insn, int *lengthp);
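
/* True if PFX is a REX instruction encoding prefix.  */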

static int
rex_prefix_p (gdb_byte pfx)
{
  return REX_PREFIX_P (pfx);
}

/* True if PFX is the start of the 2-byte VEX prefix.  */

static bool
vex2_prefix_p (gdb_byte pfx)
{
  return pfx == 0xc5;
}

/* True if PFX is the start of the 3-byte VEX prefix.  */

static bool
vex3_prefix_p (gdb_byte pfx)
{
  return pfx == 0xc4;
}

/* Skip the legacy instruction prefixes in INSN.
   We assume INSN is properly sentineled so we don't have to worry
   about falling off the end of the buffer.  */

static gdb_byte *
amd64_skip_prefixes (gdb_byte *insn)
{
  while (1)
    {
      switch (*insn)
	{
	case DATA_PREFIX_OPCODE:
	case ADDR_PREFIX_OPCODE:
	case CS_PREFIX_OPCODE:
	case DS_PREFIX_OPCODE:
	case ES_PREFIX_OPCODE:
	case FS_PREFIX_OPCODE:
	case GS_PREFIX_OPCODE:
	case SS_PREFIX_OPCODE:
	case LOCK_PREFIX_OPCODE:
	case REPE_PREFIX_OPCODE:
	case REPNE_PREFIX_OPCODE:
	  ++insn;
	  continue;
	default:
	  break;
	}
      break;
    }

  return insn;
}

/* Return an integer register (other than RSP) that is unused as an input
   operand in INSN.
   In order to not require adding a rex prefix if the insn doesn't already
   have one, the result is restricted to RAX ... RDI, sans RSP.
   The register numbering of the result follows architecture ordering,
   e.g. RDI = 7.  */

static int
amd64_get_unused_input_int_reg (const struct amd64_insn *details)
{
  /* 1 bit for each reg */
  int used_regs_mask = 0;

  /* There can be at most 3 int regs used as inputs in an insn, and we have
     7 to choose from (RAX ... RDI, sans RSP).
     This allows us to take a conservative approach and keep things simple.
     E.g. by avoiding RAX, we don't have to specifically watch for opcodes
     that implicitly specify RAX.  */

  /* Avoid RAX.  */
  used_regs_mask |= 1 << EAX_REG_NUM;
  /* Similarly avoid RDX, implicit operand in divides.  */
  used_regs_mask |= 1 << EDX_REG_NUM;
  /* Avoid RSP.  */
  used_regs_mask |= 1 << ESP_REG_NUM;

  /* If the opcode is one byte long and there's no ModRM byte,
     assume the opcode specifies a register.  */
  if (details->opcode_len == 1 && details->modrm_offset == -1)
    used_regs_mask |= 1 << (details->raw_insn[details->opcode_offset] & 7);
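
  /* A ModRM byte encodes mod in bits 7-6, reg in bits 5-3 and rm in
     bits 2-0; when mod != 3 and rm == 4, a SIB (scale-index-base)
     byte follows.  */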

  /* Mark used regs in the modrm/sib bytes.  */
  if (details->modrm_offset != -1)
    {
      int modrm = details->raw_insn[details->modrm_offset];
      int mod = MODRM_MOD_FIELD (modrm);
      int reg = MODRM_REG_FIELD (modrm);
      int rm = MODRM_RM_FIELD (modrm);
      int have_sib = mod != 3 && rm == 4;

      /* Assume the reg field of the modrm byte specifies a register.  */
      used_regs_mask |= 1 << reg;

      if (have_sib)
	{
	  int base = SIB_BASE_FIELD (details->raw_insn[details->modrm_offset + 1]);
	  int idx = SIB_INDEX_FIELD (details->raw_insn[details->modrm_offset + 1]);
	  used_regs_mask |= 1 << base;
	  used_regs_mask |= 1 << idx;
	}
      else
	{
	  used_regs_mask |= 1 << rm;
	}
    }

  gdb_assert (used_regs_mask < 256);
  gdb_assert (used_regs_mask != 255);

  /* Finally, find a free reg.  */
  {
    int i;

    for (i = 0; i < 8; ++i)
      {
	if (! (used_regs_mask & (1 << i)))
	  return i;
      }

    /* We shouldn't get here.  */
    internal_error (_("unable to find free reg"));
  }
}

/* Extract the details of INSN that we need.  */

static void
amd64_get_insn_details (gdb_byte *insn, struct amd64_insn *details)
{
  gdb_byte *start = insn;
  int need_modrm;

  details->raw_insn = insn;

  details->opcode_len = -1;
  details->enc_prefix_offset = -1;
  details->opcode_offset = -1;
  details->modrm_offset = -1;

  /* Skip legacy instruction prefixes.  */
  insn = amd64_skip_prefixes (insn);

  /* Skip REX/VEX instruction encoding prefixes.  */
  if (rex_prefix_p (*insn))
    {
      details->enc_prefix_offset = insn - start;
      ++insn;
    }
  else if (vex2_prefix_p (*insn))
    {
      /* Don't record the offset in this case because this prefix has
	 no REX.B equivalent.  */
      insn += 2;
    }
  else if (vex3_prefix_p (*insn))
    {
      details->enc_prefix_offset = insn - start;
      insn += 3;
    }

  details->opcode_offset = insn - start;

  if (*insn == TWO_BYTE_OPCODE_ESCAPE)
    {
      /* Two or three-byte opcode.  */
      ++insn;
      need_modrm = twobyte_has_modrm[*insn];

      /* Check for three-byte opcode.  */
      switch (*insn)
	{
	case 0x24:
	case 0x25:
	case 0x38:
	case 0x3a:
	case 0x7a:
	case 0x7b:
	  ++insn;
	  details->opcode_len = 3;
	  break;
	default:
	  details->opcode_len = 2;
	  break;
	}
    }
  else
    {
      /* One-byte opcode.  */
      need_modrm = onebyte_has_modrm[*insn];
      details->opcode_len = 1;
    }

  if (need_modrm)
    {
      ++insn;
      details->modrm_offset = insn - start;
    }
}

/* Update %rip-relative addressing in INSN.

   %rip-relative addressing only uses a 32-bit displacement.
   32 bits is not enough to be guaranteed to cover the distance between where
   the real instruction is and where its copy is.
   Convert the insn to use base+disp addressing.
   We set base = pc + insn_length so we can leave disp unchanged.  */

static void
fixup_riprel (struct gdbarch *gdbarch,
	      amd64_displaced_step_copy_insn_closure *dsc,
	      CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
{
  const struct amd64_insn *insn_details = &dsc->insn_details;
  int modrm_offset = insn_details->modrm_offset;
  CORE_ADDR rip_base;
  int insn_length;
  int arch_tmp_regno, tmp_regno;
  ULONGEST orig_value;

  /* Compute the rip-relative address.  */
  insn_length = gdb_buffered_insn_length (gdbarch, dsc->insn_buf.data (),
					  dsc->insn_buf.size (), from);
  rip_base = from + insn_length;

  /* We need a register to hold the address.
     Pick one not used in the insn.
     NOTE: arch_tmp_regno uses architecture ordering, e.g. RDI = 7.  */
  arch_tmp_regno = amd64_get_unused_input_int_reg (insn_details);
  tmp_regno = amd64_arch_reg_to_regnum (arch_tmp_regno);

  /* Position of the not-B bit in the 3-byte VEX prefix (in byte 1).  */
  static constexpr gdb_byte VEX3_NOT_B = 0x20;

  /* REX.B should be unset (VEX.!B set) as we were using rip-relative
     addressing, but ensure it's unset (set for VEX) anyway, tmp_regno
     is not r8-r15.  */
  if (insn_details->enc_prefix_offset != -1)
    {
      gdb_byte *pfx = &dsc->insn_buf[insn_details->enc_prefix_offset];
      if (rex_prefix_p (pfx[0]))
	pfx[0] &= ~REX_B;
      else if (vex3_prefix_p (pfx[0]))
	pfx[1] |= VEX3_NOT_B;
      else
	gdb_assert_not_reached ("unhandled prefix");
    }

  regcache_cooked_read_unsigned (regs, tmp_regno, &orig_value);
  dsc->tmp_regno = tmp_regno;
  dsc->tmp_save = orig_value;
  dsc->tmp_used = 1;

  /* Convert the ModRM field to be base+disp.  */
  dsc->insn_buf[modrm_offset] &= ~0xc7;
  dsc->insn_buf[modrm_offset] |= 0x80 + arch_tmp_regno;

  regcache_cooked_write_unsigned (regs, tmp_regno, rip_base);

  displaced_debug_printf ("%%rip-relative addressing used.");
  displaced_debug_printf ("using temp reg %d, old value %s, new value %s",
			  dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save),
			  paddress (gdbarch, rip_base));
}
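
/* Fix up the copy of the instruction in DSC so that it can be
   executed at TO instead of FROM; currently this means rewriting any
   %rip-relative addressing it uses.  */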

static void
fixup_displaced_copy (struct gdbarch *gdbarch,
		      amd64_displaced_step_copy_insn_closure *dsc,
		      CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
{
  const struct amd64_insn *details = &dsc->insn_details;

  if (details->modrm_offset != -1)
    {
      gdb_byte modrm = details->raw_insn[details->modrm_offset];

      if ((modrm & 0xc7) == 0x05)
	{
	  /* The insn uses rip-relative addressing.
	     Deal with it.  */
	  fixup_riprel (gdbarch, dsc, from, to, regs);
	}
    }
}
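
/* Implement the 'displaced_step_copy_insn' gdbarch method.  */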

displaced_step_copy_insn_closure_up
amd64_displaced_step_copy_insn (struct gdbarch *gdbarch,
				CORE_ADDR from, CORE_ADDR to,
				struct regcache *regs)
{
  int len = gdbarch_max_insn_length (gdbarch);
  /* Extra space for sentinels so fixup_{riprel,displaced_copy} don't have to
     continually watch for running off the end of the buffer.  */
  int fixup_sentinel_space = len;
  std::unique_ptr<amd64_displaced_step_copy_insn_closure> dsc
    (new amd64_displaced_step_copy_insn_closure (len + fixup_sentinel_space));
  gdb_byte *buf = &dsc->insn_buf[0];
  struct amd64_insn *details = &dsc->insn_details;

  read_memory (from, buf, len);

  /* Set up the sentinel space so we don't have to worry about running
     off the end of the buffer.  An excessive number of leading prefixes
     could otherwise cause this.  */
  memset (buf + len, 0, fixup_sentinel_space);

  amd64_get_insn_details (buf, details);

  /* GDB may get control back after the insn after the syscall.
     Presumably this is a kernel bug.
     If this is a syscall, make sure there's a nop afterwards.  */
  {
    int syscall_length;

    if (amd64_syscall_p (details, &syscall_length))
      buf[details->opcode_offset + syscall_length] = NOP_OPCODE;
  }

  /* Modify the insn to cope with the address where it will be executed from.
     In particular, handle any rip-relative addressing.  */
  fixup_displaced_copy (gdbarch, dsc.get (), from, to, regs);

  write_memory (to, buf, len);

  displaced_debug_printf ("copy %s->%s: %s",
			  paddress (gdbarch, from), paddress (gdbarch, to),
			  bytes_to_string (buf, len).c_str ());

  /* This is a work around for a problem with g++ 4.8.  */
  return displaced_step_copy_insn_closure_up (dsc.release ());
}
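
/* Return non-zero if the instruction DETAILS is an absolute indirect
   jump (near or far), zero otherwise.  */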

static int
amd64_absolute_jmp_p (const struct amd64_insn *details)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (insn[0] == 0xff)
    {
      /* jump near, absolute indirect (/4) */
      if ((insn[1] & 0x38) == 0x20)
	return 1;

      /* jump far, absolute indirect (/5) */
      if ((insn[1] & 0x38) == 0x28)
	return 1;
    }

  return 0;
}

/* Return non-zero if the instruction DETAILS is a jump, zero otherwise.  */

static int
amd64_jmp_p (const struct amd64_insn *details)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  /* jump short, relative.  */
  if (insn[0] == 0xeb)
    return 1;

  /* jump near, relative.  */
  if (insn[0] == 0xe9)
    return 1;

  return amd64_absolute_jmp_p (details);
}
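
/* Return non-zero if the instruction DETAILS is an absolute indirect
   call (near or far), zero otherwise.  */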

static int
amd64_absolute_call_p (const struct amd64_insn *details)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (insn[0] == 0xff)
    {
      /* Call near, absolute indirect (/2) */
      if ((insn[1] & 0x38) == 0x10)
	return 1;

      /* Call far, absolute indirect (/3) */
      if ((insn[1] & 0x38) == 0x18)
	return 1;
    }

  return 0;
}
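
/* Return non-zero if the instruction DETAILS is a return, zero
   otherwise.  */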

static int
amd64_ret_p (const struct amd64_insn *details)
{
  /* NOTE: gcc can emit "repz ; ret".  */
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  switch (insn[0])
    {
    case 0xc2: /* ret near, pop N bytes */
    case 0xc3: /* ret near */
    case 0xca: /* ret far, pop N bytes */
    case 0xcb: /* ret far */
    case 0xcf: /* iret */
      return 1;

    default:
      return 0;
    }
}
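
/* Return non-zero if the instruction DETAILS is a call, zero
   otherwise.  */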

static int
amd64_call_p (const struct amd64_insn *details)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (amd64_absolute_call_p (details))
    return 1;

  /* call near, relative */
  if (insn[0] == 0xe8)
    return 1;

  return 0;
}

/* Return non-zero if INSN is a system call, and set *LENGTHP to its
   length in bytes.  Otherwise, return zero.  */

static int
amd64_syscall_p (const struct amd64_insn *details, int *lengthp)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (insn[0] == 0x0f && insn[1] == 0x05)
    {
      *lengthp = 2;
      return 1;
    }

  return 0;
}

/* Classify the instruction at ADDR using PRED.
   Throw an error if the memory can't be read.  */

static int
amd64_classify_insn_at (struct gdbarch *gdbarch, CORE_ADDR addr,
			int (*pred) (const struct amd64_insn *))
{
  struct amd64_insn details;

  gdb::byte_vector buf (gdbarch_max_insn_length (gdbarch));

  read_code (addr, buf.data (), buf.size ());
  amd64_get_insn_details (buf.data (), &details);

  int classification = pred (&details);

  return classification;
}

/* The gdbarch insn_is_call method.  */

static int
amd64_insn_is_call (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  return amd64_classify_insn_at (gdbarch, addr, amd64_call_p);
}

/* The gdbarch insn_is_ret method.  */

static int
amd64_insn_is_ret (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  return amd64_classify_insn_at (gdbarch, addr, amd64_ret_p);
}

/* The gdbarch insn_is_jump method.  */

static int
amd64_insn_is_jump (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  return amd64_classify_insn_at (gdbarch, addr, amd64_jmp_p);
}

/* Fix up the state of registers and memory after having single-stepped
   a displaced instruction.  */

void
amd64_displaced_step_fixup (struct gdbarch *gdbarch,
			    struct displaced_step_copy_insn_closure *dsc_,
			    CORE_ADDR from, CORE_ADDR to,
			    struct regcache *regs, bool completed_p)
{
  amd64_displaced_step_copy_insn_closure *dsc
    = (amd64_displaced_step_copy_insn_closure *) dsc_;
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  /* The offset we applied to the instruction's address.  */
  ULONGEST insn_offset = to - from;
  gdb_byte *insn = dsc->insn_buf.data ();
  const struct amd64_insn *insn_details = &dsc->insn_details;

  displaced_debug_printf ("fixup (%s, %s), insn = 0x%02x 0x%02x ...",
			  paddress (gdbarch, from), paddress (gdbarch, to),
			  insn[0], insn[1]);

  /* If we used a tmp reg, restore it.  */

  if (dsc->tmp_used)
    {
      displaced_debug_printf ("restoring reg %d to %s",
			      dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save));
      regcache_cooked_write_unsigned (regs, dsc->tmp_regno, dsc->tmp_save);
    }

  /* The list of issues to contend with here is taken from
     resume_execution in arch/x86/kernel/kprobes.c, Linux 2.6.28.
     Yay for Free Software!  */

  /* Relocate the %rip back to the program's instruction stream,
     if necessary.  */

  /* Except in the case of absolute or indirect jump or call
     instructions, or a return instruction, the new rip is relative to
     the displaced instruction; make it relative to the original insn.
     Well, signal handler returns don't need relocation either, but we use the
     value of %rip to recognize those; see below.  */
  if (!completed_p
      || (!amd64_absolute_jmp_p (insn_details)
	  && !amd64_absolute_call_p (insn_details)
	  && !amd64_ret_p (insn_details)))
    {
      int insn_len;

      CORE_ADDR pc = regcache_read_pc (regs);

      /* A signal trampoline system call changes the %rip, resuming
	 execution of the main program after the signal handler has
	 returned.  That makes them like 'return' instructions; we
	 shouldn't relocate %rip.

	 But most system calls don't, and we do need to relocate %rip.

	 Our heuristic for distinguishing these cases: if stepping
	 over the system call instruction left control directly after
	 the instruction, then we relocate --- control almost certainly
	 doesn't belong in the displaced copy.  Otherwise, we assume
	 the instruction has put control where it belongs, and leave
	 it unrelocated.  Goodness help us if there are PC-relative
	 system calls.  */
      if (amd64_syscall_p (insn_details, &insn_len)
	  /* GDB can get control back after the insn after the syscall.
	     Presumably this is a kernel bug.  Fixup ensures it's a nop; we
	     add one to the length for it.  */
	  && (pc < to || pc > (to + insn_len + 1)))
	displaced_debug_printf ("syscall changed %%rip; not relocating");
      else
	{
	  CORE_ADDR rip = pc - insn_offset;

	  /* If we just stepped over a breakpoint insn, we don't backup
	     the pc on purpose; this is to match behaviour without
	     stepping.  */

	  regcache_write_pc (regs, rip);

	  displaced_debug_printf ("relocated %%rip from %s to %s",
				  paddress (gdbarch, pc),
				  paddress (gdbarch, rip));
	}
    }

  /* If the instruction was PUSHFL, then the TF bit will be set in the
     pushed value, and should be cleared.  We'll leave this for later,
     since GDB already messes up the TF flag when stepping over a
     pushfl.  */

  /* If the instruction was a call, the return address now atop the
     stack is the address following the copied instruction.  We need
     to make it the address following the original instruction.  */
  if (completed_p && amd64_call_p (insn_details))
    {
      ULONGEST rsp;
      ULONGEST retaddr;
      const ULONGEST retaddr_len = 8;

      regcache_cooked_read_unsigned (regs, AMD64_RSP_REGNUM, &rsp);
      retaddr = read_memory_unsigned_integer (rsp, retaddr_len, byte_order);
      retaddr = (retaddr - insn_offset) & 0xffffffffffffffffULL;
      write_memory_unsigned_integer (rsp, retaddr_len, byte_order, retaddr);

      displaced_debug_printf ("relocated return addr at %s to %s",
			      paddress (gdbarch, rsp),
			      paddress (gdbarch, retaddr));
    }
}

/* If the instruction INSN uses RIP-relative addressing, return the
   offset into the raw INSN where the displacement to be adjusted is
   found.  Returns 0 if the instruction doesn't use RIP-relative
   addressing.  */

static int
rip_relative_offset (struct amd64_insn *insn)
{
  if (insn->modrm_offset != -1)
    {
      gdb_byte modrm = insn->raw_insn[insn->modrm_offset];

      if ((modrm & 0xc7) == 0x05)
	{
	  /* The displacement is found right after the ModRM byte.  */
	  return insn->modrm_offset + 1;
	}
    }

  return 0;
}
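
/* Append the LEN bytes in BUF to target memory at *TO, then advance
   *TO past the written bytes.  */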

static void
append_insns (CORE_ADDR *to, ULONGEST len, const gdb_byte *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}
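
/* Copy the instruction at OLDLOC to *TO, rewriting PC-relative calls
   and jumps so the copy behaves as if it were still executed at
   OLDLOC; advance *TO past the emitted code.  */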
1827 static void
1828 amd64_relocate_instruction (struct gdbarch *gdbarch,
1829 CORE_ADDR *to, CORE_ADDR oldloc)
1831 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1832 int len = gdbarch_max_insn_length (gdbarch);
1833 /* Extra space for sentinels. */
1834 int fixup_sentinel_space = len;
1835 gdb::byte_vector buf (len + fixup_sentinel_space);
1836 struct amd64_insn insn_details;
1837 int offset = 0;
1838 LONGEST rel32, newrel;
1839 gdb_byte *insn;
1840 int insn_length;
1842 read_memory (oldloc, buf.data (), len);
1844 /* Set up the sentinel space so we don't have to worry about running
1845 off the end of the buffer. An excessive number of leading prefixes
1846 could otherwise cause this. */
1847 memset (buf.data () + len, 0, fixup_sentinel_space);
1849 insn = buf.data ();
1850 amd64_get_insn_details (insn, &insn_details);
1852 insn_length = gdb_buffered_insn_length (gdbarch, insn, len, oldloc);
1854 /* Skip legacy instruction prefixes. */
1855 insn = amd64_skip_prefixes (insn);
1857 /* Adjust calls with 32-bit relative addresses as push/jump, with
1858 the address pushed being the location where the original call in
1859 the user program would return to. */
1860 if (insn[0] == 0xe8)
1862 gdb_byte push_buf[32];
1863 CORE_ADDR ret_addr;
1864 int i = 0;
1866 /* Where "ret" in the original code will return to. */
1867 ret_addr = oldloc + insn_length;
1869 /* If pushing an address higher than or equal to 0x80000000,
1870 avoid 'pushq', as that sign extends its 32-bit operand, which
1871 would be incorrect. */
1872 if (ret_addr <= 0x7fffffff)
1874 push_buf[0] = 0x68; /* pushq $... */
1875 store_unsigned_integer (&push_buf[1], 4, byte_order, ret_addr);
1876 i = 5;
1878 else
1880 push_buf[i++] = 0x48; /* sub $0x8,%rsp */
1881 push_buf[i++] = 0x83;
1882 push_buf[i++] = 0xec;
1883 push_buf[i++] = 0x08;
1885 push_buf[i++] = 0xc7; /* movl $imm,(%rsp) */
1886 push_buf[i++] = 0x04;
1887 push_buf[i++] = 0x24;
1888 store_unsigned_integer (&push_buf[i], 4, byte_order,
1889 ret_addr & 0xffffffff);
1890 i += 4;
1892 push_buf[i++] = 0xc7; /* movl $imm,4(%rsp) */
1893 push_buf[i++] = 0x44;
1894 push_buf[i++] = 0x24;
1895 push_buf[i++] = 0x04;
1896 store_unsigned_integer (&push_buf[i], 4, byte_order,
1897 ret_addr >> 32);
1898 i += 4;
1900 gdb_assert (i <= sizeof (push_buf));
1901 /* Push the push. */
1902 append_insns (to, i, push_buf);
1904 /* Convert the relative call to a relative jump. */
1905 insn[0] = 0xe9;
1907 /* Adjust the destination offset. */
1908 rel32 = extract_signed_integer (insn + 1, 4, byte_order);
1909 newrel = (oldloc - *to) + rel32;
1910 store_signed_integer (insn + 1, 4, byte_order, newrel);
1912 displaced_debug_printf ("adjusted insn rel32=%s at %s to rel32=%s at %s",
1913 hex_string (rel32), paddress (gdbarch, oldloc),
1914 hex_string (newrel), paddress (gdbarch, *to));
1916 /* Write the adjusted jump into its displaced location. */
1917 append_insns (to, 5, insn);
1918 return;
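/* To illustrate the rewrite above (addresses hypothetical): a "call"
   at oldloc 0x401000 targeting 0x401100 (rel32 = 0xfb) copied to *TO
   becomes a push of the return address 0x401005 followed by a "jmp"
   with newrel = (oldloc - *to) + rel32, so the jump still lands on
   0x401100, since *to + 5 + newrel == oldloc + 5 + rel32.  */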
1921 offset = rip_relative_offset (&insn_details);
1922 if (!offset)
1924 /* Adjust jumps with 32-bit relative addresses. Calls are
1925 already handled above. */
1926 if (insn[0] == 0xe9)
1927 offset = 1;
1928 /* Adjust conditional jumps. */
1929 else if (insn[0] == 0x0f && (insn[1] & 0xf0) == 0x80)
1930 offset = 2;
1933 if (offset)
1935 rel32 = extract_signed_integer (insn + offset, 4, byte_order);
1936 newrel = (oldloc - *to) + rel32;
1937 store_signed_integer (insn + offset, 4, byte_order, newrel);
1938 displaced_debug_printf ("adjusted insn rel32=%s at %s to rel32=%s at %s",
1939 hex_string (rel32), paddress (gdbarch, oldloc),
1940 hex_string (newrel), paddress (gdbarch, *to));
1943 /* Write the adjusted instruction into its displaced location. */
1944 append_insns (to, insn_length, buf.data ());
1948 /* The maximum number of saved registers. This should include %rip. */
1949 #define AMD64_NUM_SAVED_REGS AMD64_NUM_GREGS
1951 struct amd64_frame_cache
1953 /* Base address. */
1954 CORE_ADDR base;
1955 int base_p;
1956 CORE_ADDR sp_offset;
1957 CORE_ADDR pc;
1959 /* Saved registers. */
1960 CORE_ADDR saved_regs[AMD64_NUM_SAVED_REGS];
1961 CORE_ADDR saved_sp;
1962 int saved_sp_reg;
1964 /* Do we have a frame? */
1965 int frameless_p;
1968 /* Initialize a frame cache. */
1970 static void
1971 amd64_init_frame_cache (struct amd64_frame_cache *cache)
1973 int i;
1975 /* Base address. */
1976 cache->base = 0;
1977 cache->base_p = 0;
1978 cache->sp_offset = -8;
1979 cache->pc = 0;
1981 /* Saved registers. We initialize these to -1 since zero is a valid
1982 offset (that's where %rbp is supposed to be stored).
1983 The values start out as being offsets, and are later converted to
1984 addresses (at which point -1 is interpreted as an address, still meaning
1985 "invalid"). */
1986 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
1987 cache->saved_regs[i] = -1;
1988 cache->saved_sp = 0;
1989 cache->saved_sp_reg = -1;
1991 /* Frameless until proven otherwise. */
1992 cache->frameless_p = 1;
1995 /* Allocate and initialize a frame cache. */
1997 static struct amd64_frame_cache *
1998 amd64_alloc_frame_cache (void)
2000 struct amd64_frame_cache *cache;
2002 cache = FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache);
2003 amd64_init_frame_cache (cache);
2004 return cache;
2007 /* GCC 4.4 and later can put code in the prologue to realign the
2008 stack pointer. Check whether PC points to such code, and update
2009 CACHE accordingly. Return the address of the first instruction
2010 after the code sequence, or CURRENT_PC, whichever is smaller. If
2011 we don't recognize the code, return PC. */
2013 static CORE_ADDR
2014 amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
2015 struct amd64_frame_cache *cache)
2017 /* There are 2 code sequences to re-align stack before the frame
2018 gets set up:
2020 1. Use a caller-saved register:
2022 leaq 8(%rsp), %reg
2023 andq $-XXX, %rsp
2024 pushq -8(%reg)
2026 2. Use a callee-saved register:
2028 pushq %reg
2029 leaq 16(%rsp), %reg
2030 andq $-XXX, %rsp
2031 pushq -8(%reg)
2033 "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:
2035 0x48 0x83 0xe4 0xf0 andq $-16, %rsp
2036 0x48 0x81 0xe4 0x00 0xff 0xff 0xff andq $-256, %rsp
2037 */
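/* A concrete instance of sequence 1, with %rax as the scratch
   register (bytes shown for illustration):
     48 8d 44 24 08    leaq  0x8(%rsp),%rax
     48 83 e4 f0       andq  $-16,%rsp
     ff 70 f8          pushq -0x8(%rax)  */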
2039 gdb_byte buf[18];
2040 int reg, r;
2041 int offset, offset_and;
2043 if (target_read_code (pc, buf, sizeof buf))
2044 return pc;
2046 /* Check for a caller-saved register. The first instruction has
2047 to be "leaq 8(%rsp), %reg". */
2048 if ((buf[0] & 0xfb) == 0x48
2049 && buf[1] == 0x8d
2050 && buf[3] == 0x24
2051 && buf[4] == 0x8)
2053 /* MOD must be binary 10 and R/M must be binary 100. */
2054 if ((buf[2] & 0xc7) != 0x44)
2055 return pc;
2057 /* REG has register number. */
2058 reg = (buf[2] >> 3) & 7;
2060 /* Check the REX.R bit. */
2061 if (buf[0] == 0x4c)
2062 reg += 8;
2064 offset = 5;
2066 else
2068 /* Check for a callee-saved register. The first instruction
2069 has to be "pushq %reg". */
2070 reg = 0;
2071 if ((buf[0] & 0xf8) == 0x50)
2072 offset = 0;
2073 else if ((buf[0] & 0xf6) == 0x40
2074 && (buf[1] & 0xf8) == 0x50)
2076 /* Check the REX.B bit. */
2077 if ((buf[0] & 1) != 0)
2078 reg = 8;
2080 offset = 1;
2082 else
2083 return pc;
2085 /* Get register. */
2086 reg += buf[offset] & 0x7;
2088 offset++;
2090 /* The next instruction has to be "leaq 16(%rsp), %reg". */
2091 if ((buf[offset] & 0xfb) != 0x48
2092 || buf[offset + 1] != 0x8d
2093 || buf[offset + 3] != 0x24
2094 || buf[offset + 4] != 0x10)
2095 return pc;
2097 /* MOD must be binary 10 and R/M must be binary 100. */
2098 if ((buf[offset + 2] & 0xc7) != 0x44)
2099 return pc;
2101 /* REG has register number. */
2102 r = (buf[offset + 2] >> 3) & 7;
2104 /* Check the REX.R bit. */
2105 if (buf[offset] == 0x4c)
2106 r += 8;
2108 /* Registers in pushq and leaq have to be the same. */
2109 if (reg != r)
2110 return pc;
2112 offset += 5;
2115 /* Register can't be %rsp or %rbp. */
2116 if (reg == 4 || reg == 5)
2117 return pc;
2119 /* The next instruction has to be "andq $-XXX, %rsp". */
2120 if (buf[offset] != 0x48
2121 || buf[offset + 2] != 0xe4
2122 || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
2123 return pc;
2125 offset_and = offset;
2126 offset += buf[offset + 1] == 0x81 ? 7 : 4;
2128 /* The next instruction has to be "pushq -8(%reg)". */
2129 r = 0;
2130 if (buf[offset] == 0xff)
2131 offset++;
2132 else if ((buf[offset] & 0xf6) == 0x40
2133 && buf[offset + 1] == 0xff)
2135 /* Check the REX.B bit. */
2136 if ((buf[offset] & 0x1) != 0)
2137 r = 8;
2138 offset += 2;
2140 else
2141 return pc;
2143 /* 8-bit -8 is 0xf8. REG must be binary 110 and MOD must be binary
2144 01. */
2145 if (buf[offset + 1] != 0xf8
2146 || (buf[offset] & 0xf8) != 0x70)
2147 return pc;
2149 /* R/M has register. */
2150 r += buf[offset] & 7;
2152 /* Registers in leaq and pushq have to be the same. */
2153 if (reg != r)
2154 return pc;
2156 if (current_pc > pc + offset_and)
2157 cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);
2159 return std::min (pc + offset + 2, current_pc);
2162 /* Similar to amd64_analyze_stack_align for x32. */
2164 static CORE_ADDR
2165 amd64_x32_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
2166 struct amd64_frame_cache *cache)
2168 /* There are 2 code sequences to re-align stack before the frame
2169 gets set up:
2171 1. Use a caller-saved register:
2173 leaq 8(%rsp), %reg
2174 andq $-XXX, %rsp
2175 pushq -8(%reg)
2177 or
2179 [addr32] leal 8(%rsp), %reg
2180 andl $-XXX, %esp
2181 [addr32] pushq -8(%reg)
2183 2. Use a callee-saved register:
2185 pushq %reg
2186 leaq 16(%rsp), %reg
2187 andq $-XXX, %rsp
2188 pushq -8(%reg)
2190 or
2192 pushq %reg
2193 [addr32] leal 16(%rsp), %reg
2194 andl $-XXX, %esp
2195 [addr32] pushq -8(%reg)
2197 "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:
2199 0x48 0x83 0xe4 0xf0 andq $-16, %rsp
2200 0x48 0x81 0xe4 0x00 0xff 0xff 0xff andq $-256, %rsp
2202 "andl $-XXX, %esp" can be either 3 bytes or 6 bytes:
2204 0x83 0xe4 0xf0 andl $-16, %esp
2205 0x81 0xe4 0x00 0xff 0xff 0xff andl $-256, %esp
2206 */
2208 gdb_byte buf[19];
2209 int reg, r;
2210 int offset, offset_and;
2212 if (target_read_memory (pc, buf, sizeof buf))
2213 return pc;
2215 /* Skip optional addr32 prefix. */
2216 offset = buf[0] == 0x67 ? 1 : 0;
2218 /* Check for a caller-saved register. The first instruction has
2219 to be "leaq 8(%rsp), %reg" or "leal 8(%rsp), %reg". */
2220 if (((buf[offset] & 0xfb) == 0x48 || (buf[offset] & 0xfb) == 0x40)
2221 && buf[offset + 1] == 0x8d
2222 && buf[offset + 3] == 0x24
2223 && buf[offset + 4] == 0x8)
2225 /* MOD must be binary 10 and R/M must be binary 100. */
2226 if ((buf[offset + 2] & 0xc7) != 0x44)
2227 return pc;
2229 /* REG has register number. */
2230 reg = (buf[offset + 2] >> 3) & 7;
2232 /* Check the REX.R bit. */
2233 if ((buf[offset] & 0x4) != 0)
2234 reg += 8;
2236 offset += 5;
2238 else
2240 /* Check for a callee-saved register. The first instruction
2241 has to be "pushq %reg". */
2242 reg = 0;
2243 if ((buf[offset] & 0xf6) == 0x40
2244 && (buf[offset + 1] & 0xf8) == 0x50)
2246 /* Check the REX.B bit. */
2247 if ((buf[offset] & 1) != 0)
2248 reg = 8;
2250 offset += 1;
2252 else if ((buf[offset] & 0xf8) != 0x50)
2253 return pc;
2255 /* Get register. */
2256 reg += buf[offset] & 0x7;
2258 offset++;
2260 /* Skip optional addr32 prefix. */
2261 if (buf[offset] == 0x67)
2262 offset++;
2264 /* The next instruction has to be "leaq 16(%rsp), %reg" or
2265 "leal 16(%rsp), %reg". */
2266 if (((buf[offset] & 0xfb) != 0x48 && (buf[offset] & 0xfb) != 0x40)
2267 || buf[offset + 1] != 0x8d
2268 || buf[offset + 3] != 0x24
2269 || buf[offset + 4] != 0x10)
2270 return pc;
2272 /* MOD must be binary 10 and R/M must be binary 100. */
2273 if ((buf[offset + 2] & 0xc7) != 0x44)
2274 return pc;
2276 /* REG has register number. */
2277 r = (buf[offset + 2] >> 3) & 7;
2279 /* Check the REX.R bit. */
2280 if ((buf[offset] & 0x4) != 0)
2281 r += 8;
2283 /* Registers in pushq and leaq have to be the same. */
2284 if (reg != r)
2285 return pc;
2287 offset += 5;
2290 /* Register can't be %rsp or %rbp. */
2291 if (reg == 4 || reg == 5)
2292 return pc;
2294 /* The next instruction may be "andq $-XXX, %rsp" or
2295 "andl $-XXX, %esp". */
2296 if (buf[offset] != 0x48)
2297 offset--;
2299 if (buf[offset + 2] != 0xe4
2300 || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
2301 return pc;
2303 offset_and = offset;
2304 offset += buf[offset + 1] == 0x81 ? 7 : 4;
2306 /* Skip optional addr32 prefix. */
2307 if (buf[offset] == 0x67)
2308 offset++;
2310 /* The next instruction has to be "pushq -8(%reg)". */
2311 r = 0;
2312 if (buf[offset] == 0xff)
2313 offset++;
2314 else if ((buf[offset] & 0xf6) == 0x40
2315 && buf[offset + 1] == 0xff)
2317 /* Check the REX.B bit. */
2318 if ((buf[offset] & 0x1) != 0)
2319 r = 8;
2320 offset += 2;
2322 else
2323 return pc;
2325 /* 8-bit -8 is 0xf8. REG must be binary 110 and MOD must be binary
2326 01. */
2327 if (buf[offset + 1] != 0xf8
2328 || (buf[offset] & 0xf8) != 0x70)
2329 return pc;
2331 /* R/M has register. */
2332 r += buf[offset] & 7;
2334 /* Registers in leaq and pushq have to be the same. */
2335 if (reg != r)
2336 return pc;
2338 if (current_pc > pc + offset_and)
2339 cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);
2341 return std::min (pc + offset + 2, current_pc);
2344 /* Do a limited analysis of the prologue at PC and update CACHE
2345 accordingly. Bail out early if CURRENT_PC is reached. Return the
2346 address where the analysis stopped.
2348 We will handle only functions beginning with:
2350 pushq %rbp 0x55
2351 movq %rsp, %rbp 0x48 0x89 0xe5 (or 0x48 0x8b 0xec)
2353 or (for the X32 ABI):
2355 pushq %rbp 0x55
2356 movl %esp, %ebp 0x89 0xe5 (or 0x8b 0xec)
2358 The `endbr64` instruction can be found before these sequences, and will be
2359 skipped if found.
2361 Any function that doesn't start with one of these sequences will be
2362 assumed to have no prologue and thus no valid frame pointer in
2363 %rbp. */
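/* For example, a function compiled with -fcf-protection typically
   starts with the bytes f3 0f 1e fa 55 48 89 e5 (endbr64;
   pushq %rbp; movq %rsp,%rbp), all of which the analysis below
   recognizes.  */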
2365 static CORE_ADDR
2366 amd64_analyze_prologue (struct gdbarch *gdbarch,
2367 CORE_ADDR pc, CORE_ADDR current_pc,
2368 struct amd64_frame_cache *cache)
2370 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2371 /* The `endbr64` instruction. */
2372 static const gdb_byte endbr64[4] = { 0xf3, 0x0f, 0x1e, 0xfa };
2373 /* There are two variations of movq %rsp, %rbp. */
2374 static const gdb_byte mov_rsp_rbp_1[3] = { 0x48, 0x89, 0xe5 };
2375 static const gdb_byte mov_rsp_rbp_2[3] = { 0x48, 0x8b, 0xec };
2376 /* Ditto for movl %esp, %ebp. */
2377 static const gdb_byte mov_esp_ebp_1[2] = { 0x89, 0xe5 };
2378 static const gdb_byte mov_esp_ebp_2[2] = { 0x8b, 0xec };
2380 gdb_byte buf[3];
2381 gdb_byte op;
2383 if (current_pc <= pc)
2384 return current_pc;
2386 if (gdbarch_ptr_bit (gdbarch) == 32)
2387 pc = amd64_x32_analyze_stack_align (pc, current_pc, cache);
2388 else
2389 pc = amd64_analyze_stack_align (pc, current_pc, cache);
2391 op = read_code_unsigned_integer (pc, 1, byte_order);
2393 /* Check for the `endbr64` instruction, skip it if found. */
2394 if (op == endbr64[0])
2396 read_code (pc + 1, buf, 3);
2398 if (memcmp (buf, &endbr64[1], 3) == 0)
2399 pc += 4;
2401 op = read_code_unsigned_integer (pc, 1, byte_order);
2404 if (current_pc <= pc)
2405 return current_pc;
2407 if (op == 0x55) /* pushq %rbp */
2409 /* Take into account that we've executed the `pushq %rbp' that
2410 starts this instruction sequence. */
2411 cache->saved_regs[AMD64_RBP_REGNUM] = 0;
2412 cache->sp_offset += 8;
2414 /* If that's all, return now. */
2415 if (current_pc <= pc + 1)
2416 return current_pc;
2418 read_code (pc + 1, buf, 3);
2420 /* Check for `movq %rsp, %rbp'. */
2421 if (memcmp (buf, mov_rsp_rbp_1, 3) == 0
2422 || memcmp (buf, mov_rsp_rbp_2, 3) == 0)
2424 /* OK, we actually have a frame. */
2425 cache->frameless_p = 0;
2426 return pc + 4;
2429 /* For X32, also check for `movl %esp, %ebp'. */
2430 if (gdbarch_ptr_bit (gdbarch) == 32)
2432 if (memcmp (buf, mov_esp_ebp_1, 2) == 0
2433 || memcmp (buf, mov_esp_ebp_2, 2) == 0)
2435 /* OK, we actually have a frame. */
2436 cache->frameless_p = 0;
2437 return pc + 3;
2441 return pc + 1;
2444 return pc;
2447 /* Work around false termination of prologue - GCC PR debug/48827.
2449 START_PC is the address of a function's first instruction; PC is the
2450 address to which prologue analysis has already advanced. The function returns PC if it has nothing to do.
2452 84 c0 test %al,%al
2453 74 23 je after
2454 <-- here is 0 lines advance - the false prologue end marker.
2455 0f 29 85 70 ff ff ff movaps %xmm0,-0x90(%rbp)
2456 0f 29 4d 80 movaps %xmm1,-0x80(%rbp)
2457 0f 29 55 90 movaps %xmm2,-0x70(%rbp)
2458 0f 29 5d a0 movaps %xmm3,-0x60(%rbp)
2459 0f 29 65 b0 movaps %xmm4,-0x50(%rbp)
2460 0f 29 6d c0 movaps %xmm5,-0x40(%rbp)
2461 0f 29 75 d0 movaps %xmm6,-0x30(%rbp)
2462 0f 29 7d e0 movaps %xmm7,-0x20(%rbp)
2463 after: */
2465 static CORE_ADDR
2466 amd64_skip_xmm_prologue (CORE_ADDR pc, CORE_ADDR start_pc)
2468 struct symtab_and_line start_pc_sal, next_sal;
2469 gdb_byte buf[4 + 8 * 7];
2470 int offset, xmmreg;
2472 if (pc == start_pc)
2473 return pc;
2475 start_pc_sal = find_pc_sect_line (start_pc, NULL, 0);
2476 if (start_pc_sal.symtab == NULL
2477 || producer_is_gcc_ge_4 (start_pc_sal.symtab->compunit ()
2478 ->producer ()) < 6
2479 || start_pc_sal.pc != start_pc || pc >= start_pc_sal.end)
2480 return pc;
2482 next_sal = find_pc_sect_line (start_pc_sal.end, NULL, 0);
2483 if (next_sal.line != start_pc_sal.line)
2484 return pc;
2486 /* START_PC can be from overlaid memory, ignored here. */
2487 if (target_read_code (next_sal.pc - 4, buf, sizeof (buf)) != 0)
2488 return pc;
2490 /* test %al,%al */
2491 if (buf[0] != 0x84 || buf[1] != 0xc0)
2492 return pc;
2493 /* je AFTER */
2494 if (buf[2] != 0x74)
2495 return pc;
2497 offset = 4;
2498 for (xmmreg = 0; xmmreg < 8; xmmreg++)
2500 /* 0x0f 0x29 0b??000101 movaps %xmmreg?,-0x??(%rbp) */
2501 if (buf[offset] != 0x0f || buf[offset + 1] != 0x29
2502 || (buf[offset + 2] & 0x3f) != (xmmreg << 3 | 0x5))
2503 return pc;
2505 /* 0b01?????? */
2506 if ((buf[offset + 2] & 0xc0) == 0x40)
2508 /* 8-bit displacement. */
2509 offset += 4;
2511 /* 0b10?????? */
2512 else if ((buf[offset + 2] & 0xc0) == 0x80)
2514 /* 32-bit displacement. */
2515 offset += 7;
2517 else
2518 return pc;
2521 /* je AFTER */
2522 if (offset - 4 != buf[3])
2523 return pc;
2525 return next_sal.end;
2528 /* Return PC of first real instruction. */
2530 static CORE_ADDR
2531 amd64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR start_pc)
2533 struct amd64_frame_cache cache;
2534 CORE_ADDR pc;
2535 CORE_ADDR func_addr;
2537 if (find_pc_partial_function (start_pc, NULL, &func_addr, NULL))
2539 CORE_ADDR post_prologue_pc
2540 = skip_prologue_using_sal (gdbarch, func_addr);
2541 struct compunit_symtab *cust = find_pc_compunit_symtab (func_addr);
2543 /* LLVM backend (Clang/Flang) always emits a line note before the
2544 prologue and another one after. We trust Clang and newer Intel
2545 compilers to emit usable line notes. */
2546 if (post_prologue_pc
2547 && (cust != NULL
2548 && cust->producer () != nullptr
2549 && (producer_is_llvm (cust->producer ())
2550 || producer_is_icc_ge_19 (cust->producer ()))))
2551 return std::max (start_pc, post_prologue_pc);
2554 amd64_init_frame_cache (&cache);
2555 pc = amd64_analyze_prologue (gdbarch, start_pc, 0xffffffffffffffffLL,
2556 &cache);
2557 if (cache.frameless_p)
2558 return start_pc;
2560 return amd64_skip_xmm_prologue (pc, start_pc);
2564 /* Normal frames. */
2566 static void
2567 amd64_frame_cache_1 (frame_info_ptr this_frame,
2568 struct amd64_frame_cache *cache)
2570 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2571 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2572 gdb_byte buf[8];
2573 int i;
2575 cache->pc = get_frame_func (this_frame);
2576 if (cache->pc != 0)
2577 amd64_analyze_prologue (gdbarch, cache->pc, get_frame_pc (this_frame),
2578 cache);
2580 if (cache->frameless_p)
2582 /* We didn't find a valid frame. If we're at the start of a
2583 function, or somewhere halfway through its prologue, the
2584 function's frame probably hasn't been fully set up yet. Try to
2585 reconstruct the base address for the stack frame by looking
2586 at the stack pointer. For truly "frameless" functions this
2587 might work too. */
2589 if (cache->saved_sp_reg != -1)
2591 /* Stack pointer has been saved. */
2592 get_frame_register (this_frame, cache->saved_sp_reg, buf);
2593 cache->saved_sp = extract_unsigned_integer (buf, 8, byte_order);
2595 /* We're halfway through aligning the stack. */
2596 cache->base = ((cache->saved_sp - 8) & 0xfffffffffffffff0LL) - 8;
2597 cache->saved_regs[AMD64_RIP_REGNUM] = cache->saved_sp - 8;
2599 /* This will be added back below. */
2600 cache->saved_regs[AMD64_RIP_REGNUM] -= cache->base;
2602 else
2604 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2605 cache->base = extract_unsigned_integer (buf, 8, byte_order)
2606 + cache->sp_offset;
2609 else
2611 get_frame_register (this_frame, AMD64_RBP_REGNUM, buf);
2612 cache->base = extract_unsigned_integer (buf, 8, byte_order);
2615 /* Now that we have the base address for the stack frame we can
2616 calculate the value of %rsp in the calling frame. */
2617 cache->saved_sp = cache->base + 16;
2619 /* For normal frames, %rip is stored at 8(%rbp). If we don't have a
2620 frame we find it at the same offset from the reconstructed base
2621 address. If we're halfway through aligning the stack, %rip is
2622 handled differently (see above). */
2623 if (!cache->frameless_p || cache->saved_sp_reg == -1)
2624 cache->saved_regs[AMD64_RIP_REGNUM] = 8;
2626 /* Adjust all the saved registers such that they contain addresses
2627 instead of offsets. */
2628 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
2629 if (cache->saved_regs[i] != -1)
2630 cache->saved_regs[i] += cache->base;
2632 cache->base_p = 1;
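/* To illustrate the layout assumed above for a standard frame, with
   a hypothetical base of 0x7fffffffe000: the saved %rbp lives at the
   base itself (offset 0), the return %rip at base + 8, and the
   caller's %rsp is base + 16, just above the return address.  */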
2635 static struct amd64_frame_cache *
2636 amd64_frame_cache (frame_info_ptr this_frame, void **this_cache)
2638 struct amd64_frame_cache *cache;
2640 if (*this_cache)
2641 return (struct amd64_frame_cache *) *this_cache;
2643 cache = amd64_alloc_frame_cache ();
2644 *this_cache = cache;
2648 amd64_frame_cache_1 (this_frame, cache);
2650 catch (const gdb_exception_error &ex)
2652 if (ex.error != NOT_AVAILABLE_ERROR)
2653 throw;
2656 return cache;
2659 static enum unwind_stop_reason
2660 amd64_frame_unwind_stop_reason (frame_info_ptr this_frame,
2661 void **this_cache)
2663 struct amd64_frame_cache *cache =
2664 amd64_frame_cache (this_frame, this_cache);
2666 if (!cache->base_p)
2667 return UNWIND_UNAVAILABLE;
2669 /* This marks the outermost frame. */
2670 if (cache->base == 0)
2671 return UNWIND_OUTERMOST;
2673 return UNWIND_NO_REASON;
2676 static void
2677 amd64_frame_this_id (frame_info_ptr this_frame, void **this_cache,
2678 struct frame_id *this_id)
2680 struct amd64_frame_cache *cache =
2681 amd64_frame_cache (this_frame, this_cache);
2683 if (!cache->base_p)
2684 (*this_id) = frame_id_build_unavailable_stack (cache->pc);
2685 else if (cache->base == 0)
2687 /* This marks the outermost frame. */
2688 return;
2690 else
2691 (*this_id) = frame_id_build (cache->base + 16, cache->pc);
2694 static struct value *
2695 amd64_frame_prev_register (frame_info_ptr this_frame, void **this_cache,
2696 int regnum)
2698 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2699 struct amd64_frame_cache *cache =
2700 amd64_frame_cache (this_frame, this_cache);
2702 gdb_assert (regnum >= 0);
2704 if (regnum == gdbarch_sp_regnum (gdbarch) && cache->saved_sp)
2705 return frame_unwind_got_constant (this_frame, regnum, cache->saved_sp);
2707 if (regnum < AMD64_NUM_SAVED_REGS && cache->saved_regs[regnum] != -1)
2708 return frame_unwind_got_memory (this_frame, regnum,
2709 cache->saved_regs[regnum]);
2711 return frame_unwind_got_register (this_frame, regnum, regnum);
2714 static const struct frame_unwind amd64_frame_unwind =
2716 "amd64 prologue",
2717 NORMAL_FRAME,
2718 amd64_frame_unwind_stop_reason,
2719 amd64_frame_this_id,
2720 amd64_frame_prev_register,
2721 NULL,
2722 default_frame_sniffer
2725 /* Generate a bytecode expression to get the value of the saved PC. */
2727 static void
2728 amd64_gen_return_address (struct gdbarch *gdbarch,
2729 struct agent_expr *ax, struct axs_value *value,
2730 CORE_ADDR scope)
2732 /* The following sequence assumes the traditional use of the base
2733 register. */
2734 ax_reg (ax, AMD64_RBP_REGNUM);
2735 ax_const_l (ax, 8);
2736 ax_simple (ax, aop_add);
2737 value->type = register_type (gdbarch, AMD64_RIP_REGNUM);
2738 value->kind = axs_lvalue_memory;
2742 /* Signal trampolines. */
2744 /* FIXME: kettenis/20030419: Perhaps we can unify the 32-bit and
2745 64-bit variants. This would require using identical frame caches
2746 on both platforms. */
2748 static struct amd64_frame_cache *
2749 amd64_sigtramp_frame_cache (frame_info_ptr this_frame, void **this_cache)
2751 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2752 i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
2753 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2754 struct amd64_frame_cache *cache;
2755 CORE_ADDR addr;
2756 gdb_byte buf[8];
2757 int i;
2759 if (*this_cache)
2760 return (struct amd64_frame_cache *) *this_cache;
2762 cache = amd64_alloc_frame_cache ();
2766 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2767 cache->base = extract_unsigned_integer (buf, 8, byte_order) - 8;
2769 addr = tdep->sigcontext_addr (this_frame);
2770 gdb_assert (tdep->sc_reg_offset);
2771 gdb_assert (tdep->sc_num_regs <= AMD64_NUM_SAVED_REGS);
2772 for (i = 0; i < tdep->sc_num_regs; i++)
2773 if (tdep->sc_reg_offset[i] != -1)
2774 cache->saved_regs[i] = addr + tdep->sc_reg_offset[i];
2776 cache->base_p = 1;
2778 catch (const gdb_exception_error &ex)
2780 if (ex.error != NOT_AVAILABLE_ERROR)
2781 throw;
2784 *this_cache = cache;
2785 return cache;
2788 static enum unwind_stop_reason
2789 amd64_sigtramp_frame_unwind_stop_reason (frame_info_ptr this_frame,
2790 void **this_cache)
2792 struct amd64_frame_cache *cache =
2793 amd64_sigtramp_frame_cache (this_frame, this_cache);
2795 if (!cache->base_p)
2796 return UNWIND_UNAVAILABLE;
2798 return UNWIND_NO_REASON;
2801 static void
2802 amd64_sigtramp_frame_this_id (frame_info_ptr this_frame,
2803 void **this_cache, struct frame_id *this_id)
2805 struct amd64_frame_cache *cache =
2806 amd64_sigtramp_frame_cache (this_frame, this_cache);
2808 if (!cache->base_p)
2809 (*this_id) = frame_id_build_unavailable_stack (get_frame_pc (this_frame));
2810 else if (cache->base == 0)
2812 /* This marks the outermost frame. */
2813 return;
2815 else
2816 (*this_id) = frame_id_build (cache->base + 16, get_frame_pc (this_frame));
2819 static struct value *
2820 amd64_sigtramp_frame_prev_register (frame_info_ptr this_frame,
2821 void **this_cache, int regnum)
2823 /* Make sure we've initialized the cache. */
2824 amd64_sigtramp_frame_cache (this_frame, this_cache);
2826 return amd64_frame_prev_register (this_frame, this_cache, regnum);
2829 static int
2830 amd64_sigtramp_frame_sniffer (const struct frame_unwind *self,
2831 frame_info_ptr this_frame,
2832 void **this_cache)
2834 gdbarch *arch = get_frame_arch (this_frame);
2835 i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (arch);
2837 /* We shouldn't even bother if we don't have a sigcontext_addr
2838 handler. */
2839 if (tdep->sigcontext_addr == NULL)
2840 return 0;
2842 if (tdep->sigtramp_p != NULL)
2844 if (tdep->sigtramp_p (this_frame))
2845 return 1;
2848 if (tdep->sigtramp_start != 0)
2850 CORE_ADDR pc = get_frame_pc (this_frame);
2852 gdb_assert (tdep->sigtramp_end != 0);
2853 if (pc >= tdep->sigtramp_start && pc < tdep->sigtramp_end)
2854 return 1;
2857 return 0;
2860 static const struct frame_unwind amd64_sigtramp_frame_unwind =
2862 "amd64 sigtramp",
2863 SIGTRAMP_FRAME,
2864 amd64_sigtramp_frame_unwind_stop_reason,
2865 amd64_sigtramp_frame_this_id,
2866 amd64_sigtramp_frame_prev_register,
2867 NULL,
2868 amd64_sigtramp_frame_sniffer
2872 static CORE_ADDR
2873 amd64_frame_base_address (frame_info_ptr this_frame, void **this_cache)
2875 struct amd64_frame_cache *cache =
2876 amd64_frame_cache (this_frame, this_cache);
2878 return cache->base;
2881 static const struct frame_base amd64_frame_base =
2883 &amd64_frame_unwind,
2884 amd64_frame_base_address,
2885 amd64_frame_base_address,
2886 amd64_frame_base_address
2889 /* Normal frames, but in a function epilogue. */
2891 /* Implement the stack_frame_destroyed_p gdbarch method.
2893 The epilogue is defined here as the 'ret' instruction, which will
2894 follow any instruction such as 'leave' or 'pop %rbp' that destroys
2895 the function's stack frame. */
2897 static int
2898 amd64_stack_frame_destroyed_p (struct gdbarch *gdbarch, CORE_ADDR pc)
2900 gdb_byte insn;
2902 if (target_read_memory (pc, &insn, 1))
2903 return 0; /* Can't read memory at pc. */
2905 if (insn != 0xc3) /* 'ret' instruction. */
2906 return 0;
2908 return 1;
2911 static int
2912 amd64_epilogue_frame_sniffer_1 (const struct frame_unwind *self,
2913 frame_info_ptr this_frame,
2914 void **this_prologue_cache, bool override_p)
2916 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2917 CORE_ADDR pc = get_frame_pc (this_frame);
2919 if (frame_relative_level (this_frame) != 0)
2920 /* We're not in the innermost frame, so assume we're not in an epilogue. */
2921 return 0;
2923 bool unwind_valid_p
2924 = compunit_epilogue_unwind_valid (find_pc_compunit_symtab (pc));
2925 if (override_p)
2927 if (unwind_valid_p)
2928 /* Don't override the symtab unwinders, skip
2929 "amd64 epilogue override". */
2930 return 0;
2932 else
2934 if (!unwind_valid_p)
2935 /* "amd64 epilogue override" unwinder already ran, skip
2936 "amd64 epilogue". */
2937 return 0;
2940 /* Check whether we're in an epilogue. */
2941 return amd64_stack_frame_destroyed_p (gdbarch, pc);
2944 static int
2945 amd64_epilogue_override_frame_sniffer (const struct frame_unwind *self,
2946 frame_info_ptr this_frame,
2947 void **this_prologue_cache)
2949 return amd64_epilogue_frame_sniffer_1 (self, this_frame, this_prologue_cache,
2950 true);
2953 static int
2954 amd64_epilogue_frame_sniffer (const struct frame_unwind *self,
2955 frame_info_ptr this_frame,
2956 void **this_prologue_cache)
2958 return amd64_epilogue_frame_sniffer_1 (self, this_frame, this_prologue_cache,
2959 false);
2962 static struct amd64_frame_cache *
2963 amd64_epilogue_frame_cache (frame_info_ptr this_frame, void **this_cache)
2965 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2966 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2967 struct amd64_frame_cache *cache;
2968 gdb_byte buf[8];
2970 if (*this_cache)
2971 return (struct amd64_frame_cache *) *this_cache;
2973 cache = amd64_alloc_frame_cache ();
2974 *this_cache = cache;
2978 /* Cache base will be %rsp plus cache->sp_offset (-8). */
2979 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2980 cache->base = extract_unsigned_integer (buf, 8,
2981 byte_order) + cache->sp_offset;
2983 /* Cache pc will be the frame func. */
2984 cache->pc = get_frame_func (this_frame);
2986 /* The previous value of %rsp is cache->base plus 16. */
2987 cache->saved_sp = cache->base + 16;
2989 /* The saved %rip will be at cache->base plus 8. */
2990 cache->saved_regs[AMD64_RIP_REGNUM] = cache->base + 8;
2992 cache->base_p = 1;
2994 catch (const gdb_exception_error &ex)
2996 if (ex.error != NOT_AVAILABLE_ERROR)
2997 throw;
3000 return cache;
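/* A sketch of the arithmetic above: at a "ret", %rsp points at the
   return address, so with sp_offset == -8 the base is %rsp - 8, the
   saved %rip sits at base + 8 == %rsp, and the caller's %rsp is
   base + 16 == %rsp + 8, consistent with the normal-frame layout.  */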
3003 static enum unwind_stop_reason
3004 amd64_epilogue_frame_unwind_stop_reason (frame_info_ptr this_frame,
3005 void **this_cache)
3007 struct amd64_frame_cache *cache
3008 = amd64_epilogue_frame_cache (this_frame, this_cache);
3010 if (!cache->base_p)
3011 return UNWIND_UNAVAILABLE;
3013 return UNWIND_NO_REASON;
3016 static void
3017 amd64_epilogue_frame_this_id (frame_info_ptr this_frame,
3018 void **this_cache,
3019 struct frame_id *this_id)
3021 struct amd64_frame_cache *cache = amd64_epilogue_frame_cache (this_frame,
3022 this_cache);
3024 if (!cache->base_p)
3025 (*this_id) = frame_id_build_unavailable_stack (cache->pc);
3026 else
3027 (*this_id) = frame_id_build (cache->base + 16, cache->pc);
3030 static const struct frame_unwind amd64_epilogue_override_frame_unwind =
3032 "amd64 epilogue override",
3033 NORMAL_FRAME,
3034 amd64_epilogue_frame_unwind_stop_reason,
3035 amd64_epilogue_frame_this_id,
3036 amd64_frame_prev_register,
3037 NULL,
3038 amd64_epilogue_override_frame_sniffer
3041 static const struct frame_unwind amd64_epilogue_frame_unwind =
3043 "amd64 epilogue",
3044 NORMAL_FRAME,
3045 amd64_epilogue_frame_unwind_stop_reason,
3046 amd64_epilogue_frame_this_id,
3047 amd64_frame_prev_register,
3048 NULL,
3049 amd64_epilogue_frame_sniffer
3052 static struct frame_id
3053 amd64_dummy_id (struct gdbarch *gdbarch, frame_info_ptr this_frame)
3055 CORE_ADDR fp;
3057 fp = get_frame_register_unsigned (this_frame, AMD64_RBP_REGNUM);
3059 return frame_id_build (fp + 16, get_frame_pc (this_frame));
3062 /* 16-byte align the SP per frame requirements. */
3064 static CORE_ADDR
3065 amd64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
3067 return sp & -(CORE_ADDR)16;
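/* E.g. with SP == 0x7fffffffe358 this yields 0x7fffffffe350, since
   -(CORE_ADDR) 16 is the all-ones mask with the low four bits
   clear.  */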
3071 /* Supply register REGNUM from the buffer specified by FPREGS and LEN
3072 in the floating-point register set REGSET to register cache
3073 REGCACHE. If REGNUM is -1, do this for all registers in REGSET. */
3075 static void
3076 amd64_supply_fpregset (const struct regset *regset, struct regcache *regcache,
3077 int regnum, const void *fpregs, size_t len)
3079 struct gdbarch *gdbarch = regcache->arch ();
3080 const i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
3082 gdb_assert (len >= tdep->sizeof_fpregset);
3083 amd64_supply_fxsave (regcache, regnum, fpregs);
3086 /* Collect register REGNUM from the register cache REGCACHE and store
3087 it in the buffer specified by FPREGS and LEN as described by the
3088 floating-point register set REGSET. If REGNUM is -1, do this for
3089 all registers in REGSET. */
3091 static void
3092 amd64_collect_fpregset (const struct regset *regset,
3093 const struct regcache *regcache,
3094 int regnum, void *fpregs, size_t len)
3096 struct gdbarch *gdbarch = regcache->arch ();
3097 const i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
3099 gdb_assert (len >= tdep->sizeof_fpregset);
3100 amd64_collect_fxsave (regcache, regnum, fpregs);
3103 const struct regset amd64_fpregset =
3105 NULL, amd64_supply_fpregset, amd64_collect_fpregset
3109 /* Figure out where the longjmp will land. Slurp the jmp_buf out of
3110 %rdi. We expect its value to be a pointer to the jmp_buf structure
3111 from which we extract the address that we will land at. This
3112 address is copied into PC. This routine returns non-zero on
3113 success. */
3115 static int
3116 amd64_get_longjmp_target (frame_info_ptr frame, CORE_ADDR *pc)
3118 gdb_byte buf[8];
3119 CORE_ADDR jb_addr;
3120 struct gdbarch *gdbarch = get_frame_arch (frame);
3121 i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
3122 int jb_pc_offset = tdep->jb_pc_offset;
3123 int len = builtin_type (gdbarch)->builtin_func_ptr->length ();
3125 /* If JB_PC_OFFSET is -1, we have no way to find out where the
3126 longjmp will land. */
3127 if (jb_pc_offset == -1)
3128 return 0;
3130 get_frame_register (frame, AMD64_RDI_REGNUM, buf);
3131 jb_addr = extract_typed_address
3132 (buf, builtin_type (gdbarch)->builtin_data_ptr);
3133 if (target_read_memory (jb_addr + jb_pc_offset, buf, len))
3134 return 0;
3136 *pc = extract_typed_address (buf, builtin_type (gdbarch)->builtin_func_ptr);
3138 return 1;
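/* For illustration (the offset is hypothetical; JB_PC_OFFSET is set
   per OS ABI): with jb_pc_offset == 56, a jmp_buf at 0x602000 would
   have its saved %rip read from 0x602038, and that value becomes
   *PC.  */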
3141 static const int amd64_record_regmap[] =
3143 AMD64_RAX_REGNUM, AMD64_RCX_REGNUM, AMD64_RDX_REGNUM, AMD64_RBX_REGNUM,
3144 AMD64_RSP_REGNUM, AMD64_RBP_REGNUM, AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
3145 AMD64_R8_REGNUM, AMD64_R9_REGNUM, AMD64_R10_REGNUM, AMD64_R11_REGNUM,
3146 AMD64_R12_REGNUM, AMD64_R13_REGNUM, AMD64_R14_REGNUM, AMD64_R15_REGNUM,
3147 AMD64_RIP_REGNUM, AMD64_EFLAGS_REGNUM, AMD64_CS_REGNUM, AMD64_SS_REGNUM,
3148 AMD64_DS_REGNUM, AMD64_ES_REGNUM, AMD64_FS_REGNUM, AMD64_GS_REGNUM
3151 /* Implement the "in_indirect_branch_thunk" gdbarch function. */
3153 static bool
3154 amd64_in_indirect_branch_thunk (struct gdbarch *gdbarch, CORE_ADDR pc)
3156 return x86_in_indirect_branch_thunk (pc, amd64_register_names,
3157 AMD64_RAX_REGNUM,
3158 AMD64_RIP_REGNUM);
3161 void
3162 amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch,
3163 const target_desc *default_tdesc)
3165 i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
3166 const struct target_desc *tdesc = info.target_desc;
3167 static const char *const stap_integer_prefixes[] = { "$", NULL };
3168 static const char *const stap_register_prefixes[] = { "%", NULL };
3169 static const char *const stap_register_indirection_prefixes[] = { "(",
3170 NULL };
3171 static const char *const stap_register_indirection_suffixes[] = { ")",
3172 NULL };
3174 /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
3175 floating-point registers. */
3176 tdep->sizeof_fpregset = I387_SIZEOF_FXSAVE;
3177 tdep->fpregset = &amd64_fpregset;
3179 if (! tdesc_has_registers (tdesc))
3180 tdesc = default_tdesc;
3181 tdep->tdesc = tdesc;
3183 tdep->num_core_regs = AMD64_NUM_GREGS + I387_NUM_REGS;
3184 tdep->register_names = amd64_register_names;
3186 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx512") != NULL)
3188 tdep->zmmh_register_names = amd64_zmmh_names;
3189 tdep->k_register_names = amd64_k_names;
3190 tdep->xmm_avx512_register_names = amd64_xmm_avx512_names;
3191 tdep->ymm16h_register_names = amd64_ymmh_avx512_names;
3193 tdep->num_zmm_regs = 32;
3194 tdep->num_xmm_avx512_regs = 16;
3195 tdep->num_ymm_avx512_regs = 16;
3197 tdep->zmm0h_regnum = AMD64_ZMM0H_REGNUM;
3198 tdep->k0_regnum = AMD64_K0_REGNUM;
3199 tdep->xmm16_regnum = AMD64_XMM16_REGNUM;
3200 tdep->ymm16h_regnum = AMD64_YMM16H_REGNUM;
3203 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx") != NULL)
3205 tdep->ymmh_register_names = amd64_ymmh_names;
3206 tdep->num_ymm_regs = 16;
3207 tdep->ymm0h_regnum = AMD64_YMM0H_REGNUM;
3210 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.mpx") != NULL)
3212 tdep->mpx_register_names = amd64_mpx_names;
3213 tdep->bndcfgu_regnum = AMD64_BNDCFGU_REGNUM;
3214 tdep->bnd0r_regnum = AMD64_BND0R_REGNUM;
3217 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.segments") != NULL)
3219 tdep->fsbase_regnum = AMD64_FSBASE_REGNUM;
3222 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.pkeys") != NULL)
3224 tdep->pkeys_register_names = amd64_pkeys_names;
3225 tdep->pkru_regnum = AMD64_PKRU_REGNUM;
3226 tdep->num_pkeys_regs = 1;
3229 tdep->num_byte_regs = 20;
3230 tdep->num_word_regs = 16;
3231 tdep->num_dword_regs = 16;
3232 /* Avoid wiring in the MMX registers for now. */
3233 tdep->num_mmx_regs = 0;
3235 set_gdbarch_pseudo_register_read_value (gdbarch,
3236 amd64_pseudo_register_read_value);
3237 set_gdbarch_pseudo_register_write (gdbarch,
3238 amd64_pseudo_register_write);
3239 set_gdbarch_ax_pseudo_register_collect (gdbarch,
3240 amd64_ax_pseudo_register_collect);
3242 set_tdesc_pseudo_register_name (gdbarch, amd64_pseudo_register_name);
3244 /* AMD64 has an FPU and 16 SSE registers. */
3245 tdep->st0_regnum = AMD64_ST0_REGNUM;
3246 tdep->num_xmm_regs = 16;
3248 /* This is what all the fuss is about. */
3249 set_gdbarch_long_bit (gdbarch, 64);
3250 set_gdbarch_long_long_bit (gdbarch, 64);
3251 set_gdbarch_ptr_bit (gdbarch, 64);
3253 /* In contrast to the i386, on AMD64 a `long double' actually takes
3254 up 128 bits, even though it's still based on the i387 extended
3255 floating-point format which has only 80 significant bits. */
3256 set_gdbarch_long_double_bit (gdbarch, 128);
3258 set_gdbarch_num_regs (gdbarch, AMD64_NUM_REGS);
3260 /* Register numbers of various important registers. */
3261 set_gdbarch_sp_regnum (gdbarch, AMD64_RSP_REGNUM); /* %rsp */
3262 set_gdbarch_pc_regnum (gdbarch, AMD64_RIP_REGNUM); /* %rip */
3263 set_gdbarch_ps_regnum (gdbarch, AMD64_EFLAGS_REGNUM); /* %eflags */
3264 set_gdbarch_fp0_regnum (gdbarch, AMD64_ST0_REGNUM); /* %st(0) */
3266 /* The "default" register numbering scheme for AMD64 is referred to
3267 as the "DWARF Register Number Mapping" in the System V psABI.
3268 The preferred debugging format for all known AMD64 targets is
3269 actually DWARF2, and GCC doesn't seem to support DWARF (that is
3270 DWARF-1), but we provide the same mapping just in case. This
3271 mapping is also used for stabs, which GCC does support. */
3272 set_gdbarch_stab_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
3273 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
3275 /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
3276 be in use on any of the supported AMD64 targets. */
3278 /* Call dummy code. */
3279 set_gdbarch_push_dummy_call (gdbarch, amd64_push_dummy_call);
3280 set_gdbarch_frame_align (gdbarch, amd64_frame_align);
3281 set_gdbarch_frame_red_zone_size (gdbarch, 128);
3283 set_gdbarch_convert_register_p (gdbarch, i387_convert_register_p);
3284 set_gdbarch_register_to_value (gdbarch, i387_register_to_value);
3285 set_gdbarch_value_to_register (gdbarch, i387_value_to_register);
3287 set_gdbarch_return_value_as_value (gdbarch, amd64_return_value);
3289 set_gdbarch_skip_prologue (gdbarch, amd64_skip_prologue);
3291 tdep->record_regmap = amd64_record_regmap;
3293 set_gdbarch_dummy_id (gdbarch, amd64_dummy_id);
3295 /* Hook the function epilogue frame unwinder. This unwinder is
3296 prepended to the list, so that it supersedes the other
3297 unwinders in function epilogues. */
3298 frame_unwind_prepend_unwinder (gdbarch, &amd64_epilogue_override_frame_unwind);
3300 frame_unwind_append_unwinder (gdbarch, &amd64_epilogue_frame_unwind);
3302 /* Hook the prologue-based frame unwinders. */
3303 frame_unwind_append_unwinder (gdbarch, &amd64_sigtramp_frame_unwind);
3304 frame_unwind_append_unwinder (gdbarch, &amd64_frame_unwind);
3305 frame_base_set_default (gdbarch, &amd64_frame_base);
3307 set_gdbarch_get_longjmp_target (gdbarch, amd64_get_longjmp_target);
3309 set_gdbarch_relocate_instruction (gdbarch, amd64_relocate_instruction);
3311 set_gdbarch_gen_return_address (gdbarch, amd64_gen_return_address);
3313 /* SystemTap variables and functions. */
3314 set_gdbarch_stap_integer_prefixes (gdbarch, stap_integer_prefixes);
3315 set_gdbarch_stap_register_prefixes (gdbarch, stap_register_prefixes);
3316 set_gdbarch_stap_register_indirection_prefixes (gdbarch,
3317 stap_register_indirection_prefixes);
3318 set_gdbarch_stap_register_indirection_suffixes (gdbarch,
3319 stap_register_indirection_suffixes);
3320 set_gdbarch_stap_is_single_operand (gdbarch,
3321 i386_stap_is_single_operand);
3322 set_gdbarch_stap_parse_special_token (gdbarch,
3323 i386_stap_parse_special_token);
3324 set_gdbarch_insn_is_call (gdbarch, amd64_insn_is_call);
3325 set_gdbarch_insn_is_ret (gdbarch, amd64_insn_is_ret);
3326 set_gdbarch_insn_is_jump (gdbarch, amd64_insn_is_jump);
3328 set_gdbarch_in_indirect_branch_thunk (gdbarch,
3329 amd64_in_indirect_branch_thunk);
3331 register_amd64_ravenscar_ops (gdbarch);
3334 /* Initialize ARCH for x86-64, no osabi. */
3336 static void
3337 amd64_none_init_abi (gdbarch_info info, gdbarch *arch)
3339 amd64_init_abi (info, arch, amd64_target_description (X86_XSTATE_SSE_MASK,
3340 true));
3343 static struct type *
3344 amd64_x32_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
3346 i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
3348 switch (regnum - tdep->eax_regnum)
3350 case AMD64_RBP_REGNUM: /* %ebp */
3351 case AMD64_RSP_REGNUM: /* %esp */
3352 return builtin_type (gdbarch)->builtin_data_ptr;
3353 case AMD64_RIP_REGNUM: /* %eip */
3354 return builtin_type (gdbarch)->builtin_func_ptr;
3357 return i386_pseudo_register_type (gdbarch, regnum);
3360 void
3361 amd64_x32_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch,
3362 const target_desc *default_tdesc)
3364 i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
3366 amd64_init_abi (info, gdbarch, default_tdesc);
3368 tdep->num_dword_regs = 17;
3369 set_tdesc_pseudo_register_type (gdbarch, amd64_x32_pseudo_register_type);
3371 set_gdbarch_long_bit (gdbarch, 32);
3372 set_gdbarch_ptr_bit (gdbarch, 32);
3375 /* Initialize ARCH for x64-32, no osabi. */
3377 static void
3378 amd64_x32_none_init_abi (gdbarch_info info, gdbarch *arch)
3380 amd64_x32_init_abi (info, arch,
3381 amd64_target_description (X86_XSTATE_SSE_MASK, true));
3384 /* Return the target description for a specified XSAVE feature mask. */
3386 const struct target_desc *
3387 amd64_target_description (uint64_t xcr0, bool segments)
3389 static target_desc *amd64_tdescs
3390 [2/*AVX*/][2/*MPX*/][2/*AVX512*/][2/*PKRU*/][2/*segments*/] = {};
3391 target_desc **tdesc;
3393 tdesc = &amd64_tdescs[(xcr0 & X86_XSTATE_AVX) ? 1 : 0]
3394 [(xcr0 & X86_XSTATE_MPX) ? 1 : 0]
3395 [(xcr0 & X86_XSTATE_AVX512) ? 1 : 0]
3396 [(xcr0 & X86_XSTATE_PKRU) ? 1 : 0]
3397 [segments ? 1 : 0];
3399 if (*tdesc == NULL)
3400 *tdesc = amd64_create_target_description (xcr0, false, false,
3401 segments);
3403 return *tdesc;
3406 void _initialize_amd64_tdep ();
3407 void
3408 _initialize_amd64_tdep ()
3410 gdbarch_register_osabi (bfd_arch_i386, bfd_mach_x86_64, GDB_OSABI_NONE,
3411 amd64_none_init_abi);
3412 gdbarch_register_osabi (bfd_arch_i386, bfd_mach_x64_32, GDB_OSABI_NONE,
3413 amd64_x32_none_init_abi);
3417 /* The 64-bit FXSAVE format differs from the 32-bit format in the
3418 sense that the instruction pointer and data pointer are simply
3419 64-bit offsets into the code segment and the data segment instead
3420 of a selector offset pair. The functions below store the upper 32
3421 bits of these pointers (instead of just the 16-bits of the segment
3422 selector). */
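/* In the 64-bit FXSAVE/XSAVE layout the FPU instruction pointer is a
   64-bit field at bytes 8-15 and the operand pointer at bytes 16-23,
   which is why the helpers below shuffle the upper halves at offsets
   12 and 20 through the FISEG and FOSEG slots of the register
   cache.  */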
3424 /* Fill register REGNUM in REGCACHE with the appropriate
3425 floating-point or SSE register value from *FXSAVE. If REGNUM is
3426 -1, do this for all registers. This function masks off any of the
3427 reserved bits in *FXSAVE. */
3429 void
3430 amd64_supply_fxsave (struct regcache *regcache, int regnum,
3431 const void *fxsave)
3433 struct gdbarch *gdbarch = regcache->arch ();
3434 i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
3436 i387_supply_fxsave (regcache, regnum, fxsave);
3438 if (fxsave
3439 && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3441 const gdb_byte *regs = (const gdb_byte *) fxsave;
3443 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3444 regcache->raw_supply (I387_FISEG_REGNUM (tdep), regs + 12);
3445 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3446 regcache->raw_supply (I387_FOSEG_REGNUM (tdep), regs + 20);
3450 /* Similar to amd64_supply_fxsave, but use XSAVE extended state. */
3452 void
3453 amd64_supply_xsave (struct regcache *regcache, int regnum,
3454 const void *xsave)
3456 struct gdbarch *gdbarch = regcache->arch ();
3457 i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
3459 i387_supply_xsave (regcache, regnum, xsave);
3461 if (xsave
3462 && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3464 const gdb_byte *regs = (const gdb_byte *) xsave;
3465 ULONGEST clear_bv;
3467 clear_bv = i387_xsave_get_clear_bv (gdbarch, xsave);
3469 /* If the FISEG and FOSEG registers have not been initialised yet
3470 (their CLEAR_BV bit is set) then their default values of zero will
3471 have already been set up by I387_SUPPLY_XSAVE. */
3472 if (!(clear_bv & X86_XSTATE_X87))
3474 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3475 regcache->raw_supply (I387_FISEG_REGNUM (tdep), regs + 12);
3476 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3477 regcache->raw_supply (I387_FOSEG_REGNUM (tdep), regs + 20);
3482 /* Fill register REGNUM (if it is a floating-point or SSE register) in
3483 *FXSAVE with the value from REGCACHE. If REGNUM is -1, do this for
3484 all registers. This function doesn't touch any of the reserved
3485 bits in *FXSAVE. */
3487 void
3488 amd64_collect_fxsave (const struct regcache *regcache, int regnum,
3489 void *fxsave)
3491 struct gdbarch *gdbarch = regcache->arch ();
3492 i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
3493 gdb_byte *regs = (gdb_byte *) fxsave;
3495 i387_collect_fxsave (regcache, regnum, fxsave);
3497 if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3499 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3500 regcache->raw_collect (I387_FISEG_REGNUM (tdep), regs + 12);
3501 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3502 regcache->raw_collect (I387_FOSEG_REGNUM (tdep), regs + 20);
3506 /* Similar to amd64_collect_fxsave, but use XSAVE extended state. */
3508 void
3509 amd64_collect_xsave (const struct regcache *regcache, int regnum,
3510 void *xsave, int gcore)
3512 struct gdbarch *gdbarch = regcache->arch ();
3513 i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
3514 gdb_byte *regs = (gdb_byte *) xsave;
3516 i387_collect_xsave (regcache, regnum, xsave, gcore);
3518 if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3520 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3521 regcache->raw_collect (I387_FISEG_REGNUM (tdep),
3522 regs + 12);
3523 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3524 regcache->raw_collect (I387_FOSEG_REGNUM (tdep),
3525 regs + 20);