1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright (C) 1989-2014 Free Software Foundation, Inc.
4 This file is part of GAS, the GNU Assembler.
6 GAS is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
11 GAS is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GAS; see the file COPYING. If not, write to the Free
18 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
21 /* Intel 80386 machine specific gas.
22 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
23 x86_64 support by Jan Hubicka (jh@suse.cz)
24 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
25 Bugs & suggestions are completely welcome. This is free software.
26 Please help us make it better. */
29 #include "safe-ctype.h"
31 #include "dwarf2dbg.h"
32 #include "dw2gencfi.h"
33 #include "elf/x86-64.h"
34 #include "opcodes/i386-init.h"
36 #ifndef REGISTER_WARNINGS
37 #define REGISTER_WARNINGS 1
40 #ifndef INFER_ADDR_PREFIX
41 #define INFER_ADDR_PREFIX 1
45 #define DEFAULT_ARCH "i386"
50 #define INLINE __inline__
56 /* Prefixes will be emitted in the order defined below.
57 WAIT_PREFIX must be the first prefix since FWAIT is really is an
58 instruction, and so must come before any prefixes.
59 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
60 REP_PREFIX/HLE_PREFIX, LOCK_PREFIX. */
66 #define HLE_PREFIX REP_PREFIX
67 #define BND_PREFIX REP_PREFIX
69 #define REX_PREFIX 6 /* must come last. */
70 #define MAX_PREFIXES 7 /* max prefixes per opcode */
72 /* we define the syntax here (modulo base,index,scale syntax) */
73 #define REGISTER_PREFIX '%'
74 #define IMMEDIATE_PREFIX '$'
75 #define ABSOLUTE_PREFIX '*'
77 /* these are the instruction mnemonic suffixes in AT&T syntax or
78 memory operand size in Intel syntax. */
79 #define WORD_MNEM_SUFFIX 'w'
80 #define BYTE_MNEM_SUFFIX 'b'
81 #define SHORT_MNEM_SUFFIX 's'
82 #define LONG_MNEM_SUFFIX 'l'
83 #define QWORD_MNEM_SUFFIX 'q'
84 #define XMMWORD_MNEM_SUFFIX 'x'
85 #define YMMWORD_MNEM_SUFFIX 'y'
86 #define ZMMWORD_MNEM_SUFFIX 'z'
87 /* Intel Syntax. Use a non-ascii letter since since it never appears
89 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
91 #define END_OF_INSN '\0'
94 'templates' is for grouping together 'template' structures for opcodes
95 of the same name. This is only used for storing the insns in the grand
96 ole hash table of insns.
97 The templates themselves start at START and range up to (but not including)
102 const insn_template
*start
;
103 const insn_template
*end
;
107 /* 386 operand encoding bytes: see 386 book for details of this. */
110 unsigned int regmem
; /* codes register or memory operand */
111 unsigned int reg
; /* codes register operand (or extended opcode) */
112 unsigned int mode
; /* how to interpret regmem & reg */
116 /* x86-64 extension prefix. */
117 typedef int rex_byte
;
119 /* 386 opcode byte to code indirect addressing. */
128 /* x86 arch names, types and features */
131 const char *name
; /* arch name */
132 unsigned int len
; /* arch string length */
133 enum processor_type type
; /* arch type */
134 i386_cpu_flags flags
; /* cpu feature flags */
135 unsigned int skip
; /* show_arch should skip this. */
136 unsigned int negated
; /* turn off indicated flags. */
140 static void update_code_flag (int, int);
141 static void set_code_flag (int);
142 static void set_16bit_gcc_code_flag (int);
143 static void set_intel_syntax (int);
144 static void set_intel_mnemonic (int);
145 static void set_allow_index_reg (int);
146 static void set_check (int);
147 static void set_cpu_arch (int);
149 static void pe_directive_secrel (int);
151 static void signed_cons (int);
152 static char *output_invalid (int c
);
153 static int i386_finalize_immediate (segT
, expressionS
*, i386_operand_type
,
155 static int i386_finalize_displacement (segT
, expressionS
*, i386_operand_type
,
157 static int i386_att_operand (char *);
158 static int i386_intel_operand (char *, int);
159 static int i386_intel_simplify (expressionS
*);
160 static int i386_intel_parse_name (const char *, expressionS
*);
161 static const reg_entry
*parse_register (char *, char **);
162 static char *parse_insn (char *, char *);
163 static char *parse_operands (char *, const char *);
164 static void swap_operands (void);
165 static void swap_2_operands (int, int);
166 static void optimize_imm (void);
167 static void optimize_disp (void);
168 static const insn_template
*match_template (void);
169 static int check_string (void);
170 static int process_suffix (void);
171 static int check_byte_reg (void);
172 static int check_long_reg (void);
173 static int check_qword_reg (void);
174 static int check_word_reg (void);
175 static int finalize_imm (void);
176 static int process_operands (void);
177 static const seg_entry
*build_modrm_byte (void);
178 static void output_insn (void);
179 static void output_imm (fragS
*, offsetT
);
180 static void output_disp (fragS
*, offsetT
);
182 static void s_bss (int);
184 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
185 static void handle_large_common (int small ATTRIBUTE_UNUSED
);
188 static const char *default_arch
= DEFAULT_ARCH
;
190 /* This struct describes rounding control and SAE in the instruction. */
204 static struct RC_Operation rc_op
;
206 /* The struct describes masking, applied to OPERAND in the instruction.
207 MASK is a pointer to the corresponding mask register. ZEROING tells
208 whether merging or zeroing mask is used. */
209 struct Mask_Operation
211 const reg_entry
*mask
;
212 unsigned int zeroing
;
213 /* The operand where this operation is associated. */
217 static struct Mask_Operation mask_op
;
219 /* The struct describes broadcasting, applied to OPERAND. FACTOR is
221 struct Broadcast_Operation
223 /* Type of broadcast: no broadcast, {1to8}, or {1to16}. */
226 /* Index of broadcasted operand. */
230 static struct Broadcast_Operation broadcast_op
;
235 /* VEX prefix is either 2 byte or 3 byte. EVEX is 4 byte. */
236 unsigned char bytes
[4];
238 /* Destination or source register specifier. */
239 const reg_entry
*register_specifier
;
242 /* 'md_assemble ()' gathers together information and puts it into a
249 const reg_entry
*regs
;
254 operand_size_mismatch
,
255 operand_type_mismatch
,
256 register_type_mismatch
,
257 number_of_operands_mismatch
,
258 invalid_instruction_suffix
,
261 unsupported_with_intel_mnemonic
,
264 invalid_vsib_address
,
265 invalid_vector_register_set
,
266 unsupported_vector_index_register
,
267 unsupported_broadcast
,
268 broadcast_not_on_src_operand
,
271 mask_not_on_destination
,
274 rc_sae_operand_not_last_imm
,
275 invalid_register_operand
,
281 /* TM holds the template for the insn were currently assembling. */
284 /* SUFFIX holds the instruction size suffix for byte, word, dword
285 or qword, if given. */
288 /* OPERANDS gives the number of given operands. */
289 unsigned int operands
;
291 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
292 of given register, displacement, memory operands and immediate
294 unsigned int reg_operands
, disp_operands
, mem_operands
, imm_operands
;
296 /* TYPES [i] is the type (see above #defines) which tells us how to
297 use OP[i] for the corresponding operand. */
298 i386_operand_type types
[MAX_OPERANDS
];
300 /* Displacement expression, immediate expression, or register for each
302 union i386_op op
[MAX_OPERANDS
];
304 /* Flags for operands. */
305 unsigned int flags
[MAX_OPERANDS
];
306 #define Operand_PCrel 1
308 /* Relocation type for operand */
309 enum bfd_reloc_code_real reloc
[MAX_OPERANDS
];
311 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
312 the base index byte below. */
313 const reg_entry
*base_reg
;
314 const reg_entry
*index_reg
;
315 unsigned int log2_scale_factor
;
317 /* SEG gives the seg_entries of this insn. They are zero unless
318 explicit segment overrides are given. */
319 const seg_entry
*seg
[2];
321 /* PREFIX holds all the given prefix opcodes (usually null).
322 PREFIXES is the number of prefix opcodes. */
323 unsigned int prefixes
;
324 unsigned char prefix
[MAX_PREFIXES
];
326 /* RM and SIB are the modrm byte and the sib byte where the
327 addressing modes of this insn are encoded. */
334 /* Masking attributes. */
335 struct Mask_Operation
*mask
;
337 /* Rounding control and SAE attributes. */
338 struct RC_Operation
*rounding
;
340 /* Broadcasting attributes. */
341 struct Broadcast_Operation
*broadcast
;
343 /* Compressed disp8*N attribute. */
344 unsigned int memshift
;
346 /* Swap operand in encoding. */
347 unsigned int swap_operand
;
349 /* Prefer 8bit or 32bit displacement in encoding. */
352 disp_encoding_default
= 0,
358 const char *rep_prefix
;
361 const char *hle_prefix
;
363 /* Have BND prefix. */
364 const char *bnd_prefix
;
366 /* Need VREX to support upper 16 registers. */
370 enum i386_error error
;
373 typedef struct _i386_insn i386_insn
;
375 /* Link RC type with corresponding string, that'll be looked for in
384 static const struct RC_name RC_NamesTable
[] =
386 { rne
, STRING_COMMA_LEN ("rn-sae") },
387 { rd
, STRING_COMMA_LEN ("rd-sae") },
388 { ru
, STRING_COMMA_LEN ("ru-sae") },
389 { rz
, STRING_COMMA_LEN ("rz-sae") },
390 { saeonly
, STRING_COMMA_LEN ("sae") },
393 /* List of chars besides those in app.c:symbol_chars that can start an
394 operand. Used to prevent the scrubber eating vital white-space. */
395 const char extra_symbol_chars
[] = "*%-([{"
404 #if (defined (TE_I386AIX) \
405 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
406 && !defined (TE_GNU) \
407 && !defined (TE_LINUX) \
408 && !defined (TE_NACL) \
409 && !defined (TE_NETWARE) \
410 && !defined (TE_FreeBSD) \
411 && !defined (TE_DragonFly) \
412 && !defined (TE_NetBSD)))
413 /* This array holds the chars that always start a comment. If the
414 pre-processor is disabled, these aren't very useful. The option
415 --divide will remove '/' from this list. */
416 const char *i386_comment_chars
= "#/";
417 #define SVR4_COMMENT_CHARS 1
418 #define PREFIX_SEPARATOR '\\'
421 const char *i386_comment_chars
= "#";
422 #define PREFIX_SEPARATOR '/'
425 /* This array holds the chars that only start a comment at the beginning of
426 a line. If the line seems to have the form '# 123 filename'
427 .line and .file directives will appear in the pre-processed output.
428 Note that input_file.c hand checks for '#' at the beginning of the
429 first line of the input file. This is because the compiler outputs
430 #NO_APP at the beginning of its output.
431 Also note that comments started like this one will always work if
432 '/' isn't otherwise defined. */
433 const char line_comment_chars
[] = "#/";
435 const char line_separator_chars
[] = ";";
437 /* Chars that can be used to separate mant from exp in floating point
439 const char EXP_CHARS
[] = "eE";
441 /* Chars that mean this number is a floating point constant
444 const char FLT_CHARS
[] = "fFdDxX";
446 /* Tables for lexical analysis. */
447 static char mnemonic_chars
[256];
448 static char register_chars
[256];
449 static char operand_chars
[256];
450 static char identifier_chars
[256];
451 static char digit_chars
[256];
453 /* Lexical macros. */
454 #define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
455 #define is_operand_char(x) (operand_chars[(unsigned char) x])
456 #define is_register_char(x) (register_chars[(unsigned char) x])
457 #define is_space_char(x) ((x) == ' ')
458 #define is_identifier_char(x) (identifier_chars[(unsigned char) x])
459 #define is_digit_char(x) (digit_chars[(unsigned char) x])
461 /* All non-digit non-letter characters that may occur in an operand. */
462 static char operand_special_chars
[] = "%$-+(,)*._~/<>|&^!:[@]";
464 /* md_assemble() always leaves the strings it's passed unaltered. To
465 effect this we maintain a stack of saved characters that we've smashed
466 with '\0's (indicating end of strings for various sub-fields of the
467 assembler instruction). */
468 static char save_stack
[32];
469 static char *save_stack_p
;
470 #define END_STRING_AND_SAVE(s) \
471 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
472 #define RESTORE_END_STRING(s) \
473 do { *(s) = *--save_stack_p; } while (0)
475 /* The instruction we're assembling. */
478 /* Possible templates for current insn. */
479 static const templates
*current_templates
;
481 /* Per instruction expressionS buffers: max displacements & immediates. */
482 static expressionS disp_expressions
[MAX_MEMORY_OPERANDS
];
483 static expressionS im_expressions
[MAX_IMMEDIATE_OPERANDS
];
485 /* Current operand we are working on. */
486 static int this_operand
= -1;
488 /* We support four different modes. FLAG_CODE variable is used to distinguish
496 static enum flag_code flag_code
;
497 static unsigned int object_64bit
;
498 static unsigned int disallow_64bit_reloc
;
499 static int use_rela_relocations
= 0;
501 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
502 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
503 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
505 /* The ELF ABI to use. */
513 static enum x86_elf_abi x86_elf_abi
= I386_ABI
;
516 #if defined (TE_PE) || defined (TE_PEP)
517 /* Use big object file format. */
518 static int use_big_obj
= 0;
521 /* 1 for intel syntax,
523 static int intel_syntax
= 0;
525 /* 1 for intel mnemonic,
526 0 if att mnemonic. */
527 static int intel_mnemonic
= !SYSV386_COMPAT
;
529 /* 1 if support old (<= 2.8.1) versions of gcc. */
530 static int old_gcc
= OLDGCC_COMPAT
;
532 /* 1 if pseudo registers are permitted. */
533 static int allow_pseudo_reg
= 0;
535 /* 1 if register prefix % not required. */
536 static int allow_naked_reg
= 0;
538 /* 1 if the assembler should add BND prefix for all control-tranferring
539 instructions supporting it, even if this prefix wasn't specified
541 static int add_bnd_prefix
= 0;
543 /* 1 if pseudo index register, eiz/riz, is allowed . */
544 static int allow_index_reg
= 0;
546 static enum check_kind
552 sse_check
, operand_check
= check_warning
;
554 /* Register prefix used for error message. */
555 static const char *register_prefix
= "%";
557 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
558 leave, push, and pop instructions so that gcc has the same stack
559 frame as in 32 bit mode. */
560 static char stackop_size
= '\0';
562 /* Non-zero to optimize code alignment. */
563 int optimize_align_code
= 1;
565 /* Non-zero to quieten some warnings. */
566 static int quiet_warnings
= 0;
569 static const char *cpu_arch_name
= NULL
;
570 static char *cpu_sub_arch_name
= NULL
;
572 /* CPU feature flags. */
573 static i386_cpu_flags cpu_arch_flags
= CPU_UNKNOWN_FLAGS
;
575 /* If we have selected a cpu we are generating instructions for. */
576 static int cpu_arch_tune_set
= 0;
578 /* Cpu we are generating instructions for. */
579 enum processor_type cpu_arch_tune
= PROCESSOR_UNKNOWN
;
581 /* CPU feature flags of cpu we are generating instructions for. */
582 static i386_cpu_flags cpu_arch_tune_flags
;
584 /* CPU instruction set architecture used. */
585 enum processor_type cpu_arch_isa
= PROCESSOR_UNKNOWN
;
587 /* CPU feature flags of instruction set architecture used. */
588 i386_cpu_flags cpu_arch_isa_flags
;
590 /* If set, conditional jumps are not automatically promoted to handle
591 larger than a byte offset. */
592 static unsigned int no_cond_jump_promotion
= 0;
594 /* Encode SSE instructions with VEX prefix. */
595 static unsigned int sse2avx
;
597 /* Encode scalar AVX instructions with specific vector length. */
604 /* Encode scalar EVEX LIG instructions with specific vector length. */
612 /* Encode EVEX WIG instructions with specific evex.w. */
619 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
620 static symbolS
*GOT_symbol
;
622 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
623 unsigned int x86_dwarf2_return_column
;
625 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
626 int x86_cie_data_alignment
;
628 /* Interface to relax_segment.
629 There are 3 major relax states for 386 jump insns because the
630 different types of jumps add different sizes to frags when we're
631 figuring out what sort of jump to choose to reach a given label. */
634 #define UNCOND_JUMP 0
636 #define COND_JUMP86 2
641 #define SMALL16 (SMALL | CODE16)
643 #define BIG16 (BIG | CODE16)
647 #define INLINE __inline__
653 #define ENCODE_RELAX_STATE(type, size) \
654 ((relax_substateT) (((type) << 2) | (size)))
655 #define TYPE_FROM_RELAX_STATE(s) \
657 #define DISP_SIZE_FROM_RELAX_STATE(s) \
658 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
660 /* This table is used by relax_frag to promote short jumps to long
661 ones where necessary. SMALL (short) jumps may be promoted to BIG
662 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
663 don't allow a short jump in a 32 bit code segment to be promoted to
664 a 16 bit offset jump because it's slower (requires data size
665 prefix), and doesn't work, unless the destination is in the bottom
666 64k of the code segment (The top 16 bits of eip are zeroed). */
668 const relax_typeS md_relax_table
[] =
671 1) most positive reach of this state,
672 2) most negative reach of this state,
673 3) how many bytes this mode will have in the variable part of the frag
674 4) which index into the table to try if we can't fit into this one. */
676 /* UNCOND_JUMP states. */
677 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG
)},
678 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG16
)},
679 /* dword jmp adds 4 bytes to frag:
680 0 extra opcode bytes, 4 displacement bytes. */
682 /* word jmp adds 2 byte2 to frag:
683 0 extra opcode bytes, 2 displacement bytes. */
686 /* COND_JUMP states. */
687 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP
, BIG
)},
688 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP
, BIG16
)},
689 /* dword conditionals adds 5 bytes to frag:
690 1 extra opcode byte, 4 displacement bytes. */
692 /* word conditionals add 3 bytes to frag:
693 1 extra opcode byte, 2 displacement bytes. */
696 /* COND_JUMP86 states. */
697 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86
, BIG
)},
698 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86
, BIG16
)},
699 /* dword conditionals adds 5 bytes to frag:
700 1 extra opcode byte, 4 displacement bytes. */
702 /* word conditionals add 4 bytes to frag:
703 1 displacement byte and a 3 byte long branch insn. */
707 static const arch_entry cpu_arch
[] =
709 /* Do not replace the first two entries - i386_target_format()
710 relies on them being there in this order. */
711 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32
,
712 CPU_GENERIC32_FLAGS
, 0, 0 },
713 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64
,
714 CPU_GENERIC64_FLAGS
, 0, 0 },
715 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN
,
716 CPU_NONE_FLAGS
, 0, 0 },
717 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN
,
718 CPU_I186_FLAGS
, 0, 0 },
719 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN
,
720 CPU_I286_FLAGS
, 0, 0 },
721 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386
,
722 CPU_I386_FLAGS
, 0, 0 },
723 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486
,
724 CPU_I486_FLAGS
, 0, 0 },
725 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM
,
726 CPU_I586_FLAGS
, 0, 0 },
727 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO
,
728 CPU_I686_FLAGS
, 0, 0 },
729 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM
,
730 CPU_I586_FLAGS
, 0, 0 },
731 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO
,
732 CPU_PENTIUMPRO_FLAGS
, 0, 0 },
733 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO
,
734 CPU_P2_FLAGS
, 0, 0 },
735 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO
,
736 CPU_P3_FLAGS
, 0, 0 },
737 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4
,
738 CPU_P4_FLAGS
, 0, 0 },
739 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA
,
740 CPU_CORE_FLAGS
, 0, 0 },
741 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA
,
742 CPU_NOCONA_FLAGS
, 0, 0 },
743 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE
,
744 CPU_CORE_FLAGS
, 1, 0 },
745 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE
,
746 CPU_CORE_FLAGS
, 0, 0 },
747 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2
,
748 CPU_CORE2_FLAGS
, 1, 0 },
749 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2
,
750 CPU_CORE2_FLAGS
, 0, 0 },
751 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7
,
752 CPU_COREI7_FLAGS
, 0, 0 },
753 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM
,
754 CPU_L1OM_FLAGS
, 0, 0 },
755 { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM
,
756 CPU_K1OM_FLAGS
, 0, 0 },
757 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6
,
758 CPU_K6_FLAGS
, 0, 0 },
759 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6
,
760 CPU_K6_2_FLAGS
, 0, 0 },
761 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON
,
762 CPU_ATHLON_FLAGS
, 0, 0 },
763 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8
,
764 CPU_K8_FLAGS
, 1, 0 },
765 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8
,
766 CPU_K8_FLAGS
, 0, 0 },
767 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8
,
768 CPU_K8_FLAGS
, 0, 0 },
769 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10
,
770 CPU_AMDFAM10_FLAGS
, 0, 0 },
771 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD
,
772 CPU_BDVER1_FLAGS
, 0, 0 },
773 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD
,
774 CPU_BDVER2_FLAGS
, 0, 0 },
775 { STRING_COMMA_LEN ("bdver3"), PROCESSOR_BD
,
776 CPU_BDVER3_FLAGS
, 0, 0 },
777 { STRING_COMMA_LEN ("bdver4"), PROCESSOR_BD
,
778 CPU_BDVER4_FLAGS
, 0, 0 },
779 { STRING_COMMA_LEN ("btver1"), PROCESSOR_BT
,
780 CPU_BTVER1_FLAGS
, 0, 0 },
781 { STRING_COMMA_LEN ("btver2"), PROCESSOR_BT
,
782 CPU_BTVER2_FLAGS
, 0, 0 },
783 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN
,
784 CPU_8087_FLAGS
, 0, 0 },
785 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN
,
786 CPU_287_FLAGS
, 0, 0 },
787 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN
,
788 CPU_387_FLAGS
, 0, 0 },
789 { STRING_COMMA_LEN (".no87"), PROCESSOR_UNKNOWN
,
790 CPU_ANY87_FLAGS
, 0, 1 },
791 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN
,
792 CPU_MMX_FLAGS
, 0, 0 },
793 { STRING_COMMA_LEN (".nommx"), PROCESSOR_UNKNOWN
,
794 CPU_3DNOWA_FLAGS
, 0, 1 },
795 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN
,
796 CPU_SSE_FLAGS
, 0, 0 },
797 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN
,
798 CPU_SSE2_FLAGS
, 0, 0 },
799 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN
,
800 CPU_SSE3_FLAGS
, 0, 0 },
801 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN
,
802 CPU_SSSE3_FLAGS
, 0, 0 },
803 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN
,
804 CPU_SSE4_1_FLAGS
, 0, 0 },
805 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN
,
806 CPU_SSE4_2_FLAGS
, 0, 0 },
807 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN
,
808 CPU_SSE4_2_FLAGS
, 0, 0 },
809 { STRING_COMMA_LEN (".nosse"), PROCESSOR_UNKNOWN
,
810 CPU_ANY_SSE_FLAGS
, 0, 1 },
811 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN
,
812 CPU_AVX_FLAGS
, 0, 0 },
813 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN
,
814 CPU_AVX2_FLAGS
, 0, 0 },
815 { STRING_COMMA_LEN (".avx512f"), PROCESSOR_UNKNOWN
,
816 CPU_AVX512F_FLAGS
, 0, 0 },
817 { STRING_COMMA_LEN (".avx512cd"), PROCESSOR_UNKNOWN
,
818 CPU_AVX512CD_FLAGS
, 0, 0 },
819 { STRING_COMMA_LEN (".avx512er"), PROCESSOR_UNKNOWN
,
820 CPU_AVX512ER_FLAGS
, 0, 0 },
821 { STRING_COMMA_LEN (".avx512pf"), PROCESSOR_UNKNOWN
,
822 CPU_AVX512PF_FLAGS
, 0, 0 },
823 { STRING_COMMA_LEN (".noavx"), PROCESSOR_UNKNOWN
,
824 CPU_ANY_AVX_FLAGS
, 0, 1 },
825 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN
,
826 CPU_VMX_FLAGS
, 0, 0 },
827 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN
,
828 CPU_VMFUNC_FLAGS
, 0, 0 },
829 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN
,
830 CPU_SMX_FLAGS
, 0, 0 },
831 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN
,
832 CPU_XSAVE_FLAGS
, 0, 0 },
833 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN
,
834 CPU_XSAVEOPT_FLAGS
, 0, 0 },
835 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN
,
836 CPU_AES_FLAGS
, 0, 0 },
837 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN
,
838 CPU_PCLMUL_FLAGS
, 0, 0 },
839 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN
,
840 CPU_PCLMUL_FLAGS
, 1, 0 },
841 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN
,
842 CPU_FSGSBASE_FLAGS
, 0, 0 },
843 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN
,
844 CPU_RDRND_FLAGS
, 0, 0 },
845 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN
,
846 CPU_F16C_FLAGS
, 0, 0 },
847 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN
,
848 CPU_BMI2_FLAGS
, 0, 0 },
849 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN
,
850 CPU_FMA_FLAGS
, 0, 0 },
851 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN
,
852 CPU_FMA4_FLAGS
, 0, 0 },
853 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN
,
854 CPU_XOP_FLAGS
, 0, 0 },
855 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN
,
856 CPU_LWP_FLAGS
, 0, 0 },
857 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN
,
858 CPU_MOVBE_FLAGS
, 0, 0 },
859 { STRING_COMMA_LEN (".cx16"), PROCESSOR_UNKNOWN
,
860 CPU_CX16_FLAGS
, 0, 0 },
861 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN
,
862 CPU_EPT_FLAGS
, 0, 0 },
863 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN
,
864 CPU_LZCNT_FLAGS
, 0, 0 },
865 { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN
,
866 CPU_HLE_FLAGS
, 0, 0 },
867 { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN
,
868 CPU_RTM_FLAGS
, 0, 0 },
869 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN
,
870 CPU_INVPCID_FLAGS
, 0, 0 },
871 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN
,
872 CPU_CLFLUSH_FLAGS
, 0, 0 },
873 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN
,
874 CPU_NOP_FLAGS
, 0, 0 },
875 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN
,
876 CPU_SYSCALL_FLAGS
, 0, 0 },
877 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN
,
878 CPU_RDTSCP_FLAGS
, 0, 0 },
879 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN
,
880 CPU_3DNOW_FLAGS
, 0, 0 },
881 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN
,
882 CPU_3DNOWA_FLAGS
, 0, 0 },
883 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN
,
884 CPU_PADLOCK_FLAGS
, 0, 0 },
885 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN
,
886 CPU_SVME_FLAGS
, 1, 0 },
887 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN
,
888 CPU_SVME_FLAGS
, 0, 0 },
889 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN
,
890 CPU_SSE4A_FLAGS
, 0, 0 },
891 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN
,
892 CPU_ABM_FLAGS
, 0, 0 },
893 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN
,
894 CPU_BMI_FLAGS
, 0, 0 },
895 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN
,
896 CPU_TBM_FLAGS
, 0, 0 },
897 { STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN
,
898 CPU_ADX_FLAGS
, 0, 0 },
899 { STRING_COMMA_LEN (".rdseed"), PROCESSOR_UNKNOWN
,
900 CPU_RDSEED_FLAGS
, 0, 0 },
901 { STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN
,
902 CPU_PRFCHW_FLAGS
, 0, 0 },
903 { STRING_COMMA_LEN (".smap"), PROCESSOR_UNKNOWN
,
904 CPU_SMAP_FLAGS
, 0, 0 },
905 { STRING_COMMA_LEN (".mpx"), PROCESSOR_UNKNOWN
,
906 CPU_MPX_FLAGS
, 0, 0 },
907 { STRING_COMMA_LEN (".sha"), PROCESSOR_UNKNOWN
,
908 CPU_SHA_FLAGS
, 0, 0 },
909 { STRING_COMMA_LEN (".clflushopt"), PROCESSOR_UNKNOWN
,
910 CPU_CLFLUSHOPT_FLAGS
, 0, 0 },
911 { STRING_COMMA_LEN (".xsavec"), PROCESSOR_UNKNOWN
,
912 CPU_XSAVEC_FLAGS
, 0, 0 },
913 { STRING_COMMA_LEN (".xsaves"), PROCESSOR_UNKNOWN
,
914 CPU_XSAVES_FLAGS
, 0, 0 },
915 { STRING_COMMA_LEN (".prefetchwt1"), PROCESSOR_UNKNOWN
,
916 CPU_PREFETCHWT1_FLAGS
, 0, 0 },
920 /* Like s_lcomm_internal in gas/read.c but the alignment string
921 is allowed to be optional. */
924 pe_lcomm_internal (int needs_align
, symbolS
*symbolP
, addressT size
)
931 && *input_line_pointer
== ',')
933 align
= parse_align (needs_align
- 1);
935 if (align
== (addressT
) -1)
950 bss_alloc (symbolP
, size
, align
);
955 pe_lcomm (int needs_align
)
957 s_comm_internal (needs_align
* 2, pe_lcomm_internal
);
961 const pseudo_typeS md_pseudo_table
[] =
963 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
964 {"align", s_align_bytes
, 0},
966 {"align", s_align_ptwo
, 0},
968 {"arch", set_cpu_arch
, 0},
972 {"lcomm", pe_lcomm
, 1},
974 {"ffloat", float_cons
, 'f'},
975 {"dfloat", float_cons
, 'd'},
976 {"tfloat", float_cons
, 'x'},
978 {"slong", signed_cons
, 4},
979 {"noopt", s_ignore
, 0},
980 {"optim", s_ignore
, 0},
981 {"code16gcc", set_16bit_gcc_code_flag
, CODE_16BIT
},
982 {"code16", set_code_flag
, CODE_16BIT
},
983 {"code32", set_code_flag
, CODE_32BIT
},
984 {"code64", set_code_flag
, CODE_64BIT
},
985 {"intel_syntax", set_intel_syntax
, 1},
986 {"att_syntax", set_intel_syntax
, 0},
987 {"intel_mnemonic", set_intel_mnemonic
, 1},
988 {"att_mnemonic", set_intel_mnemonic
, 0},
989 {"allow_index_reg", set_allow_index_reg
, 1},
990 {"disallow_index_reg", set_allow_index_reg
, 0},
991 {"sse_check", set_check
, 0},
992 {"operand_check", set_check
, 1},
993 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
994 {"largecomm", handle_large_common
, 0},
996 {"file", (void (*) (int)) dwarf2_directive_file
, 0},
997 {"loc", dwarf2_directive_loc
, 0},
998 {"loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0},
1001 {"secrel32", pe_directive_secrel
, 0},
1006 /* For interface with expression (). */
1007 extern char *input_line_pointer
;
1009 /* Hash table for instruction mnemonic lookup. */
1010 static struct hash_control
*op_hash
;
1012 /* Hash table for register lookup. */
1013 static struct hash_control
*reg_hash
;
1016 i386_align_code (fragS
*fragP
, int count
)
1018 /* Various efficient no-op patterns for aligning code labels.
1019 Note: Don't try to assemble the instructions in the comments.
1020 0L and 0w are not legal. */
1021 static const char f32_1
[] =
1023 static const char f32_2
[] =
1024 {0x66,0x90}; /* xchg %ax,%ax */
1025 static const char f32_3
[] =
1026 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
1027 static const char f32_4
[] =
1028 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
1029 static const char f32_5
[] =
1031 0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
1032 static const char f32_6
[] =
1033 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
1034 static const char f32_7
[] =
1035 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
1036 static const char f32_8
[] =
1038 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
1039 static const char f32_9
[] =
1040 {0x89,0xf6, /* movl %esi,%esi */
1041 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
1042 static const char f32_10
[] =
1043 {0x8d,0x76,0x00, /* leal 0(%esi),%esi */
1044 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
1045 static const char f32_11
[] =
1046 {0x8d,0x74,0x26,0x00, /* leal 0(%esi,1),%esi */
1047 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
1048 static const char f32_12
[] =
1049 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
1050 0x8d,0xbf,0x00,0x00,0x00,0x00}; /* leal 0L(%edi),%edi */
1051 static const char f32_13
[] =
1052 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
1053 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
1054 static const char f32_14
[] =
1055 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00, /* leal 0L(%esi,1),%esi */
1056 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
1057 static const char f16_3
[] =
1058 {0x8d,0x74,0x00}; /* lea 0(%esi),%esi */
1059 static const char f16_4
[] =
1060 {0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
1061 static const char f16_5
[] =
1063 0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
1064 static const char f16_6
[] =
1065 {0x89,0xf6, /* mov %si,%si */
1066 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
1067 static const char f16_7
[] =
1068 {0x8d,0x74,0x00, /* lea 0(%si),%si */
1069 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
1070 static const char f16_8
[] =
1071 {0x8d,0xb4,0x00,0x00, /* lea 0w(%si),%si */
1072 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
1073 static const char jump_31
[] =
1074 {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90, /* jmp .+31; lotsa nops */
1075 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
1076 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
1077 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
1078 static const char *const f32_patt
[] = {
1079 f32_1
, f32_2
, f32_3
, f32_4
, f32_5
, f32_6
, f32_7
, f32_8
,
1080 f32_9
, f32_10
, f32_11
, f32_12
, f32_13
, f32_14
1082 static const char *const f16_patt
[] = {
1083 f32_1
, f32_2
, f16_3
, f16_4
, f16_5
, f16_6
, f16_7
, f16_8
1085 /* nopl (%[re]ax) */
1086 static const char alt_3
[] =
1088 /* nopl 0(%[re]ax) */
1089 static const char alt_4
[] =
1090 {0x0f,0x1f,0x40,0x00};
1091 /* nopl 0(%[re]ax,%[re]ax,1) */
1092 static const char alt_5
[] =
1093 {0x0f,0x1f,0x44,0x00,0x00};
1094 /* nopw 0(%[re]ax,%[re]ax,1) */
1095 static const char alt_6
[] =
1096 {0x66,0x0f,0x1f,0x44,0x00,0x00};
1097 /* nopl 0L(%[re]ax) */
1098 static const char alt_7
[] =
1099 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1100 /* nopl 0L(%[re]ax,%[re]ax,1) */
1101 static const char alt_8
[] =
1102 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1103 /* nopw 0L(%[re]ax,%[re]ax,1) */
1104 static const char alt_9
[] =
1105 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1106 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
1107 static const char alt_10
[] =
1108 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1110 nopw %cs:0L(%[re]ax,%[re]ax,1) */
1111 static const char alt_long_11
[] =
1113 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1116 nopw %cs:0L(%[re]ax,%[re]ax,1) */
1117 static const char alt_long_12
[] =
1120 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1124 nopw %cs:0L(%[re]ax,%[re]ax,1) */
1125 static const char alt_long_13
[] =
1129 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1134 nopw %cs:0L(%[re]ax,%[re]ax,1) */
1135 static const char alt_long_14
[] =
1140 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1146 nopw %cs:0L(%[re]ax,%[re]ax,1) */
1147 static const char alt_long_15
[] =
1153 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1154 /* nopl 0(%[re]ax,%[re]ax,1)
1155 nopw 0(%[re]ax,%[re]ax,1) */
1156 static const char alt_short_11
[] =
1157 {0x0f,0x1f,0x44,0x00,0x00,
1158 0x66,0x0f,0x1f,0x44,0x00,0x00};
1159 /* nopw 0(%[re]ax,%[re]ax,1)
1160 nopw 0(%[re]ax,%[re]ax,1) */
1161 static const char alt_short_12
[] =
1162 {0x66,0x0f,0x1f,0x44,0x00,0x00,
1163 0x66,0x0f,0x1f,0x44,0x00,0x00};
1164 /* nopw 0(%[re]ax,%[re]ax,1)
1166 static const char alt_short_13
[] =
1167 {0x66,0x0f,0x1f,0x44,0x00,0x00,
1168 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1171 static const char alt_short_14
[] =
1172 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1173 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1175 nopl 0L(%[re]ax,%[re]ax,1) */
1176 static const char alt_short_15
[] =
1177 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1178 0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1179 static const char *const alt_short_patt
[] = {
1180 f32_1
, f32_2
, alt_3
, alt_4
, alt_5
, alt_6
, alt_7
, alt_8
,
1181 alt_9
, alt_10
, alt_short_11
, alt_short_12
, alt_short_13
,
1182 alt_short_14
, alt_short_15
1184 static const char *const alt_long_patt
[] = {
1185 f32_1
, f32_2
, alt_3
, alt_4
, alt_5
, alt_6
, alt_7
, alt_8
,
1186 alt_9
, alt_10
, alt_long_11
, alt_long_12
, alt_long_13
,
1187 alt_long_14
, alt_long_15
1190 /* Only align for at least a positive non-zero boundary. */
1191 if (count
<= 0 || count
> MAX_MEM_FOR_RS_ALIGN_CODE
)
1194 /* We need to decide which NOP sequence to use for 32bit and
1195 64bit. When -mtune= is used:
1197 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1198 PROCESSOR_GENERIC32, f32_patt will be used.
1199 2. For PROCESSOR_PENTIUMPRO, PROCESSOR_PENTIUM4, PROCESSOR_NOCONA,
1200 PROCESSOR_CORE, PROCESSOR_CORE2, PROCESSOR_COREI7, and
1201 PROCESSOR_GENERIC64, alt_long_patt will be used.
1202 3. For PROCESSOR_ATHLON, PROCESSOR_K6, PROCESSOR_K8 and
1203 PROCESSOR_AMDFAM10, PROCESSOR_BD and PROCESSOR_BT, alt_short_patt
1206 When -mtune= isn't used, alt_long_patt will be used if
1207 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1210 When -march= or .arch is used, we can't use anything beyond
1211 cpu_arch_isa_flags. */
1213 if (flag_code
== CODE_16BIT
)
1217 memcpy (fragP
->fr_literal
+ fragP
->fr_fix
,
1219 /* Adjust jump offset. */
1220 fragP
->fr_literal
[fragP
->fr_fix
+ 1] = count
- 2;
1223 memcpy (fragP
->fr_literal
+ fragP
->fr_fix
,
1224 f16_patt
[count
- 1], count
);
1228 const char *const *patt
= NULL
;
1230 if (fragP
->tc_frag_data
.isa
== PROCESSOR_UNKNOWN
)
1232 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1233 switch (cpu_arch_tune
)
1235 case PROCESSOR_UNKNOWN
:
1236 /* We use cpu_arch_isa_flags to check if we SHOULD
1237 optimize with nops. */
1238 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1239 patt
= alt_long_patt
;
1243 case PROCESSOR_PENTIUM4
:
1244 case PROCESSOR_NOCONA
:
1245 case PROCESSOR_CORE
:
1246 case PROCESSOR_CORE2
:
1247 case PROCESSOR_COREI7
:
1248 case PROCESSOR_L1OM
:
1249 case PROCESSOR_K1OM
:
1250 case PROCESSOR_GENERIC64
:
1251 patt
= alt_long_patt
;
1254 case PROCESSOR_ATHLON
:
1256 case PROCESSOR_AMDFAM10
:
1259 patt
= alt_short_patt
;
1261 case PROCESSOR_I386
:
1262 case PROCESSOR_I486
:
1263 case PROCESSOR_PENTIUM
:
1264 case PROCESSOR_PENTIUMPRO
:
1265 case PROCESSOR_GENERIC32
:
1272 switch (fragP
->tc_frag_data
.tune
)
1274 case PROCESSOR_UNKNOWN
:
1275 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1276 PROCESSOR_UNKNOWN. */
1280 case PROCESSOR_I386
:
1281 case PROCESSOR_I486
:
1282 case PROCESSOR_PENTIUM
:
1284 case PROCESSOR_ATHLON
:
1286 case PROCESSOR_AMDFAM10
:
1289 case PROCESSOR_GENERIC32
:
1290 /* We use cpu_arch_isa_flags to check if we CAN optimize
1292 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1293 patt
= alt_short_patt
;
1297 case PROCESSOR_PENTIUMPRO
:
1298 case PROCESSOR_PENTIUM4
:
1299 case PROCESSOR_NOCONA
:
1300 case PROCESSOR_CORE
:
1301 case PROCESSOR_CORE2
:
1302 case PROCESSOR_COREI7
:
1303 case PROCESSOR_L1OM
:
1304 case PROCESSOR_K1OM
:
1305 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1306 patt
= alt_long_patt
;
1310 case PROCESSOR_GENERIC64
:
1311 patt
= alt_long_patt
;
1316 if (patt
== f32_patt
)
1318 /* If the padding is less than 15 bytes, we use the normal
1319 ones. Otherwise, we use a jump instruction and adjust
1323 /* For 64bit, the limit is 3 bytes. */
1324 if (flag_code
== CODE_64BIT
1325 && fragP
->tc_frag_data
.isa_flags
.bitfield
.cpulm
)
1330 memcpy (fragP
->fr_literal
+ fragP
->fr_fix
,
1331 patt
[count
- 1], count
);
1334 memcpy (fragP
->fr_literal
+ fragP
->fr_fix
,
1336 /* Adjust jump offset. */
1337 fragP
->fr_literal
[fragP
->fr_fix
+ 1] = count
- 2;
1342 /* Maximum length of an instruction is 15 byte. If the
1343 padding is greater than 15 bytes and we don't use jump,
1344 we have to break it into smaller pieces. */
1345 int padding
= count
;
1346 while (padding
> 15)
1349 memcpy (fragP
->fr_literal
+ fragP
->fr_fix
+ padding
,
1354 memcpy (fragP
->fr_literal
+ fragP
->fr_fix
,
1355 patt
[padding
- 1], padding
);
1358 fragP
->fr_var
= count
;
1362 operand_type_all_zero (const union i386_operand_type
*x
)
1364 switch (ARRAY_SIZE(x
->array
))
1373 return !x
->array
[0];
1380 operand_type_set (union i386_operand_type
*x
, unsigned int v
)
1382 switch (ARRAY_SIZE(x
->array
))
1397 operand_type_equal (const union i386_operand_type
*x
,
1398 const union i386_operand_type
*y
)
1400 switch (ARRAY_SIZE(x
->array
))
1403 if (x
->array
[2] != y
->array
[2])
1406 if (x
->array
[1] != y
->array
[1])
1409 return x
->array
[0] == y
->array
[0];
1417 cpu_flags_all_zero (const union i386_cpu_flags
*x
)
1419 switch (ARRAY_SIZE(x
->array
))
1428 return !x
->array
[0];
1435 cpu_flags_set (union i386_cpu_flags
*x
, unsigned int v
)
1437 switch (ARRAY_SIZE(x
->array
))
1452 cpu_flags_equal (const union i386_cpu_flags
*x
,
1453 const union i386_cpu_flags
*y
)
1455 switch (ARRAY_SIZE(x
->array
))
1458 if (x
->array
[2] != y
->array
[2])
1461 if (x
->array
[1] != y
->array
[1])
1464 return x
->array
[0] == y
->array
[0];
1472 cpu_flags_check_cpu64 (i386_cpu_flags f
)
1474 return !((flag_code
== CODE_64BIT
&& f
.bitfield
.cpuno64
)
1475 || (flag_code
!= CODE_64BIT
&& f
.bitfield
.cpu64
));
1478 static INLINE i386_cpu_flags
1479 cpu_flags_and (i386_cpu_flags x
, i386_cpu_flags y
)
1481 switch (ARRAY_SIZE (x
.array
))
1484 x
.array
[2] &= y
.array
[2];
1486 x
.array
[1] &= y
.array
[1];
1488 x
.array
[0] &= y
.array
[0];
1496 static INLINE i386_cpu_flags
1497 cpu_flags_or (i386_cpu_flags x
, i386_cpu_flags y
)
1499 switch (ARRAY_SIZE (x
.array
))
1502 x
.array
[2] |= y
.array
[2];
1504 x
.array
[1] |= y
.array
[1];
1506 x
.array
[0] |= y
.array
[0];
1514 static INLINE i386_cpu_flags
1515 cpu_flags_and_not (i386_cpu_flags x
, i386_cpu_flags y
)
1517 switch (ARRAY_SIZE (x
.array
))
1520 x
.array
[2] &= ~y
.array
[2];
1522 x
.array
[1] &= ~y
.array
[1];
1524 x
.array
[0] &= ~y
.array
[0];
1532 #define CPU_FLAGS_ARCH_MATCH 0x1
1533 #define CPU_FLAGS_64BIT_MATCH 0x2
1534 #define CPU_FLAGS_AES_MATCH 0x4
1535 #define CPU_FLAGS_PCLMUL_MATCH 0x8
1536 #define CPU_FLAGS_AVX_MATCH 0x10
1538 #define CPU_FLAGS_32BIT_MATCH \
1539 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
1540 | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
1541 #define CPU_FLAGS_PERFECT_MATCH \
1542 (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
1544 /* Return CPU flags match bits. */
1547 cpu_flags_match (const insn_template
*t
)
1549 i386_cpu_flags x
= t
->cpu_flags
;
1550 int match
= cpu_flags_check_cpu64 (x
) ? CPU_FLAGS_64BIT_MATCH
: 0;
1552 x
.bitfield
.cpu64
= 0;
1553 x
.bitfield
.cpuno64
= 0;
1555 if (cpu_flags_all_zero (&x
))
1557 /* This instruction is available on all archs. */
1558 match
|= CPU_FLAGS_32BIT_MATCH
;
1562 /* This instruction is available only on some archs. */
1563 i386_cpu_flags cpu
= cpu_arch_flags
;
1565 cpu
.bitfield
.cpu64
= 0;
1566 cpu
.bitfield
.cpuno64
= 0;
1567 cpu
= cpu_flags_and (x
, cpu
);
1568 if (!cpu_flags_all_zero (&cpu
))
1570 if (x
.bitfield
.cpuavx
)
1572 /* We only need to check AES/PCLMUL/SSE2AVX with AVX. */
1573 if (cpu
.bitfield
.cpuavx
)
1575 /* Check SSE2AVX. */
1576 if (!t
->opcode_modifier
.sse2avx
|| sse2avx
)
1578 match
|= (CPU_FLAGS_ARCH_MATCH
1579 | CPU_FLAGS_AVX_MATCH
);
1581 if (!x
.bitfield
.cpuaes
|| cpu
.bitfield
.cpuaes
)
1582 match
|= CPU_FLAGS_AES_MATCH
;
1584 if (!x
.bitfield
.cpupclmul
1585 || cpu
.bitfield
.cpupclmul
)
1586 match
|= CPU_FLAGS_PCLMUL_MATCH
;
1590 match
|= CPU_FLAGS_ARCH_MATCH
;
1593 match
|= CPU_FLAGS_32BIT_MATCH
;
1599 static INLINE i386_operand_type
1600 operand_type_and (i386_operand_type x
, i386_operand_type y
)
1602 switch (ARRAY_SIZE (x
.array
))
1605 x
.array
[2] &= y
.array
[2];
1607 x
.array
[1] &= y
.array
[1];
1609 x
.array
[0] &= y
.array
[0];
1617 static INLINE i386_operand_type
1618 operand_type_or (i386_operand_type x
, i386_operand_type y
)
1620 switch (ARRAY_SIZE (x
.array
))
1623 x
.array
[2] |= y
.array
[2];
1625 x
.array
[1] |= y
.array
[1];
1627 x
.array
[0] |= y
.array
[0];
1635 static INLINE i386_operand_type
1636 operand_type_xor (i386_operand_type x
, i386_operand_type y
)
1638 switch (ARRAY_SIZE (x
.array
))
1641 x
.array
[2] ^= y
.array
[2];
1643 x
.array
[1] ^= y
.array
[1];
1645 x
.array
[0] ^= y
.array
[0];
1653 static const i386_operand_type acc32
= OPERAND_TYPE_ACC32
;
1654 static const i386_operand_type acc64
= OPERAND_TYPE_ACC64
;
1655 static const i386_operand_type control
= OPERAND_TYPE_CONTROL
;
1656 static const i386_operand_type inoutportreg
1657 = OPERAND_TYPE_INOUTPORTREG
;
1658 static const i386_operand_type reg16_inoutportreg
1659 = OPERAND_TYPE_REG16_INOUTPORTREG
;
1660 static const i386_operand_type disp16
= OPERAND_TYPE_DISP16
;
1661 static const i386_operand_type disp32
= OPERAND_TYPE_DISP32
;
1662 static const i386_operand_type disp32s
= OPERAND_TYPE_DISP32S
;
1663 static const i386_operand_type disp16_32
= OPERAND_TYPE_DISP16_32
;
1664 static const i386_operand_type anydisp
1665 = OPERAND_TYPE_ANYDISP
;
1666 static const i386_operand_type regxmm
= OPERAND_TYPE_REGXMM
;
1667 static const i386_operand_type regymm
= OPERAND_TYPE_REGYMM
;
1668 static const i386_operand_type regzmm
= OPERAND_TYPE_REGZMM
;
1669 static const i386_operand_type regmask
= OPERAND_TYPE_REGMASK
;
1670 static const i386_operand_type imm8
= OPERAND_TYPE_IMM8
;
1671 static const i386_operand_type imm8s
= OPERAND_TYPE_IMM8S
;
1672 static const i386_operand_type imm16
= OPERAND_TYPE_IMM16
;
1673 static const i386_operand_type imm32
= OPERAND_TYPE_IMM32
;
1674 static const i386_operand_type imm32s
= OPERAND_TYPE_IMM32S
;
1675 static const i386_operand_type imm64
= OPERAND_TYPE_IMM64
;
1676 static const i386_operand_type imm16_32
= OPERAND_TYPE_IMM16_32
;
1677 static const i386_operand_type imm16_32s
= OPERAND_TYPE_IMM16_32S
;
1678 static const i386_operand_type imm16_32_32s
= OPERAND_TYPE_IMM16_32_32S
;
1679 static const i386_operand_type vec_imm4
= OPERAND_TYPE_VEC_IMM4
;
1690 operand_type_check (i386_operand_type t
, enum operand_type c
)
1695 return (t
.bitfield
.reg8
1698 || t
.bitfield
.reg64
);
1701 return (t
.bitfield
.imm8
1705 || t
.bitfield
.imm32s
1706 || t
.bitfield
.imm64
);
1709 return (t
.bitfield
.disp8
1710 || t
.bitfield
.disp16
1711 || t
.bitfield
.disp32
1712 || t
.bitfield
.disp32s
1713 || t
.bitfield
.disp64
);
1716 return (t
.bitfield
.disp8
1717 || t
.bitfield
.disp16
1718 || t
.bitfield
.disp32
1719 || t
.bitfield
.disp32s
1720 || t
.bitfield
.disp64
1721 || t
.bitfield
.baseindex
);
1730 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
1731 operand J for instruction template T. */
1734 match_reg_size (const insn_template
*t
, unsigned int j
)
1736 return !((i
.types
[j
].bitfield
.byte
1737 && !t
->operand_types
[j
].bitfield
.byte
)
1738 || (i
.types
[j
].bitfield
.word
1739 && !t
->operand_types
[j
].bitfield
.word
)
1740 || (i
.types
[j
].bitfield
.dword
1741 && !t
->operand_types
[j
].bitfield
.dword
)
1742 || (i
.types
[j
].bitfield
.qword
1743 && !t
->operand_types
[j
].bitfield
.qword
));
1746 /* Return 1 if there is no conflict in any size on operand J for
1747 instruction template T. */
1750 match_mem_size (const insn_template
*t
, unsigned int j
)
1752 return (match_reg_size (t
, j
)
1753 && !((i
.types
[j
].bitfield
.unspecified
1754 && !t
->operand_types
[j
].bitfield
.unspecified
)
1755 || (i
.types
[j
].bitfield
.fword
1756 && !t
->operand_types
[j
].bitfield
.fword
)
1757 || (i
.types
[j
].bitfield
.tbyte
1758 && !t
->operand_types
[j
].bitfield
.tbyte
)
1759 || (i
.types
[j
].bitfield
.xmmword
1760 && !t
->operand_types
[j
].bitfield
.xmmword
)
1761 || (i
.types
[j
].bitfield
.ymmword
1762 && !t
->operand_types
[j
].bitfield
.ymmword
)
1763 || (i
.types
[j
].bitfield
.zmmword
1764 && !t
->operand_types
[j
].bitfield
.zmmword
)));
1767 /* Return 1 if there is no size conflict on any operands for
1768 instruction template T. */
1771 operand_size_match (const insn_template
*t
)
1776 /* Don't check jump instructions. */
1777 if (t
->opcode_modifier
.jump
1778 || t
->opcode_modifier
.jumpbyte
1779 || t
->opcode_modifier
.jumpdword
1780 || t
->opcode_modifier
.jumpintersegment
)
1783 /* Check memory and accumulator operand size. */
1784 for (j
= 0; j
< i
.operands
; j
++)
1786 if (t
->operand_types
[j
].bitfield
.anysize
)
1789 if (t
->operand_types
[j
].bitfield
.acc
&& !match_reg_size (t
, j
))
1795 if (i
.types
[j
].bitfield
.mem
&& !match_mem_size (t
, j
))
1804 else if (!t
->opcode_modifier
.d
&& !t
->opcode_modifier
.floatd
)
1807 i
.error
= operand_size_mismatch
;
1811 /* Check reverse. */
1812 gas_assert (i
.operands
== 2);
1815 for (j
= 0; j
< 2; j
++)
1817 if (t
->operand_types
[j
].bitfield
.acc
1818 && !match_reg_size (t
, j
? 0 : 1))
1821 if (i
.types
[j
].bitfield
.mem
1822 && !match_mem_size (t
, j
? 0 : 1))
1830 operand_type_match (i386_operand_type overlap
,
1831 i386_operand_type given
)
1833 i386_operand_type temp
= overlap
;
1835 temp
.bitfield
.jumpabsolute
= 0;
1836 temp
.bitfield
.unspecified
= 0;
1837 temp
.bitfield
.byte
= 0;
1838 temp
.bitfield
.word
= 0;
1839 temp
.bitfield
.dword
= 0;
1840 temp
.bitfield
.fword
= 0;
1841 temp
.bitfield
.qword
= 0;
1842 temp
.bitfield
.tbyte
= 0;
1843 temp
.bitfield
.xmmword
= 0;
1844 temp
.bitfield
.ymmword
= 0;
1845 temp
.bitfield
.zmmword
= 0;
1846 if (operand_type_all_zero (&temp
))
1849 if (given
.bitfield
.baseindex
== overlap
.bitfield
.baseindex
1850 && given
.bitfield
.jumpabsolute
== overlap
.bitfield
.jumpabsolute
)
1854 i
.error
= operand_type_mismatch
;
1858 /* If given types g0 and g1 are registers they must be of the same type
1859 unless the expected operand type register overlap is null.
1860 Note that Acc in a template matches every size of reg. */
1863 operand_type_register_match (i386_operand_type m0
,
1864 i386_operand_type g0
,
1865 i386_operand_type t0
,
1866 i386_operand_type m1
,
1867 i386_operand_type g1
,
1868 i386_operand_type t1
)
1870 if (!operand_type_check (g0
, reg
))
1873 if (!operand_type_check (g1
, reg
))
1876 if (g0
.bitfield
.reg8
== g1
.bitfield
.reg8
1877 && g0
.bitfield
.reg16
== g1
.bitfield
.reg16
1878 && g0
.bitfield
.reg32
== g1
.bitfield
.reg32
1879 && g0
.bitfield
.reg64
== g1
.bitfield
.reg64
)
1882 if (m0
.bitfield
.acc
)
1884 t0
.bitfield
.reg8
= 1;
1885 t0
.bitfield
.reg16
= 1;
1886 t0
.bitfield
.reg32
= 1;
1887 t0
.bitfield
.reg64
= 1;
1890 if (m1
.bitfield
.acc
)
1892 t1
.bitfield
.reg8
= 1;
1893 t1
.bitfield
.reg16
= 1;
1894 t1
.bitfield
.reg32
= 1;
1895 t1
.bitfield
.reg64
= 1;
1898 if (!(t0
.bitfield
.reg8
& t1
.bitfield
.reg8
)
1899 && !(t0
.bitfield
.reg16
& t1
.bitfield
.reg16
)
1900 && !(t0
.bitfield
.reg32
& t1
.bitfield
.reg32
)
1901 && !(t0
.bitfield
.reg64
& t1
.bitfield
.reg64
))
1904 i
.error
= register_type_mismatch
;
1909 static INLINE
unsigned int
1910 register_number (const reg_entry
*r
)
1912 unsigned int nr
= r
->reg_num
;
1914 if (r
->reg_flags
& RegRex
)
1920 static INLINE
unsigned int
1921 mode_from_disp_size (i386_operand_type t
)
1923 if (t
.bitfield
.disp8
|| t
.bitfield
.vec_disp8
)
1925 else if (t
.bitfield
.disp16
1926 || t
.bitfield
.disp32
1927 || t
.bitfield
.disp32s
)
1934 fits_in_signed_byte (offsetT num
)
1936 return (num
>= -128) && (num
<= 127);
1940 fits_in_unsigned_byte (offsetT num
)
1942 return (num
& 0xff) == num
;
1946 fits_in_unsigned_word (offsetT num
)
1948 return (num
& 0xffff) == num
;
1952 fits_in_signed_word (offsetT num
)
1954 return (-32768 <= num
) && (num
<= 32767);
1958 fits_in_signed_long (offsetT num ATTRIBUTE_UNUSED
)
1963 return (!(((offsetT
) -1 << 31) & num
)
1964 || (((offsetT
) -1 << 31) & num
) == ((offsetT
) -1 << 31));
1966 } /* fits_in_signed_long() */
1969 fits_in_unsigned_long (offsetT num ATTRIBUTE_UNUSED
)
1974 return (num
& (((offsetT
) 2 << 31) - 1)) == num
;
1976 } /* fits_in_unsigned_long() */
1979 fits_in_vec_disp8 (offsetT num
)
1981 int shift
= i
.memshift
;
1987 mask
= (1 << shift
) - 1;
1989 /* Return 0 if NUM isn't properly aligned. */
1993 /* Check if NUM will fit in 8bit after shift. */
1994 return fits_in_signed_byte (num
>> shift
);
1998 fits_in_imm4 (offsetT num
)
2000 return (num
& 0xf) == num
;
2003 static i386_operand_type
2004 smallest_imm_type (offsetT num
)
2006 i386_operand_type t
;
2008 operand_type_set (&t
, 0);
2009 t
.bitfield
.imm64
= 1;
2011 if (cpu_arch_tune
!= PROCESSOR_I486
&& num
== 1)
2013 /* This code is disabled on the 486 because all the Imm1 forms
2014 in the opcode table are slower on the i486. They're the
2015 versions with the implicitly specified single-position
2016 displacement, which has another syntax if you really want to
2018 t
.bitfield
.imm1
= 1;
2019 t
.bitfield
.imm8
= 1;
2020 t
.bitfield
.imm8s
= 1;
2021 t
.bitfield
.imm16
= 1;
2022 t
.bitfield
.imm32
= 1;
2023 t
.bitfield
.imm32s
= 1;
2025 else if (fits_in_signed_byte (num
))
2027 t
.bitfield
.imm8
= 1;
2028 t
.bitfield
.imm8s
= 1;
2029 t
.bitfield
.imm16
= 1;
2030 t
.bitfield
.imm32
= 1;
2031 t
.bitfield
.imm32s
= 1;
2033 else if (fits_in_unsigned_byte (num
))
2035 t
.bitfield
.imm8
= 1;
2036 t
.bitfield
.imm16
= 1;
2037 t
.bitfield
.imm32
= 1;
2038 t
.bitfield
.imm32s
= 1;
2040 else if (fits_in_signed_word (num
) || fits_in_unsigned_word (num
))
2042 t
.bitfield
.imm16
= 1;
2043 t
.bitfield
.imm32
= 1;
2044 t
.bitfield
.imm32s
= 1;
2046 else if (fits_in_signed_long (num
))
2048 t
.bitfield
.imm32
= 1;
2049 t
.bitfield
.imm32s
= 1;
2051 else if (fits_in_unsigned_long (num
))
2052 t
.bitfield
.imm32
= 1;
2058 offset_in_range (offsetT val
, int size
)
2064 case 1: mask
= ((addressT
) 1 << 8) - 1; break;
2065 case 2: mask
= ((addressT
) 1 << 16) - 1; break;
2066 case 4: mask
= ((addressT
) 2 << 31) - 1; break;
2068 case 8: mask
= ((addressT
) 2 << 63) - 1; break;
2074 /* If BFD64, sign extend val for 32bit address mode. */
2075 if (flag_code
!= CODE_64BIT
2076 || i
.prefix
[ADDR_PREFIX
])
2077 if ((val
& ~(((addressT
) 2 << 31) - 1)) == 0)
2078 val
= (val
^ ((addressT
) 1 << 31)) - ((addressT
) 1 << 31);
2081 if ((val
& ~mask
) != 0 && (val
& ~mask
) != ~mask
)
2083 char buf1
[40], buf2
[40];
2085 sprint_value (buf1
, val
);
2086 sprint_value (buf2
, val
& mask
);
2087 as_warn (_("%s shortened to %s"), buf1
, buf2
);
2101 a. PREFIX_EXIST if attempting to add a prefix where one from the
2102 same class already exists.
2103 b. PREFIX_LOCK if lock prefix is added.
2104 c. PREFIX_REP if rep/repne prefix is added.
2105 d. PREFIX_OTHER if other prefix is added.
2108 static enum PREFIX_GROUP
2109 add_prefix (unsigned int prefix
)
2111 enum PREFIX_GROUP ret
= PREFIX_OTHER
;
2114 if (prefix
>= REX_OPCODE
&& prefix
< REX_OPCODE
+ 16
2115 && flag_code
== CODE_64BIT
)
2117 if ((i
.prefix
[REX_PREFIX
] & prefix
& REX_W
)
2118 || ((i
.prefix
[REX_PREFIX
] & (REX_R
| REX_X
| REX_B
))
2119 && (prefix
& (REX_R
| REX_X
| REX_B
))))
2130 case CS_PREFIX_OPCODE
:
2131 case DS_PREFIX_OPCODE
:
2132 case ES_PREFIX_OPCODE
:
2133 case FS_PREFIX_OPCODE
:
2134 case GS_PREFIX_OPCODE
:
2135 case SS_PREFIX_OPCODE
:
2139 case REPNE_PREFIX_OPCODE
:
2140 case REPE_PREFIX_OPCODE
:
2145 case LOCK_PREFIX_OPCODE
:
2154 case ADDR_PREFIX_OPCODE
:
2158 case DATA_PREFIX_OPCODE
:
2162 if (i
.prefix
[q
] != 0)
2170 i
.prefix
[q
] |= prefix
;
2173 as_bad (_("same type of prefix used twice"));
2179 update_code_flag (int value
, int check
)
2181 PRINTF_LIKE ((*as_error
));
2183 flag_code
= (enum flag_code
) value
;
2184 if (flag_code
== CODE_64BIT
)
2186 cpu_arch_flags
.bitfield
.cpu64
= 1;
2187 cpu_arch_flags
.bitfield
.cpuno64
= 0;
2191 cpu_arch_flags
.bitfield
.cpu64
= 0;
2192 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2194 if (value
== CODE_64BIT
&& !cpu_arch_flags
.bitfield
.cpulm
)
2197 as_error
= as_fatal
;
2200 (*as_error
) (_("64bit mode not supported on `%s'."),
2201 cpu_arch_name
? cpu_arch_name
: default_arch
);
2203 if (value
== CODE_32BIT
&& !cpu_arch_flags
.bitfield
.cpui386
)
2206 as_error
= as_fatal
;
2209 (*as_error
) (_("32bit mode not supported on `%s'."),
2210 cpu_arch_name
? cpu_arch_name
: default_arch
);
2212 stackop_size
= '\0';
2216 set_code_flag (int value
)
2218 update_code_flag (value
, 0);
2222 set_16bit_gcc_code_flag (int new_code_flag
)
2224 flag_code
= (enum flag_code
) new_code_flag
;
2225 if (flag_code
!= CODE_16BIT
)
2227 cpu_arch_flags
.bitfield
.cpu64
= 0;
2228 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2229 stackop_size
= LONG_MNEM_SUFFIX
;
2233 set_intel_syntax (int syntax_flag
)
2235 /* Find out if register prefixing is specified. */
2236 int ask_naked_reg
= 0;
2239 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2241 char *string
= input_line_pointer
;
2242 int e
= get_symbol_end ();
2244 if (strcmp (string
, "prefix") == 0)
2246 else if (strcmp (string
, "noprefix") == 0)
2249 as_bad (_("bad argument to syntax directive."));
2250 *input_line_pointer
= e
;
2252 demand_empty_rest_of_line ();
2254 intel_syntax
= syntax_flag
;
2256 if (ask_naked_reg
== 0)
2257 allow_naked_reg
= (intel_syntax
2258 && (bfd_get_symbol_leading_char (stdoutput
) != '\0'));
2260 allow_naked_reg
= (ask_naked_reg
< 0);
2262 expr_set_rank (O_full_ptr
, syntax_flag
? 10 : 0);
2264 identifier_chars
['%'] = intel_syntax
&& allow_naked_reg
? '%' : 0;
2265 identifier_chars
['$'] = intel_syntax
? '$' : 0;
2266 register_prefix
= allow_naked_reg
? "" : "%";
2270 set_intel_mnemonic (int mnemonic_flag
)
2272 intel_mnemonic
= mnemonic_flag
;
2276 set_allow_index_reg (int flag
)
2278 allow_index_reg
= flag
;
2282 set_check (int what
)
2284 enum check_kind
*kind
;
2289 kind
= &operand_check
;
2300 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2302 char *string
= input_line_pointer
;
2303 int e
= get_symbol_end ();
2305 if (strcmp (string
, "none") == 0)
2307 else if (strcmp (string
, "warning") == 0)
2308 *kind
= check_warning
;
2309 else if (strcmp (string
, "error") == 0)
2310 *kind
= check_error
;
2312 as_bad (_("bad argument to %s_check directive."), str
);
2313 *input_line_pointer
= e
;
2316 as_bad (_("missing argument for %s_check directive"), str
);
2318 demand_empty_rest_of_line ();
2322 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED
,
2323 i386_cpu_flags new_flag ATTRIBUTE_UNUSED
)
2325 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2326 static const char *arch
;
  /* Intel L1OM and K1OM are only supported on ELF.  */
2334 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2335 use default_arch. */
2336 arch
= cpu_arch_name
;
2338 arch
= default_arch
;
2341 /* If we are targeting Intel L1OM, we must enable it. */
2342 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_L1OM
2343 || new_flag
.bitfield
.cpul1om
)
2346 /* If we are targeting Intel K1OM, we must enable it. */
2347 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_K1OM
2348 || new_flag
.bitfield
.cpuk1om
)
2351 as_bad (_("`%s' is not supported on `%s'"), name
, arch
);
2356 set_cpu_arch (int dummy ATTRIBUTE_UNUSED
)
2360 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2362 char *string
= input_line_pointer
;
2363 int e
= get_symbol_end ();
2365 i386_cpu_flags flags
;
2367 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
2369 if (strcmp (string
, cpu_arch
[j
].name
) == 0)
2371 check_cpu_arch_compatible (string
, cpu_arch
[j
].flags
);
2375 cpu_arch_name
= cpu_arch
[j
].name
;
2376 cpu_sub_arch_name
= NULL
;
2377 cpu_arch_flags
= cpu_arch
[j
].flags
;
2378 if (flag_code
== CODE_64BIT
)
2380 cpu_arch_flags
.bitfield
.cpu64
= 1;
2381 cpu_arch_flags
.bitfield
.cpuno64
= 0;
2385 cpu_arch_flags
.bitfield
.cpu64
= 0;
2386 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2388 cpu_arch_isa
= cpu_arch
[j
].type
;
2389 cpu_arch_isa_flags
= cpu_arch
[j
].flags
;
2390 if (!cpu_arch_tune_set
)
2392 cpu_arch_tune
= cpu_arch_isa
;
2393 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
2398 if (!cpu_arch
[j
].negated
)
2399 flags
= cpu_flags_or (cpu_arch_flags
,
2402 flags
= cpu_flags_and_not (cpu_arch_flags
,
2404 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
2406 if (cpu_sub_arch_name
)
2408 char *name
= cpu_sub_arch_name
;
2409 cpu_sub_arch_name
= concat (name
,
2411 (const char *) NULL
);
2415 cpu_sub_arch_name
= xstrdup (cpu_arch
[j
].name
);
2416 cpu_arch_flags
= flags
;
2417 cpu_arch_isa_flags
= flags
;
2419 *input_line_pointer
= e
;
2420 demand_empty_rest_of_line ();
2424 if (j
>= ARRAY_SIZE (cpu_arch
))
2425 as_bad (_("no such architecture: `%s'"), string
);
2427 *input_line_pointer
= e
;
2430 as_bad (_("missing cpu architecture"));
2432 no_cond_jump_promotion
= 0;
2433 if (*input_line_pointer
== ','
2434 && !is_end_of_line
[(unsigned char) input_line_pointer
[1]])
2436 char *string
= ++input_line_pointer
;
2437 int e
= get_symbol_end ();
2439 if (strcmp (string
, "nojumps") == 0)
2440 no_cond_jump_promotion
= 1;
2441 else if (strcmp (string
, "jumps") == 0)
2444 as_bad (_("no such architecture modifier: `%s'"), string
);
2446 *input_line_pointer
= e
;
2449 demand_empty_rest_of_line ();
2452 enum bfd_architecture
2455 if (cpu_arch_isa
== PROCESSOR_L1OM
)
2457 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2458 || flag_code
!= CODE_64BIT
)
2459 as_fatal (_("Intel L1OM is 64bit ELF only"));
2460 return bfd_arch_l1om
;
2462 else if (cpu_arch_isa
== PROCESSOR_K1OM
)
2464 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2465 || flag_code
!= CODE_64BIT
)
2466 as_fatal (_("Intel K1OM is 64bit ELF only"));
2467 return bfd_arch_k1om
;
2470 return bfd_arch_i386
;
2476 if (!strncmp (default_arch
, "x86_64", 6))
2478 if (cpu_arch_isa
== PROCESSOR_L1OM
)
2480 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2481 || default_arch
[6] != '\0')
2482 as_fatal (_("Intel L1OM is 64bit ELF only"));
2483 return bfd_mach_l1om
;
2485 else if (cpu_arch_isa
== PROCESSOR_K1OM
)
2487 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2488 || default_arch
[6] != '\0')
2489 as_fatal (_("Intel K1OM is 64bit ELF only"));
2490 return bfd_mach_k1om
;
2492 else if (default_arch
[6] == '\0')
2493 return bfd_mach_x86_64
;
2495 return bfd_mach_x64_32
;
2497 else if (!strcmp (default_arch
, "i386"))
2498 return bfd_mach_i386_i386
;
2500 as_fatal (_("unknown architecture"));
2506 const char *hash_err
;
2508 /* Initialize op_hash hash table. */
2509 op_hash
= hash_new ();
2512 const insn_template
*optab
;
2513 templates
*core_optab
;
2515 /* Setup for loop. */
2517 core_optab
= (templates
*) xmalloc (sizeof (templates
));
2518 core_optab
->start
= optab
;
2523 if (optab
->name
== NULL
2524 || strcmp (optab
->name
, (optab
- 1)->name
) != 0)
2526 /* different name --> ship out current template list;
2527 add to hash table; & begin anew. */
2528 core_optab
->end
= optab
;
2529 hash_err
= hash_insert (op_hash
,
2531 (void *) core_optab
);
2534 as_fatal (_("can't hash %s: %s"),
2538 if (optab
->name
== NULL
)
2540 core_optab
= (templates
*) xmalloc (sizeof (templates
));
2541 core_optab
->start
= optab
;
2546 /* Initialize reg_hash hash table. */
2547 reg_hash
= hash_new ();
2549 const reg_entry
*regtab
;
2550 unsigned int regtab_size
= i386_regtab_size
;
2552 for (regtab
= i386_regtab
; regtab_size
--; regtab
++)
2554 hash_err
= hash_insert (reg_hash
, regtab
->reg_name
, (void *) regtab
);
2556 as_fatal (_("can't hash %s: %s"),
2562 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
2567 for (c
= 0; c
< 256; c
++)
2572 mnemonic_chars
[c
] = c
;
2573 register_chars
[c
] = c
;
2574 operand_chars
[c
] = c
;
2576 else if (ISLOWER (c
))
2578 mnemonic_chars
[c
] = c
;
2579 register_chars
[c
] = c
;
2580 operand_chars
[c
] = c
;
2582 else if (ISUPPER (c
))
2584 mnemonic_chars
[c
] = TOLOWER (c
);
2585 register_chars
[c
] = mnemonic_chars
[c
];
2586 operand_chars
[c
] = c
;
2588 else if (c
== '{' || c
== '}')
2589 operand_chars
[c
] = c
;
2591 if (ISALPHA (c
) || ISDIGIT (c
))
2592 identifier_chars
[c
] = c
;
2595 identifier_chars
[c
] = c
;
2596 operand_chars
[c
] = c
;
2601 identifier_chars
['@'] = '@';
2604 identifier_chars
['?'] = '?';
2605 operand_chars
['?'] = '?';
2607 digit_chars
['-'] = '-';
2608 mnemonic_chars
['_'] = '_';
2609 mnemonic_chars
['-'] = '-';
2610 mnemonic_chars
['.'] = '.';
2611 identifier_chars
['_'] = '_';
2612 identifier_chars
['.'] = '.';
2614 for (p
= operand_special_chars
; *p
!= '\0'; p
++)
2615 operand_chars
[(unsigned char) *p
] = *p
;
2618 if (flag_code
== CODE_64BIT
)
2620 #if defined (OBJ_COFF) && defined (TE_PE)
2621 x86_dwarf2_return_column
= (OUTPUT_FLAVOR
== bfd_target_coff_flavour
2624 x86_dwarf2_return_column
= 16;
2626 x86_cie_data_alignment
= -8;
2630 x86_dwarf2_return_column
= 8;
2631 x86_cie_data_alignment
= -4;
2636 i386_print_statistics (FILE *file
)
2638 hash_print_statistics (file
, "i386 opcode", op_hash
);
2639 hash_print_statistics (file
, "i386 register", reg_hash
);
2644 /* Debugging routines for md_assemble. */
2645 static void pte (insn_template
*);
2646 static void pt (i386_operand_type
);
2647 static void pe (expressionS
*);
2648 static void ps (symbolS
*);
2651 pi (char *line
, i386_insn
*x
)
2655 fprintf (stdout
, "%s: template ", line
);
2657 fprintf (stdout
, " address: base %s index %s scale %x\n",
2658 x
->base_reg
? x
->base_reg
->reg_name
: "none",
2659 x
->index_reg
? x
->index_reg
->reg_name
: "none",
2660 x
->log2_scale_factor
);
2661 fprintf (stdout
, " modrm: mode %x reg %x reg/mem %x\n",
2662 x
->rm
.mode
, x
->rm
.reg
, x
->rm
.regmem
);
2663 fprintf (stdout
, " sib: base %x index %x scale %x\n",
2664 x
->sib
.base
, x
->sib
.index
, x
->sib
.scale
);
2665 fprintf (stdout
, " rex: 64bit %x extX %x extY %x extZ %x\n",
2666 (x
->rex
& REX_W
) != 0,
2667 (x
->rex
& REX_R
) != 0,
2668 (x
->rex
& REX_X
) != 0,
2669 (x
->rex
& REX_B
) != 0);
2670 for (j
= 0; j
< x
->operands
; j
++)
2672 fprintf (stdout
, " #%d: ", j
+ 1);
2674 fprintf (stdout
, "\n");
2675 if (x
->types
[j
].bitfield
.reg8
2676 || x
->types
[j
].bitfield
.reg16
2677 || x
->types
[j
].bitfield
.reg32
2678 || x
->types
[j
].bitfield
.reg64
2679 || x
->types
[j
].bitfield
.regmmx
2680 || x
->types
[j
].bitfield
.regxmm
2681 || x
->types
[j
].bitfield
.regymm
2682 || x
->types
[j
].bitfield
.regzmm
2683 || x
->types
[j
].bitfield
.sreg2
2684 || x
->types
[j
].bitfield
.sreg3
2685 || x
->types
[j
].bitfield
.control
2686 || x
->types
[j
].bitfield
.debug
2687 || x
->types
[j
].bitfield
.test
)
2688 fprintf (stdout
, "%s\n", x
->op
[j
].regs
->reg_name
);
2689 if (operand_type_check (x
->types
[j
], imm
))
2691 if (operand_type_check (x
->types
[j
], disp
))
2692 pe (x
->op
[j
].disps
);
2697 pte (insn_template
*t
)
2700 fprintf (stdout
, " %d operands ", t
->operands
);
2701 fprintf (stdout
, "opcode %x ", t
->base_opcode
);
2702 if (t
->extension_opcode
!= None
)
2703 fprintf (stdout
, "ext %x ", t
->extension_opcode
);
2704 if (t
->opcode_modifier
.d
)
2705 fprintf (stdout
, "D");
2706 if (t
->opcode_modifier
.w
)
2707 fprintf (stdout
, "W");
2708 fprintf (stdout
, "\n");
2709 for (j
= 0; j
< t
->operands
; j
++)
2711 fprintf (stdout
, " #%d type ", j
+ 1);
2712 pt (t
->operand_types
[j
]);
2713 fprintf (stdout
, "\n");
2720 fprintf (stdout
, " operation %d\n", e
->X_op
);
2721 fprintf (stdout
, " add_number %ld (%lx)\n",
2722 (long) e
->X_add_number
, (long) e
->X_add_number
);
2723 if (e
->X_add_symbol
)
2725 fprintf (stdout
, " add_symbol ");
2726 ps (e
->X_add_symbol
);
2727 fprintf (stdout
, "\n");
2731 fprintf (stdout
, " op_symbol ");
2732 ps (e
->X_op_symbol
);
2733 fprintf (stdout
, "\n");
2740 fprintf (stdout
, "%s type %s%s",
2742 S_IS_EXTERNAL (s
) ? "EXTERNAL " : "",
2743 segment_name (S_GET_SEGMENT (s
)));
2746 static struct type_name
2748 i386_operand_type mask
;
2751 const type_names
[] =
2753 { OPERAND_TYPE_REG8
, "r8" },
2754 { OPERAND_TYPE_REG16
, "r16" },
2755 { OPERAND_TYPE_REG32
, "r32" },
2756 { OPERAND_TYPE_REG64
, "r64" },
2757 { OPERAND_TYPE_IMM8
, "i8" },
2758 { OPERAND_TYPE_IMM8
, "i8s" },
2759 { OPERAND_TYPE_IMM16
, "i16" },
2760 { OPERAND_TYPE_IMM32
, "i32" },
2761 { OPERAND_TYPE_IMM32S
, "i32s" },
2762 { OPERAND_TYPE_IMM64
, "i64" },
2763 { OPERAND_TYPE_IMM1
, "i1" },
2764 { OPERAND_TYPE_BASEINDEX
, "BaseIndex" },
2765 { OPERAND_TYPE_DISP8
, "d8" },
2766 { OPERAND_TYPE_DISP16
, "d16" },
2767 { OPERAND_TYPE_DISP32
, "d32" },
2768 { OPERAND_TYPE_DISP32S
, "d32s" },
2769 { OPERAND_TYPE_DISP64
, "d64" },
2770 { OPERAND_TYPE_VEC_DISP8
, "Vector d8" },
2771 { OPERAND_TYPE_INOUTPORTREG
, "InOutPortReg" },
2772 { OPERAND_TYPE_SHIFTCOUNT
, "ShiftCount" },
2773 { OPERAND_TYPE_CONTROL
, "control reg" },
2774 { OPERAND_TYPE_TEST
, "test reg" },
2775 { OPERAND_TYPE_DEBUG
, "debug reg" },
2776 { OPERAND_TYPE_FLOATREG
, "FReg" },
2777 { OPERAND_TYPE_FLOATACC
, "FAcc" },
2778 { OPERAND_TYPE_SREG2
, "SReg2" },
2779 { OPERAND_TYPE_SREG3
, "SReg3" },
2780 { OPERAND_TYPE_ACC
, "Acc" },
2781 { OPERAND_TYPE_JUMPABSOLUTE
, "Jump Absolute" },
2782 { OPERAND_TYPE_REGMMX
, "rMMX" },
2783 { OPERAND_TYPE_REGXMM
, "rXMM" },
2784 { OPERAND_TYPE_REGYMM
, "rYMM" },
2785 { OPERAND_TYPE_REGZMM
, "rZMM" },
2786 { OPERAND_TYPE_REGMASK
, "Mask reg" },
2787 { OPERAND_TYPE_ESSEG
, "es" },
2791 pt (i386_operand_type t
)
2794 i386_operand_type a
;
2796 for (j
= 0; j
< ARRAY_SIZE (type_names
); j
++)
2798 a
= operand_type_and (t
, type_names
[j
].mask
);
2799 if (!operand_type_all_zero (&a
))
2800 fprintf (stdout
, "%s, ", type_names
[j
].name
);
2805 #endif /* DEBUG386 */
2807 static bfd_reloc_code_real_type
2808 reloc (unsigned int size
,
2812 bfd_reloc_code_real_type other
)
2814 if (other
!= NO_RELOC
)
2816 reloc_howto_type
*rel
;
2821 case BFD_RELOC_X86_64_GOT32
:
2822 return BFD_RELOC_X86_64_GOT64
;
2824 case BFD_RELOC_X86_64_PLTOFF64
:
2825 return BFD_RELOC_X86_64_PLTOFF64
;
2827 case BFD_RELOC_X86_64_GOTPC32
:
2828 other
= BFD_RELOC_X86_64_GOTPC64
;
2830 case BFD_RELOC_X86_64_GOTPCREL
:
2831 other
= BFD_RELOC_X86_64_GOTPCREL64
;
2833 case BFD_RELOC_X86_64_TPOFF32
:
2834 other
= BFD_RELOC_X86_64_TPOFF64
;
2836 case BFD_RELOC_X86_64_DTPOFF32
:
2837 other
= BFD_RELOC_X86_64_DTPOFF64
;
2843 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2844 if (other
== BFD_RELOC_SIZE32
)
2847 return BFD_RELOC_SIZE64
;
2849 as_bad (_("there are no pc-relative size relocations"));
2853 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
2854 if (size
== 4 && (flag_code
!= CODE_64BIT
|| disallow_64bit_reloc
))
2857 rel
= bfd_reloc_type_lookup (stdoutput
, other
);
2859 as_bad (_("unknown relocation (%u)"), other
);
2860 else if (size
!= bfd_get_reloc_size (rel
))
2861 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
2862 bfd_get_reloc_size (rel
),
2864 else if (pcrel
&& !rel
->pc_relative
)
2865 as_bad (_("non-pc-relative relocation for pc-relative field"));
2866 else if ((rel
->complain_on_overflow
== complain_overflow_signed
2868 || (rel
->complain_on_overflow
== complain_overflow_unsigned
2870 as_bad (_("relocated field and relocation type differ in signedness"));
2879 as_bad (_("there are no unsigned pc-relative relocations"));
	case 1: return BFD_RELOC_8_PCREL;
	case 2: return BFD_RELOC_16_PCREL;
	case 4: return (bnd_prefix && object_64bit
			? BFD_RELOC_X86_64_PC32_BND
			: BFD_RELOC_32_PCREL);
	case 8: return BFD_RELOC_64_PCREL;
	  as_bad (_("cannot do %u byte pc-relative relocation"), size);

      case 4: return BFD_RELOC_X86_64_32S;

      case 1: return BFD_RELOC_8;
      case 2: return BFD_RELOC_16;
      case 4: return BFD_RELOC_32;
      case 8: return BFD_RELOC_64;
	as_bad (_("cannot do %s %u byte relocation"),
		sign > 0 ? "signed" : "unsigned", size);
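
/* Worked example (illustrative only): a 4-byte pc-relative fixup
   normally maps to BFD_RELOC_32_PCREL above; when bnd_prefix is set
   for a 64-bit object it maps to BFD_RELOC_X86_64_PC32_BND instead.
   A signed 4-byte absolute fixup in 64-bit code maps to
   BFD_RELOC_X86_64_32S rather than BFD_RELOC_32.  */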
2913 /* Here we decide which fixups can be adjusted to make them relative to
2914 the beginning of the section instead of the symbol. Basically we need
2915 to make sure that the dynamic relocations are done correctly, so in
2916 some cases we force the original symbol to be used. */
2919 tc_i386_fix_adjustable (fixS
*fixP ATTRIBUTE_UNUSED
)
2921 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2925 /* Don't adjust pc-relative references to merge sections in 64-bit
2927 if (use_rela_relocations
2928 && (S_GET_SEGMENT (fixP
->fx_addsy
)->flags
& SEC_MERGE
) != 0
2932 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
2933 and changed later by validate_fix. */
2934 if (GOT_symbol
&& fixP
->fx_subsy
== GOT_symbol
2935 && fixP
->fx_r_type
== BFD_RELOC_32_PCREL
)
2938 /* Adjust_reloc_syms doesn't know about the GOT. Need to keep symbol
2939 for size relocations. */
2940 if (fixP
->fx_r_type
== BFD_RELOC_SIZE32
2941 || fixP
->fx_r_type
== BFD_RELOC_SIZE64
2942 || fixP
->fx_r_type
== BFD_RELOC_386_GOTOFF
2943 || fixP
->fx_r_type
== BFD_RELOC_386_PLT32
2944 || fixP
->fx_r_type
== BFD_RELOC_386_GOT32
2945 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GD
2946 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LDM
2947 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LDO_32
2948 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_IE_32
2949 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_IE
2950 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GOTIE
2951 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LE_32
2952 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LE
2953 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GOTDESC
2954 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_DESC_CALL
2955 || fixP
->fx_r_type
== BFD_RELOC_X86_64_PLT32
2956 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOT32
2957 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPCREL
2958 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSGD
2959 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSLD
2960 || fixP
->fx_r_type
== BFD_RELOC_X86_64_DTPOFF32
2961 || fixP
->fx_r_type
== BFD_RELOC_X86_64_DTPOFF64
2962 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTTPOFF
2963 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TPOFF32
2964 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TPOFF64
2965 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTOFF64
2966 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPC32_TLSDESC
2967 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSDESC_CALL
2968 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_INHERIT
2969 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
intel_float_operand (const char *mnemonic)
{
  /* Note that the value returned is meaningful only for opcodes with (memory)
     operands, hence the code here is free to improperly handle opcodes that
     have no operands (for better performance and smaller code).  */

  if (mnemonic[0] != 'f')
    return 0; /* non-math */

  switch (mnemonic[1])
      /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
	 the fs segment override prefix not currently handled because no
	 call path can make opcodes without operands get here.  */
      return 2 /* integer op */;

      if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
	return 3; /* fldcw/fldenv */

      if (mnemonic[2] != 'o' /* fnop */)
	return 3; /* non-waiting control op */

      if (mnemonic[2] == 's')
	return 3; /* frstor/frstpm */

      if (mnemonic[2] == 'a')
	return 3; /* fsave */
      if (mnemonic[2] == 't')
	switch (mnemonic[3])
	  case 'c': /* fstcw */
	  case 'd': /* fstdw */
	  case 'e': /* fstenv */
	  case 's': /* fsts[gw] */

      if (mnemonic[2] == 'r' || mnemonic[2] == 's')
	return 0; /* fxsave/fxrstor are not really math ops */
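
/* Illustrative return values (assuming the usual mnemonics reach this
   function): "fadd" falls through to the default and is treated as an
   ordinary FP operation, "fiadd" returns 2 (integer operand), and
   control operations such as "fldcw" or "fnstenv" return 3, which is
   what the Intel-mode operand-size logic keys off.  */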
3028 /* Build the VEX prefix. */
3031 build_vex_prefix (const insn_template
*t
)
3033 unsigned int register_specifier
;
3034 unsigned int implied_prefix
;
3035 unsigned int vector_length
;
3037 /* Check register specifier. */
3038 if (i
.vex
.register_specifier
)
3040 register_specifier
=
3041 ~register_number (i
.vex
.register_specifier
) & 0xf;
3042 gas_assert ((i
.vex
.register_specifier
->reg_flags
& RegVRex
) == 0);
3045 register_specifier
= 0xf;
  /* Use 2-byte VEX prefix by swapping destination and source
3050 && i
.operands
== i
.reg_operands
3051 && i
.tm
.opcode_modifier
.vexopcode
== VEX0F
3052 && i
.tm
.opcode_modifier
.s
3055 unsigned int xchg
= i
.operands
- 1;
3056 union i386_op temp_op
;
3057 i386_operand_type temp_type
;
3059 temp_type
= i
.types
[xchg
];
3060 i
.types
[xchg
] = i
.types
[0];
3061 i
.types
[0] = temp_type
;
3062 temp_op
= i
.op
[xchg
];
3063 i
.op
[xchg
] = i
.op
[0];
3066 gas_assert (i
.rm
.mode
== 3);
3070 i
.rm
.regmem
= i
.rm
.reg
;
3073 /* Use the next insn. */
3077 if (i
.tm
.opcode_modifier
.vex
== VEXScalar
)
3078 vector_length
= avxscalar
;
3080 vector_length
= i
.tm
.opcode_modifier
.vex
== VEX256
? 1 : 0;
3082 switch ((i
.tm
.base_opcode
>> 8) & 0xff)
3087 case DATA_PREFIX_OPCODE
:
3090 case REPE_PREFIX_OPCODE
:
3093 case REPNE_PREFIX_OPCODE
:
  /* Use 2-byte VEX prefix if possible.  */
  if (i.tm.opcode_modifier.vexopcode == VEX0F
      && i.tm.opcode_modifier.vexw != VEXW1
      && (i.rex & (REX_W | REX_X | REX_B)) == 0)
    {
      /* 2-byte VEX prefix.  */
      i.vex.bytes[0] = 0xc5;

      /* Check the REX.R bit.  */
      r = (i.rex & REX_R) ? 0 : 1;
      i.vex.bytes[1] = (r << 7
			| register_specifier << 3
			| vector_length << 2
			| implied_prefix);
    }
3120 /* 3-byte VEX prefix. */
3125 switch (i
.tm
.opcode_modifier
.vexopcode
)
3129 i
.vex
.bytes
[0] = 0xc4;
3133 i
.vex
.bytes
[0] = 0xc4;
3137 i
.vex
.bytes
[0] = 0xc4;
3141 i
.vex
.bytes
[0] = 0x8f;
3145 i
.vex
.bytes
[0] = 0x8f;
3149 i
.vex
.bytes
[0] = 0x8f;
      /* The high 3 bits of the second VEX byte are 1's complement
	 of RXB bits from REX.  */
      i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;

      /* Check the REX.W bit.  */
      w = (i.rex & REX_W) ? 1 : 0;
      if (i.tm.opcode_modifier.vexw)
	if (i.tm.opcode_modifier.vexw == VEXW1)
	  w = 1;

      i.vex.bytes[2] = (w << 7
			| register_specifier << 3
			| vector_length << 2
			| implied_prefix);
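
/* Illustrative encoding (not taken from this file): for AT&T
   "vaddps %xmm2, %xmm1, %xmm0" the 2-byte form built above is
   0xc5 0xf0: R is 1 (no REX.R), vvvv is ~1 & 0xf = 0b1110 for %xmm1,
   L is 0 for a 128-bit operation and pp is 00 (no implied prefix),
   giving 1 1110 0 00 = 0xf0, followed by the 0x58 opcode and ModRM.  */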
3177 /* Build the EVEX prefix. */
3180 build_evex_prefix (void)
3182 unsigned int register_specifier
;
3183 unsigned int implied_prefix
;
3185 rex_byte vrex_used
= 0;
3187 /* Check register specifier. */
3188 if (i
.vex
.register_specifier
)
3190 gas_assert ((i
.vrex
& REX_X
) == 0);
3192 register_specifier
= i
.vex
.register_specifier
->reg_num
;
3193 if ((i
.vex
.register_specifier
->reg_flags
& RegRex
))
3194 register_specifier
+= 8;
3195 /* The upper 16 registers are encoded in the fourth byte of the
3197 if (!(i
.vex
.register_specifier
->reg_flags
& RegVRex
))
3198 i
.vex
.bytes
[3] = 0x8;
3199 register_specifier
= ~register_specifier
& 0xf;
3203 register_specifier
= 0xf;
3205 /* Encode upper 16 vector index register in the fourth byte of
3207 if (!(i
.vrex
& REX_X
))
3208 i
.vex
.bytes
[3] = 0x8;
3213 switch ((i
.tm
.base_opcode
>> 8) & 0xff)
3218 case DATA_PREFIX_OPCODE
:
3221 case REPE_PREFIX_OPCODE
:
3224 case REPNE_PREFIX_OPCODE
:
3231 /* 4 byte EVEX prefix. */
3233 i
.vex
.bytes
[0] = 0x62;
3236 switch (i
.tm
.opcode_modifier
.vexopcode
)
  /* The high 3 bits of the second EVEX byte are 1's complement of RXB
3254 i
.vex
.bytes
[1] = (~i
.rex
& 0x7) << 5 | m
;
  /* The fifth bit of the second EVEX byte is 1's complement of the
     REX_R bit in VREX.  */
3258 if (!(i
.vrex
& REX_R
))
3259 i
.vex
.bytes
[1] |= 0x10;
3263 if ((i
.reg_operands
+ i
.imm_operands
) == i
.operands
)
      /* When all operands are registers, the REX_X bit in REX is not
	 used.  We reuse it to encode the upper 16 registers, which is
	 indicated by the REX_B bit in VREX.  The REX_X bit is encoded
	 as 1's complement.  */
3269 if ((i
.vrex
& REX_B
))
3272 i
.vex
.bytes
[1] &= ~0x40;
3276 /* EVEX instructions shouldn't need the REX prefix. */
3277 i
.vrex
&= ~vrex_used
;
3278 gas_assert (i
.vrex
== 0);
3280 /* Check the REX.W bit. */
3281 w
= (i
.rex
& REX_W
) ? 1 : 0;
3282 if (i
.tm
.opcode_modifier
.vexw
)
3284 if (i
.tm
.opcode_modifier
.vexw
== VEXW1
)
3287 /* If w is not set it means we are dealing with WIG instruction. */
3290 if (evexwig
== evexw1
)
3294 /* Encode the U bit. */
3295 implied_prefix
|= 0x4;
3297 /* The third byte of the EVEX prefix. */
3298 i
.vex
.bytes
[2] = (w
<< 7 | register_specifier
<< 3 | implied_prefix
);
3300 /* The fourth byte of the EVEX prefix. */
3301 /* The zeroing-masking bit. */
3302 if (i
.mask
&& i
.mask
->zeroing
)
3303 i
.vex
.bytes
[3] |= 0x80;
3305 /* Don't always set the broadcast bit if there is no RC. */
3308 /* Encode the vector length. */
3309 unsigned int vec_length
;
3311 switch (i
.tm
.opcode_modifier
.evex
)
3313 case EVEXLIG
: /* LL' is ignored */
3314 vec_length
= evexlig
<< 5;
3317 vec_length
= 0 << 5;
3320 vec_length
= 1 << 5;
3323 vec_length
= 2 << 5;
3329 i
.vex
.bytes
[3] |= vec_length
;
3330 /* Encode the broadcast bit. */
3332 i
.vex
.bytes
[3] |= 0x10;
3336 if (i
.rounding
->type
!= saeonly
)
3337 i
.vex
.bytes
[3] |= 0x10 | (i
.rounding
->type
<< 5);
3339 i
.vex
.bytes
[3] |= 0x10;
3342 if (i
.mask
&& i
.mask
->mask
)
3343 i
.vex
.bytes
[3] |= i
.mask
->mask
->reg_num
;
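
/* Sketch of the resulting layout (illustrative, not from the original
   comments): the four EVEX bytes built above are 0x62, then
   [R X B R' 0 0 m m], then [W vvvv 1 pp], then [z L'L b V' aaa],
   where the R/X/B and R' bits are stored inverted, vvvv/V' select the
   non-destructive source register, L'L is the vector length, b the
   broadcast/rounding bit and aaa the opmask register from i.mask.  */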
3347 process_immext (void)
3351 if ((i
.tm
.cpu_flags
.bitfield
.cpusse3
|| i
.tm
.cpu_flags
.bitfield
.cpusvme
)
3354 /* MONITOR/MWAIT as well as SVME instructions have fixed operands
3355 with an opcode suffix which is coded in the same place as an
3356 8-bit immediate field would be.
3357 Here we check those operands and remove them afterwards. */
3360 for (x
= 0; x
< i
.operands
; x
++)
3361 if (register_number (i
.op
[x
].regs
) != x
)
3362 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
3363 register_prefix
, i
.op
[x
].regs
->reg_name
, x
+ 1,
3369 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
3370 which is coded in the same place as an 8-bit immediate field
3371 would be. Here we fake an 8-bit immediate operand from the
3372 opcode suffix stored in tm.extension_opcode.
3374 AVX instructions also use this encoding, for some of
3375 3 argument instructions. */
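  /* Illustrative example (not from the original comment): the 3DNow!
     instruction "pfadd %mm1, %mm0" is encoded as 0f 0f c1 9e; the
     trailing 0x9e is the opcode-suffix byte taken from
     tm.extension_opcode and emitted where an 8-bit immediate would go
     (assuming the usual 3DNow! suffix value for pfadd).  */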
3377 gas_assert (i
.imm_operands
<= 1
3379 || ((i
.tm
.opcode_modifier
.vex
3380 || i
.tm
.opcode_modifier
.evex
)
3381 && i
.operands
<= 4)));
3383 exp
= &im_expressions
[i
.imm_operands
++];
3384 i
.op
[i
.operands
].imms
= exp
;
3385 i
.types
[i
.operands
] = imm8
;
3387 exp
->X_op
= O_constant
;
3388 exp
->X_add_number
= i
.tm
.extension_opcode
;
3389 i
.tm
.extension_opcode
= None
;
3396 switch (i
.tm
.opcode_modifier
.hleprefixok
)
3401 as_bad (_("invalid instruction `%s' after `%s'"),
3402 i
.tm
.name
, i
.hle_prefix
);
3405 if (i
.prefix
[LOCK_PREFIX
])
3407 as_bad (_("missing `lock' with `%s'"), i
.hle_prefix
);
3411 case HLEPrefixRelease
:
3412 if (i
.prefix
[HLE_PREFIX
] != XRELEASE_PREFIX_OPCODE
)
3414 as_bad (_("instruction `%s' after `xacquire' not allowed"),
      if (i.mem_operands == 0
	  || !operand_type_check (i.types[i.operands - 1], anymem))
	as_bad (_("memory destination needed for instruction `%s'"
		  " after `xrelease'"), i.tm.name);
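
/* Illustrative examples (assuming these mnemonics reach check_hle):
   "xacquire lock incl (%rax)" passes the checks above because the
   lock prefix is present, while using "xrelease" on an instruction
   whose destination is not a memory operand is rejected.  */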
3429 /* This is the guts of the machine-dependent assembler. LINE points to a
3430 machine dependent instruction. This function is supposed to emit
3431 the frags/bytes it assembles to. */
3434 md_assemble (char *line
)
3437 char mnemonic
[MAX_MNEM_SIZE
];
3438 const insn_template
*t
;
3440 /* Initialize globals. */
3441 memset (&i
, '\0', sizeof (i
));
3442 for (j
= 0; j
< MAX_OPERANDS
; j
++)
3443 i
.reloc
[j
] = NO_RELOC
;
3444 memset (disp_expressions
, '\0', sizeof (disp_expressions
));
3445 memset (im_expressions
, '\0', sizeof (im_expressions
));
3446 save_stack_p
= save_stack
;
3448 /* First parse an instruction mnemonic & call i386_operand for the operands.
3449 We assume that the scrubber has arranged it so that line[0] is the valid
3450 start of a (possibly prefixed) mnemonic. */
3452 line
= parse_insn (line
, mnemonic
);
3456 line
= parse_operands (line
, mnemonic
);
3461 /* Now we've parsed the mnemonic into a set of templates, and have the
3462 operands at hand. */
3464 /* All intel opcodes have reversed operands except for "bound" and
3465 "enter". We also don't reverse intersegment "jmp" and "call"
3466 instructions with 2 immediate operands so that the immediate segment
3467 precedes the offset, as it does when in AT&T mode. */
3470 && (strcmp (mnemonic
, "bound") != 0)
3471 && (strcmp (mnemonic
, "invlpga") != 0)
3472 && !(operand_type_check (i
.types
[0], imm
)
3473 && operand_type_check (i
.types
[1], imm
)))
3476 /* The order of the immediates should be reversed
3477 for 2 immediates extrq and insertq instructions */
3478 if (i
.imm_operands
== 2
3479 && (strcmp (mnemonic
, "extrq") == 0
3480 || strcmp (mnemonic
, "insertq") == 0))
3481 swap_2_operands (0, 1);
3486 /* Don't optimize displacement for movabs since it only takes 64bit
3489 && i
.disp_encoding
!= disp_encoding_32bit
3490 && (flag_code
!= CODE_64BIT
3491 || strcmp (mnemonic
, "movabs") != 0))
3494 /* Next, we find a template that matches the given insn,
3495 making sure the overlap of the given operands types is consistent
3496 with the template operand types. */
3498 if (!(t
= match_template ()))
3501 if (sse_check
!= check_none
3502 && !i
.tm
.opcode_modifier
.noavx
3503 && (i
.tm
.cpu_flags
.bitfield
.cpusse
3504 || i
.tm
.cpu_flags
.bitfield
.cpusse2
3505 || i
.tm
.cpu_flags
.bitfield
.cpusse3
3506 || i
.tm
.cpu_flags
.bitfield
.cpussse3
3507 || i
.tm
.cpu_flags
.bitfield
.cpusse4_1
3508 || i
.tm
.cpu_flags
.bitfield
.cpusse4_2
))
3510 (sse_check
== check_warning
3512 : as_bad
) (_("SSE instruction `%s' is used"), i
.tm
.name
);
3515 /* Zap movzx and movsx suffix. The suffix has been set from
3516 "word ptr" or "byte ptr" on the source operand in Intel syntax
3517 or extracted from mnemonic in AT&T syntax. But we'll use
3518 the destination register to choose the suffix for encoding. */
3519 if ((i
.tm
.base_opcode
& ~9) == 0x0fb6)
3521 /* In Intel syntax, there must be a suffix. In AT&T syntax, if
3522 there is no suffix, the default will be byte extension. */
3523 if (i
.reg_operands
!= 2
3526 as_bad (_("ambiguous operand size for `%s'"), i
.tm
.name
);
3531 if (i
.tm
.opcode_modifier
.fwait
)
3532 if (!add_prefix (FWAIT_OPCODE
))
3535 /* Check if REP prefix is OK. */
3536 if (i
.rep_prefix
&& !i
.tm
.opcode_modifier
.repprefixok
)
3538 as_bad (_("invalid instruction `%s' after `%s'"),
3539 i
.tm
.name
, i
.rep_prefix
);
3543 /* Check for lock without a lockable instruction. Destination operand
3544 must be memory unless it is xchg (0x86). */
3545 if (i
.prefix
[LOCK_PREFIX
]
3546 && (!i
.tm
.opcode_modifier
.islockable
3547 || i
.mem_operands
== 0
3548 || (i
.tm
.base_opcode
!= 0x86
3549 && !operand_type_check (i
.types
[i
.operands
- 1], anymem
))))
3551 as_bad (_("expecting lockable instruction after `lock'"));
3555 /* Check if HLE prefix is OK. */
3556 if (i
.hle_prefix
&& !check_hle ())
3559 /* Check BND prefix. */
3560 if (i
.bnd_prefix
&& !i
.tm
.opcode_modifier
.bndprefixok
)
3561 as_bad (_("expecting valid branch instruction after `bnd'"));
3563 if (i
.tm
.cpu_flags
.bitfield
.cpumpx
3564 && flag_code
== CODE_64BIT
3565 && i
.prefix
[ADDR_PREFIX
])
3566 as_bad (_("32-bit address isn't allowed in 64-bit MPX instructions."));
3568 /* Insert BND prefix. */
3570 && i
.tm
.opcode_modifier
.bndprefixok
3571 && !i
.prefix
[BND_PREFIX
])
3572 add_prefix (BND_PREFIX_OPCODE
);
3574 /* Check string instruction segment overrides. */
3575 if (i
.tm
.opcode_modifier
.isstring
&& i
.mem_operands
!= 0)
3577 if (!check_string ())
3579 i
.disp_operands
= 0;
3582 if (!process_suffix ())
3585 /* Update operand types. */
3586 for (j
= 0; j
< i
.operands
; j
++)
3587 i
.types
[j
] = operand_type_and (i
.types
[j
], i
.tm
.operand_types
[j
]);
3589 /* Make still unresolved immediate matches conform to size of immediate
3590 given in i.suffix. */
3591 if (!finalize_imm ())
3594 if (i
.types
[0].bitfield
.imm1
)
3595 i
.imm_operands
= 0; /* kludge for shift insns. */
3597 /* We only need to check those implicit registers for instructions
3598 with 3 operands or less. */
3599 if (i
.operands
<= 3)
3600 for (j
= 0; j
< i
.operands
; j
++)
3601 if (i
.types
[j
].bitfield
.inoutportreg
3602 || i
.types
[j
].bitfield
.shiftcount
3603 || i
.types
[j
].bitfield
.acc
3604 || i
.types
[j
].bitfield
.floatacc
)
3607 /* ImmExt should be processed after SSE2AVX. */
3608 if (!i
.tm
.opcode_modifier
.sse2avx
3609 && i
.tm
.opcode_modifier
.immext
)
3612 /* For insns with operands there are more diddles to do to the opcode. */
3615 if (!process_operands ())
3618 else if (!quiet_warnings
&& i
.tm
.opcode_modifier
.ugh
)
3620 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
3621 as_warn (_("translating to `%sp'"), i
.tm
.name
);
3624 if (i
.tm
.opcode_modifier
.vex
)
3625 build_vex_prefix (t
);
3627 if (i
.tm
.opcode_modifier
.evex
)
3628 build_evex_prefix ();
3630 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
3631 instructions may define INT_OPCODE as well, so avoid this corner
3632 case for those instructions that use MODRM. */
3633 if (i
.tm
.base_opcode
== INT_OPCODE
3634 && !i
.tm
.opcode_modifier
.modrm
3635 && i
.op
[0].imms
->X_add_number
== 3)
3637 i
.tm
.base_opcode
= INT3_OPCODE
;
3641 if ((i
.tm
.opcode_modifier
.jump
3642 || i
.tm
.opcode_modifier
.jumpbyte
3643 || i
.tm
.opcode_modifier
.jumpdword
)
3644 && i
.op
[0].disps
->X_op
== O_constant
)
3646 /* Convert "jmp constant" (and "call constant") to a jump (call) to
3647 the absolute address given by the constant. Since ix86 jumps and
3648 calls are pc relative, we need to generate a reloc. */
3649 i
.op
[0].disps
->X_add_symbol
= &abs_symbol
;
3650 i
.op
[0].disps
->X_op
= O_symbol
;
3653 if (i
.tm
.opcode_modifier
.rex64
)
3656 /* For 8 bit registers we need an empty rex prefix. Also if the
3657 instruction already has a prefix, we need to convert old
3658 registers to new ones. */
3660 if ((i
.types
[0].bitfield
.reg8
3661 && (i
.op
[0].regs
->reg_flags
& RegRex64
) != 0)
3662 || (i
.types
[1].bitfield
.reg8
3663 && (i
.op
[1].regs
->reg_flags
& RegRex64
) != 0)
3664 || ((i
.types
[0].bitfield
.reg8
3665 || i
.types
[1].bitfield
.reg8
)
3670 i
.rex
|= REX_OPCODE
;
3671 for (x
= 0; x
< 2; x
++)
3673 /* Look for 8 bit operand that uses old registers. */
3674 if (i
.types
[x
].bitfield
.reg8
3675 && (i
.op
[x
].regs
->reg_flags
& RegRex64
) == 0)
3677 /* In case it is "hi" register, give up. */
3678 if (i
.op
[x
].regs
->reg_num
> 3)
3679 as_bad (_("can't encode register '%s%s' in an "
3680 "instruction requiring REX prefix."),
3681 register_prefix
, i
.op
[x
].regs
->reg_name
);
3683 /* Otherwise it is equivalent to the extended register.
3684 Since the encoding doesn't change this is merely
3685 cosmetic cleanup for debug output. */
3687 i
.op
[x
].regs
= i
.op
[x
].regs
+ 8;
3693 add_prefix (REX_OPCODE
| i
.rex
);
3695 /* We are ready to output the insn. */
3700 parse_insn (char *line
, char *mnemonic
)
3703 char *token_start
= l
;
3706 const insn_template
*t
;
3712 while ((*mnem_p
= mnemonic_chars
[(unsigned char) *l
]) != 0)
3717 if (mnem_p
>= mnemonic
+ MAX_MNEM_SIZE
)
3719 as_bad (_("no such instruction: `%s'"), token_start
);
3724 if (!is_space_char (*l
)
3725 && *l
!= END_OF_INSN
3727 || (*l
!= PREFIX_SEPARATOR
3730 as_bad (_("invalid character %s in mnemonic"),
3731 output_invalid (*l
));
3734 if (token_start
== l
)
3736 if (!intel_syntax
&& *l
== PREFIX_SEPARATOR
)
3737 as_bad (_("expecting prefix; got nothing"));
3739 as_bad (_("expecting mnemonic; got nothing"));
3743 /* Look up instruction (or prefix) via hash table. */
3744 current_templates
= (const templates
*) hash_find (op_hash
, mnemonic
);
3746 if (*l
!= END_OF_INSN
3747 && (!is_space_char (*l
) || l
[1] != END_OF_INSN
)
3748 && current_templates
3749 && current_templates
->start
->opcode_modifier
.isprefix
)
3751 if (!cpu_flags_check_cpu64 (current_templates
->start
->cpu_flags
))
3753 as_bad ((flag_code
!= CODE_64BIT
3754 ? _("`%s' is only supported in 64-bit mode")
3755 : _("`%s' is not supported in 64-bit mode")),
3756 current_templates
->start
->name
);
3759 /* If we are in 16-bit mode, do not allow addr16 or data16.
3760 Similarly, in 32-bit mode, do not allow addr32 or data32. */
3761 if ((current_templates
->start
->opcode_modifier
.size16
3762 || current_templates
->start
->opcode_modifier
.size32
)
3763 && flag_code
!= CODE_64BIT
3764 && (current_templates
->start
->opcode_modifier
.size32
3765 ^ (flag_code
== CODE_16BIT
)))
3767 as_bad (_("redundant %s prefix"),
3768 current_templates
->start
->name
);
3771 /* Add prefix, checking for repeated prefixes. */
3772 switch (add_prefix (current_templates
->start
->base_opcode
))
3777 if (current_templates
->start
->cpu_flags
.bitfield
.cpuhle
)
3778 i
.hle_prefix
= current_templates
->start
->name
;
3779 else if (current_templates
->start
->cpu_flags
.bitfield
.cpumpx
)
3780 i
.bnd_prefix
= current_templates
->start
->name
;
3782 i
.rep_prefix
= current_templates
->start
->name
;
3787 /* Skip past PREFIX_SEPARATOR and reset token_start. */
3794 if (!current_templates
)
3796 /* Check if we should swap operand or force 32bit displacement in
3798 if (mnem_p
- 2 == dot_p
&& dot_p
[1] == 's')
3800 else if (mnem_p
- 3 == dot_p
3803 i
.disp_encoding
= disp_encoding_8bit
;
3804 else if (mnem_p
- 4 == dot_p
3808 i
.disp_encoding
= disp_encoding_32bit
;
3813 current_templates
= (const templates
*) hash_find (op_hash
, mnemonic
);
3816 if (!current_templates
)
3819 /* See if we can get a match by trimming off a suffix. */
3822 case WORD_MNEM_SUFFIX
:
3823 if (intel_syntax
&& (intel_float_operand (mnemonic
) & 2))
3824 i
.suffix
= SHORT_MNEM_SUFFIX
;
3826 case BYTE_MNEM_SUFFIX
:
3827 case QWORD_MNEM_SUFFIX
:
3828 i
.suffix
= mnem_p
[-1];
3830 current_templates
= (const templates
*) hash_find (op_hash
,
3833 case SHORT_MNEM_SUFFIX
:
3834 case LONG_MNEM_SUFFIX
:
3837 i
.suffix
= mnem_p
[-1];
3839 current_templates
= (const templates
*) hash_find (op_hash
,
3848 if (intel_float_operand (mnemonic
) == 1)
3849 i
.suffix
= SHORT_MNEM_SUFFIX
;
3851 i
.suffix
= LONG_MNEM_SUFFIX
;
3853 current_templates
= (const templates
*) hash_find (op_hash
,
3858 if (!current_templates
)
3860 as_bad (_("no such instruction: `%s'"), token_start
);
3865 if (current_templates
->start
->opcode_modifier
.jump
3866 || current_templates
->start
->opcode_modifier
.jumpbyte
)
3868 /* Check for a branch hint. We allow ",pt" and ",pn" for
3869 predict taken and predict not taken respectively.
3870 I'm not sure that branch hints actually do anything on loop
3871 and jcxz insns (JumpByte) for current Pentium4 chips. They
3872 may work in the future and it doesn't hurt to accept them
      if (l[0] == ',' && l[1] == 'p')
	  if (!add_prefix (DS_PREFIX_OPCODE))
	else if (l[2] == 'n')
	    if (!add_prefix (CS_PREFIX_OPCODE))

      /* Any other comma loses.  */
	as_bad (_("invalid character %s in mnemonic"),
		output_invalid (*l));
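
/* Illustrative effect (not from the original source): "jne ,pt" makes
   add_prefix emit the 0x3e (%ds) segment-override byte as a
   predict-taken hint, and "jne ,pn" emits 0x2e (%cs) for predict not
   taken, matching the DS/CS prefixes added just above.  */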
3898 /* Check if instruction is supported on specified architecture. */
3900 for (t
= current_templates
->start
; t
< current_templates
->end
; ++t
)
3902 supported
|= cpu_flags_match (t
);
3903 if (supported
== CPU_FLAGS_PERFECT_MATCH
)
3907 if (!(supported
& CPU_FLAGS_64BIT_MATCH
))
3909 as_bad (flag_code
== CODE_64BIT
3910 ? _("`%s' is not supported in 64-bit mode")
3911 : _("`%s' is only supported in 64-bit mode"),
3912 current_templates
->start
->name
);
3915 if (supported
!= CPU_FLAGS_PERFECT_MATCH
)
3917 as_bad (_("`%s' is not supported on `%s%s'"),
3918 current_templates
->start
->name
,
3919 cpu_arch_name
? cpu_arch_name
: default_arch
,
3920 cpu_sub_arch_name
? cpu_sub_arch_name
: "");
3925 if (!cpu_arch_flags
.bitfield
.cpui386
3926 && (flag_code
!= CODE_16BIT
))
3928 as_warn (_("use .code16 to ensure correct addressing mode"));
3935 parse_operands (char *l
, const char *mnemonic
)
3939 /* 1 if operand is pending after ','. */
3940 unsigned int expecting_operand
= 0;
3942 /* Non-zero if operand parens not balanced. */
3943 unsigned int paren_not_balanced
;
3945 while (*l
!= END_OF_INSN
)
3947 /* Skip optional white space before operand. */
3948 if (is_space_char (*l
))
3950 if (!is_operand_char (*l
) && *l
!= END_OF_INSN
)
3952 as_bad (_("invalid character %s before operand %d"),
3953 output_invalid (*l
),
3957 token_start
= l
; /* after white space */
3958 paren_not_balanced
= 0;
3959 while (paren_not_balanced
|| *l
!= ',')
3961 if (*l
== END_OF_INSN
)
3963 if (paren_not_balanced
)
3966 as_bad (_("unbalanced parenthesis in operand %d."),
3969 as_bad (_("unbalanced brackets in operand %d."),
3974 break; /* we are done */
3976 else if (!is_operand_char (*l
) && !is_space_char (*l
))
3978 as_bad (_("invalid character %s in operand %d"),
3979 output_invalid (*l
),
3986 ++paren_not_balanced
;
3988 --paren_not_balanced
;
3993 ++paren_not_balanced
;
3995 --paren_not_balanced
;
3999 if (l
!= token_start
)
4000 { /* Yes, we've read in another operand. */
4001 unsigned int operand_ok
;
4002 this_operand
= i
.operands
++;
4003 i
.types
[this_operand
].bitfield
.unspecified
= 1;
4004 if (i
.operands
> MAX_OPERANDS
)
4006 as_bad (_("spurious operands; (%d operands/instruction max)"),
4010 /* Now parse operand adding info to 'i' as we go along. */
4011 END_STRING_AND_SAVE (l
);
4015 i386_intel_operand (token_start
,
4016 intel_float_operand (mnemonic
));
4018 operand_ok
= i386_att_operand (token_start
);
4020 RESTORE_END_STRING (l
);
4026 if (expecting_operand
)
4028 expecting_operand_after_comma
:
4029 as_bad (_("expecting operand after ','; got nothing"));
4034 as_bad (_("expecting operand before ','; got nothing"));
4039 /* Now *l must be either ',' or END_OF_INSN. */
4042 if (*++l
== END_OF_INSN
)
4044 /* Just skip it, if it's \n complain. */
4045 goto expecting_operand_after_comma
;
4047 expecting_operand
= 1;
swap_2_operands (int xchg1, int xchg2)
{
  union i386_op temp_op;
  i386_operand_type temp_type;
  enum bfd_reloc_code_real temp_reloc;

  temp_type = i.types[xchg2];
  i.types[xchg2] = i.types[xchg1];
  i.types[xchg1] = temp_type;
  temp_op = i.op[xchg2];
  i.op[xchg2] = i.op[xchg1];
  i.op[xchg1] = temp_op;
  temp_reloc = i.reloc[xchg2];
  i.reloc[xchg2] = i.reloc[xchg1];
  i.reloc[xchg1] = temp_reloc;

      if (i.mask->operand == xchg1)
	i.mask->operand = xchg2;
      else if (i.mask->operand == xchg2)
	i.mask->operand = xchg1;

      if (i.broadcast->operand == xchg1)
	i.broadcast->operand = xchg2;
      else if (i.broadcast->operand == xchg2)
	i.broadcast->operand = xchg1;

      if (i.rounding->operand == xchg1)
	i.rounding->operand = xchg2;
      else if (i.rounding->operand == xchg2)
	i.rounding->operand = xchg1;
swap_operands (void)
{
      swap_2_operands (1, i.operands - 2);
      swap_2_operands (0, i.operands - 1);

  if (i.mem_operands == 2)
    {
      const seg_entry *temp_seg;
      temp_seg = i.seg[0];
      i.seg[0] = i.seg[1];
      i.seg[1] = temp_seg;
    }
}
4118 /* Try to ensure constant immediates are represented in the smallest
4123 char guess_suffix
= 0;
4127 guess_suffix
= i
.suffix
;
4128 else if (i
.reg_operands
)
4130 /* Figure out a suffix from the last register operand specified.
4131 We can't do this properly yet, ie. excluding InOutPortReg,
4132 but the following works for instructions with immediates.
4133 In any case, we can't set i.suffix yet. */
4134 for (op
= i
.operands
; --op
>= 0;)
4135 if (i
.types
[op
].bitfield
.reg8
)
4137 guess_suffix
= BYTE_MNEM_SUFFIX
;
4140 else if (i
.types
[op
].bitfield
.reg16
)
4142 guess_suffix
= WORD_MNEM_SUFFIX
;
4145 else if (i
.types
[op
].bitfield
.reg32
)
4147 guess_suffix
= LONG_MNEM_SUFFIX
;
4150 else if (i
.types
[op
].bitfield
.reg64
)
4152 guess_suffix
= QWORD_MNEM_SUFFIX
;
4156 else if ((flag_code
== CODE_16BIT
) ^ (i
.prefix
[DATA_PREFIX
] != 0))
4157 guess_suffix
= WORD_MNEM_SUFFIX
;
4159 for (op
= i
.operands
; --op
>= 0;)
4160 if (operand_type_check (i
.types
[op
], imm
))
4162 switch (i
.op
[op
].imms
->X_op
)
4165 /* If a suffix is given, this operand may be shortened. */
4166 switch (guess_suffix
)
4168 case LONG_MNEM_SUFFIX
:
4169 i
.types
[op
].bitfield
.imm32
= 1;
4170 i
.types
[op
].bitfield
.imm64
= 1;
4172 case WORD_MNEM_SUFFIX
:
4173 i
.types
[op
].bitfield
.imm16
= 1;
4174 i
.types
[op
].bitfield
.imm32
= 1;
4175 i
.types
[op
].bitfield
.imm32s
= 1;
4176 i
.types
[op
].bitfield
.imm64
= 1;
4178 case BYTE_MNEM_SUFFIX
:
4179 i
.types
[op
].bitfield
.imm8
= 1;
4180 i
.types
[op
].bitfield
.imm8s
= 1;
4181 i
.types
[op
].bitfield
.imm16
= 1;
4182 i
.types
[op
].bitfield
.imm32
= 1;
4183 i
.types
[op
].bitfield
.imm32s
= 1;
4184 i
.types
[op
].bitfield
.imm64
= 1;
	      /* If this operand is at most 16 bits, convert it
		 to a signed 16 bit number before trying to see
		 whether it will fit in an even smaller size.
		 This allows a 16-bit operand such as $0xffe0 to
		 be recognised as within Imm8S range.  */
	      if ((i.types[op].bitfield.imm16)
		  && (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
		  i.op[op].imms->X_add_number =
		    (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
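
	      /* Worked example (illustrative): for "addw $0xffe0, %ax"
		 the immediate has no bits above bit 15, so the line
		 above computes ((0xffe0 ^ 0x8000) - 0x8000)
		 = 0x7fe0 - 0x8000 = -32, which then matches Imm8S and
		 allows the short sign-extended immediate form.  */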
4199 if ((i
.types
[op
].bitfield
.imm32
)
4200 && ((i
.op
[op
].imms
->X_add_number
& ~(((offsetT
) 2 << 31) - 1))
4203 i
.op
[op
].imms
->X_add_number
= ((i
.op
[op
].imms
->X_add_number
4204 ^ ((offsetT
) 1 << 31))
4205 - ((offsetT
) 1 << 31));
4208 = operand_type_or (i
.types
[op
],
4209 smallest_imm_type (i
.op
[op
].imms
->X_add_number
));
4211 /* We must avoid matching of Imm32 templates when 64bit
4212 only immediate is available. */
4213 if (guess_suffix
== QWORD_MNEM_SUFFIX
)
4214 i
.types
[op
].bitfield
.imm32
= 0;
4221 /* Symbols and expressions. */
4223 /* Convert symbolic operand to proper sizes for matching, but don't
4224 prevent matching a set of insns that only supports sizes other
4225 than those matching the insn suffix. */
4227 i386_operand_type mask
, allowed
;
4228 const insn_template
*t
;
4230 operand_type_set (&mask
, 0);
4231 operand_type_set (&allowed
, 0);
4233 for (t
= current_templates
->start
;
4234 t
< current_templates
->end
;
4236 allowed
= operand_type_or (allowed
,
4237 t
->operand_types
[op
]);
4238 switch (guess_suffix
)
4240 case QWORD_MNEM_SUFFIX
:
4241 mask
.bitfield
.imm64
= 1;
4242 mask
.bitfield
.imm32s
= 1;
4244 case LONG_MNEM_SUFFIX
:
4245 mask
.bitfield
.imm32
= 1;
4247 case WORD_MNEM_SUFFIX
:
4248 mask
.bitfield
.imm16
= 1;
4250 case BYTE_MNEM_SUFFIX
:
4251 mask
.bitfield
.imm8
= 1;
4256 allowed
= operand_type_and (mask
, allowed
);
4257 if (!operand_type_all_zero (&allowed
))
4258 i
.types
[op
] = operand_type_and (i
.types
[op
], mask
);
4265 /* Try to use the smallest displacement type too. */
4267 optimize_disp (void)
4271 for (op
= i
.operands
; --op
>= 0;)
4272 if (operand_type_check (i
.types
[op
], disp
))
4274 if (i
.op
[op
].disps
->X_op
== O_constant
)
4276 offsetT op_disp
= i
.op
[op
].disps
->X_add_number
;
4278 if (i
.types
[op
].bitfield
.disp16
4279 && (op_disp
& ~(offsetT
) 0xffff) == 0)
4281 /* If this operand is at most 16 bits, convert
4282 to a signed 16 bit number and don't use 64bit
4284 op_disp
= (((op_disp
& 0xffff) ^ 0x8000) - 0x8000);
4285 i
.types
[op
].bitfield
.disp64
= 0;
4287 if (i
.types
[op
].bitfield
.disp32
4288 && (op_disp
& ~(((offsetT
) 2 << 31) - 1)) == 0)
4290 /* If this operand is at most 32 bits, convert
4291 to a signed 32 bit number and don't use 64bit
4293 op_disp
&= (((offsetT
) 2 << 31) - 1);
4294 op_disp
= (op_disp
^ ((offsetT
) 1 << 31)) - ((addressT
) 1 << 31);
4295 i
.types
[op
].bitfield
.disp64
= 0;
4297 if (!op_disp
&& i
.types
[op
].bitfield
.baseindex
)
4299 i
.types
[op
].bitfield
.disp8
= 0;
4300 i
.types
[op
].bitfield
.disp16
= 0;
4301 i
.types
[op
].bitfield
.disp32
= 0;
4302 i
.types
[op
].bitfield
.disp32s
= 0;
4303 i
.types
[op
].bitfield
.disp64
= 0;
4307 else if (flag_code
== CODE_64BIT
)
4309 if (fits_in_signed_long (op_disp
))
4311 i
.types
[op
].bitfield
.disp64
= 0;
4312 i
.types
[op
].bitfield
.disp32s
= 1;
4314 if (i
.prefix
[ADDR_PREFIX
]
4315 && fits_in_unsigned_long (op_disp
))
4316 i
.types
[op
].bitfield
.disp32
= 1;
4318 if ((i
.types
[op
].bitfield
.disp32
4319 || i
.types
[op
].bitfield
.disp32s
4320 || i
.types
[op
].bitfield
.disp16
)
4321 && fits_in_signed_byte (op_disp
))
4322 i
.types
[op
].bitfield
.disp8
= 1;
4324 else if (i
.reloc
[op
] == BFD_RELOC_386_TLS_DESC_CALL
4325 || i
.reloc
[op
] == BFD_RELOC_X86_64_TLSDESC_CALL
)
4327 fix_new_exp (frag_now
, frag_more (0) - frag_now
->fr_literal
, 0,
4328 i
.op
[op
].disps
, 0, i
.reloc
[op
]);
4329 i
.types
[op
].bitfield
.disp8
= 0;
4330 i
.types
[op
].bitfield
.disp16
= 0;
4331 i
.types
[op
].bitfield
.disp32
= 0;
4332 i
.types
[op
].bitfield
.disp32s
= 0;
4333 i
.types
[op
].bitfield
.disp64
= 0;
	/* We only support 64bit displacement on constants.  */
	i.types[op].bitfield.disp64 = 0;
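
/* Illustrative effect (not from the original source): a constant
   displacement such as the 4 in "movl 4(%ebp), %eax" passes
   fits_in_signed_byte above, so disp8 stays available and the
   assembler can emit the one-byte displacement form, while a value
   like 0x12345 keeps only the wider displacement types.  */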
4341 /* Check if operands are valid for the instruction. */
4344 check_VecOperands (const insn_template
*t
)
4348 /* Without VSIB byte, we can't have a vector register for index. */
4349 if (!t
->opcode_modifier
.vecsib
4351 && (i
.index_reg
->reg_type
.bitfield
.regxmm
4352 || i
.index_reg
->reg_type
.bitfield
.regymm
4353 || i
.index_reg
->reg_type
.bitfield
.regzmm
))
4355 i
.error
= unsupported_vector_index_register
;
4359 /* Check if default mask is allowed. */
4360 if (t
->opcode_modifier
.nodefmask
4361 && (!i
.mask
|| i
.mask
->mask
->reg_num
== 0))
4363 i
.error
= no_default_mask
;
4367 /* For VSIB byte, we need a vector register for index, and all vector
4368 registers must be distinct. */
4369 if (t
->opcode_modifier
.vecsib
)
4372 || !((t
->opcode_modifier
.vecsib
== VecSIB128
4373 && i
.index_reg
->reg_type
.bitfield
.regxmm
)
4374 || (t
->opcode_modifier
.vecsib
== VecSIB256
4375 && i
.index_reg
->reg_type
.bitfield
.regymm
)
4376 || (t
->opcode_modifier
.vecsib
== VecSIB512
4377 && i
.index_reg
->reg_type
.bitfield
.regzmm
)))
4379 i
.error
= invalid_vsib_address
;
4383 gas_assert (i
.reg_operands
== 2 || i
.mask
);
4384 if (i
.reg_operands
== 2 && !i
.mask
)
4386 gas_assert (i
.types
[0].bitfield
.regxmm
4387 || i
.types
[0].bitfield
.regymm
);
4388 gas_assert (i
.types
[2].bitfield
.regxmm
4389 || i
.types
[2].bitfield
.regymm
);
4390 if (operand_check
== check_none
)
4392 if (register_number (i
.op
[0].regs
)
4393 != register_number (i
.index_reg
)
4394 && register_number (i
.op
[2].regs
)
4395 != register_number (i
.index_reg
)
4396 && register_number (i
.op
[0].regs
)
4397 != register_number (i
.op
[2].regs
))
4399 if (operand_check
== check_error
)
4401 i
.error
= invalid_vector_register_set
;
4404 as_warn (_("mask, index, and destination registers should be distinct"));
4406 else if (i
.reg_operands
== 1 && i
.mask
)
4408 if ((i
.types
[1].bitfield
.regymm
4409 || i
.types
[1].bitfield
.regzmm
)
4410 && (register_number (i
.op
[1].regs
)
4411 == register_number (i
.index_reg
)))
4413 if (operand_check
== check_error
)
4415 i
.error
= invalid_vector_register_set
;
4418 if (operand_check
!= check_none
)
4419 as_warn (_("index and destination registers should be distinct"));
4424 /* Check if broadcast is supported by the instruction and is applied
4425 to the memory operand. */
4428 int broadcasted_opnd_size
;
4430 /* Check if specified broadcast is supported in this instruction,
4431 and it's applied to memory operand of DWORD or QWORD type,
4432 depending on VecESize. */
4433 if (i
.broadcast
->type
!= t
->opcode_modifier
.broadcast
4434 || !i
.types
[i
.broadcast
->operand
].bitfield
.mem
4435 || (t
->opcode_modifier
.vecesize
== 0
4436 && !i
.types
[i
.broadcast
->operand
].bitfield
.dword
4437 && !i
.types
[i
.broadcast
->operand
].bitfield
.unspecified
)
4438 || (t
->opcode_modifier
.vecesize
== 1
4439 && !i
.types
[i
.broadcast
->operand
].bitfield
.qword
4440 && !i
.types
[i
.broadcast
->operand
].bitfield
.unspecified
))
4443 broadcasted_opnd_size
= t
->opcode_modifier
.vecesize
? 64 : 32;
4444 if (i
.broadcast
->type
== BROADCAST_1TO16
)
4445 broadcasted_opnd_size
<<= 4; /* Broadcast 1to16. */
4446 else if (i
.broadcast
->type
== BROADCAST_1TO8
)
4447 broadcasted_opnd_size
<<= 3; /* Broadcast 1to8. */
4451 if ((broadcasted_opnd_size
== 256
4452 && !t
->operand_types
[i
.broadcast
->operand
].bitfield
.ymmword
)
4453 || (broadcasted_opnd_size
== 512
4454 && !t
->operand_types
[i
.broadcast
->operand
].bitfield
.zmmword
))
4457 i
.error
= unsupported_broadcast
;
4461 /* If broadcast is supported in this instruction, we need to check if
4462 operand of one-element size isn't specified without broadcast. */
4463 else if (t
->opcode_modifier
.broadcast
&& i
.mem_operands
)
4465 /* Find memory operand. */
4466 for (op
= 0; op
< i
.operands
; op
++)
4467 if (operand_type_check (i
.types
[op
], anymem
))
4469 gas_assert (op
< i
.operands
);
4470 /* Check size of the memory operand. */
4471 if ((t
->opcode_modifier
.vecesize
== 0
4472 && i
.types
[op
].bitfield
.dword
)
4473 || (t
->opcode_modifier
.vecesize
== 1
4474 && i
.types
[op
].bitfield
.qword
))
4476 i
.error
= broadcast_needed
;
4481 /* Check if requested masking is supported. */
4483 && (!t
->opcode_modifier
.masking
4485 && t
->opcode_modifier
.masking
== MERGING_MASKING
)))
4487 i
.error
= unsupported_masking
;
4491 /* Check if masking is applied to dest operand. */
4492 if (i
.mask
&& (i
.mask
->operand
!= (int) (i
.operands
- 1)))
4494 i
.error
= mask_not_on_destination
;
4501 if ((i
.rounding
->type
!= saeonly
4502 && !t
->opcode_modifier
.staticrounding
)
4503 || (i
.rounding
->type
== saeonly
4504 && (t
->opcode_modifier
.staticrounding
4505 || !t
->opcode_modifier
.sae
)))
4507 i
.error
= unsupported_rc_sae
;
4510 /* If the instruction has several immediate operands and one of
4511 them is rounding, the rounding operand should be the last
4512 immediate operand. */
4513 if (i
.imm_operands
> 1
4514 && i
.rounding
->operand
!= (int) (i
.imm_operands
- 1))
4516 i
.error
= rc_sae_operand_not_last_imm
;
4521 /* Check vector Disp8 operand. */
4522 if (t
->opcode_modifier
.disp8memshift
)
4525 i
.memshift
= t
->opcode_modifier
.vecesize
? 3 : 2;
4527 i
.memshift
= t
->opcode_modifier
.disp8memshift
;
4529 for (op
= 0; op
< i
.operands
; op
++)
4530 if (operand_type_check (i
.types
[op
], disp
)
4531 && i
.op
[op
].disps
->X_op
== O_constant
)
4533 offsetT value
= i
.op
[op
].disps
->X_add_number
;
4534 int vec_disp8_ok
= fits_in_vec_disp8 (value
);
4535 if (t
->operand_types
[op
].bitfield
.vec_disp8
)
4538 i
.types
[op
].bitfield
.vec_disp8
= 1;
4541 /* Vector insn can only have Vec_Disp8/Disp32 in
4542 32/64bit modes, and Vec_Disp8/Disp16 in 16bit
4544 i
.types
[op
].bitfield
.disp8
= 0;
4545 if (flag_code
!= CODE_16BIT
)
4546 i
.types
[op
].bitfield
.disp16
= 0;
4549 else if (flag_code
!= CODE_16BIT
)
	      /* One form of this instruction supports vector Disp8.
		 Try vector Disp8 if we need to use Disp32.  */
	      if (vec_disp8_ok && !fits_in_signed_byte (value))
		  i.error = try_vector_disp8;
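
/* Illustrative example (an assumption about a 64-byte vector memory
   operand, not text from the original): the compressed vector Disp8
   form stores the displacement divided by the access size, so a
   displacement of 128 on a 64-byte access is kept as the scaled value
   2 and still fits a one-byte Vec_Disp8 encoding, while a value such
   as 100 that is not a multiple of the access size falls back to a
   full 32-bit displacement.  */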
4567 /* Check if operands are valid for the instruction. Update VEX
4571 VEX_check_operands (const insn_template
*t
)
4573 /* VREX is only valid with EVEX prefix. */
4574 if (i
.need_vrex
&& !t
->opcode_modifier
.evex
)
4576 i
.error
= invalid_register_operand
;
4580 if (!t
->opcode_modifier
.vex
)
4583 /* Only check VEX_Imm4, which must be the first operand. */
4584 if (t
->operand_types
[0].bitfield
.vec_imm4
)
4586 if (i
.op
[0].imms
->X_op
!= O_constant
4587 || !fits_in_imm4 (i
.op
[0].imms
->X_add_number
))
4593 /* Turn off Imm8 so that update_imm won't complain. */
4594 i
.types
[0] = vec_imm4
;
4600 static const insn_template
*
4601 match_template (void)
4603 /* Points to template once we've found it. */
4604 const insn_template
*t
;
4605 i386_operand_type overlap0
, overlap1
, overlap2
, overlap3
;
4606 i386_operand_type overlap4
;
4607 unsigned int found_reverse_match
;
4608 i386_opcode_modifier suffix_check
;
4609 i386_operand_type operand_types
[MAX_OPERANDS
];
4610 int addr_prefix_disp
;
4612 unsigned int found_cpu_match
;
4613 unsigned int check_register
;
4614 enum i386_error specific_error
= 0;
4616 #if MAX_OPERANDS != 5
4617 # error "MAX_OPERANDS must be 5."
4620 found_reverse_match
= 0;
4621 addr_prefix_disp
= -1;
4623 memset (&suffix_check
, 0, sizeof (suffix_check
));
4624 if (i
.suffix
== BYTE_MNEM_SUFFIX
)
4625 suffix_check
.no_bsuf
= 1;
4626 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
4627 suffix_check
.no_wsuf
= 1;
4628 else if (i
.suffix
== SHORT_MNEM_SUFFIX
)
4629 suffix_check
.no_ssuf
= 1;
4630 else if (i
.suffix
== LONG_MNEM_SUFFIX
)
4631 suffix_check
.no_lsuf
= 1;
4632 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
4633 suffix_check
.no_qsuf
= 1;
4634 else if (i
.suffix
== LONG_DOUBLE_MNEM_SUFFIX
)
4635 suffix_check
.no_ldsuf
= 1;
4637 /* Must have right number of operands. */
4638 i
.error
= number_of_operands_mismatch
;
4640 for (t
= current_templates
->start
; t
< current_templates
->end
; t
++)
4642 addr_prefix_disp
= -1;
4644 if (i
.operands
!= t
->operands
)
4647 /* Check processor support. */
4648 i
.error
= unsupported
;
4649 found_cpu_match
= (cpu_flags_match (t
)
4650 == CPU_FLAGS_PERFECT_MATCH
);
4651 if (!found_cpu_match
)
4654 /* Check old gcc support. */
4655 i
.error
= old_gcc_only
;
4656 if (!old_gcc
&& t
->opcode_modifier
.oldgcc
)
4659 /* Check AT&T mnemonic. */
4660 i
.error
= unsupported_with_intel_mnemonic
;
4661 if (intel_mnemonic
&& t
->opcode_modifier
.attmnemonic
)
4664 /* Check AT&T/Intel syntax. */
4665 i
.error
= unsupported_syntax
;
4666 if ((intel_syntax
&& t
->opcode_modifier
.attsyntax
)
4667 || (!intel_syntax
&& t
->opcode_modifier
.intelsyntax
))
4670 /* Check the suffix, except for some instructions in intel mode. */
4671 i
.error
= invalid_instruction_suffix
;
4672 if ((!intel_syntax
|| !t
->opcode_modifier
.ignoresize
)
4673 && ((t
->opcode_modifier
.no_bsuf
&& suffix_check
.no_bsuf
)
4674 || (t
->opcode_modifier
.no_wsuf
&& suffix_check
.no_wsuf
)
4675 || (t
->opcode_modifier
.no_lsuf
&& suffix_check
.no_lsuf
)
4676 || (t
->opcode_modifier
.no_ssuf
&& suffix_check
.no_ssuf
)
4677 || (t
->opcode_modifier
.no_qsuf
&& suffix_check
.no_qsuf
)
4678 || (t
->opcode_modifier
.no_ldsuf
&& suffix_check
.no_ldsuf
)))
4681 if (!operand_size_match (t
))
4684 for (j
= 0; j
< MAX_OPERANDS
; j
++)
4685 operand_types
[j
] = t
->operand_types
[j
];
      /* In general, don't allow 64-bit operands in 32-bit mode.  */
      if (i.suffix == QWORD_MNEM_SUFFIX
	  && flag_code != CODE_64BIT
	  && (intel_syntax
	      ? (!t->opcode_modifier.ignoresize
		 && !intel_float_operand (t->name))
	      : intel_float_operand (t->name) != 2)
	  && ((!operand_types[0].bitfield.regmmx
	       && !operand_types[0].bitfield.regxmm
	       && !operand_types[0].bitfield.regymm
	       && !operand_types[0].bitfield.regzmm)
	      || (!operand_types[t->operands > 1].bitfield.regmmx
		  && !!operand_types[t->operands > 1].bitfield.regxmm
		  && !!operand_types[t->operands > 1].bitfield.regymm
		  && !!operand_types[t->operands > 1].bitfield.regzmm))
	  && (t->base_opcode != 0x0fc7
	      || t->extension_opcode != 1 /* cmpxchg8b */))
	continue;

      /* In general, don't allow 32-bit operands on pre-386.  */
      else if (i.suffix == LONG_MNEM_SUFFIX
	       && !cpu_arch_flags.bitfield.cpui386
	       && (intel_syntax
		   ? (!t->opcode_modifier.ignoresize
		      && !intel_float_operand (t->name))
		   : intel_float_operand (t->name) != 2)
	       && ((!operand_types[0].bitfield.regmmx
		    && !operand_types[0].bitfield.regxmm)
		   || (!operand_types[t->operands > 1].bitfield.regmmx
		       && !!operand_types[t->operands > 1].bitfield.regxmm)))
	continue;
      /* Do not verify operands when there are none.  */
      else
	{
	  if (!t->operands)
	    /* We've found a match; break out of loop.  */
	    break;
	}

      /* Address size prefix will turn Disp64/Disp32/Disp16 operand
	 into Disp32/Disp16/Disp32 operand.  */
      if (i.prefix[ADDR_PREFIX] != 0)
	{
	  /* There should be only one Disp operand.  */
	  switch (flag_code)
	    {
	    case CODE_16BIT:
	      for (j = 0; j < MAX_OPERANDS; j++)
		{
		  if (operand_types[j].bitfield.disp16)
		    {
		      addr_prefix_disp = j;
		      operand_types[j].bitfield.disp32 = 1;
		      operand_types[j].bitfield.disp16 = 0;
		      break;
		    }
		}
	      break;
	    case CODE_32BIT:
	      for (j = 0; j < MAX_OPERANDS; j++)
		{
		  if (operand_types[j].bitfield.disp32)
		    {
		      addr_prefix_disp = j;
		      operand_types[j].bitfield.disp32 = 0;
		      operand_types[j].bitfield.disp16 = 1;
		      break;
		    }
		}
	      break;
	    case CODE_64BIT:
	      for (j = 0; j < MAX_OPERANDS; j++)
		{
		  if (operand_types[j].bitfield.disp64)
		    {
		      addr_prefix_disp = j;
		      operand_types[j].bitfield.disp64 = 0;
		      operand_types[j].bitfield.disp32 = 1;
		      break;
		    }
		}
	      break;
	    }
	}
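
      /* addr_prefix_disp records which operand had its displacement
	 size rewritten above, so that the adjusted type can be copied
	 back into i.tm once this template is accepted.  */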
      /* We check register size if needed.  */
      check_register = t->opcode_modifier.checkregsize;
      overlap0 = operand_type_and (i.types[0], operand_types[0]);
      switch (t->operands)
	{
	case 1:
	  if (!operand_type_match (overlap0, i.types[0]))
	    continue;
	  break;
	case 2:
	  /* xchg %eax, %eax is a special case. It is an aliase for nop
	     only in 32bit mode and we can use opcode 0x90.  In 64bit
	     mode, we can't use 0x90 for xchg %eax, %eax since it should
	     zero-extend %eax to %rax.  */
	  if (flag_code == CODE_64BIT
	      && t->base_opcode == 0x90
	      && operand_type_equal (&i.types [0], &acc32)
	      && operand_type_equal (&i.types [1], &acc32))
	    continue;
	  if (i.swap_operand)
	    {
	      /* If we swap operand in encoding, we either match
		 the next one or reverse direction of operands.  */
	      if (t->opcode_modifier.s)
		continue;
	      else if (t->opcode_modifier.d)
		goto check_reverse;
	    }
	  /* Fall through.  */

	case 3:
	  /* If we swap operand in encoding, we match the next one.  */
	  if (i.swap_operand && t->opcode_modifier.s)
	    continue;
	  /* Fall through.  */
	case 4:
	case 5:
	  overlap1 = operand_type_and (i.types[1], operand_types[1]);
	  if (!operand_type_match (overlap0, i.types[0])
	      || !operand_type_match (overlap1, i.types[1])
	      || (check_register
		  && !operand_type_register_match (overlap0, i.types[0],
						   operand_types[0],
						   overlap1, i.types[1],
						   operand_types[1])))
	    {
	      /* Check if other direction is valid ...  */
	      if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
		continue;

check_reverse:
	      /* Try reversing direction of operands.  */
	      overlap0 = operand_type_and (i.types[0], operand_types[1]);
	      overlap1 = operand_type_and (i.types[1], operand_types[0]);
	      if (!operand_type_match (overlap0, i.types[0])
		  || !operand_type_match (overlap1, i.types[1])
		  || (check_register
		      && !operand_type_register_match (overlap0, i.types[0],
						       operand_types[1],
						       overlap1, i.types[1],
						       operand_types[0])))
		{
		  /* Does not match either direction.  */
		  continue;
		}
	      /* found_reverse_match holds which of D or FloatDR
		 we've found.  */
	      if (t->opcode_modifier.d)
		found_reverse_match = Opcode_D;
	      else if (t->opcode_modifier.floatd)
		found_reverse_match = Opcode_FloatD;
	      else
		found_reverse_match = 0;
	      if (t->opcode_modifier.floatr)
		found_reverse_match |= Opcode_FloatR;
	    }
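	  /* The straight operand order matched; check any remaining
	     operands of 3, 4 and 5 operand templates.  */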
	  else
	    {
	      /* Found a forward 2 operand match here.  */
	      switch (t->operands)
		{
		case 5:
		  overlap4 = operand_type_and (i.types[4],
					       operand_types[4]);
		  /* Fall through.  */
		case 4:
		  overlap3 = operand_type_and (i.types[3],
					       operand_types[3]);
		  /* Fall through.  */
		case 3:
		  overlap2 = operand_type_and (i.types[2],
					       operand_types[2]);
		  break;
		}

	      switch (t->operands)
		{
		case 5:
		  if (!operand_type_match (overlap4, i.types[4])
		      || !operand_type_register_match (overlap3,
						       i.types[3],
						       operand_types[3],
						       overlap4,
						       i.types[4],
						       operand_types[4]))
		    continue;
		  /* Fall through.  */
		case 4:
		  if (!operand_type_match (overlap3, i.types[3])
		      || (check_register
			  && !operand_type_register_match (overlap2,
							   i.types[2],
							   operand_types[2],
							   overlap3,
							   i.types[3],
							   operand_types[3])))
		    continue;
		  /* Fall through.  */
		case 3:
		  /* Here we make use of the fact that there are no
		     reverse match 3 operand instructions, and all 3
		     operand instructions only need to be checked for
		     register consistency between operands 2 and 3.  */
		  if (!operand_type_match (overlap2, i.types[2])
		      || (check_register
			  && !operand_type_register_match (overlap1,
							   i.types[1],
							   operand_types[1],
							   overlap2,
							   i.types[2],
							   operand_types[2])))
		    continue;
		  break;
		}
	    }
	  /* Found either forward/reverse 2, 3 or 4 operand match here:
	     slip through to break.  */
	}
      if (!found_cpu_match)
	{
	  found_reverse_match = 0;
	  continue;
	}

      /* Check if vector and VEX operands are valid.  */
      if (check_VecOperands (t) || VEX_check_operands (t))
	{
	  specific_error = i.error;
	  continue;
	}

      /* We've found a match; break out of loop.  */
      break;
    }
  if (t == current_templates->end)
    {
      /* We found no match.  */
      const char *err_msg;
      switch (specific_error ? specific_error : i.error)
	{
	default:
	  abort ();
	case operand_size_mismatch:
	  err_msg = _("operand size mismatch");
	  break;
	case operand_type_mismatch:
	  err_msg = _("operand type mismatch");
	  break;
	case register_type_mismatch:
	  err_msg = _("register type mismatch");
	  break;
	case number_of_operands_mismatch:
	  err_msg = _("number of operands mismatch");
	  break;
	case invalid_instruction_suffix:
	  err_msg = _("invalid instruction suffix");
	  break;
	case bad_imm4:
	  err_msg = _("constant doesn't fit in 4 bits");
	  break;
	case old_gcc_only:
	  err_msg = _("only supported with old gcc");
	  break;
	case unsupported_with_intel_mnemonic:
	  err_msg = _("unsupported with Intel mnemonic");
	  break;
	case unsupported_syntax:
	  err_msg = _("unsupported syntax");
	  break;
	case unsupported:
	  as_bad (_("unsupported instruction `%s'"),
		  current_templates->start->name);
	  return NULL;
	case invalid_vsib_address:
	  err_msg = _("invalid VSIB address");
	  break;
	case invalid_vector_register_set:
	  err_msg = _("mask, index, and destination registers must be distinct");
	  break;
	case unsupported_vector_index_register:
	  err_msg = _("unsupported vector index register");
	  break;
	case unsupported_broadcast:
	  err_msg = _("unsupported broadcast");
	  break;
	case broadcast_not_on_src_operand:
	  err_msg = _("broadcast not on source memory operand");
	  break;
	case broadcast_needed:
	  err_msg = _("broadcast is needed for operand of such type");
	  break;
	case unsupported_masking:
	  err_msg = _("unsupported masking");
	  break;
	case mask_not_on_destination:
	  err_msg = _("mask not on destination operand");
	  break;
	case no_default_mask:
	  err_msg = _("default mask isn't allowed");
	  break;
	case unsupported_rc_sae:
	  err_msg = _("unsupported static rounding/sae");
	  break;
	case rc_sae_operand_not_last_imm:
	  if (intel_syntax)
	    err_msg = _("RC/SAE operand must precede immediate operands");
	  else
	    err_msg = _("RC/SAE operand must follow immediate operands");
	  break;
	case invalid_register_operand:
	  err_msg = _("invalid register operand");
	  break;
	}
      as_bad (_("%s for `%s'"), err_msg,
	      current_templates->start->name);
      return NULL;
    }
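
  /* A template matched; everything below this point only issues
     advisory warnings, it can no longer reject the instruction.  */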
  if (!quiet_warnings)
    {
      if (!intel_syntax
	  && (i.types[0].bitfield.jumpabsolute
	      != operand_types[0].bitfield.jumpabsolute))
	{
	  as_warn (_("indirect %s without `*'"), t->name);
	}

      if (t->opcode_modifier.isprefix
	  && t->opcode_modifier.ignoresize)
	{
	  /* Warn them that a data or address size prefix doesn't
	     affect assembly of the next line of code.  */
	  as_warn (_("stand-alone `%s' prefix"), t->name);
	}
    }
  /* Copy the template we found.  */
  i.tm = *t;

  if (addr_prefix_disp != -1)
    i.tm.operand_types[addr_prefix_disp]
      = operand_types[addr_prefix_disp];

  if (found_reverse_match)
    {
      /* If we found a reverse match we must alter the opcode
	 direction bit.  found_reverse_match holds bits to change
	 (different for int & float insns).  */

      i.tm.base_opcode ^= found_reverse_match;

      i.tm.operand_types[0] = operand_types[1];
      i.tm.operand_types[1] = operand_types[0];
    }

  return t;
}

static int
check_string (void)
{
  int mem_op = operand_type_check (i.types[0], anymem) ? 0 : 1;
  if (i.tm.operand_types[mem_op].bitfield.esseg)
    {
      if (i.seg[0] != NULL && i.seg[0] != &es)
	{
	  as_bad (_("`%s' operand %d must use `%ses' segment"),
		  i.tm.name,
		  mem_op + 1,
		  register_prefix);
	  return 0;
	}
      /* There's only ever one segment override allowed per instruction.
	 This instruction possibly has a legal segment override on the
	 second operand, so copy the segment to where non-string
	 instructions store it, allowing common code.  */
      i.seg[0] = i.seg[1];
    }
  else if (i.tm.operand_types[mem_op + 1].bitfield.esseg)
    {
      if (i.seg[1] != NULL && i.seg[1] != &es)
	{
	  as_bad (_("`%s' operand %d must use `%ses' segment"),
		  i.tm.name,
		  mem_op + 2,
		  register_prefix);
	  return 0;
	}
    }
  return 1;
}
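
/* Derive or validate the operand-size suffix for the matched template
   and adjust the opcode, prefixes and REX bits accordingly.  Returns 0
   and reports an error if the operand size cannot be determined.  */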
static int
process_suffix (void)
{
  /* If matched instruction specifies an explicit instruction mnemonic
     suffix, use it.  */
  if (i.tm.opcode_modifier.size16)
    i.suffix = WORD_MNEM_SUFFIX;
  else if (i.tm.opcode_modifier.size32)
    i.suffix = LONG_MNEM_SUFFIX;
  else if (i.tm.opcode_modifier.size64)
    i.suffix = QWORD_MNEM_SUFFIX;
  else if (i.reg_operands)
    {
      /* If there's no instruction mnemonic suffix we try to invent one
	 based on register operands.  */
      if (!i.suffix)
	{
	  /* We take i.suffix from the last register operand specified,
	     Destination register type is more significant than source
	     register type.  crc32 in SSE4.2 prefers source register
	     type.  */
	  if (i.tm.base_opcode == 0xf20f38f1)
	    {
	      if (i.types[0].bitfield.reg16)
		i.suffix = WORD_MNEM_SUFFIX;
	      else if (i.types[0].bitfield.reg32)
		i.suffix = LONG_MNEM_SUFFIX;
	      else if (i.types[0].bitfield.reg64)
		i.suffix = QWORD_MNEM_SUFFIX;
	    }
	  else if (i.tm.base_opcode == 0xf20f38f0)
	    {
	      if (i.types[0].bitfield.reg8)
		i.suffix = BYTE_MNEM_SUFFIX;
	    }

	  if (!i.suffix)
	    {
	      int op;

	      if (i.tm.base_opcode == 0xf20f38f1
		  || i.tm.base_opcode == 0xf20f38f0)
		{
		  /* We have to know the operand size for crc32.  */
		  as_bad (_("ambiguous memory operand size for `%s`"),
			  i.tm.name);
		  return 0;
		}

	      for (op = i.operands; --op >= 0;)
		if (!i.tm.operand_types[op].bitfield.inoutportreg)
		  {
		    if (i.types[op].bitfield.reg8)
		      {
			i.suffix = BYTE_MNEM_SUFFIX;
			break;
		      }
		    else if (i.types[op].bitfield.reg16)
		      {
			i.suffix = WORD_MNEM_SUFFIX;
			break;
		      }
		    else if (i.types[op].bitfield.reg32)
		      {
			i.suffix = LONG_MNEM_SUFFIX;
			break;
		      }
		    else if (i.types[op].bitfield.reg64)
		      {
			i.suffix = QWORD_MNEM_SUFFIX;
			break;
		      }
		  }
	    }
	}
    }
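  /* An explicit (or just derived) suffix is checked against the
     register operands below; the check_*_reg helpers reject or warn
     about registers of the wrong width.  */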
  else if (i.suffix == BYTE_MNEM_SUFFIX)
    {
      if (intel_syntax
	  && i.tm.opcode_modifier.ignoresize
	  && i.tm.opcode_modifier.no_bsuf)
	i.suffix = 0;
      else if (!check_byte_reg ())
	return 0;
    }
  else if (i.suffix == LONG_MNEM_SUFFIX)
    {
      if (intel_syntax
	  && i.tm.opcode_modifier.ignoresize
	  && i.tm.opcode_modifier.no_lsuf)
	i.suffix = 0;
      else if (!check_long_reg ())
	return 0;
    }
  else if (i.suffix == QWORD_MNEM_SUFFIX)
    {
      if (intel_syntax
	  && i.tm.opcode_modifier.ignoresize
	  && i.tm.opcode_modifier.no_qsuf)
	i.suffix = 0;
      else if (!check_qword_reg ())
	return 0;
    }
  else if (i.suffix == WORD_MNEM_SUFFIX)
    {
      if (intel_syntax
	  && i.tm.opcode_modifier.ignoresize
	  && i.tm.opcode_modifier.no_wsuf)
	i.suffix = 0;
      else if (!check_word_reg ())
	return 0;
    }
  else if (i.suffix == XMMWORD_MNEM_SUFFIX
	   || i.suffix == YMMWORD_MNEM_SUFFIX
	   || i.suffix == ZMMWORD_MNEM_SUFFIX)
    {
      /* Skip if the instruction has x/y/z suffix.  match_template
	 should check if it is a valid suffix.  */
    }
  else if (intel_syntax && i.tm.opcode_modifier.ignoresize)
    /* Do nothing if the instruction is going to ignore the prefix.  */
    ;
  else if (i.tm.opcode_modifier.defaultsize
	   && !i.suffix
	   /* exclude fldenv/frstor/fsave/fstenv */
	   && i.tm.opcode_modifier.no_ssuf)
    {
      i.suffix = stackop_size;
    }
  else if (intel_syntax
	   && !i.suffix
	   && (i.tm.operand_types[0].bitfield.jumpabsolute
	       || i.tm.opcode_modifier.jumpbyte
	       || i.tm.opcode_modifier.jumpintersegment
	       || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
		   && i.tm.extension_opcode <= 3)))
    {
      switch (flag_code)
	{
	case CODE_64BIT:
	  if (!i.tm.opcode_modifier.no_qsuf)
	    {
	      i.suffix = QWORD_MNEM_SUFFIX;
	      break;
	    }
	  /* Fall through.  */
	case CODE_32BIT:
	  if (!i.tm.opcode_modifier.no_lsuf)
	    i.suffix = LONG_MNEM_SUFFIX;
	  break;
	case CODE_16BIT:
	  if (!i.tm.opcode_modifier.no_wsuf)
	    i.suffix = WORD_MNEM_SUFFIX;
	  break;
	}
    }
  if (!i.suffix)
    {
      if (!intel_syntax)
	{
	  if (i.tm.opcode_modifier.w)
	    {
	      as_bad (_("no instruction mnemonic suffix given and "
			"no register operands; can't size instruction"));
	      return 0;
	    }
	}
      else
	{
	  unsigned int suffixes;

	  suffixes = !i.tm.opcode_modifier.no_bsuf;
	  if (!i.tm.opcode_modifier.no_wsuf)
	    suffixes |= 1 << 1;
	  if (!i.tm.opcode_modifier.no_lsuf)
	    suffixes |= 1 << 2;
	  if (!i.tm.opcode_modifier.no_ldsuf)
	    suffixes |= 1 << 3;
	  if (!i.tm.opcode_modifier.no_ssuf)
	    suffixes |= 1 << 4;
	  if (!i.tm.opcode_modifier.no_qsuf)
	    suffixes |= 1 << 5;

	  /* There are more than suffix matches.  */
	  if (i.tm.opcode_modifier.w
	      || ((suffixes & (suffixes - 1))
		  && !i.tm.opcode_modifier.defaultsize
		  && !i.tm.opcode_modifier.ignoresize))
	    {
	      as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
	      return 0;
	    }
	}
    }
  /* Change the opcode based on the operand size given by i.suffix;
     We don't need to change things for byte insns.  */

  if (i.suffix
      && i.suffix != BYTE_MNEM_SUFFIX
      && i.suffix != XMMWORD_MNEM_SUFFIX
      && i.suffix != YMMWORD_MNEM_SUFFIX
      && i.suffix != ZMMWORD_MNEM_SUFFIX)
    {
      /* It's not a byte, select word/dword operation.  */
      if (i.tm.opcode_modifier.w)
	{
	  if (i.tm.opcode_modifier.shortform)
	    i.tm.base_opcode |= 8;
	  else
	    i.tm.base_opcode |= 1;
	}

      /* Now select between word & dword operations via the operand
	 size prefix, except for instructions that will ignore this
	 prefix anyway.  */
      if (i.tm.opcode_modifier.addrprefixop0)
	{
	  /* The address size override prefix changes the size of the
	     first operand.  */
	  if ((flag_code == CODE_32BIT
	       && i.op->regs[0].reg_type.bitfield.reg16)
	      || (flag_code != CODE_32BIT
		  && i.op->regs[0].reg_type.bitfield.reg32))
	    if (!add_prefix (ADDR_PREFIX_OPCODE))
	      return 0;
	}
      else if (i.suffix != QWORD_MNEM_SUFFIX
	       && i.suffix != LONG_DOUBLE_MNEM_SUFFIX
	       && !i.tm.opcode_modifier.ignoresize
	       && !i.tm.opcode_modifier.floatmf
	       && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
		   || (flag_code == CODE_64BIT
		       && i.tm.opcode_modifier.jumpbyte)))
	{
	  unsigned int prefix = DATA_PREFIX_OPCODE;

	  if (i.tm.opcode_modifier.jumpbyte) /* jcxz, loop */
	    prefix = ADDR_PREFIX_OPCODE;

	  if (!add_prefix (prefix))
	    return 0;
	}

      /* Set mode64 for an operand.  */
      if (i.suffix == QWORD_MNEM_SUFFIX
	  && flag_code == CODE_64BIT
	  && !i.tm.opcode_modifier.norex64)
	{
	  /* Special case for xchg %rax,%rax.  It is NOP and doesn't
	     need rex64.  cmpxchg8b is also a special case.  */
	  if (! (i.operands == 2
		 && i.tm.base_opcode == 0x90
		 && i.tm.extension_opcode == None
		 && operand_type_equal (&i.types [0], &acc64)
		 && operand_type_equal (&i.types [1], &acc64))
	      && ! (i.operands == 1
		    && i.tm.base_opcode == 0xfc7
		    && i.tm.extension_opcode == 1
		    && !operand_type_check (i.types [0], reg)
		    && operand_type_check (i.types [0], anymem)))
	    i.rex |= REX_W;
	}

      /* Size floating point instruction.  */
      if (i.suffix == LONG_MNEM_SUFFIX)
	if (i.tm.opcode_modifier.floatmf)
	  i.tm.base_opcode ^= 4;
    }

  return 1;
}
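
/* Verify that the register operands are consistent with a `b' suffix,
   warning where a wider register can simply have its low byte used and
   rejecting registers that cannot be used at all.  */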
static int
check_byte_reg (void)
{
  int op;

  for (op = i.operands; --op >= 0;)
    {
      /* If this is an eight bit register, it's OK.  If it's the 16 or
	 32 bit version of an eight bit register, we will just use the
	 low portion, and that's OK too.  */
      if (i.types[op].bitfield.reg8)
	continue;

      /* I/O port address operands are OK too.  */
      if (i.tm.operand_types[op].bitfield.inoutportreg)
	continue;

      /* crc32 doesn't generate this warning.  */
      if (i.tm.base_opcode == 0xf20f38f0)
	continue;

      if ((i.types[op].bitfield.reg16
	   || i.types[op].bitfield.reg32
	   || i.types[op].bitfield.reg64)
	  && i.op[op].regs->reg_num < 4
	  /* Prohibit these changes in 64bit mode, since the lowering
	     would be more complicated.  */
	  && flag_code != CODE_64BIT)
	{
#if REGISTER_WARNINGS
	  if (!quiet_warnings)
	    as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
		     register_prefix,
		     (i.op[op].regs + (i.types[op].bitfield.reg16
				       ? REGNAM_AL - REGNAM_AX
				       : REGNAM_AL - REGNAM_EAX))->reg_name,
		     register_prefix,
		     i.op[op].regs->reg_name,
		     i.suffix);
#endif
	  continue;
	}
      /* Any other register is bad.  */
      if (i.types[op].bitfield.reg16
	  || i.types[op].bitfield.reg32
	  || i.types[op].bitfield.reg64
	  || i.types[op].bitfield.regmmx
	  || i.types[op].bitfield.regxmm
	  || i.types[op].bitfield.regymm
	  || i.types[op].bitfield.regzmm
	  || i.types[op].bitfield.sreg2
	  || i.types[op].bitfield.sreg3
	  || i.types[op].bitfield.control
	  || i.types[op].bitfield.debug
	  || i.types[op].bitfield.test
	  || i.types[op].bitfield.floatreg
	  || i.types[op].bitfield.floatacc)
	{
	  as_bad (_("`%s%s' not allowed with `%s%c'"),
		  register_prefix,
		  i.op[op].regs->reg_name,
		  i.tm.name,
		  i.suffix);
	  return 0;
	}
    }
  return 1;
}
static int
check_long_reg (void)
{
  int op;

  for (op = i.operands; --op >= 0;)
    /* Reject eight bit registers, except where the template requires
       them. (eg. movzb)  */
    if (i.types[op].bitfield.reg8
	&& (i.tm.operand_types[op].bitfield.reg16
	    || i.tm.operand_types[op].bitfield.reg32
	    || i.tm.operand_types[op].bitfield.acc))
      {
	as_bad (_("`%s%s' not allowed with `%s%c'"),
		register_prefix,
		i.op[op].regs->reg_name,
		i.tm.name,
		i.suffix);
	return 0;
      }
    /* Warn if the e prefix on a general reg is missing.  */
    else if ((!quiet_warnings || flag_code == CODE_64BIT)
	     && i.types[op].bitfield.reg16
	     && (i.tm.operand_types[op].bitfield.reg32
		 || i.tm.operand_types[op].bitfield.acc))
      {
	/* Prohibit these changes in the 64bit mode, since the
	   lowering is more complicated.  */
	if (flag_code == CODE_64BIT)
	  {
	    as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
		    register_prefix, i.op[op].regs->reg_name,
		    i.suffix);
	    return 0;
	  }
#if REGISTER_WARNINGS
	as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
		 register_prefix,
		 (i.op[op].regs + REGNAM_EAX - REGNAM_AX)->reg_name,
		 register_prefix, i.op[op].regs->reg_name, i.suffix);
#endif
      }
    /* Warn if the r prefix on a general reg is present.  */
    else if (i.types[op].bitfield.reg64
	     && (i.tm.operand_types[op].bitfield.reg32
		 || i.tm.operand_types[op].bitfield.acc))
      {
	if (intel_syntax
	    && i.tm.opcode_modifier.toqword
	    && !i.types[0].bitfield.regxmm)
	  {
	    /* Convert to QWORD.  We want REX byte.  */
	    i.suffix = QWORD_MNEM_SUFFIX;
	  }
	else
	  {
	    as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
		    register_prefix, i.op[op].regs->reg_name,
		    i.suffix);
	    return 0;
	  }
      }
  return 1;
}
5492 check_qword_reg (void)
5496 for (op
= i
.operands
; --op
>= 0; )
5497 /* Reject eight bit registers, except where the template requires
5498 them. (eg. movzb) */
5499 if (i
.types
[op
].bitfield
.reg8
5500 && (i
.tm
.operand_types
[op
].bitfield
.reg16
5501 || i
.tm
.operand_types
[op
].bitfield
.reg32
5502 || i
.tm
.operand_types
[op
].bitfield
.acc
))
5504 as_bad (_("`%s%s' not allowed with `%s%c'"),
5506 i
.op
[op
].regs
->reg_name
,
5511 /* Warn if the r prefix on a general reg is missing. */
5512 else if ((i
.types
[op
].bitfield
.reg16
5513 || i
.types
[op
].bitfield
.reg32
)
5514 && (i
.tm
.operand_types
[op
].bitfield
.reg32
5515 || i
.tm
.operand_types
[op
].bitfield
.acc
))
5517 /* Prohibit these changes in the 64bit mode, since the
5518 lowering is more complicated. */
5520 && i
.tm
.opcode_modifier
.todword
5521 && !i
.types
[0].bitfield
.regxmm
)
5523 /* Convert to DWORD. We don't want REX byte. */
5524 i
.suffix
= LONG_MNEM_SUFFIX
;
5528 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5529 register_prefix
, i
.op
[op
].regs
->reg_name
,
5538 check_word_reg (void)
5541 for (op
= i
.operands
; --op
>= 0;)
5542 /* Reject eight bit registers, except where the template requires
5543 them. (eg. movzb) */
5544 if (i
.types
[op
].bitfield
.reg8
5545 && (i
.tm
.operand_types
[op
].bitfield
.reg16
5546 || i
.tm
.operand_types
[op
].bitfield
.reg32
5547 || i
.tm
.operand_types
[op
].bitfield
.acc
))
5549 as_bad (_("`%s%s' not allowed with `%s%c'"),
5551 i
.op
[op
].regs
->reg_name
,
5556 /* Warn if the e or r prefix on a general reg is present. */
5557 else if ((!quiet_warnings
|| flag_code
== CODE_64BIT
)
5558 && (i
.types
[op
].bitfield
.reg32
5559 || i
.types
[op
].bitfield
.reg64
)
5560 && (i
.tm
.operand_types
[op
].bitfield
.reg16
5561 || i
.tm
.operand_types
[op
].bitfield
.acc
))
5563 /* Prohibit these changes in the 64bit mode, since the
5564 lowering is more complicated. */
5565 if (flag_code
== CODE_64BIT
)
5567 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5568 register_prefix
, i
.op
[op
].regs
->reg_name
,
5572 #if REGISTER_WARNINGS
5573 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
5575 (i
.op
[op
].regs
+ REGNAM_AX
- REGNAM_EAX
)->reg_name
,
5576 register_prefix
, i
.op
[op
].regs
->reg_name
, i
.suffix
);
5583 update_imm (unsigned int j
)
5585 i386_operand_type overlap
= i
.types
[j
];
5586 if ((overlap
.bitfield
.imm8
5587 || overlap
.bitfield
.imm8s
5588 || overlap
.bitfield
.imm16
5589 || overlap
.bitfield
.imm32
5590 || overlap
.bitfield
.imm32s
5591 || overlap
.bitfield
.imm64
)
5592 && !operand_type_equal (&overlap
, &imm8
)
5593 && !operand_type_equal (&overlap
, &imm8s
)
5594 && !operand_type_equal (&overlap
, &imm16
)
5595 && !operand_type_equal (&overlap
, &imm32
)
5596 && !operand_type_equal (&overlap
, &imm32s
)
5597 && !operand_type_equal (&overlap
, &imm64
))
5601 i386_operand_type temp
;
5603 operand_type_set (&temp
, 0);
5604 if (i
.suffix
== BYTE_MNEM_SUFFIX
)
5606 temp
.bitfield
.imm8
= overlap
.bitfield
.imm8
;
5607 temp
.bitfield
.imm8s
= overlap
.bitfield
.imm8s
;
5609 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
5610 temp
.bitfield
.imm16
= overlap
.bitfield
.imm16
;
5611 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
5613 temp
.bitfield
.imm64
= overlap
.bitfield
.imm64
;
5614 temp
.bitfield
.imm32s
= overlap
.bitfield
.imm32s
;
5617 temp
.bitfield
.imm32
= overlap
.bitfield
.imm32
;
5620 else if (operand_type_equal (&overlap
, &imm16_32_32s
)
5621 || operand_type_equal (&overlap
, &imm16_32
)
5622 || operand_type_equal (&overlap
, &imm16_32s
))
5624 if ((flag_code
== CODE_16BIT
) ^ (i
.prefix
[DATA_PREFIX
] != 0))
5629 if (!operand_type_equal (&overlap
, &imm8
)
5630 && !operand_type_equal (&overlap
, &imm8s
)
5631 && !operand_type_equal (&overlap
, &imm16
)
5632 && !operand_type_equal (&overlap
, &imm32
)
5633 && !operand_type_equal (&overlap
, &imm32s
)
5634 && !operand_type_equal (&overlap
, &imm64
))
5636 as_bad (_("no instruction mnemonic suffix given; "
5637 "can't determine immediate size"));
5641 i
.types
[j
] = overlap
;
5651 /* Update the first 2 immediate operands. */
5652 n
= i
.operands
> 2 ? 2 : i
.operands
;
5655 for (j
= 0; j
< n
; j
++)
5656 if (update_imm (j
) == 0)
5659 /* The 3rd operand can't be immediate operand. */
5660 gas_assert (operand_type_check (i
.types
[2], imm
) == 0);
static int
bad_implicit_operand (int xmm)
{
  const char *ireg = xmm ? "xmm0" : "ymm0";

  if (i.imm_operands)
    as_bad (_("the last operand of `%s' must be `%s%s'"),
	    i.tm.name, register_prefix, ireg);
  else
    as_bad (_("the first operand of `%s' must be `%s%s'"),
	    i.tm.name, register_prefix, ireg);
  return 0;
}
5681 process_operands (void)
5683 /* Default segment register this instruction will use for memory
5684 accesses. 0 means unknown. This is only for optimizing out
5685 unnecessary segment overrides. */
5686 const seg_entry
*default_seg
= 0;
5688 if (i
.tm
.opcode_modifier
.sse2avx
&& i
.tm
.opcode_modifier
.vexvvvv
)
5690 unsigned int dupl
= i
.operands
;
5691 unsigned int dest
= dupl
- 1;
5694 /* The destination must be an xmm register. */
5695 gas_assert (i
.reg_operands
5696 && MAX_OPERANDS
> dupl
5697 && operand_type_equal (&i
.types
[dest
], ®xmm
));
5699 if (i
.tm
.opcode_modifier
.firstxmm0
)
5701 /* The first operand is implicit and must be xmm0. */
5702 gas_assert (operand_type_equal (&i
.types
[0], ®xmm
));
5703 if (register_number (i
.op
[0].regs
) != 0)
5704 return bad_implicit_operand (1);
5706 if (i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
)
5708 /* Keep xmm0 for instructions with VEX prefix and 3
5714 /* We remove the first xmm0 and keep the number of
5715 operands unchanged, which in fact duplicates the
5717 for (j
= 1; j
< i
.operands
; j
++)
5719 i
.op
[j
- 1] = i
.op
[j
];
5720 i
.types
[j
- 1] = i
.types
[j
];
5721 i
.tm
.operand_types
[j
- 1] = i
.tm
.operand_types
[j
];
5725 else if (i
.tm
.opcode_modifier
.implicit1stxmm0
)
5727 gas_assert ((MAX_OPERANDS
- 1) > dupl
5728 && (i
.tm
.opcode_modifier
.vexsources
5731 /* Add the implicit xmm0 for instructions with VEX prefix
5733 for (j
= i
.operands
; j
> 0; j
--)
5735 i
.op
[j
] = i
.op
[j
- 1];
5736 i
.types
[j
] = i
.types
[j
- 1];
5737 i
.tm
.operand_types
[j
] = i
.tm
.operand_types
[j
- 1];
5740 = (const reg_entry
*) hash_find (reg_hash
, "xmm0");
5741 i
.types
[0] = regxmm
;
5742 i
.tm
.operand_types
[0] = regxmm
;
5745 i
.reg_operands
+= 2;
5750 i
.op
[dupl
] = i
.op
[dest
];
5751 i
.types
[dupl
] = i
.types
[dest
];
5752 i
.tm
.operand_types
[dupl
] = i
.tm
.operand_types
[dest
];
5761 i
.op
[dupl
] = i
.op
[dest
];
5762 i
.types
[dupl
] = i
.types
[dest
];
5763 i
.tm
.operand_types
[dupl
] = i
.tm
.operand_types
[dest
];
5766 if (i
.tm
.opcode_modifier
.immext
)
5769 else if (i
.tm
.opcode_modifier
.firstxmm0
)
5773 /* The first operand is implicit and must be xmm0/ymm0/zmm0. */
5774 gas_assert (i
.reg_operands
5775 && (operand_type_equal (&i
.types
[0], ®xmm
)
5776 || operand_type_equal (&i
.types
[0], ®ymm
)
5777 || operand_type_equal (&i
.types
[0], ®zmm
)));
5778 if (register_number (i
.op
[0].regs
) != 0)
5779 return bad_implicit_operand (i
.types
[0].bitfield
.regxmm
);
5781 for (j
= 1; j
< i
.operands
; j
++)
5783 i
.op
[j
- 1] = i
.op
[j
];
5784 i
.types
[j
- 1] = i
.types
[j
];
5786 /* We need to adjust fields in i.tm since they are used by
5787 build_modrm_byte. */
5788 i
.tm
.operand_types
[j
- 1] = i
.tm
.operand_types
[j
];
5795 else if (i
.tm
.opcode_modifier
.regkludge
)
5797 /* The imul $imm, %reg instruction is converted into
5798 imul $imm, %reg, %reg, and the clr %reg instruction
5799 is converted into xor %reg, %reg. */
5801 unsigned int first_reg_op
;
5803 if (operand_type_check (i
.types
[0], reg
))
5807 /* Pretend we saw the extra register operand. */
5808 gas_assert (i
.reg_operands
== 1
5809 && i
.op
[first_reg_op
+ 1].regs
== 0);
5810 i
.op
[first_reg_op
+ 1].regs
= i
.op
[first_reg_op
].regs
;
5811 i
.types
[first_reg_op
+ 1] = i
.types
[first_reg_op
];
5816 if (i
.tm
.opcode_modifier
.shortform
)
5818 if (i
.types
[0].bitfield
.sreg2
5819 || i
.types
[0].bitfield
.sreg3
)
5821 if (i
.tm
.base_opcode
== POP_SEG_SHORT
5822 && i
.op
[0].regs
->reg_num
== 1)
5824 as_bad (_("you can't `pop %scs'"), register_prefix
);
5827 i
.tm
.base_opcode
|= (i
.op
[0].regs
->reg_num
<< 3);
5828 if ((i
.op
[0].regs
->reg_flags
& RegRex
) != 0)
5833 /* The register or float register operand is in operand
5837 if (i
.types
[0].bitfield
.floatreg
5838 || operand_type_check (i
.types
[0], reg
))
5842 /* Register goes in low 3 bits of opcode. */
5843 i
.tm
.base_opcode
|= i
.op
[op
].regs
->reg_num
;
5844 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
5846 if (!quiet_warnings
&& i
.tm
.opcode_modifier
.ugh
)
5848 /* Warn about some common errors, but press on regardless.
5849 The first case can be generated by gcc (<= 2.8.1). */
5850 if (i
.operands
== 2)
5852 /* Reversed arguments on faddp, fsubp, etc. */
5853 as_warn (_("translating to `%s %s%s,%s%s'"), i
.tm
.name
,
5854 register_prefix
, i
.op
[!intel_syntax
].regs
->reg_name
,
5855 register_prefix
, i
.op
[intel_syntax
].regs
->reg_name
);
5859 /* Extraneous `l' suffix on fp insn. */
5860 as_warn (_("translating to `%s %s%s'"), i
.tm
.name
,
5861 register_prefix
, i
.op
[0].regs
->reg_name
);
5866 else if (i
.tm
.opcode_modifier
.modrm
)
5868 /* The opcode is completed (modulo i.tm.extension_opcode which
5869 must be put into the modrm byte). Now, we make the modrm and
5870 index base bytes based on all the info we've collected. */
5872 default_seg
= build_modrm_byte ();
5874 else if ((i
.tm
.base_opcode
& ~0x3) == MOV_AX_DISP32
)
5878 else if (i
.tm
.opcode_modifier
.isstring
)
5880 /* For the string instructions that allow a segment override
5881 on one of their operands, the default segment is ds. */
5885 if (i
.tm
.base_opcode
== 0x8d /* lea */
5888 as_warn (_("segment override on `%s' is ineffectual"), i
.tm
.name
);
5890 /* If a segment was explicitly specified, and the specified segment
5891 is not the default, use an opcode prefix to select it. If we
5892 never figured out what the default segment is, then default_seg
5893 will be zero at this point, and the specified segment prefix will
5895 if ((i
.seg
[0]) && (i
.seg
[0] != default_seg
))
5897 if (!add_prefix (i
.seg
[0]->seg_prefix
))
5903 static const seg_entry
*
5904 build_modrm_byte (void)
5906 const seg_entry
*default_seg
= 0;
5907 unsigned int source
, dest
;
5910 /* The first operand of instructions with VEX prefix and 3 sources
5911 must be VEX_Imm4. */
5912 vex_3_sources
= i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
;
5915 unsigned int nds
, reg_slot
;
5918 if (i
.tm
.opcode_modifier
.veximmext
5919 && i
.tm
.opcode_modifier
.immext
)
5921 dest
= i
.operands
- 2;
5922 gas_assert (dest
== 3);
5925 dest
= i
.operands
- 1;
5928 /* There are 2 kinds of instructions:
5929 1. 5 operands: 4 register operands or 3 register operands
5930 plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
5931 VexW0 or VexW1. The destination must be either XMM, YMM or
5933 2. 4 operands: 4 register operands or 3 register operands
5934 plus 1 memory operand, VexXDS, and VexImmExt */
5935 gas_assert ((i
.reg_operands
== 4
5936 || (i
.reg_operands
== 3 && i
.mem_operands
== 1))
5937 && i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
5938 && (i
.tm
.opcode_modifier
.veximmext
5939 || (i
.imm_operands
== 1
5940 && i
.types
[0].bitfield
.vec_imm4
5941 && (i
.tm
.opcode_modifier
.vexw
== VEXW0
5942 || i
.tm
.opcode_modifier
.vexw
== VEXW1
)
5943 && (operand_type_equal (&i
.tm
.operand_types
[dest
], ®xmm
)
5944 || operand_type_equal (&i
.tm
.operand_types
[dest
], ®ymm
)
5945 || operand_type_equal (&i
.tm
.operand_types
[dest
], ®zmm
)))));
5947 if (i
.imm_operands
== 0)
5949 /* When there is no immediate operand, generate an 8bit
5950 immediate operand to encode the first operand. */
5951 exp
= &im_expressions
[i
.imm_operands
++];
5952 i
.op
[i
.operands
].imms
= exp
;
5953 i
.types
[i
.operands
] = imm8
;
5955 /* If VexW1 is set, the first operand is the source and
5956 the second operand is encoded in the immediate operand. */
5957 if (i
.tm
.opcode_modifier
.vexw
== VEXW1
)
5968 /* FMA swaps REG and NDS. */
5969 if (i
.tm
.cpu_flags
.bitfield
.cpufma
)
5977 gas_assert (operand_type_equal (&i
.tm
.operand_types
[reg_slot
],
5979 || operand_type_equal (&i
.tm
.operand_types
[reg_slot
],
5981 || operand_type_equal (&i
.tm
.operand_types
[reg_slot
],
5983 exp
->X_op
= O_constant
;
5984 exp
->X_add_number
= register_number (i
.op
[reg_slot
].regs
) << 4;
5985 gas_assert ((i
.op
[reg_slot
].regs
->reg_flags
& RegVRex
) == 0);
5989 unsigned int imm_slot
;
5991 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
5993 /* If VexW0 is set, the third operand is the source and
5994 the second operand is encoded in the immediate
6001 /* VexW1 is set, the second operand is the source and
6002 the third operand is encoded in the immediate
6008 if (i
.tm
.opcode_modifier
.immext
)
6010 /* When ImmExt is set, the immdiate byte is the last
6012 imm_slot
= i
.operands
- 1;
6020 /* Turn on Imm8 so that output_imm will generate it. */
6021 i
.types
[imm_slot
].bitfield
.imm8
= 1;
6024 gas_assert (operand_type_equal (&i
.tm
.operand_types
[reg_slot
],
6026 || operand_type_equal (&i
.tm
.operand_types
[reg_slot
],
6028 || operand_type_equal (&i
.tm
.operand_types
[reg_slot
],
6030 i
.op
[imm_slot
].imms
->X_add_number
6031 |= register_number (i
.op
[reg_slot
].regs
) << 4;
6032 gas_assert ((i
.op
[reg_slot
].regs
->reg_flags
& RegVRex
) == 0);
6035 gas_assert (operand_type_equal (&i
.tm
.operand_types
[nds
], ®xmm
)
6036 || operand_type_equal (&i
.tm
.operand_types
[nds
],
6038 || operand_type_equal (&i
.tm
.operand_types
[nds
],
6040 i
.vex
.register_specifier
= i
.op
[nds
].regs
;
6045 /* i.reg_operands MUST be the number of real register operands;
6046 implicit registers do not count. If there are 3 register
6047 operands, it must be a instruction with VexNDS. For a
6048 instruction with VexNDD, the destination register is encoded
6049 in VEX prefix. If there are 4 register operands, it must be
6050 a instruction with VEX prefix and 3 sources. */
6051 if (i
.mem_operands
== 0
6052 && ((i
.reg_operands
== 2
6053 && i
.tm
.opcode_modifier
.vexvvvv
<= VEXXDS
)
6054 || (i
.reg_operands
== 3
6055 && i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
6056 || (i
.reg_operands
== 4 && vex_3_sources
)))
6064 /* When there are 3 operands, one of them may be immediate,
6065 which may be the first or the last operand. Otherwise,
6066 the first operand must be shift count register (cl) or it
6067 is an instruction with VexNDS. */
6068 gas_assert (i
.imm_operands
== 1
6069 || (i
.imm_operands
== 0
6070 && (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
6071 || i
.types
[0].bitfield
.shiftcount
)));
6072 if (operand_type_check (i
.types
[0], imm
)
6073 || i
.types
[0].bitfield
.shiftcount
)
6079 /* When there are 4 operands, the first two must be 8bit
6080 immediate operands. The source operand will be the 3rd
6083 For instructions with VexNDS, if the first operand
6084 an imm8, the source operand is the 2nd one. If the last
6085 operand is imm8, the source operand is the first one. */
6086 gas_assert ((i
.imm_operands
== 2
6087 && i
.types
[0].bitfield
.imm8
6088 && i
.types
[1].bitfield
.imm8
)
6089 || (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
6090 && i
.imm_operands
== 1
6091 && (i
.types
[0].bitfield
.imm8
6092 || i
.types
[i
.operands
- 1].bitfield
.imm8
6094 if (i
.imm_operands
== 2)
6098 if (i
.types
[0].bitfield
.imm8
)
6105 if (i
.tm
.opcode_modifier
.evex
)
6107 /* For EVEX instructions, when there are 5 operands, the
6108 first one must be immediate operand. If the second one
6109 is immediate operand, the source operand is the 3th
6110 one. If the last one is immediate operand, the source
6111 operand is the 2nd one. */
6112 gas_assert (i
.imm_operands
== 2
6113 && i
.tm
.opcode_modifier
.sae
6114 && operand_type_check (i
.types
[0], imm
));
6115 if (operand_type_check (i
.types
[1], imm
))
6117 else if (operand_type_check (i
.types
[4], imm
))
6131 /* RC/SAE operand could be between DEST and SRC. That happens
6132 when one operand is GPR and the other one is XMM/YMM/ZMM
6134 if (i
.rounding
&& i
.rounding
->operand
== (int) dest
)
6137 if (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
6139 /* For instructions with VexNDS, the register-only source
6140 operand must be 32/64bit integer, XMM, YMM or ZMM
6141 register. It is encoded in VEX prefix. We need to
6142 clear RegMem bit before calling operand_type_equal. */
6144 i386_operand_type op
;
6147 /* Check register-only source operand when two source
6148 operands are swapped. */
6149 if (!i
.tm
.operand_types
[source
].bitfield
.baseindex
6150 && i
.tm
.operand_types
[dest
].bitfield
.baseindex
)
6158 op
= i
.tm
.operand_types
[vvvv
];
6159 op
.bitfield
.regmem
= 0;
6160 if ((dest
+ 1) >= i
.operands
6161 || (op
.bitfield
.reg32
!= 1
6162 && !op
.bitfield
.reg64
!= 1
6163 && !operand_type_equal (&op
, ®xmm
)
6164 && !operand_type_equal (&op
, ®ymm
)
6165 && !operand_type_equal (&op
, ®zmm
)
6166 && !operand_type_equal (&op
, ®mask
)))
6168 i
.vex
.register_specifier
= i
.op
[vvvv
].regs
;
6174 /* One of the register operands will be encoded in the i.tm.reg
6175 field, the other in the combined i.tm.mode and i.tm.regmem
6176 fields. If no form of this instruction supports a memory
6177 destination operand, then we assume the source operand may
6178 sometimes be a memory operand and so we need to store the
6179 destination in the i.rm.reg field. */
6180 if (!i
.tm
.operand_types
[dest
].bitfield
.regmem
6181 && operand_type_check (i
.tm
.operand_types
[dest
], anymem
) == 0)
6183 i
.rm
.reg
= i
.op
[dest
].regs
->reg_num
;
6184 i
.rm
.regmem
= i
.op
[source
].regs
->reg_num
;
6185 if ((i
.op
[dest
].regs
->reg_flags
& RegRex
) != 0)
6187 if ((i
.op
[dest
].regs
->reg_flags
& RegVRex
) != 0)
6189 if ((i
.op
[source
].regs
->reg_flags
& RegRex
) != 0)
6191 if ((i
.op
[source
].regs
->reg_flags
& RegVRex
) != 0)
6196 i
.rm
.reg
= i
.op
[source
].regs
->reg_num
;
6197 i
.rm
.regmem
= i
.op
[dest
].regs
->reg_num
;
6198 if ((i
.op
[dest
].regs
->reg_flags
& RegRex
) != 0)
6200 if ((i
.op
[dest
].regs
->reg_flags
& RegVRex
) != 0)
6202 if ((i
.op
[source
].regs
->reg_flags
& RegRex
) != 0)
6204 if ((i
.op
[source
].regs
->reg_flags
& RegVRex
) != 0)
6207 if (flag_code
!= CODE_64BIT
&& (i
.rex
& (REX_R
| REX_B
)))
6209 if (!i
.types
[0].bitfield
.control
6210 && !i
.types
[1].bitfield
.control
)
6212 i
.rex
&= ~(REX_R
| REX_B
);
6213 add_prefix (LOCK_PREFIX_OPCODE
);
6217 { /* If it's not 2 reg operands... */
6222 unsigned int fake_zero_displacement
= 0;
6225 for (op
= 0; op
< i
.operands
; op
++)
6226 if (operand_type_check (i
.types
[op
], anymem
))
6228 gas_assert (op
< i
.operands
);
6230 if (i
.tm
.opcode_modifier
.vecsib
)
6232 if (i
.index_reg
->reg_num
== RegEiz
6233 || i
.index_reg
->reg_num
== RegRiz
)
6236 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
6239 i
.sib
.base
= NO_BASE_REGISTER
;
6240 i
.sib
.scale
= i
.log2_scale_factor
;
6241 /* No Vec_Disp8 if there is no base. */
6242 i
.types
[op
].bitfield
.vec_disp8
= 0;
6243 i
.types
[op
].bitfield
.disp8
= 0;
6244 i
.types
[op
].bitfield
.disp16
= 0;
6245 i
.types
[op
].bitfield
.disp64
= 0;
6246 if (flag_code
!= CODE_64BIT
)
6248 /* Must be 32 bit */
6249 i
.types
[op
].bitfield
.disp32
= 1;
6250 i
.types
[op
].bitfield
.disp32s
= 0;
6254 i
.types
[op
].bitfield
.disp32
= 0;
6255 i
.types
[op
].bitfield
.disp32s
= 1;
6258 i
.sib
.index
= i
.index_reg
->reg_num
;
6259 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
6261 if ((i
.index_reg
->reg_flags
& RegVRex
) != 0)
6267 if (i
.base_reg
== 0)
6270 if (!i
.disp_operands
)
6272 fake_zero_displacement
= 1;
6273 /* Instructions with VSIB byte need 32bit displacement
6274 if there is no base register. */
6275 if (i
.tm
.opcode_modifier
.vecsib
)
6276 i
.types
[op
].bitfield
.disp32
= 1;
6278 if (i
.index_reg
== 0)
6280 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
6281 /* Operand is just <disp> */
6282 if (flag_code
== CODE_64BIT
)
6284 /* 64bit mode overwrites the 32bit absolute
6285 addressing by RIP relative addressing and
6286 absolute addressing is encoded by one of the
6287 redundant SIB forms. */
6288 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
6289 i
.sib
.base
= NO_BASE_REGISTER
;
6290 i
.sib
.index
= NO_INDEX_REGISTER
;
6291 i
.types
[op
] = ((i
.prefix
[ADDR_PREFIX
] == 0)
6292 ? disp32s
: disp32
);
6294 else if ((flag_code
== CODE_16BIT
)
6295 ^ (i
.prefix
[ADDR_PREFIX
] != 0))
6297 i
.rm
.regmem
= NO_BASE_REGISTER_16
;
6298 i
.types
[op
] = disp16
;
6302 i
.rm
.regmem
= NO_BASE_REGISTER
;
6303 i
.types
[op
] = disp32
;
6306 else if (!i
.tm
.opcode_modifier
.vecsib
)
6308 /* !i.base_reg && i.index_reg */
6309 if (i
.index_reg
->reg_num
== RegEiz
6310 || i
.index_reg
->reg_num
== RegRiz
)
6311 i
.sib
.index
= NO_INDEX_REGISTER
;
6313 i
.sib
.index
= i
.index_reg
->reg_num
;
6314 i
.sib
.base
= NO_BASE_REGISTER
;
6315 i
.sib
.scale
= i
.log2_scale_factor
;
6316 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
6317 /* No Vec_Disp8 if there is no base. */
6318 i
.types
[op
].bitfield
.vec_disp8
= 0;
6319 i
.types
[op
].bitfield
.disp8
= 0;
6320 i
.types
[op
].bitfield
.disp16
= 0;
6321 i
.types
[op
].bitfield
.disp64
= 0;
6322 if (flag_code
!= CODE_64BIT
)
6324 /* Must be 32 bit */
6325 i
.types
[op
].bitfield
.disp32
= 1;
6326 i
.types
[op
].bitfield
.disp32s
= 0;
6330 i
.types
[op
].bitfield
.disp32
= 0;
6331 i
.types
[op
].bitfield
.disp32s
= 1;
6333 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
6337 /* RIP addressing for 64bit mode. */
6338 else if (i
.base_reg
->reg_num
== RegRip
||
6339 i
.base_reg
->reg_num
== RegEip
)
6341 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
6342 i
.rm
.regmem
= NO_BASE_REGISTER
;
6343 i
.types
[op
].bitfield
.disp8
= 0;
6344 i
.types
[op
].bitfield
.disp16
= 0;
6345 i
.types
[op
].bitfield
.disp32
= 0;
6346 i
.types
[op
].bitfield
.disp32s
= 1;
6347 i
.types
[op
].bitfield
.disp64
= 0;
6348 i
.types
[op
].bitfield
.vec_disp8
= 0;
6349 i
.flags
[op
] |= Operand_PCrel
;
6350 if (! i
.disp_operands
)
6351 fake_zero_displacement
= 1;
6353 else if (i
.base_reg
->reg_type
.bitfield
.reg16
)
6355 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
6356 switch (i
.base_reg
->reg_num
)
6359 if (i
.index_reg
== 0)
6361 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
6362 i
.rm
.regmem
= i
.index_reg
->reg_num
- 6;
6366 if (i
.index_reg
== 0)
6369 if (operand_type_check (i
.types
[op
], disp
) == 0)
6371 /* fake (%bp) into 0(%bp) */
6372 if (i
.tm
.operand_types
[op
].bitfield
.vec_disp8
)
6373 i
.types
[op
].bitfield
.vec_disp8
= 1;
6375 i
.types
[op
].bitfield
.disp8
= 1;
6376 fake_zero_displacement
= 1;
6379 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
6380 i
.rm
.regmem
= i
.index_reg
->reg_num
- 6 + 2;
6382 default: /* (%si) -> 4 or (%di) -> 5 */
6383 i
.rm
.regmem
= i
.base_reg
->reg_num
- 6 + 4;
6385 i
.rm
.mode
= mode_from_disp_size (i
.types
[op
]);
6387 else /* i.base_reg and 32/64 bit mode */
6389 if (flag_code
== CODE_64BIT
6390 && operand_type_check (i
.types
[op
], disp
))
6392 i386_operand_type temp
;
6393 operand_type_set (&temp
, 0);
6394 temp
.bitfield
.disp8
= i
.types
[op
].bitfield
.disp8
;
6395 temp
.bitfield
.vec_disp8
6396 = i
.types
[op
].bitfield
.vec_disp8
;
6398 if (i
.prefix
[ADDR_PREFIX
] == 0)
6399 i
.types
[op
].bitfield
.disp32s
= 1;
6401 i
.types
[op
].bitfield
.disp32
= 1;
6404 if (!i
.tm
.opcode_modifier
.vecsib
)
6405 i
.rm
.regmem
= i
.base_reg
->reg_num
;
6406 if ((i
.base_reg
->reg_flags
& RegRex
) != 0)
6408 i
.sib
.base
= i
.base_reg
->reg_num
;
6409 /* x86-64 ignores REX prefix bit here to avoid decoder
6411 if (!(i
.base_reg
->reg_flags
& RegRex
)
6412 && (i
.base_reg
->reg_num
== EBP_REG_NUM
6413 || i
.base_reg
->reg_num
== ESP_REG_NUM
))
6415 if (i
.base_reg
->reg_num
== 5 && i
.disp_operands
== 0)
6417 fake_zero_displacement
= 1;
6418 if (i
.tm
.operand_types
[op
].bitfield
.vec_disp8
)
6419 i
.types
[op
].bitfield
.vec_disp8
= 1;
6421 i
.types
[op
].bitfield
.disp8
= 1;
6423 i
.sib
.scale
= i
.log2_scale_factor
;
6424 if (i
.index_reg
== 0)
6426 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
6427 /* <disp>(%esp) becomes two byte modrm with no index
6428 register. We've already stored the code for esp
6429 in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
6430 Any base register besides %esp will not use the
6431 extra modrm byte. */
6432 i
.sib
.index
= NO_INDEX_REGISTER
;
6434 else if (!i
.tm
.opcode_modifier
.vecsib
)
6436 if (i
.index_reg
->reg_num
== RegEiz
6437 || i
.index_reg
->reg_num
== RegRiz
)
6438 i
.sib
.index
= NO_INDEX_REGISTER
;
6440 i
.sib
.index
= i
.index_reg
->reg_num
;
6441 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
6442 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
6447 && (i
.reloc
[op
] == BFD_RELOC_386_TLS_DESC_CALL
6448 || i
.reloc
[op
] == BFD_RELOC_X86_64_TLSDESC_CALL
))
6452 if (!fake_zero_displacement
6456 fake_zero_displacement
= 1;
6457 if (i
.disp_encoding
== disp_encoding_8bit
)
6458 i
.types
[op
].bitfield
.disp8
= 1;
6460 i
.types
[op
].bitfield
.disp32
= 1;
6462 i
.rm
.mode
= mode_from_disp_size (i
.types
[op
]);
6466 if (fake_zero_displacement
)
6468 /* Fakes a zero displacement assuming that i.types[op]
6469 holds the correct displacement size. */
6472 gas_assert (i
.op
[op
].disps
== 0);
6473 exp
= &disp_expressions
[i
.disp_operands
++];
6474 i
.op
[op
].disps
= exp
;
6475 exp
->X_op
= O_constant
;
6476 exp
->X_add_number
= 0;
6477 exp
->X_add_symbol
= (symbolS
*) 0;
6478 exp
->X_op_symbol
= (symbolS
*) 0;
6486 if (i
.tm
.opcode_modifier
.vexsources
== XOP2SOURCES
)
6488 if (operand_type_check (i
.types
[0], imm
))
6489 i
.vex
.register_specifier
= NULL
;
6492 /* VEX.vvvv encodes one of the sources when the first
6493 operand is not an immediate. */
6494 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
6495 i
.vex
.register_specifier
= i
.op
[0].regs
;
6497 i
.vex
.register_specifier
= i
.op
[1].regs
;
6500 /* Destination is a XMM register encoded in the ModRM.reg
6502 i
.rm
.reg
= i
.op
[2].regs
->reg_num
;
6503 if ((i
.op
[2].regs
->reg_flags
& RegRex
) != 0)
6506 /* ModRM.rm and VEX.B encodes the other source. */
6507 if (!i
.mem_operands
)
6511 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
6512 i
.rm
.regmem
= i
.op
[1].regs
->reg_num
;
6514 i
.rm
.regmem
= i
.op
[0].regs
->reg_num
;
6516 if ((i
.op
[1].regs
->reg_flags
& RegRex
) != 0)
6520 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXLWP
)
6522 i
.vex
.register_specifier
= i
.op
[2].regs
;
6523 if (!i
.mem_operands
)
6526 i
.rm
.regmem
= i
.op
[1].regs
->reg_num
;
6527 if ((i
.op
[1].regs
->reg_flags
& RegRex
) != 0)
6531 /* Fill in i.rm.reg or i.rm.regmem field with register operand
6532 (if any) based on i.tm.extension_opcode. Again, we must be
6533 careful to make sure that segment/control/debug/test/MMX
6534 registers are coded into the i.rm.reg field. */
6535 else if (i
.reg_operands
)
6538 unsigned int vex_reg
= ~0;
6540 for (op
= 0; op
< i
.operands
; op
++)
6541 if (i
.types
[op
].bitfield
.reg8
6542 || i
.types
[op
].bitfield
.reg16
6543 || i
.types
[op
].bitfield
.reg32
6544 || i
.types
[op
].bitfield
.reg64
6545 || i
.types
[op
].bitfield
.regmmx
6546 || i
.types
[op
].bitfield
.regxmm
6547 || i
.types
[op
].bitfield
.regymm
6548 || i
.types
[op
].bitfield
.regbnd
6549 || i
.types
[op
].bitfield
.regzmm
6550 || i
.types
[op
].bitfield
.regmask
6551 || i
.types
[op
].bitfield
.sreg2
6552 || i
.types
[op
].bitfield
.sreg3
6553 || i
.types
[op
].bitfield
.control
6554 || i
.types
[op
].bitfield
.debug
6555 || i
.types
[op
].bitfield
.test
)
6560 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
6562 /* For instructions with VexNDS, the register-only
6563 source operand is encoded in VEX prefix. */
6564 gas_assert (mem
!= (unsigned int) ~0);
6569 gas_assert (op
< i
.operands
);
6573 /* Check register-only source operand when two source
6574 operands are swapped. */
6575 if (!i
.tm
.operand_types
[op
].bitfield
.baseindex
6576 && i
.tm
.operand_types
[op
+ 1].bitfield
.baseindex
)
6580 gas_assert (mem
== (vex_reg
+ 1)
6581 && op
< i
.operands
);
6586 gas_assert (vex_reg
< i
.operands
);
6590 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXNDD
)
6592 /* For instructions with VexNDD, the register destination
6593 is encoded in VEX prefix. */
6594 if (i
.mem_operands
== 0)
6596 /* There is no memory operand. */
6597 gas_assert ((op
+ 2) == i
.operands
);
6602 /* There are only 2 operands. */
6603 gas_assert (op
< 2 && i
.operands
== 2);
6608 gas_assert (op
< i
.operands
);
6610 if (vex_reg
!= (unsigned int) ~0)
6612 i386_operand_type
*type
= &i
.tm
.operand_types
[vex_reg
];
6614 if (type
->bitfield
.reg32
!= 1
6615 && type
->bitfield
.reg64
!= 1
6616 && !operand_type_equal (type
, ®xmm
)
6617 && !operand_type_equal (type
, ®ymm
)
6618 && !operand_type_equal (type
, ®zmm
)
6619 && !operand_type_equal (type
, ®mask
))
6622 i
.vex
.register_specifier
= i
.op
[vex_reg
].regs
;
6625 /* Don't set OP operand twice. */
6628 /* If there is an extension opcode to put here, the
6629 register number must be put into the regmem field. */
6630 if (i
.tm
.extension_opcode
!= None
)
6632 i
.rm
.regmem
= i
.op
[op
].regs
->reg_num
;
6633 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
6635 if ((i
.op
[op
].regs
->reg_flags
& RegVRex
) != 0)
6640 i
.rm
.reg
= i
.op
[op
].regs
->reg_num
;
6641 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
6643 if ((i
.op
[op
].regs
->reg_flags
& RegVRex
) != 0)
6648 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
6649 must set it to 3 to indicate this is a register operand
6650 in the regmem field. */
6651 if (!i
.mem_operands
)
6655 /* Fill in i.rm.reg field with extension opcode (if any). */
6656 if (i
.tm
.extension_opcode
!= None
)
6657 i
.rm
.reg
= i
.tm
.extension_opcode
;
6663 output_branch (void)
6669 relax_substateT subtype
;
6673 code16
= flag_code
== CODE_16BIT
? CODE16
: 0;
6674 size
= i
.disp_encoding
== disp_encoding_32bit
? BIG
: SMALL
;
6677 if (i
.prefix
[DATA_PREFIX
] != 0)
6683 /* Pentium4 branch hints. */
6684 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
/* not taken */
6685 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
/* taken */)
6690 if (i
.prefix
[REX_PREFIX
] != 0)
6696 /* BND prefixed jump. */
6697 if (i
.prefix
[BND_PREFIX
] != 0)
6699 FRAG_APPEND_1_CHAR (i
.prefix
[BND_PREFIX
]);
6703 if (i
.prefixes
!= 0 && !intel_syntax
)
6704 as_warn (_("skipping prefixes on this instruction"));
6706 /* It's always a symbol; End frag & setup for relax.
6707 Make sure there is enough room in this frag for the largest
6708 instruction we may generate in md_convert_frag. This is 2
6709 bytes for the opcode and room for the prefix and largest
6711 frag_grow (prefix
+ 2 + 4);
6712 /* Prefix and 1 opcode byte go in fr_fix. */
6713 p
= frag_more (prefix
+ 1);
6714 if (i
.prefix
[DATA_PREFIX
] != 0)
6715 *p
++ = DATA_PREFIX_OPCODE
;
6716 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
6717 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
)
6718 *p
++ = i
.prefix
[SEG_PREFIX
];
6719 if (i
.prefix
[REX_PREFIX
] != 0)
6720 *p
++ = i
.prefix
[REX_PREFIX
];
6721 *p
= i
.tm
.base_opcode
;
6723 if ((unsigned char) *p
== JUMP_PC_RELATIVE
)
6724 subtype
= ENCODE_RELAX_STATE (UNCOND_JUMP
, size
);
6725 else if (cpu_arch_flags
.bitfield
.cpui386
)
6726 subtype
= ENCODE_RELAX_STATE (COND_JUMP
, size
);
6728 subtype
= ENCODE_RELAX_STATE (COND_JUMP86
, size
);
6731 sym
= i
.op
[0].disps
->X_add_symbol
;
6732 off
= i
.op
[0].disps
->X_add_number
;
6734 if (i
.op
[0].disps
->X_op
!= O_constant
6735 && i
.op
[0].disps
->X_op
!= O_symbol
)
6737 /* Handle complex expressions. */
6738 sym
= make_expr_symbol (i
.op
[0].disps
);
6742 /* 1 possible extra opcode + 4 byte displacement go in var part.
6743 Pass reloc in fr_var. */
6744 frag_var (rs_machine_dependent
, 5,
6746 || i
.reloc
[0] != NO_RELOC
6747 || (i
.bnd_prefix
== NULL
&& !add_bnd_prefix
))
6749 : BFD_RELOC_X86_64_PC32_BND
),
6750 subtype
, sym
, off
, p
);
6760 if (i
.tm
.opcode_modifier
.jumpbyte
)
6762 /* This is a loop or jecxz type instruction. */
6764 if (i
.prefix
[ADDR_PREFIX
] != 0)
6766 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE
);
6769 /* Pentium4 branch hints. */
6770 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
/* not taken */
6771 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
/* taken */)
6773 FRAG_APPEND_1_CHAR (i
.prefix
[SEG_PREFIX
]);
6782 if (flag_code
== CODE_16BIT
)
6785 if (i
.prefix
[DATA_PREFIX
] != 0)
6787 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE
);
6797 if (i
.prefix
[REX_PREFIX
] != 0)
6799 FRAG_APPEND_1_CHAR (i
.prefix
[REX_PREFIX
]);
6803 /* BND prefixed jump. */
6804 if (i
.prefix
[BND_PREFIX
] != 0)
6806 FRAG_APPEND_1_CHAR (i
.prefix
[BND_PREFIX
]);
6810 if (i
.prefixes
!= 0 && !intel_syntax
)
6811 as_warn (_("skipping prefixes on this instruction"));
6813 p
= frag_more (i
.tm
.opcode_length
+ size
);
6814 switch (i
.tm
.opcode_length
)
6817 *p
++ = i
.tm
.base_opcode
>> 8;
6819 *p
++ = i
.tm
.base_opcode
;
6825 fixP
= fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
6826 i
.op
[0].disps
, 1, reloc (size
, 1, 1,
6827 (i
.bnd_prefix
!= NULL
6831 /* All jumps handled here are signed, but don't use a signed limit
6832 check for 32 and 16 bit jumps as we want to allow wrap around at
6833 4G and 64k respectively. */
6835 fixP
->fx_signed
= 1;
6839 output_interseg_jump (void)
6847 if (flag_code
== CODE_16BIT
)
6851 if (i
.prefix
[DATA_PREFIX
] != 0)
6857 if (i
.prefix
[REX_PREFIX
] != 0)
6867 if (i
.prefixes
!= 0 && !intel_syntax
)
6868 as_warn (_("skipping prefixes on this instruction"));
6870 /* 1 opcode; 2 segment; offset */
6871 p
= frag_more (prefix
+ 1 + 2 + size
);
6873 if (i
.prefix
[DATA_PREFIX
] != 0)
6874 *p
++ = DATA_PREFIX_OPCODE
;
6876 if (i
.prefix
[REX_PREFIX
] != 0)
6877 *p
++ = i
.prefix
[REX_PREFIX
];
6879 *p
++ = i
.tm
.base_opcode
;
6880 if (i
.op
[1].imms
->X_op
== O_constant
)
6882 offsetT n
= i
.op
[1].imms
->X_add_number
;
6885 && !fits_in_unsigned_word (n
)
6886 && !fits_in_signed_word (n
))
6888 as_bad (_("16-bit jump out of range"));
6891 md_number_to_chars (p
, n
, size
);
6894 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
6895 i
.op
[1].imms
, 0, reloc (size
, 0, 0, 0, i
.reloc
[1]));
6896 if (i
.op
[0].imms
->X_op
!= O_constant
)
6897 as_bad (_("can't handle non absolute segment in `%s'"),
6899 md_number_to_chars (p
+ size
, (valueT
) i
.op
[0].imms
->X_add_number
, 2);
6905 fragS
*insn_start_frag
;
6906 offsetT insn_start_off
;
6908 /* Tie dwarf2 debug info to the address at the start of the insn.
6909 We can't do this after the insn has been output as the current
6910 frag may have been closed off. eg. by frag_var. */
6911 dwarf2_emit_insn (0);
6913 insn_start_frag
= frag_now
;
6914 insn_start_off
= frag_now_fix ();
6917 if (i
.tm
.opcode_modifier
.jump
)
6919 else if (i
.tm
.opcode_modifier
.jumpbyte
6920 || i
.tm
.opcode_modifier
.jumpdword
)
6922 else if (i
.tm
.opcode_modifier
.jumpintersegment
)
6923 output_interseg_jump ();
6926 /* Output normal instructions here. */
6930 unsigned int prefix
;
6932 /* Since the VEX/EVEX prefix contains the implicit prefix, we
6933 don't need the explicit prefix. */
6934 if (!i
.tm
.opcode_modifier
.vex
&& !i
.tm
.opcode_modifier
.evex
)
6936 switch (i
.tm
.opcode_length
)
6939 if (i
.tm
.base_opcode
& 0xff000000)
6941 prefix
= (i
.tm
.base_opcode
>> 24) & 0xff;
6946 if ((i
.tm
.base_opcode
& 0xff0000) != 0)
6948 prefix
= (i
.tm
.base_opcode
>> 16) & 0xff;
6949 if (i
.tm
.cpu_flags
.bitfield
.cpupadlock
)
6952 if (prefix
!= REPE_PREFIX_OPCODE
6953 || (i
.prefix
[REP_PREFIX
]
6954 != REPE_PREFIX_OPCODE
))
6955 add_prefix (prefix
);
6958 add_prefix (prefix
);
6967 /* The prefix bytes. */
6968 for (j
= ARRAY_SIZE (i
.prefix
), q
= i
.prefix
; j
> 0; j
--, q
++)
6970 FRAG_APPEND_1_CHAR (*q
);
6974 for (j
= 0, q
= i
.prefix
; j
< ARRAY_SIZE (i
.prefix
); j
++, q
++)
6979 /* REX byte is encoded in VEX prefix. */
6983 FRAG_APPEND_1_CHAR (*q
);
6986 /* There should be no other prefixes for instructions
6991 /* For EVEX instructions i.vrex should become 0 after
6992 build_evex_prefix. For VEX instructions upper 16 registers
6993 aren't available, so VREX should be 0. */
6996 /* Now the VEX prefix. */
6997 p
= frag_more (i
.vex
.length
);
6998 for (j
= 0; j
< i
.vex
.length
; j
++)
6999 p
[j
] = i
.vex
.bytes
[j
];
7002 /* Now the opcode; be careful about word order here! */
7003 if (i
.tm
.opcode_length
== 1)
7005 FRAG_APPEND_1_CHAR (i
.tm
.base_opcode
);
7009 switch (i
.tm
.opcode_length
)
7013 *p
++ = (i
.tm
.base_opcode
>> 24) & 0xff;
7014 *p
++ = (i
.tm
.base_opcode
>> 16) & 0xff;
7018 *p
++ = (i
.tm
.base_opcode
>> 16) & 0xff;
7028 /* Put out high byte first: can't use md_number_to_chars! */
7029 *p
++ = (i
.tm
.base_opcode
>> 8) & 0xff;
7030 *p
= i
.tm
.base_opcode
& 0xff;
7033 /* Now the modrm byte and sib byte (if present). */
7034 if (i
.tm
.opcode_modifier
.modrm
)
7036 FRAG_APPEND_1_CHAR ((i
.rm
.regmem
<< 0
7039 /* If i.rm.regmem == ESP (4)
7040 && i.rm.mode != (Register mode)
7042 ==> need second modrm byte. */
7043 if (i
.rm
.regmem
== ESCAPE_TO_TWO_BYTE_ADDRESSING
7045 && !(i
.base_reg
&& i
.base_reg
->reg_type
.bitfield
.reg16
))
7046 FRAG_APPEND_1_CHAR ((i
.sib
.base
<< 0
7048 | i
.sib
.scale
<< 6));
7051 if (i
.disp_operands
)
7052 output_disp (insn_start_frag
, insn_start_off
);
7055 output_imm (insn_start_frag
, insn_start_off
);
7061 pi ("" /*line*/, &i
);
7063 #endif /* DEBUG386 */
7066 /* Return the size of the displacement operand N. */
7069 disp_size (unsigned int n
)
7073 /* Vec_Disp8 has to be 8bit. */
7074 if (i
.types
[n
].bitfield
.vec_disp8
)
7076 else if (i
.types
[n
].bitfield
.disp64
)
7078 else if (i
.types
[n
].bitfield
.disp8
)
7080 else if (i
.types
[n
].bitfield
.disp16
)
/* Return the size of the immediate operand N.  */

static int
imm_size (unsigned int n)
{
  int size = 4;
  if (i.types[n].bitfield.imm64)
    size = 8;
  else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s)
    size = 1;
  else if (i.types[n].bitfield.imm16)
    size = 2;
  return size;
}
static void
output_disp (fragS *insn_start_frag, offsetT insn_start_off)
{
  char *p;
  unsigned int n;

  for (n = 0; n < i.operands; n++)
    {
      if (i.types[n].bitfield.vec_disp8
          || operand_type_check (i.types[n], disp))
        {
          if (i.op[n].disps->X_op == O_constant)
            {
              int size = disp_size (n);
              offsetT val = i.op[n].disps->X_add_number;

              if (i.types[n].bitfield.vec_disp8)
                val >>= i.memshift;
              val = offset_in_range (val, size);
              p = frag_more (size);
              md_number_to_chars (p, val, size);
            }
          else
            {
              enum bfd_reloc_code_real reloc_type;
              int size = disp_size (n);
              int sign = i.types[n].bitfield.disp32s;
              int pcrel = (i.flags[n] & Operand_PCrel) != 0;

              /* We can't have 8 bit displacement here.  */
              gas_assert (!i.types[n].bitfield.disp8);

              /* The PC relative address is computed relative
                 to the instruction boundary, so in case immediate
                 fields follow, we need to adjust the value.  */
              if (pcrel && i.imm_operands)
                {
                  unsigned int n1;
                  int sz = 0;

                  for (n1 = 0; n1 < i.operands; n1++)
                    if (operand_type_check (i.types[n1], imm))
                      {
                        /* Only one immediate is allowed for PC
                           relative address.  */
                        gas_assert (sz == 0);
                        sz = imm_size (n1);
                        i.op[n].disps->X_add_number -= sz;
                      }
                  /* We should find the immediate.  */
                  gas_assert (sz != 0);
                }

              p = frag_more (size);
              reloc_type = reloc (size, pcrel, sign,
                                  (i.bnd_prefix != NULL
                                   || add_bnd_prefix),
                                  i.reloc[n]);
              if (GOT_symbol
                  && GOT_symbol == i.op[n].disps->X_add_symbol
                  && (((reloc_type == BFD_RELOC_32
                        || reloc_type == BFD_RELOC_X86_64_32S
                        || (reloc_type == BFD_RELOC_64
                            && object_64bit))
                       && (i.op[n].disps->X_op == O_symbol
                           || (i.op[n].disps->X_op == O_add
                               && ((symbol_get_value_expression
                                    (i.op[n].disps->X_op_symbol)->X_op)
                                   == O_subtract))))
                      || reloc_type == BFD_RELOC_32_PCREL))
                {
                  offsetT add;

                  if (insn_start_frag == frag_now)
                    add = (p - frag_now->fr_literal) - insn_start_off;
                  else
                    {
                      fragS *fr;

                      add = insn_start_frag->fr_fix - insn_start_off;
                      for (fr = insn_start_frag->fr_next;
                           fr && fr != frag_now; fr = fr->fr_next)
                        add += fr->fr_fix;
                      add += p - frag_now->fr_literal;
                    }

                  if (!object_64bit)
                    {
                      reloc_type = BFD_RELOC_386_GOTPC;
                      i.op[n].imms->X_add_number += add;
                    }
                  else if (reloc_type == BFD_RELOC_64)
                    reloc_type = BFD_RELOC_X86_64_GOTPC64;
                  else
                    /* Don't do the adjustment for x86-64, as there
                       the pcrel addressing is relative to the _next_
                       insn, and that is taken care of in other code.  */
                    reloc_type = BFD_RELOC_X86_64_GOTPC32;
                }

              fix_new_exp (frag_now, p - frag_now->fr_literal, size,
                           i.op[n].disps, pcrel, reloc_type);
            }
        }
    }
}
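/* Worked example of the pcrel adjustment in output_disp above (illustrative;
   the instruction is an assumption): in "cmpl $0x12345678, foo(%rip)" the
   4-byte displacement is followed by a 4-byte immediate, yet the CPU's
   %rip-relative base points past the whole instruction, so the stored addend
   is biased by -4, the size of the trailing immediate found by the
   imm_size () scan.  */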
static void
output_imm (fragS *insn_start_frag, offsetT insn_start_off)
{
  char *p;
  unsigned int n;

  for (n = 0; n < i.operands; n++)
    {
      /* Skip SAE/RC Imm operand in EVEX.  They are already handled.  */
      if (i.rounding && (int) n == i.rounding->operand)
        continue;

      if (operand_type_check (i.types[n], imm))
        {
          if (i.op[n].imms->X_op == O_constant)
            {
              int size = imm_size (n);
              offsetT val;

              val = offset_in_range (i.op[n].imms->X_add_number,
                                     size);
              p = frag_more (size);
              md_number_to_chars (p, val, size);
            }
          else
            {
              /* Not absolute_section.
                 Need a 32-bit fixup (don't support 8bit
                 non-absolute imms).  Try to support other
                 sizes ...  */
              enum bfd_reloc_code_real reloc_type;
              int size = imm_size (n);
              int sign;

              if (i.types[n].bitfield.imm32s
                  && (i.suffix == QWORD_MNEM_SUFFIX
                      || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
                sign = 1;
              else
                sign = 0;

              p = frag_more (size);
              reloc_type = reloc (size, 0, sign, 0, i.reloc[n]);
              /* This is tough to explain.  We end up with this one if we
               * have operands that look like
               * "_GLOBAL_OFFSET_TABLE_+[.-.L284]".  The goal here is to
               * obtain the absolute address of the GOT, and it is strongly
               * preferable from a performance point of view to avoid using
               * a runtime relocation for this.  The actual sequence of
               * instructions often looks something like:
               *
               *	call	.L66
               * .L66:
               *	popl	%ebx
               *	addl	$_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
               *
               * The call and pop essentially return the absolute address
               * of the label .L66 and store it in %ebx.  The linker itself
               * will ultimately change the first operand of the addl so
               * that %ebx points to the GOT, but to keep things simple, the
               * .o file must have this operand set so that it generates not
               * the absolute address of .L66, but the absolute address of
               * itself.  This allows the linker to simply treat a GOTPC
               * relocation as asking for a pcrel offset to the GOT to be
               * added in, and the addend of the relocation is stored in the
               * operand field for the instruction itself.
               *
               * Our job here is to fix the operand so that it would add
               * the correct offset so that %ebx would point to itself.  The
               * thing that is tricky is that .-.L66 will point to the
               * beginning of the instruction, so we need to further modify
               * the operand so that it will point to itself.  There are
               * other cases where you have something like:
               *
               *	.long	$_GLOBAL_OFFSET_TABLE_+[.-.L66]
               *
               * and here no correction would be required.  Internally in
               * the assembler we treat operands of this form as not being
               * pcrel since the '.' is explicitly mentioned, and I wonder
               * whether it would simplify matters to do it this way.  Who
               * knows.  In earlier versions of the PIC patches, the
               * pcrel_adjust field was used to store the correction, but
               * since the expression is not pcrel, I felt it would be
               * confusing to do it this way.  */
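              /* Worked example of the correction described above (byte
                 offsets are an assumption about the chosen encoding): for
                 "addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx" the immediate
                 field starts 2 bytes into the instruction (opcode 0x81 plus
                 ModRM), so 2 is the value added to the operand below, and
                 %ebx ends up holding the address of the GOT rather than
                 that of the addl itself.  */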
              if ((reloc_type == BFD_RELOC_32
                   || reloc_type == BFD_RELOC_X86_64_32S
                   || reloc_type == BFD_RELOC_64)
                  && GOT_symbol
                  && GOT_symbol == i.op[n].imms->X_add_symbol
                  && (i.op[n].imms->X_op == O_symbol
                      || (i.op[n].imms->X_op == O_add
                          && ((symbol_get_value_expression
                               (i.op[n].imms->X_op_symbol)->X_op)
                              == O_subtract))))
                {
                  offsetT add;

                  if (insn_start_frag == frag_now)
                    add = (p - frag_now->fr_literal) - insn_start_off;
                  else
                    {
                      fragS *fr;

                      add = insn_start_frag->fr_fix - insn_start_off;
                      for (fr = insn_start_frag->fr_next;
                           fr && fr != frag_now; fr = fr->fr_next)
                        add += fr->fr_fix;
                      add += p - frag_now->fr_literal;
                    }

                  if (!object_64bit)
                    reloc_type = BFD_RELOC_386_GOTPC;
                  else if (size == 4)
                    reloc_type = BFD_RELOC_X86_64_GOTPC32;
                  else
                    reloc_type = BFD_RELOC_X86_64_GOTPC64;
                  i.op[n].imms->X_add_number += add;
                }

              fix_new_exp (frag_now, p - frag_now->fr_literal, size,
                           i.op[n].imms, 0, reloc_type);
            }
        }
    }
}
/* x86_cons_fix_new is called via the expression parsing code when a
   reloc is needed.  We use this hook to get the correct .got reloc.  */
static enum bfd_reloc_code_real got_reloc = NO_RELOC;
static int cons_sign = -1;

void
x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
                  expressionS *exp)
{
  enum bfd_reloc_code_real r = reloc (len, 0, cons_sign, 0, got_reloc);

  got_reloc = NO_RELOC;

#ifdef TE_PE
  if (exp->X_op == O_secrel)
    {
      exp->X_op = O_symbol;
      r = BFD_RELOC_32_SECREL;
    }
#endif

  fix_new_exp (frag, off, len, exp, 0, r);
}

/* Export the ABI address size for use by TC_ADDRESS_BYTES for the
   purpose of the `.dc.a' internal pseudo-op.  */

int
x86_address_bytes (void)
{
  if ((stdoutput->arch_info->mach & bfd_mach_x64_32))
    return 4;
  return stdoutput->arch_info->bits_per_address / 8;
}
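/* Example (a sketch of the observable behaviour): for an x32 (ILP32) output
   BFD the check above reports 4-byte addresses to `.dc.a', even though the
   underlying x86-64 arch_info advertises 64 bits per address.  */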
#if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
    || defined (LEX_AT)
# define lex_got(reloc, adjust, types, bnd_prefix) NULL
#else
/* Parse operands of the form
   <symbol>@GOTOFF+<nnn>
   and similar .plt or .got references.

   If we find one, set up the correct relocation in RELOC and copy the
   input string, minus the `@GOTOFF' into a malloc'd buffer for
   parsing by the calling routine.  Return this buffer, and if ADJUST
   is non-null set it to the length of the string we removed from the
   input line.  Otherwise return NULL.  */
static char *
lex_got (enum bfd_reloc_code_real *rel,
         int *adjust,
         i386_operand_type *types,
         int bnd_prefix)
{
  /* Some of the relocations depend on the size of what field is to
     be relocated.  But in our callers i386_immediate and i386_displacement
     we don't yet know the operand size (this will be set by insn
     matching).  Hence we record the word32 relocation here,
     and adjust the reloc according to the real size in reloc().  */
  static const struct {
    const char *str;
    int len;
    const enum bfd_reloc_code_real rel[2];
    const i386_operand_type types64;
  } gotrel[] = {
7398 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7399 { STRING_COMMA_LEN ("SIZE"), { BFD_RELOC_SIZE32
,
7401 OPERAND_TYPE_IMM32_64
},
7403 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real
,
7404 BFD_RELOC_X86_64_PLTOFF64
},
7405 OPERAND_TYPE_IMM64
},
7406 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32
,
7407 BFD_RELOC_X86_64_PLT32
},
7408 OPERAND_TYPE_IMM32_32S_DISP32
},
7409 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real
,
7410 BFD_RELOC_X86_64_GOTPLT64
},
7411 OPERAND_TYPE_IMM64_DISP64
},
7412 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF
,
7413 BFD_RELOC_X86_64_GOTOFF64
},
7414 OPERAND_TYPE_IMM64_DISP64
},
7415 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real
,
7416 BFD_RELOC_X86_64_GOTPCREL
},
7417 OPERAND_TYPE_IMM32_32S_DISP32
},
7418 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD
,
7419 BFD_RELOC_X86_64_TLSGD
},
7420 OPERAND_TYPE_IMM32_32S_DISP32
},
7421 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM
,
7422 _dummy_first_bfd_reloc_code_real
},
7423 OPERAND_TYPE_NONE
},
7424 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real
,
7425 BFD_RELOC_X86_64_TLSLD
},
7426 OPERAND_TYPE_IMM32_32S_DISP32
},
7427 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32
,
7428 BFD_RELOC_X86_64_GOTTPOFF
},
7429 OPERAND_TYPE_IMM32_32S_DISP32
},
7430 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32
,
7431 BFD_RELOC_X86_64_TPOFF32
},
7432 OPERAND_TYPE_IMM32_32S_64_DISP32_64
},
7433 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE
,
7434 _dummy_first_bfd_reloc_code_real
},
7435 OPERAND_TYPE_NONE
},
7436 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32
,
7437 BFD_RELOC_X86_64_DTPOFF32
},
7438 OPERAND_TYPE_IMM32_32S_64_DISP32_64
},
7439 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE
,
7440 _dummy_first_bfd_reloc_code_real
},
7441 OPERAND_TYPE_NONE
},
7442 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE
,
7443 _dummy_first_bfd_reloc_code_real
},
7444 OPERAND_TYPE_NONE
},
7445 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32
,
7446 BFD_RELOC_X86_64_GOT32
},
7447 OPERAND_TYPE_IMM32_32S_64_DISP32
},
7448 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC
,
7449 BFD_RELOC_X86_64_GOTPC32_TLSDESC
},
7450 OPERAND_TYPE_IMM32_32S_DISP32
},
7451 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL
,
7452 BFD_RELOC_X86_64_TLSDESC_CALL
},
7453 OPERAND_TYPE_IMM32_32S_DISP32
},
7458 #if defined (OBJ_MAYBE_ELF)
7463 for (cp
= input_line_pointer
; *cp
!= '@'; cp
++)
7464 if (is_end_of_line
[(unsigned char) *cp
] || *cp
== ',')
7467 for (j
= 0; j
< ARRAY_SIZE (gotrel
); j
++)
7469 int len
= gotrel
[j
].len
;
7470 if (strncasecmp (cp
+ 1, gotrel
[j
].str
, len
) == 0)
7472 if (gotrel
[j
].rel
[object_64bit
] != 0)
7475 char *tmpbuf
, *past_reloc
;
7477 *rel
= gotrel
[j
].rel
[object_64bit
];
7481 if (flag_code
!= CODE_64BIT
)
7483 types
->bitfield
.imm32
= 1;
7484 types
->bitfield
.disp32
= 1;
7487 *types
= gotrel
[j
].types64
;
7490 if (j
!= 0 && GOT_symbol
== NULL
)
7491 GOT_symbol
= symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME
);
7493 /* The length of the first part of our input line. */
7494 first
= cp
- input_line_pointer
;
7496 /* The second part goes from after the reloc token until
7497 (and including) an end_of_line char or comma. */
7498 past_reloc
= cp
+ 1 + len
;
7500 while (!is_end_of_line
[(unsigned char) *cp
] && *cp
!= ',')
7502 second
= cp
+ 1 - past_reloc
;
7504 /* Allocate and copy string. The trailing NUL shouldn't
7505 be necessary, but be safe. */
7506 tmpbuf
= (char *) xmalloc (first
+ second
+ 2);
7507 memcpy (tmpbuf
, input_line_pointer
, first
);
7508 if (second
!= 0 && *past_reloc
!= ' ')
7509 /* Replace the relocation token with ' ', so that
7510 errors like foo@GOTOFF1 will be detected. */
7511 tmpbuf
[first
++] = ' ';
7513 /* Increment length by 1 if the relocation token is
7518 memcpy (tmpbuf
+ first
, past_reloc
, second
);
7519 tmpbuf
[first
+ second
] = '\0';
7520 if (bnd_prefix
&& *rel
== BFD_RELOC_X86_64_PLT32
)
7521 *rel
= BFD_RELOC_X86_64_PLT32_BND
;
7525 as_bad (_("@%s reloc is not supported with %d-bit output format"),
7526 gotrel
[j
].str
, 1 << (5 + object_64bit
));
7531 /* Might be a symbol version string. Don't as_bad here. */
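/* Illustrative example of lex_got () above (the operand text is an
   assumption): given "foo@GOTOFF+4", the table lookup records
   BFD_RELOC_386_GOTOFF (or BFD_RELOC_X86_64_GOTOFF64 for 64-bit objects) in
   *rel and hands back a malloc'd buffer reading "foo +4" -- the reloc token
   is overwritten with a space precisely so that junk such as "foo@GOTOFF1"
   still fails to parse.  */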
7540 /* Parse operands of the form
7541 <symbol>@SECREL32+<nnn>
7543 If we find one, set up the correct relocation in RELOC and copy the
7544 input string, minus the `@SECREL32' into a malloc'd buffer for
7545 parsing by the calling routine. Return this buffer, and if ADJUST
7546 is non-null set it to the length of the string we removed from the
7547 input line. Otherwise return NULL.
7549 This function is copied from the ELF version above adjusted for PE targets. */
7552 lex_got (enum bfd_reloc_code_real
*rel ATTRIBUTE_UNUSED
,
7553 int *adjust ATTRIBUTE_UNUSED
,
7554 i386_operand_type
*types
,
7555 int bnd_prefix ATTRIBUTE_UNUSED
)
7561 const enum bfd_reloc_code_real rel
[2];
7562 const i386_operand_type types64
;
7566 { STRING_COMMA_LEN ("SECREL32"), { BFD_RELOC_32_SECREL
,
7567 BFD_RELOC_32_SECREL
},
7568 OPERAND_TYPE_IMM32_32S_64_DISP32_64
},
7574 for (cp
= input_line_pointer
; *cp
!= '@'; cp
++)
7575 if (is_end_of_line
[(unsigned char) *cp
] || *cp
== ',')
7578 for (j
= 0; j
< ARRAY_SIZE (gotrel
); j
++)
7580 int len
= gotrel
[j
].len
;
7582 if (strncasecmp (cp
+ 1, gotrel
[j
].str
, len
) == 0)
7584 if (gotrel
[j
].rel
[object_64bit
] != 0)
7587 char *tmpbuf
, *past_reloc
;
7589 *rel
= gotrel
[j
].rel
[object_64bit
];
7595 if (flag_code
!= CODE_64BIT
)
7597 types
->bitfield
.imm32
= 1;
7598 types
->bitfield
.disp32
= 1;
7601 *types
= gotrel
[j
].types64
;
7604 /* The length of the first part of our input line. */
7605 first
= cp
- input_line_pointer
;
7607 /* The second part goes from after the reloc token until
7608 (and including) an end_of_line char or comma. */
7609 past_reloc
= cp
+ 1 + len
;
7611 while (!is_end_of_line
[(unsigned char) *cp
] && *cp
!= ',')
7613 second
= cp
+ 1 - past_reloc
;
7615 /* Allocate and copy string. The trailing NUL shouldn't
7616 be necessary, but be safe. */
7617 tmpbuf
= (char *) xmalloc (first
+ second
+ 2);
7618 memcpy (tmpbuf
, input_line_pointer
, first
);
7619 if (second
!= 0 && *past_reloc
!= ' ')
7620 /* Replace the relocation token with ' ', so that
7621 errors like foo@SECLREL321 will be detected. */
7622 tmpbuf
[first
++] = ' ';
7623 memcpy (tmpbuf
+ first
, past_reloc
, second
);
7624 tmpbuf
[first
+ second
] = '\0';
7628 as_bad (_("@%s reloc is not supported with %d-bit output format"),
7629 gotrel
[j
].str
, 1 << (5 + object_64bit
));
7634 /* Might be a symbol version string. Don't as_bad here. */
7641 x86_cons (expressionS
*exp
, int size
)
7643 intel_syntax
= -intel_syntax
;
7646 if (size
== 4 || (object_64bit
&& size
== 8))
7648 /* Handle @GOTOFF and the like in an expression. */
7650 char *gotfree_input_line
;
7653 save
= input_line_pointer
;
7654 gotfree_input_line
= lex_got (&got_reloc
, &adjust
, NULL
, 0);
7655 if (gotfree_input_line
)
7656 input_line_pointer
= gotfree_input_line
;
7660 if (gotfree_input_line
)
7662 /* expression () has merrily parsed up to the end of line,
7663 or a comma - in the wrong buffer. Transfer how far
7664 input_line_pointer has moved to the right buffer. */
7665 input_line_pointer
= (save
7666 + (input_line_pointer
- gotfree_input_line
)
7668 free (gotfree_input_line
);
7669 if (exp
->X_op
== O_constant
7670 || exp
->X_op
== O_absent
7671 || exp
->X_op
== O_illegal
7672 || exp
->X_op
== O_register
7673 || exp
->X_op
== O_big
)
7675 char c
= *input_line_pointer
;
7676 *input_line_pointer
= 0;
7677 as_bad (_("missing or invalid expression `%s'"), save
);
7678 *input_line_pointer
= c
;
7685 intel_syntax
= -intel_syntax
;
7688 i386_intel_simplify (exp
);
7692 signed_cons (int size
)
7694 if (flag_code
== CODE_64BIT
)
7702 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
7709 if (exp
.X_op
== O_symbol
)
7710 exp
.X_op
= O_secrel
;
7712 emit_expr (&exp
, 4);
7714 while (*input_line_pointer
++ == ',');
7716 input_line_pointer
--;
7717 demand_empty_rest_of_line ();
7721 /* Handle Vector operations. */
7724 check_VecOperations (char *op_string
, char *op_end
)
7726 const reg_entry
*mask
;
7731 && (op_end
== NULL
|| op_string
< op_end
))
7734 if (*op_string
== '{')
7738 /* Check broadcasts. */
7739 if (strncmp (op_string
, "1to", 3) == 0)
7744 goto duplicated_vec_op
;
7747 if (*op_string
== '8')
7748 bcst_type
= BROADCAST_1TO8
;
7749 else if (*op_string
== '1'
7750 && *(op_string
+1) == '6')
7752 bcst_type
= BROADCAST_1TO16
;
7757 as_bad (_("Unsupported broadcast: `%s'"), saved
);
7762 broadcast_op
.type
= bcst_type
;
7763 broadcast_op
.operand
= this_operand
;
7764 i
.broadcast
= &broadcast_op
;
7766 /* Check masking operation. */
7767 else if ((mask
= parse_register (op_string
, &end_op
)) != NULL
)
7769 /* k0 can't be used for write mask. */
7770 if (mask
->reg_num
== 0)
7772 as_bad (_("`%s' can't be used for write mask"),
7779 mask_op
.mask
= mask
;
7780 mask_op
.zeroing
= 0;
7781 mask_op
.operand
= this_operand
;
7787 goto duplicated_vec_op
;
7789 i
.mask
->mask
= mask
;
7791 /* Only "{z}" is allowed here. No need to check
7792 zeroing mask explicitly. */
7793 if (i
.mask
->operand
!= this_operand
)
7795 as_bad (_("invalid write mask `%s'"), saved
);
7802 /* Check zeroing-flag for masking operation. */
7803 else if (*op_string
== 'z')
7807 mask_op
.mask
= NULL
;
7808 mask_op
.zeroing
= 1;
7809 mask_op
.operand
= this_operand
;
7814 if (i
.mask
->zeroing
)
7817 as_bad (_("duplicated `%s'"), saved
);
7821 i
.mask
->zeroing
= 1;
7823 /* Only "{%k}" is allowed here. No need to check mask
7824 register explicitly. */
7825 if (i
.mask
->operand
!= this_operand
)
7827 as_bad (_("invalid zeroing-masking `%s'"),
7836 goto unknown_vec_op
;
7838 if (*op_string
!= '}')
7840 as_bad (_("missing `}' in `%s'"), saved
);
7847 /* We don't know this one. */
7848 as_bad (_("unknown vector operation: `%s'"), saved
);
7856 i386_immediate (char *imm_start
)
7858 char *save_input_line_pointer
;
7859 char *gotfree_input_line
;
7862 i386_operand_type types
;
7864 operand_type_set (&types
, ~0);
7866 if (i
.imm_operands
== MAX_IMMEDIATE_OPERANDS
)
7868 as_bad (_("at most %d immediate operands are allowed"),
7869 MAX_IMMEDIATE_OPERANDS
);
7873 exp
= &im_expressions
[i
.imm_operands
++];
7874 i
.op
[this_operand
].imms
= exp
;
7876 if (is_space_char (*imm_start
))
7879 save_input_line_pointer
= input_line_pointer
;
7880 input_line_pointer
= imm_start
;
7882 gotfree_input_line
= lex_got (&i
.reloc
[this_operand
], NULL
, &types
,
7883 (i
.bnd_prefix
!= NULL
7884 || add_bnd_prefix
));
7885 if (gotfree_input_line
)
7886 input_line_pointer
= gotfree_input_line
;
7888 exp_seg
= expression (exp
);
7892 /* Handle vector operations. */
7893 if (*input_line_pointer
== '{')
7895 input_line_pointer
= check_VecOperations (input_line_pointer
,
7897 if (input_line_pointer
== NULL
)
7901 if (*input_line_pointer
)
7902 as_bad (_("junk `%s' after expression"), input_line_pointer
);
7904 input_line_pointer
= save_input_line_pointer
;
7905 if (gotfree_input_line
)
7907 free (gotfree_input_line
);
7909 if (exp
->X_op
== O_constant
|| exp
->X_op
== O_register
)
7910 exp
->X_op
= O_illegal
;
7913 return i386_finalize_immediate (exp_seg
, exp
, types
, imm_start
);
7917 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED
, expressionS
*exp
,
7918 i386_operand_type types
, const char *imm_start
)
7920 if (exp
->X_op
== O_absent
|| exp
->X_op
== O_illegal
|| exp
->X_op
== O_big
)
7923 as_bad (_("missing or invalid immediate expression `%s'"),
7927 else if (exp
->X_op
== O_constant
)
7929 /* Size it properly later. */
7930 i
.types
[this_operand
].bitfield
.imm64
= 1;
7931 /* If not 64bit, sign extend val. */
7932 if (flag_code
!= CODE_64BIT
7933 && (exp
->X_add_number
& ~(((addressT
) 2 << 31) - 1)) == 0)
7935 = (exp
->X_add_number
^ ((addressT
) 1 << 31)) - ((addressT
) 1 << 31);
7937 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
7938 else if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
7939 && exp_seg
!= absolute_section
7940 && exp_seg
!= text_section
7941 && exp_seg
!= data_section
7942 && exp_seg
!= bss_section
7943 && exp_seg
!= undefined_section
7944 && !bfd_is_com_section (exp_seg
))
7946 as_bad (_("unimplemented segment %s in operand"), exp_seg
->name
);
7950 else if (!intel_syntax
&& exp
->X_op
== O_register
)
7953 as_bad (_("illegal immediate register operand %s"), imm_start
);
7958 /* This is an address. The size of the address will be
7959 determined later, depending on destination register,
7960 suffix, or the default for the section. */
7961 i
.types
[this_operand
].bitfield
.imm8
= 1;
7962 i
.types
[this_operand
].bitfield
.imm16
= 1;
7963 i
.types
[this_operand
].bitfield
.imm32
= 1;
7964 i
.types
[this_operand
].bitfield
.imm32s
= 1;
7965 i
.types
[this_operand
].bitfield
.imm64
= 1;
7966 i
.types
[this_operand
] = operand_type_and (i
.types
[this_operand
],
static char *
i386_scale (char *scale)
{
  offsetT val;
  char *save = input_line_pointer;

  input_line_pointer = scale;
  val = get_absolute_expression ();

  switch (val)
    {
    case 1:
      i.log2_scale_factor = 0;
      break;
    case 2:
      i.log2_scale_factor = 1;
      break;
    case 4:
      i.log2_scale_factor = 2;
      break;
    case 8:
      i.log2_scale_factor = 3;
      break;
    default:
      {
        char sep = *input_line_pointer;

        *input_line_pointer = '\0';
        as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
                scale);
        *input_line_pointer = sep;
        input_line_pointer = save;
        return NULL;
      }
    }
  if (i.log2_scale_factor != 0 && i.index_reg == 0)
    {
      as_warn (_("scale factor of %d without an index register"),
               1 << i.log2_scale_factor);
      i.log2_scale_factor = 0;
    }
  scale = input_line_pointer;
  input_line_pointer = save;
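/* Example behaviour of i386_scale () above (a sketch): a scale string of "4"
   sets i.log2_scale_factor to 2, while anything other than 1, 2, 4 or 8 is
   rejected with the "expecting scale factor" diagnostic.  */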
8020 i386_displacement (char *disp_start
, char *disp_end
)
8024 char *save_input_line_pointer
;
8025 char *gotfree_input_line
;
8027 i386_operand_type bigdisp
, types
= anydisp
;
8030 if (i
.disp_operands
== MAX_MEMORY_OPERANDS
)
8032 as_bad (_("at most %d displacement operands are allowed"),
8033 MAX_MEMORY_OPERANDS
);
8037 operand_type_set (&bigdisp
, 0);
8038 if ((i
.types
[this_operand
].bitfield
.jumpabsolute
)
8039 || (!current_templates
->start
->opcode_modifier
.jump
8040 && !current_templates
->start
->opcode_modifier
.jumpdword
))
8042 bigdisp
.bitfield
.disp32
= 1;
8043 override
= (i
.prefix
[ADDR_PREFIX
] != 0);
8044 if (flag_code
== CODE_64BIT
)
8048 bigdisp
.bitfield
.disp32s
= 1;
8049 bigdisp
.bitfield
.disp64
= 1;
8052 else if ((flag_code
== CODE_16BIT
) ^ override
)
8054 bigdisp
.bitfield
.disp32
= 0;
8055 bigdisp
.bitfield
.disp16
= 1;
8060 /* For PC-relative branches, the width of the displacement
8061 is dependent upon data size, not address size. */
8062 override
= (i
.prefix
[DATA_PREFIX
] != 0);
8063 if (flag_code
== CODE_64BIT
)
8065 if (override
|| i
.suffix
== WORD_MNEM_SUFFIX
)
8066 bigdisp
.bitfield
.disp16
= 1;
8069 bigdisp
.bitfield
.disp32
= 1;
8070 bigdisp
.bitfield
.disp32s
= 1;
8076 override
= (i
.suffix
== (flag_code
!= CODE_16BIT
8078 : LONG_MNEM_SUFFIX
));
8079 bigdisp
.bitfield
.disp32
= 1;
8080 if ((flag_code
== CODE_16BIT
) ^ override
)
8082 bigdisp
.bitfield
.disp32
= 0;
8083 bigdisp
.bitfield
.disp16
= 1;
8087 i
.types
[this_operand
] = operand_type_or (i
.types
[this_operand
],
8090 exp
= &disp_expressions
[i
.disp_operands
];
8091 i
.op
[this_operand
].disps
= exp
;
8093 save_input_line_pointer
= input_line_pointer
;
8094 input_line_pointer
= disp_start
;
8095 END_STRING_AND_SAVE (disp_end
);
8097 #ifndef GCC_ASM_O_HACK
8098 #define GCC_ASM_O_HACK 0
8101 END_STRING_AND_SAVE (disp_end
+ 1);
8102 if (i
.types
[this_operand
].bitfield
.baseIndex
8103 && displacement_string_end
[-1] == '+')
8105 /* This hack is to avoid a warning when using the "o"
8106 constraint within gcc asm statements.
8109 #define _set_tssldt_desc(n,addr,limit,type) \
8110 __asm__ __volatile__ ( \
8112 "movw %w1,2+%0\n\t" \
8114 "movb %b1,4+%0\n\t" \
8115 "movb %4,5+%0\n\t" \
8116 "movb $0,6+%0\n\t" \
8117 "movb %h1,7+%0\n\t" \
8119 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
8121 This works great except that the output assembler ends
8122 up looking a bit weird if it turns out that there is
8123 no offset. You end up producing code that looks like:
8136 So here we provide the missing zero. */
8138 *displacement_string_end
= '0';
8141 gotfree_input_line
= lex_got (&i
.reloc
[this_operand
], NULL
, &types
,
8142 (i
.bnd_prefix
!= NULL
8143 || add_bnd_prefix
));
8144 if (gotfree_input_line
)
8145 input_line_pointer
= gotfree_input_line
;
8147 exp_seg
= expression (exp
);
8150 if (*input_line_pointer
)
8151 as_bad (_("junk `%s' after expression"), input_line_pointer
);
8153 RESTORE_END_STRING (disp_end
+ 1);
8155 input_line_pointer
= save_input_line_pointer
;
8156 if (gotfree_input_line
)
8158 free (gotfree_input_line
);
8160 if (exp
->X_op
== O_constant
|| exp
->X_op
== O_register
)
8161 exp
->X_op
= O_illegal
;
8164 ret
= i386_finalize_displacement (exp_seg
, exp
, types
, disp_start
);
8166 RESTORE_END_STRING (disp_end
);
8172 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED
, expressionS
*exp
,
8173 i386_operand_type types
, const char *disp_start
)
8175 i386_operand_type bigdisp
;
8178 /* We do this to make sure that the section symbol is in
8179 the symbol table. We will ultimately change the relocation
8180 to be relative to the beginning of the section. */
8181 if (i
.reloc
[this_operand
] == BFD_RELOC_386_GOTOFF
8182 || i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTPCREL
8183 || i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTOFF64
)
8185 if (exp
->X_op
!= O_symbol
)
8188 if (S_IS_LOCAL (exp
->X_add_symbol
)
8189 && S_GET_SEGMENT (exp
->X_add_symbol
) != undefined_section
8190 && S_GET_SEGMENT (exp
->X_add_symbol
) != expr_section
)
8191 section_symbol (S_GET_SEGMENT (exp
->X_add_symbol
));
8192 exp
->X_op
= O_subtract
;
8193 exp
->X_op_symbol
= GOT_symbol
;
8194 if (i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTPCREL
)
8195 i
.reloc
[this_operand
] = BFD_RELOC_32_PCREL
;
8196 else if (i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTOFF64
)
8197 i
.reloc
[this_operand
] = BFD_RELOC_64
;
8199 i
.reloc
[this_operand
] = BFD_RELOC_32
;
8202 else if (exp
->X_op
== O_absent
8203 || exp
->X_op
== O_illegal
8204 || exp
->X_op
== O_big
)
8207 as_bad (_("missing or invalid displacement expression `%s'"),
8212 else if (flag_code
== CODE_64BIT
8213 && !i
.prefix
[ADDR_PREFIX
]
8214 && exp
->X_op
== O_constant
)
8216 /* Since displacement is signed extended to 64bit, don't allow
8217 disp32 and turn off disp32s if they are out of range. */
8218 i
.types
[this_operand
].bitfield
.disp32
= 0;
8219 if (!fits_in_signed_long (exp
->X_add_number
))
8221 i
.types
[this_operand
].bitfield
.disp32s
= 0;
8222 if (i
.types
[this_operand
].bitfield
.baseindex
)
8224 as_bad (_("0x%lx out range of signed 32bit displacement"),
8225 (long) exp
->X_add_number
);
8231 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
8232 else if (exp
->X_op
!= O_constant
8233 && OUTPUT_FLAVOR
== bfd_target_aout_flavour
8234 && exp_seg
!= absolute_section
8235 && exp_seg
!= text_section
8236 && exp_seg
!= data_section
8237 && exp_seg
!= bss_section
8238 && exp_seg
!= undefined_section
8239 && !bfd_is_com_section (exp_seg
))
8241 as_bad (_("unimplemented segment %s in operand"), exp_seg
->name
);
8246 /* Check if this is a displacement only operand. */
8247 bigdisp
= i
.types
[this_operand
];
8248 bigdisp
.bitfield
.disp8
= 0;
8249 bigdisp
.bitfield
.disp16
= 0;
8250 bigdisp
.bitfield
.disp32
= 0;
8251 bigdisp
.bitfield
.disp32s
= 0;
8252 bigdisp
.bitfield
.disp64
= 0;
8253 if (operand_type_all_zero (&bigdisp
))
8254 i
.types
[this_operand
] = operand_type_and (i
.types
[this_operand
],
8260 /* Make sure the memory operand we've been dealt is valid.
8261 Return 1 on success, 0 on a failure. */
8264 i386_index_check (const char *operand_string
)
8266 const char *kind
= "base/index";
8267 enum flag_code addr_mode
;
8269 if (i
.prefix
[ADDR_PREFIX
])
8270 addr_mode
= flag_code
== CODE_32BIT
? CODE_16BIT
: CODE_32BIT
;
8273 addr_mode
= flag_code
;
8275 #if INFER_ADDR_PREFIX
8276 if (i
.mem_operands
== 0)
8278 /* Infer address prefix from the first memory operand. */
8279 const reg_entry
*addr_reg
= i
.base_reg
;
8281 if (addr_reg
== NULL
)
8282 addr_reg
= i
.index_reg
;
8286 if (addr_reg
->reg_num
== RegEip
8287 || addr_reg
->reg_num
== RegEiz
8288 || addr_reg
->reg_type
.bitfield
.reg32
)
8289 addr_mode
= CODE_32BIT
;
8290 else if (flag_code
!= CODE_64BIT
8291 && addr_reg
->reg_type
.bitfield
.reg16
)
8292 addr_mode
= CODE_16BIT
;
8294 if (addr_mode
!= flag_code
)
8296 i
.prefix
[ADDR_PREFIX
] = ADDR_PREFIX_OPCODE
;
8298 /* Change the size of any displacement too. At most one
8299 of Disp16 or Disp32 is set.
8300 FIXME. There doesn't seem to be any real need for
8301 separate Disp16 and Disp32 flags. The same goes for
8302 Imm16 and Imm32. Removing them would probably clean
8303 up the code quite a lot. */
8304 if (flag_code
!= CODE_64BIT
8305 && (i
.types
[this_operand
].bitfield
.disp16
8306 || i
.types
[this_operand
].bitfield
.disp32
))
8307 i
.types
[this_operand
]
8308 = operand_type_xor (i
.types
[this_operand
], disp16_32
);
8315 if (current_templates
->start
->opcode_modifier
.isstring
8316 && !current_templates
->start
->opcode_modifier
.immext
8317 && (current_templates
->end
[-1].opcode_modifier
.isstring
8320 /* Memory operands of string insns are special in that they only allow
8321 a single register (rDI, rSI, or rBX) as their memory address. */
8322 const reg_entry
*expected_reg
;
8323 static const char *di_si
[][2] =
8329 static const char *bx
[] = { "ebx", "bx", "rbx" };
8331 kind
= "string address";
8333 if (current_templates
->start
->opcode_modifier
.w
)
8335 i386_operand_type type
= current_templates
->end
[-1].operand_types
[0];
8337 if (!type
.bitfield
.baseindex
8338 || ((!i
.mem_operands
!= !intel_syntax
)
8339 && current_templates
->end
[-1].operand_types
[1]
8340 .bitfield
.baseindex
))
8341 type
= current_templates
->end
[-1].operand_types
[1];
8342 expected_reg
= hash_find (reg_hash
,
8343 di_si
[addr_mode
][type
.bitfield
.esseg
]);
8347 expected_reg
= hash_find (reg_hash
, bx
[addr_mode
]);
8349 if (i
.base_reg
!= expected_reg
8351 || operand_type_check (i
.types
[this_operand
], disp
))
8353 /* The second memory operand must have the same size as
8357 && !((addr_mode
== CODE_64BIT
8358 && i
.base_reg
->reg_type
.bitfield
.reg64
)
8359 || (addr_mode
== CODE_32BIT
8360 ? i
.base_reg
->reg_type
.bitfield
.reg32
8361 : i
.base_reg
->reg_type
.bitfield
.reg16
)))
8364 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
8366 intel_syntax
? '[' : '(',
8368 expected_reg
->reg_name
,
8369 intel_syntax
? ']' : ')');
8376 as_bad (_("`%s' is not a valid %s expression"),
8377 operand_string
, kind
);
8382 if (addr_mode
!= CODE_16BIT
)
8384 /* 32-bit/64-bit checks. */
8386 && (addr_mode
== CODE_64BIT
8387 ? !i
.base_reg
->reg_type
.bitfield
.reg64
8388 : !i
.base_reg
->reg_type
.bitfield
.reg32
)
8390 || (i
.base_reg
->reg_num
8391 != (addr_mode
== CODE_64BIT
? RegRip
: RegEip
))))
8393 && !i
.index_reg
->reg_type
.bitfield
.regxmm
8394 && !i
.index_reg
->reg_type
.bitfield
.regymm
8395 && !i
.index_reg
->reg_type
.bitfield
.regzmm
8396 && ((addr_mode
== CODE_64BIT
8397 ? !(i
.index_reg
->reg_type
.bitfield
.reg64
8398 || i
.index_reg
->reg_num
== RegRiz
)
8399 : !(i
.index_reg
->reg_type
.bitfield
.reg32
8400 || i
.index_reg
->reg_num
== RegEiz
))
8401 || !i
.index_reg
->reg_type
.bitfield
.baseindex
)))
8406 /* 16-bit checks. */
8408 && (!i
.base_reg
->reg_type
.bitfield
.reg16
8409 || !i
.base_reg
->reg_type
.bitfield
.baseindex
))
8411 && (!i
.index_reg
->reg_type
.bitfield
.reg16
8412 || !i
.index_reg
->reg_type
.bitfield
.baseindex
8414 && i
.base_reg
->reg_num
< 6
8415 && i
.index_reg
->reg_num
>= 6
8416 && i
.log2_scale_factor
== 0))))
8423 /* Handle vector immediates. */
8426 RC_SAE_immediate (const char *imm_start
)
8428 unsigned int match_found
, j
;
8429 const char *pstr
= imm_start
;
8437 for (j
= 0; j
< ARRAY_SIZE (RC_NamesTable
); j
++)
8439 if (!strncmp (pstr
, RC_NamesTable
[j
].name
, RC_NamesTable
[j
].len
))
8443 rc_op
.type
= RC_NamesTable
[j
].type
;
8444 rc_op
.operand
= this_operand
;
8445 i
.rounding
= &rc_op
;
8449 as_bad (_("duplicated `%s'"), imm_start
);
8452 pstr
+= RC_NamesTable
[j
].len
;
8462 as_bad (_("Missing '}': '%s'"), imm_start
);
8465 /* RC/SAE immediate string should contain nothing more. */;
8468 as_bad (_("Junk after '}': '%s'"), imm_start
);
8472 exp
= &im_expressions
[i
.imm_operands
++];
8473 i
.op
[this_operand
].imms
= exp
;
8475 exp
->X_op
= O_constant
;
8476 exp
->X_add_number
= 0;
8477 exp
->X_add_symbol
= (symbolS
*) 0;
8478 exp
->X_op_symbol
= (symbolS
*) 0;
8480 i
.types
[this_operand
].bitfield
.imm8
= 1;
8484 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
8488 i386_att_operand (char *operand_string
)
8492 char *op_string
= operand_string
;
8494 if (is_space_char (*op_string
))
8497 /* We check for an absolute prefix (differentiating,
8498 for example, 'jmp pc_relative_label' from 'jmp *absolute_label'. */
8499 if (*op_string
== ABSOLUTE_PREFIX
)
8502 if (is_space_char (*op_string
))
8504 i
.types
[this_operand
].bitfield
.jumpabsolute
= 1;
8507 /* Check if operand is a register. */
8508 if ((r
= parse_register (op_string
, &end_op
)) != NULL
)
8510 i386_operand_type temp
;
8512 /* Check for a segment override by searching for ':' after a
8513 segment register. */
8515 if (is_space_char (*op_string
))
8517 if (*op_string
== ':'
8518 && (r
->reg_type
.bitfield
.sreg2
8519 || r
->reg_type
.bitfield
.sreg3
))
8524 i
.seg
[i
.mem_operands
] = &es
;
8527 i
.seg
[i
.mem_operands
] = &cs
;
8530 i
.seg
[i
.mem_operands
] = &ss
;
8533 i
.seg
[i
.mem_operands
] = &ds
;
8536 i
.seg
[i
.mem_operands
] = &fs
;
8539 i
.seg
[i
.mem_operands
] = &gs
;
8543 /* Skip the ':' and whitespace. */
8545 if (is_space_char (*op_string
))
8548 if (!is_digit_char (*op_string
)
8549 && !is_identifier_char (*op_string
)
8550 && *op_string
!= '('
8551 && *op_string
!= ABSOLUTE_PREFIX
)
8553 as_bad (_("bad memory operand `%s'"), op_string
);
8556 /* Handle case of %es:*foo. */
8557 if (*op_string
== ABSOLUTE_PREFIX
)
8560 if (is_space_char (*op_string
))
8562 i
.types
[this_operand
].bitfield
.jumpabsolute
= 1;
8564 goto do_memory_reference
;
8567 /* Handle vector operations. */
8568 if (*op_string
== '{')
8570 op_string
= check_VecOperations (op_string
, NULL
);
8571 if (op_string
== NULL
)
8577 as_bad (_("junk `%s' after register"), op_string
);
8581 temp
.bitfield
.baseindex
= 0;
8582 i
.types
[this_operand
] = operand_type_or (i
.types
[this_operand
],
8584 i
.types
[this_operand
].bitfield
.unspecified
= 0;
8585 i
.op
[this_operand
].regs
= r
;
8588 else if (*op_string
== REGISTER_PREFIX
)
8590 as_bad (_("bad register name `%s'"), op_string
);
8593 else if (*op_string
== IMMEDIATE_PREFIX
)
8596 if (i
.types
[this_operand
].bitfield
.jumpabsolute
)
8598 as_bad (_("immediate operand illegal with absolute jump"));
8601 if (!i386_immediate (op_string
))
8604 else if (RC_SAE_immediate (operand_string
))
8606 /* If it is a RC or SAE immediate, do nothing. */
8609 else if (is_digit_char (*op_string
)
8610 || is_identifier_char (*op_string
)
8611 || *op_string
== '(')
8613 /* This is a memory reference of some sort. */
8616 /* Start and end of displacement string expression (if found). */
8617 char *displacement_string_start
;
8618 char *displacement_string_end
;
8621 do_memory_reference
:
8622 if ((i
.mem_operands
== 1
8623 && !current_templates
->start
->opcode_modifier
.isstring
)
8624 || i
.mem_operands
== 2)
8626 as_bad (_("too many memory references for `%s'"),
8627 current_templates
->start
->name
);
8631 /* Check for base index form. We detect the base index form by
8632 looking for an ')' at the end of the operand, searching
8633 for the '(' matching it, and finding a REGISTER_PREFIX or ','
8635 base_string
= op_string
+ strlen (op_string
);
8637 /* Handle vector operations. */
8638 vop_start
= strchr (op_string
, '{');
8639 if (vop_start
&& vop_start
< base_string
)
8641 if (check_VecOperations (vop_start
, base_string
) == NULL
)
8643 base_string
= vop_start
;
8647 if (is_space_char (*base_string
))
8650 /* If we only have a displacement, set-up for it to be parsed later. */
8651 displacement_string_start
= op_string
;
8652 displacement_string_end
= base_string
+ 1;
8654 if (*base_string
== ')')
8657 unsigned int parens_balanced
= 1;
8658 /* We've already checked that the number of left & right ()'s are
8659 equal, so this loop will not be infinite. */
8663 if (*base_string
== ')')
8665 if (*base_string
== '(')
8668 while (parens_balanced
);
8670 temp_string
= base_string
;
8672 /* Skip past '(' and whitespace. */
8674 if (is_space_char (*base_string
))
8677 if (*base_string
== ','
8678 || ((i
.base_reg
= parse_register (base_string
, &end_op
))
8681 displacement_string_end
= temp_string
;
8683 i
.types
[this_operand
].bitfield
.baseindex
= 1;
8687 base_string
= end_op
;
8688 if (is_space_char (*base_string
))
8692 /* There may be an index reg or scale factor here. */
8693 if (*base_string
== ',')
8696 if (is_space_char (*base_string
))
8699 if ((i
.index_reg
= parse_register (base_string
, &end_op
))
8702 base_string
= end_op
;
8703 if (is_space_char (*base_string
))
8705 if (*base_string
== ',')
8708 if (is_space_char (*base_string
))
8711 else if (*base_string
!= ')')
8713 as_bad (_("expecting `,' or `)' "
8714 "after index register in `%s'"),
8719 else if (*base_string
== REGISTER_PREFIX
)
8721 end_op
= strchr (base_string
, ',');
8724 as_bad (_("bad register name `%s'"), base_string
);
8728 /* Check for scale factor. */
8729 if (*base_string
!= ')')
8731 char *end_scale
= i386_scale (base_string
);
8736 base_string
= end_scale
;
8737 if (is_space_char (*base_string
))
8739 if (*base_string
!= ')')
8741 as_bad (_("expecting `)' "
8742 "after scale factor in `%s'"),
8747 else if (!i
.index_reg
)
8749 as_bad (_("expecting index register or scale factor "
8750 "after `,'; got '%c'"),
8755 else if (*base_string
!= ')')
8757 as_bad (_("expecting `,' or `)' "
8758 "after base register in `%s'"),
8763 else if (*base_string
== REGISTER_PREFIX
)
8765 end_op
= strchr (base_string
, ',');
8768 as_bad (_("bad register name `%s'"), base_string
);
8773 /* If there's an expression beginning the operand, parse it,
8774 assuming displacement_string_start and
8775 displacement_string_end are meaningful. */
8776 if (displacement_string_start
!= displacement_string_end
)
8778 if (!i386_displacement (displacement_string_start
,
8779 displacement_string_end
))
8783 /* Special case for (%dx) while doing input/output op. */
8785 && operand_type_equal (&i
.base_reg
->reg_type
,
8786 ®16_inoutportreg
)
8788 && i
.log2_scale_factor
== 0
8789 && i
.seg
[i
.mem_operands
] == 0
8790 && !operand_type_check (i
.types
[this_operand
], disp
))
8792 i
.types
[this_operand
] = inoutportreg
;
8796 if (i386_index_check (operand_string
) == 0)
8798 i
.types
[this_operand
].bitfield
.mem
= 1;
8803 /* It's not a memory operand; argh! */
8804 as_bad (_("invalid char %s beginning operand %d `%s'"),
8805 output_invalid (*op_string
),
8810 return 1; /* Normal return. */
8813 /* Calculate the maximum variable size (i.e., excluding fr_fix)
8814 that an rs_machine_dependent frag may reach. */
8817 i386_frag_max_var (fragS
*frag
)
8819 /* The only relaxable frags are for jumps.
8820 Unconditional jumps can grow by 4 bytes and others by 5 bytes. */
8821 gas_assert (frag
->fr_type
== rs_machine_dependent
);
8822 return TYPE_FROM_RELAX_STATE (frag
->fr_subtype
) == UNCOND_JUMP
? 4 : 5;
8825 /* md_estimate_size_before_relax()
8827 Called just before relax() for rs_machine_dependent frags. The x86
8828 assembler uses these frags to handle variable size jump
8831 Any symbol that is now undefined will not become defined.
8832 Return the correct fr_subtype in the frag.
8833 Return the initial "guess for variable size of frag" to caller.
8834 The guess is actually the growth beyond the fixed part. Whatever
8835 we do to grow the fixed or variable part contributes to our
8839 md_estimate_size_before_relax (fragS
*fragP
, segT segment
)
8841 /* We've already got fragP->fr_subtype right; all we have to do is
8842 check for un-relaxable symbols. On an ELF system, we can't relax
8843 an externally visible symbol, because it may be overridden by a
8845 if (S_GET_SEGMENT (fragP
->fr_symbol
) != segment
8846 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8848 && (S_IS_EXTERNAL (fragP
->fr_symbol
)
8849 || S_IS_WEAK (fragP
->fr_symbol
)
8850 || ((symbol_get_bfdsym (fragP
->fr_symbol
)->flags
8851 & BSF_GNU_INDIRECT_FUNCTION
))))
8853 #if defined (OBJ_COFF) && defined (TE_PE)
8854 || (OUTPUT_FLAVOR
== bfd_target_coff_flavour
8855 && S_IS_WEAK (fragP
->fr_symbol
))
8859 /* Symbol is undefined in this segment, or we need to keep a
8860 reloc so that weak symbols can be overridden. */
8861 int size
= (fragP
->fr_subtype
& CODE16
) ? 2 : 4;
8862 enum bfd_reloc_code_real reloc_type
;
8863 unsigned char *opcode
;
8866 if (fragP
->fr_var
!= NO_RELOC
)
8867 reloc_type
= (enum bfd_reloc_code_real
) fragP
->fr_var
;
8869 reloc_type
= BFD_RELOC_16_PCREL
;
8871 reloc_type
= BFD_RELOC_32_PCREL
;
8873 old_fr_fix
= fragP
->fr_fix
;
8874 opcode
= (unsigned char *) fragP
->fr_opcode
;
8876 switch (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
))
8879 /* Make jmp (0xeb) a (d)word displacement jump. */
8881 fragP
->fr_fix
+= size
;
8882 fix_new (fragP
, old_fr_fix
, size
,
8884 fragP
->fr_offset
, 1,
8890 && (!no_cond_jump_promotion
|| fragP
->fr_var
!= NO_RELOC
))
8892 /* Negate the condition, and branch past an
8893 unconditional jump. */
8896 /* Insert an unconditional jump. */
8898 /* We added two extra opcode bytes, and have a two byte
8900 fragP
->fr_fix
+= 2 + 2;
8901 fix_new (fragP
, old_fr_fix
+ 2, 2,
8903 fragP
->fr_offset
, 1,
8910 if (no_cond_jump_promotion
&& fragP
->fr_var
== NO_RELOC
)
8915 fixP
= fix_new (fragP
, old_fr_fix
, 1,
8917 fragP
->fr_offset
, 1,
8919 fixP
->fx_signed
= 1;
8923 /* This changes the byte-displacement jump 0x7N
8924 to the (d)word-displacement jump 0x0f,0x8N. */
8925 opcode
[1] = opcode
[0] + 0x10;
8926 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
8927 /* We've added an opcode byte. */
8928 fragP
->fr_fix
+= 1 + size
;
8929 fix_new (fragP
, old_fr_fix
+ 1, size
,
8931 fragP
->fr_offset
, 1,
8936 BAD_CASE (fragP
->fr_subtype
);
8940 return fragP
->fr_fix
- old_fr_fix
;
8943 /* Guess size depending on current relax state. Initially the relax
8944 state will correspond to a short jump and we return 1, because
8945 the variable part of the frag (the branch offset) is one byte
8946 long. However, we can relax a section more than once and in that
8947 case we must either set fr_subtype back to the unrelaxed state,
8948 or return the value for the appropriate branch. */
8949 return md_relax_table
[fragP
->fr_subtype
].rlx_length
;
8952 /* Called after relax() is finished.
8954 In: Address of frag.
8955 fr_type == rs_machine_dependent.
8956 fr_subtype is what the address relaxed to.
8958 Out: Any fixSs and constants are set up.
8959 Caller will turn frag into a ".space 0". */
8962 md_convert_frag (bfd
*abfd ATTRIBUTE_UNUSED
, segT sec ATTRIBUTE_UNUSED
,
8965 unsigned char *opcode
;
8966 unsigned char *where_to_put_displacement
= NULL
;
8967 offsetT target_address
;
8968 offsetT opcode_address
;
8969 unsigned int extension
= 0;
8970 offsetT displacement_from_opcode_start
;
8972 opcode
= (unsigned char *) fragP
->fr_opcode
;
8974 /* Address we want to reach in file space. */
8975 target_address
= S_GET_VALUE (fragP
->fr_symbol
) + fragP
->fr_offset
;
8977 /* Address opcode resides at in file space. */
8978 opcode_address
= fragP
->fr_address
+ fragP
->fr_fix
;
8980 /* Displacement from opcode start to fill into instruction. */
8981 displacement_from_opcode_start
= target_address
- opcode_address
;
8983 if ((fragP
->fr_subtype
& BIG
) == 0)
8985 /* Don't have to change opcode. */
8986 extension
= 1; /* 1 opcode + 1 displacement */
8987 where_to_put_displacement
= &opcode
[1];
8991 if (no_cond_jump_promotion
8992 && TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) != UNCOND_JUMP
)
8993 as_warn_where (fragP
->fr_file
, fragP
->fr_line
,
8994 _("long jump required"));
8996 switch (fragP
->fr_subtype
)
8998 case ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG
):
8999 extension
= 4; /* 1 opcode + 4 displacement */
9001 where_to_put_displacement
= &opcode
[1];
9004 case ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG16
):
9005 extension
= 2; /* 1 opcode + 2 displacement */
9007 where_to_put_displacement
= &opcode
[1];
9010 case ENCODE_RELAX_STATE (COND_JUMP
, BIG
):
9011 case ENCODE_RELAX_STATE (COND_JUMP86
, BIG
):
9012 extension
= 5; /* 2 opcode + 4 displacement */
9013 opcode
[1] = opcode
[0] + 0x10;
9014 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
9015 where_to_put_displacement
= &opcode
[2];
9018 case ENCODE_RELAX_STATE (COND_JUMP
, BIG16
):
9019 extension
= 3; /* 2 opcode + 2 displacement */
9020 opcode
[1] = opcode
[0] + 0x10;
9021 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
9022 where_to_put_displacement
= &opcode
[2];
9025 case ENCODE_RELAX_STATE (COND_JUMP86
, BIG16
):
9030 where_to_put_displacement
= &opcode
[3];
9034 BAD_CASE (fragP
->fr_subtype
);
9039 /* If size if less then four we are sure that the operand fits,
9040 but if it's 4, then it could be that the displacement is larger
9042 if (DISP_SIZE_FROM_RELAX_STATE (fragP
->fr_subtype
) == 4
9044 && ((addressT
) (displacement_from_opcode_start
- extension
9045 + ((addressT
) 1 << 31))
9046 > (((addressT
) 2 << 31) - 1)))
9048 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
9049 _("jump target out of range"));
9050 /* Make us emit 0. */
9051 displacement_from_opcode_start
= extension
;
9053 /* Now put displacement after opcode. */
9054 md_number_to_chars ((char *) where_to_put_displacement
,
9055 (valueT
) (displacement_from_opcode_start
- extension
),
9056 DISP_SIZE_FROM_RELAX_STATE (fragP
->fr_subtype
));
9057 fragP
->fr_fix
+= extension
;
9060 /* Apply a fixup (fixP) to segment data, once it has been determined
9061 by our caller that we have all the info we need to fix it up.
9063 Parameter valP is the pointer to the value of the bits.
9065 On the 386, immediates, displacements, and data pointers are all in
9066 the same (little-endian) format, so we don't need to care about which
9070 md_apply_fix (fixS
*fixP
, valueT
*valP
, segT seg ATTRIBUTE_UNUSED
)
9072 char *p
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
9073 valueT value
= *valP
;
9075 #if !defined (TE_Mach)
9078 switch (fixP
->fx_r_type
)
9084 fixP
->fx_r_type
= BFD_RELOC_64_PCREL
;
9087 case BFD_RELOC_X86_64_32S
:
9088 fixP
->fx_r_type
= BFD_RELOC_32_PCREL
;
9091 fixP
->fx_r_type
= BFD_RELOC_16_PCREL
;
9094 fixP
->fx_r_type
= BFD_RELOC_8_PCREL
;
9099 if (fixP
->fx_addsy
!= NULL
9100 && (fixP
->fx_r_type
== BFD_RELOC_32_PCREL
9101 || fixP
->fx_r_type
== BFD_RELOC_64_PCREL
9102 || fixP
->fx_r_type
== BFD_RELOC_16_PCREL
9103 || fixP
->fx_r_type
== BFD_RELOC_8_PCREL
9104 || fixP
->fx_r_type
== BFD_RELOC_X86_64_PC32_BND
)
9105 && !use_rela_relocations
)
9107 /* This is a hack. There should be a better way to handle this.
9108 This covers for the fact that bfd_install_relocation will
9109 subtract the current location (for partial_inplace, PC relative
9110 relocations); see more below. */
9114 || OUTPUT_FLAVOR
== bfd_target_coff_flavour
9117 value
+= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
9119 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9122 segT sym_seg
= S_GET_SEGMENT (fixP
->fx_addsy
);
9125 || (symbol_section_p (fixP
->fx_addsy
)
9126 && sym_seg
!= absolute_section
))
9127 && !generic_force_reloc (fixP
))
9129 /* Yes, we add the values in twice. This is because
9130 bfd_install_relocation subtracts them out again. I think
9131 bfd_install_relocation is broken, but I don't dare change
9133 value
+= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
9137 #if defined (OBJ_COFF) && defined (TE_PE)
9138 /* For some reason, the PE format does not store a
9139 section address offset for a PC relative symbol. */
9140 if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
9141 || S_IS_WEAK (fixP
->fx_addsy
))
9142 value
+= md_pcrel_from (fixP
);
9145 #if defined (OBJ_COFF) && defined (TE_PE)
9146 if (fixP
->fx_addsy
!= NULL
&& S_IS_WEAK (fixP
->fx_addsy
))
9148 value
-= S_GET_VALUE (fixP
->fx_addsy
);
9152 /* Fix a few things - the dynamic linker expects certain values here,
9153 and we must not disappoint it. */
9154 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9155 if (IS_ELF
&& fixP
->fx_addsy
)
9156 switch (fixP
->fx_r_type
)
9158 case BFD_RELOC_386_PLT32
:
9159 case BFD_RELOC_X86_64_PLT32
:
9160 case BFD_RELOC_X86_64_PLT32_BND
:
9161 /* Make the jump instruction point to the address of the operand. At
9162 runtime we merely add the offset to the actual PLT entry. */
9166 case BFD_RELOC_386_TLS_GD
:
9167 case BFD_RELOC_386_TLS_LDM
:
9168 case BFD_RELOC_386_TLS_IE_32
:
9169 case BFD_RELOC_386_TLS_IE
:
9170 case BFD_RELOC_386_TLS_GOTIE
:
9171 case BFD_RELOC_386_TLS_GOTDESC
:
9172 case BFD_RELOC_X86_64_TLSGD
:
9173 case BFD_RELOC_X86_64_TLSLD
:
9174 case BFD_RELOC_X86_64_GOTTPOFF
:
9175 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
9176 value
= 0; /* Fully resolved at runtime. No addend. */
9178 case BFD_RELOC_386_TLS_LE
:
9179 case BFD_RELOC_386_TLS_LDO_32
:
9180 case BFD_RELOC_386_TLS_LE_32
:
9181 case BFD_RELOC_X86_64_DTPOFF32
:
9182 case BFD_RELOC_X86_64_DTPOFF64
:
9183 case BFD_RELOC_X86_64_TPOFF32
:
9184 case BFD_RELOC_X86_64_TPOFF64
:
9185 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
9188 case BFD_RELOC_386_TLS_DESC_CALL
:
9189 case BFD_RELOC_X86_64_TLSDESC_CALL
:
9190 value
= 0; /* Fully resolved at runtime. No addend. */
9191 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
9195 case BFD_RELOC_386_GOT32
:
9196 case BFD_RELOC_X86_64_GOT32
:
9197 value
= 0; /* Fully resolved at runtime. No addend. */
9200 case BFD_RELOC_VTABLE_INHERIT
:
9201 case BFD_RELOC_VTABLE_ENTRY
:
9208 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
9210 #endif /* !defined (TE_Mach) */
9212 /* Are we finished with this relocation now? */
9213 if (fixP
->fx_addsy
== NULL
)
9215 #if defined (OBJ_COFF) && defined (TE_PE)
9216 else if (fixP
->fx_addsy
!= NULL
&& S_IS_WEAK (fixP
->fx_addsy
))
9219 /* Remember value for tc_gen_reloc. */
9220 fixP
->fx_addnumber
= value
;
9221 /* Clear out the frag for now. */
9225 else if (use_rela_relocations
)
9227 fixP
->fx_no_overflow
= 1;
9228 /* Remember value for tc_gen_reloc. */
9229 fixP
->fx_addnumber
= value
;
9233 md_number_to_chars (p
, value
, fixP
->fx_size
);
const char *
md_atof (int type, char *litP, int *sizeP)
{
  /* This outputs the LITTLENUMs in REVERSE order;
     in accord with the bigendian 386.  */
  return ieee_md_atof (type, litP, sizeP, FALSE);
}

static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];

static char *
output_invalid (int c)
{
  if (ISPRINT (c))
    snprintf (output_invalid_buf, sizeof (output_invalid_buf),
              "'%c'", c);
  else
    snprintf (output_invalid_buf, sizeof (output_invalid_buf),
              "(0x%x)", (unsigned char) c);
  return output_invalid_buf;
}
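/* Example (sketch): a printable junk character such as '~' is echoed back
   from output_invalid () in quotes, while a non-printable one such as 0x0c
   is rendered with the hex format above, i.e. as "(0xc)".  */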
9258 /* REG_STRING starts *before* REGISTER_PREFIX. */
9260 static const reg_entry
*
9261 parse_real_register (char *reg_string
, char **end_op
)
9263 char *s
= reg_string
;
9265 char reg_name_given
[MAX_REG_NAME_SIZE
+ 1];
9268 /* Skip possible REGISTER_PREFIX and possible whitespace. */
9269 if (*s
== REGISTER_PREFIX
)
9272 if (is_space_char (*s
))
9276 while ((*p
++ = register_chars
[(unsigned char) *s
]) != '\0')
9278 if (p
>= reg_name_given
+ MAX_REG_NAME_SIZE
)
9279 return (const reg_entry
*) NULL
;
9283 /* For naked regs, make sure that we are not dealing with an identifier.
9284 This prevents confusing an identifier like `eax_var' with register
9286 if (allow_naked_reg
&& identifier_chars
[(unsigned char) *s
])
9287 return (const reg_entry
*) NULL
;
9291 r
= (const reg_entry
*) hash_find (reg_hash
, reg_name_given
);
9293 /* Handle floating point regs, allowing spaces in the (i) part. */
9294 if (r
== i386_regtab
/* %st is first entry of table */)
9296 if (is_space_char (*s
))
9301 if (is_space_char (*s
))
9303 if (*s
>= '0' && *s
<= '7')
9307 if (is_space_char (*s
))
9312 r
= (const reg_entry
*) hash_find (reg_hash
, "st(0)");
9317 /* We have "%st(" then garbage. */
9318 return (const reg_entry
*) NULL
;
9322 if (r
== NULL
|| allow_pseudo_reg
)
9325 if (operand_type_all_zero (&r
->reg_type
))
9326 return (const reg_entry
*) NULL
;
9328 if ((r
->reg_type
.bitfield
.reg32
9329 || r
->reg_type
.bitfield
.sreg3
9330 || r
->reg_type
.bitfield
.control
9331 || r
->reg_type
.bitfield
.debug
9332 || r
->reg_type
.bitfield
.test
)
9333 && !cpu_arch_flags
.bitfield
.cpui386
)
9334 return (const reg_entry
*) NULL
;
9336 if (r
->reg_type
.bitfield
.floatreg
9337 && !cpu_arch_flags
.bitfield
.cpu8087
9338 && !cpu_arch_flags
.bitfield
.cpu287
9339 && !cpu_arch_flags
.bitfield
.cpu387
)
9340 return (const reg_entry
*) NULL
;
9342 if (r
->reg_type
.bitfield
.regmmx
&& !cpu_arch_flags
.bitfield
.cpummx
)
9343 return (const reg_entry
*) NULL
;
9345 if (r
->reg_type
.bitfield
.regxmm
&& !cpu_arch_flags
.bitfield
.cpusse
)
9346 return (const reg_entry
*) NULL
;
9348 if (r
->reg_type
.bitfield
.regymm
&& !cpu_arch_flags
.bitfield
.cpuavx
)
9349 return (const reg_entry
*) NULL
;
9351 if ((r
->reg_type
.bitfield
.regzmm
|| r
->reg_type
.bitfield
.regmask
)
9352 && !cpu_arch_flags
.bitfield
.cpuavx512f
)
9353 return (const reg_entry
*) NULL
;
9355 /* Don't allow fake index register unless allow_index_reg isn't 0. */
9356 if (!allow_index_reg
9357 && (r
->reg_num
== RegEiz
|| r
->reg_num
== RegRiz
))
9358 return (const reg_entry
*) NULL
;
9360 /* Upper 16 vector register is only available with VREX in 64bit
9362 if ((r
->reg_flags
& RegVRex
))
9364 if (!cpu_arch_flags
.bitfield
.cpuvrex
9365 || flag_code
!= CODE_64BIT
)
9366 return (const reg_entry
*) NULL
;
9371 if (((r
->reg_flags
& (RegRex64
| RegRex
))
9372 || r
->reg_type
.bitfield
.reg64
)
9373 && (!cpu_arch_flags
.bitfield
.cpulm
9374 || !operand_type_equal (&r
->reg_type
, &control
))
9375 && flag_code
!= CODE_64BIT
)
9376 return (const reg_entry
*) NULL
;
9378 if (r
->reg_type
.bitfield
.sreg3
&& r
->reg_num
== RegFlat
&& !intel_syntax
)
9379 return (const reg_entry
*) NULL
;
9384 /* REG_STRING starts *before* REGISTER_PREFIX. */
9386 static const reg_entry
*
9387 parse_register (char *reg_string
, char **end_op
)
9391 if (*reg_string
== REGISTER_PREFIX
|| allow_naked_reg
)
9392 r
= parse_real_register (reg_string
, end_op
);
9397 char *save
= input_line_pointer
;
9401 input_line_pointer
= reg_string
;
9402 c
= get_symbol_end ();
9403 symbolP
= symbol_find (reg_string
);
9404 if (symbolP
&& S_GET_SEGMENT (symbolP
) == reg_section
)
9406 const expressionS
*e
= symbol_get_value_expression (symbolP
);
9408 know (e
->X_op
== O_register
);
9409 know (e
->X_add_number
>= 0
9410 && (valueT
) e
->X_add_number
< i386_regtab_size
);
9411 r
= i386_regtab
+ e
->X_add_number
;
9412 *end_op
= input_line_pointer
;
9414 *input_line_pointer
= c
;
9415 input_line_pointer
= save
;
9421 i386_parse_name (char *name
, expressionS
*e
, char *nextcharP
)
9424 char *end
= input_line_pointer
;
9427 r
= parse_register (name
, &input_line_pointer
);
9428 if (r
&& end
<= input_line_pointer
)
9430 *nextcharP
= *input_line_pointer
;
9431 *input_line_pointer
= 0;
9432 e
->X_op
= O_register
;
9433 e
->X_add_number
= r
- i386_regtab
;
9436 input_line_pointer
= end
;
9438 return intel_syntax
? i386_intel_parse_name (name
, e
) : 0;
9442 md_operand (expressionS
*e
)
9447 switch (*input_line_pointer
)
9449 case REGISTER_PREFIX
:
9450 r
= parse_real_register (input_line_pointer
, &end
);
9453 e
->X_op
= O_register
;
9454 e
->X_add_number
= r
- i386_regtab
;
9455 input_line_pointer
= end
;
9460 gas_assert (intel_syntax
);
9461 end
= input_line_pointer
++;
9463 if (*input_line_pointer
== ']')
9465 ++input_line_pointer
;
9466 e
->X_op_symbol
= make_expr_symbol (e
);
9467 e
->X_add_symbol
= NULL
;
9468 e
->X_add_number
= 0;
9474 input_line_pointer
= end
;
9481 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9482 const char *md_shortopts
= "kVQ:sqn";
9484 const char *md_shortopts
= "qn";
9487 #define OPTION_32 (OPTION_MD_BASE + 0)
9488 #define OPTION_64 (OPTION_MD_BASE + 1)
9489 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
9490 #define OPTION_MARCH (OPTION_MD_BASE + 3)
9491 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
9492 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
9493 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
9494 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
9495 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
9496 #define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
9497 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
9498 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
9499 #define OPTION_MOPERAND_CHECK (OPTION_MD_BASE + 12)
9500 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 13)
9501 #define OPTION_X32 (OPTION_MD_BASE + 14)
9502 #define OPTION_MADD_BND_PREFIX (OPTION_MD_BASE + 15)
9503 #define OPTION_MEVEXLIG (OPTION_MD_BASE + 16)
9504 #define OPTION_MEVEXWIG (OPTION_MD_BASE + 17)
9505 #define OPTION_MBIG_OBJ (OPTION_MD_BASE + 18)
struct option md_longopts[] =
{
  {"32", no_argument, NULL, OPTION_32},
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
  {"64", no_argument, NULL, OPTION_64},
#endif
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  {"x32", no_argument, NULL, OPTION_X32},
#endif
  {"divide", no_argument, NULL, OPTION_DIVIDE},
  {"march", required_argument, NULL, OPTION_MARCH},
  {"mtune", required_argument, NULL, OPTION_MTUNE},
  {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
  {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
  {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
  {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
  {"mold-gcc", no_argument, NULL, OPTION_MOLD_GCC},
  {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
  {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
  {"moperand-check", required_argument, NULL, OPTION_MOPERAND_CHECK},
  {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
  {"madd-bnd-prefix", no_argument, NULL, OPTION_MADD_BND_PREFIX},
  {"mevexlig", required_argument, NULL, OPTION_MEVEXLIG},
  {"mevexwig", required_argument, NULL, OPTION_MEVEXWIG},
# if defined (TE_PE) || defined (TE_PEP)
  {"mbig-obj", no_argument, NULL, OPTION_MBIG_OBJ},
# endif
  {NULL, no_argument, NULL, 0}
};
size_t md_longopts_size = sizeof (md_longopts);
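/* The table above is consumed by md_parse_option below.  An
   illustrative (not exhaustive) invocation using several of these
   switches might be:

     as --64 -march=corei7+avx -mtune=generic64 -msyntax=intel -o t.o t.s
*/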
int
md_parse_option (int c, char *arg)
{
  unsigned int j;
  char *arch, *next;

  switch (c)
    {
    case 'n':
      optimize_align_code = 0;
      break;

    case 'q':
      quiet_warnings = 1;
      break;
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
         should be emitted or not.  FIXME: Not implemented.  */
    case 'Q':
      break;

      /* -V: SVR4 argument to print version ID.  */
    case 'V':
      print_version_id ();
      break;

      /* -k: Ignore for FreeBSD compatibility.  */
    case 'k':
      break;

    case 's':
      /* -s: On i386 Solaris, this tells the native assembler to use
         .stab instead of .stab.excl.  We always use .stab anyhow.  */
      break;
#endif
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
    case OPTION_64:
      {
        const char **list, **l;

        list = bfd_target_list ();
        for (l = list; *l != NULL; l++)
          if (CONST_STRNEQ (*l, "elf64-x86-64")
              || strcmp (*l, "coff-x86-64") == 0
              || strcmp (*l, "pe-x86-64") == 0
              || strcmp (*l, "pei-x86-64") == 0
              || strcmp (*l, "mach-o-x86-64") == 0)
            {
              default_arch = "x86_64";
              break;
            }
        if (*l == NULL)
          as_fatal (_("no compiled in support for x86_64"));
        free (list);
      }
      break;
#endif
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
    case OPTION_X32:
      if (IS_ELF)
        {
          const char **list, **l;

          list = bfd_target_list ();
          for (l = list; *l != NULL; l++)
            if (CONST_STRNEQ (*l, "elf32-x86-64"))
              {
                default_arch = "x86_64:32";
                break;
              }
          if (*l == NULL)
            as_fatal (_("no compiled in support for 32bit x86_64"));
          free (list);
        }
      else
        as_fatal (_("32bit x86_64 is only supported for ELF"));
      break;
#endif
    case OPTION_32:
      default_arch = "i386";
      break;

    case OPTION_DIVIDE:
#ifdef SVR4_COMMENT_CHARS
      {
        char *n, *t;
        const char *s;

        n = (char *) xmalloc (strlen (i386_comment_chars) + 1);
        t = n;
        for (s = i386_comment_chars; *s != '\0'; s++)
          if (*s != '/')
            *t++ = *s;
        *t = '\0';
        i386_comment_chars = n;
      }
#endif
      break;
    case OPTION_MARCH:
      arch = xstrdup (arg);
      do
        {
          if (*arch == '.')
            as_fatal (_("invalid -march= option: `%s'"), arg);
          next = strchr (arch, '+');
          if (next)
            *next++ = '\0';
          for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
            {
              if (strcmp (arch, cpu_arch[j].name) == 0)
                {
                  /* Processor.  */
                  if (! cpu_arch[j].flags.bitfield.cpui386)
                    continue;

                  cpu_arch_name = cpu_arch[j].name;
                  cpu_sub_arch_name = NULL;
                  cpu_arch_flags = cpu_arch[j].flags;
                  cpu_arch_isa = cpu_arch[j].type;
                  cpu_arch_isa_flags = cpu_arch[j].flags;
                  if (!cpu_arch_tune_set)
                    {
                      cpu_arch_tune = cpu_arch_isa;
                      cpu_arch_tune_flags = cpu_arch_isa_flags;
                    }
                  break;
                }
              else if (*cpu_arch[j].name == '.'
                       && strcmp (arch, cpu_arch[j].name + 1) == 0)
                {
                  /* ISA extension.  */
                  i386_cpu_flags flags;

                  if (!cpu_arch[j].negated)
                    flags = cpu_flags_or (cpu_arch_flags,
                                          cpu_arch[j].flags);
                  else
                    flags = cpu_flags_and_not (cpu_arch_flags,
                                               cpu_arch[j].flags);
                  if (!cpu_flags_equal (&flags, &cpu_arch_flags))
                    {
                      if (cpu_sub_arch_name)
                        {
                          char *name = cpu_sub_arch_name;
                          cpu_sub_arch_name = concat (name,
                                                      cpu_arch[j].name,
                                                      (const char *) NULL);
                          free (name);
                        }
                      else
                        cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
                      cpu_arch_flags = flags;
                      cpu_arch_isa_flags = flags;
                    }
                  break;
                }
            }

          if (j >= ARRAY_SIZE (cpu_arch))
            as_fatal (_("invalid -march= option: `%s'"), arg);

          arch = next;
        }
      while (next != NULL);
      break;
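      /* The do/while loop above splits the -march= argument at each
         `+', so e.g. -march=corei7+avx first selects the corei7 base
         architecture and then enables the ".avx" extension entry from
         cpu_arch.  */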
    case OPTION_MTUNE:
      if (*arg == '.')
        as_fatal (_("invalid -mtune= option: `%s'"), arg);
      for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
        {
          if (strcmp (arg, cpu_arch[j].name) == 0)
            {
              cpu_arch_tune_set = 1;
              cpu_arch_tune = cpu_arch[j].type;
              cpu_arch_tune_flags = cpu_arch[j].flags;
              break;
            }
        }
      if (j >= ARRAY_SIZE (cpu_arch))
        as_fatal (_("invalid -mtune= option: `%s'"), arg);
      break;
    case OPTION_MMNEMONIC:
      if (strcasecmp (arg, "att") == 0)
        intel_mnemonic = 0;
      else if (strcasecmp (arg, "intel") == 0)
        intel_mnemonic = 1;
      else
        as_fatal (_("invalid -mmnemonic= option: `%s'"), arg);
      break;

    case OPTION_MSYNTAX:
      if (strcasecmp (arg, "att") == 0)
        intel_syntax = 0;
      else if (strcasecmp (arg, "intel") == 0)
        intel_syntax = 1;
      else
        as_fatal (_("invalid -msyntax= option: `%s'"), arg);
      break;
    case OPTION_MINDEX_REG:
      allow_index_reg = 1;
      break;

    case OPTION_MNAKED_REG:
      allow_naked_reg = 1;
      break;

    case OPTION_MOLD_GCC:
      old_gcc = 1;
      break;

    case OPTION_MSSE2AVX:
      sse2avx = 1;
      break;
    case OPTION_MSSE_CHECK:
      if (strcasecmp (arg, "error") == 0)
        sse_check = check_error;
      else if (strcasecmp (arg, "warning") == 0)
        sse_check = check_warning;
      else if (strcasecmp (arg, "none") == 0)
        sse_check = check_none;
      else
        as_fatal (_("invalid -msse-check= option: `%s'"), arg);
      break;

    case OPTION_MOPERAND_CHECK:
      if (strcasecmp (arg, "error") == 0)
        operand_check = check_error;
      else if (strcasecmp (arg, "warning") == 0)
        operand_check = check_warning;
      else if (strcasecmp (arg, "none") == 0)
        operand_check = check_none;
      else
        as_fatal (_("invalid -moperand-check= option: `%s'"), arg);
      break;

    case OPTION_MAVXSCALAR:
      if (strcasecmp (arg, "128") == 0)
        avxscalar = vex128;
      else if (strcasecmp (arg, "256") == 0)
        avxscalar = vex256;
      else
        as_fatal (_("invalid -mavxscalar= option: `%s'"), arg);
      break;
    case OPTION_MADD_BND_PREFIX:
      add_bnd_prefix = 1;
      break;

    case OPTION_MEVEXLIG:
      if (strcmp (arg, "128") == 0)
        evexlig = evexl128;
      else if (strcmp (arg, "256") == 0)
        evexlig = evexl256;
      else if (strcmp (arg, "512") == 0)
        evexlig = evexl512;
      else
        as_fatal (_("invalid -mevexlig= option: `%s'"), arg);
      break;

    case OPTION_MEVEXWIG:
      if (strcmp (arg, "0") == 0)
        evexwig = evexw0;
      else if (strcmp (arg, "1") == 0)
        evexwig = evexw1;
      else
        as_fatal (_("invalid -mevexwig= option: `%s'"), arg);
      break;

# if defined (TE_PE) || defined (TE_PEP)
    case OPTION_MBIG_OBJ:
      use_big_obj = 1;
      break;
# endif

    default:
      return 0;
    }
  return 1;
}
#define MESSAGE_TEMPLATE \
"                                                                                "
static void
show_arch (FILE *stream, int ext, int check)
{
  static char message[] = MESSAGE_TEMPLATE;
  char *start = message + 27;
  char *p;
  int size = sizeof (MESSAGE_TEMPLATE);
  int left;
  const char *name;
  int len;
  unsigned int j;

  p = start;
  left = size - (start - message);
  for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
    {
      /* Should it be skipped?  */
      if (cpu_arch[j].skip)
        continue;

      name = cpu_arch[j].name;
      len = cpu_arch[j].len;
      if (*name == '.')
        {
          /* It is an extension.  Skip if we aren't asked to show it.  */
          if (ext)
            {
              name++;
              len--;
            }
          else
            continue;
        }
      else if (ext)
        {
          /* It is a processor.  Skip if we show only extensions.  */
          continue;
        }
      else if (check && ! cpu_arch[j].flags.bitfield.cpui386)
        {
          /* It is an impossible processor - skip.  */
          continue;
        }

      /* Reserve 2 spaces for ", " or ",\0".  */
      left -= len + 2;

      /* Check if there is any room.  */
      if (left >= 0)
        {
          if (p != start)
            {
              *p++ = ',';
              *p++ = ' ';
            }
          p = mempcpy (p, name, len);
        }
      else
        {
          /* Output the current message now and start a new one.  */
          *p++ = ',';
          *p = '\0';
          fprintf (stream, "%s\n", message);
          p = start;
          left = size - (start - message) - len - 2;

          gas_assert (left >= 0);

          p = mempcpy (p, name, len);
        }
    }

  *p = '\0';
  fprintf (stream, "%s\n", message);
}
void
md_show_usage (FILE *stream)
{
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  fprintf (stream, _("\
  -Q                      ignored\n\
  -V                      print assembler version number\n\
  -k                      ignored\n"));
#endif
  fprintf (stream, _("\
  -n                      Do not optimize code alignment\n\
  -q                      quieten some warnings\n"));
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  fprintf (stream, _("\
  -s                      ignored\n"));
#endif
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP))
  fprintf (stream, _("\
  --32/--64/--x32         generate 32bit/64bit/x32 code\n"));
#endif
#ifdef SVR4_COMMENT_CHARS
  fprintf (stream, _("\
  --divide                do not treat `/' as a comment character\n"));
#else
  fprintf (stream, _("\
  --divide                ignored\n"));
#endif
  fprintf (stream, _("\
  -march=CPU[,+EXTENSION...]\n\
                          generate code for CPU and EXTENSION, CPU is one of:\n"));
  show_arch (stream, 0, 1);
  fprintf (stream, _("\
                          EXTENSION is combination of:\n"));
  show_arch (stream, 1, 0);
  fprintf (stream, _("\
  -mtune=CPU              optimize for CPU, CPU is one of:\n"));
  show_arch (stream, 0, 0);
  fprintf (stream, _("\
  -msse2avx               encode SSE instructions with VEX prefix\n"));
  fprintf (stream, _("\
  -msse-check=[none|error|warning]\n\
                          check SSE instructions\n"));
  fprintf (stream, _("\
  -moperand-check=[none|error|warning]\n\
                          check operand combinations for validity\n"));
  fprintf (stream, _("\
  -mavxscalar=[128|256]   encode scalar AVX instructions with specific vector\n\
                           length\n"));
  fprintf (stream, _("\
  -mevexlig=[128|256|512] encode scalar EVEX instructions with specific vector\n\
                           length\n"));
  fprintf (stream, _("\
  -mevexwig=[0|1]         encode EVEX instructions with specific EVEX.W value\n\
                           for EVEX.W bit ignored instructions\n"));
  fprintf (stream, _("\
  -mmnemonic=[att|intel]  use AT&T/Intel mnemonic\n"));
  fprintf (stream, _("\
  -msyntax=[att|intel]    use AT&T/Intel syntax\n"));
  fprintf (stream, _("\
  -mindex-reg             support pseudo index registers\n"));
  fprintf (stream, _("\
  -mnaked-reg             don't require `%%' prefix for registers\n"));
  fprintf (stream, _("\
  -mold-gcc               support old (<= 2.8.1) versions of gcc\n"));
  fprintf (stream, _("\
  -madd-bnd-prefix        add BND prefix for all valid branches\n"));
# if defined (TE_PE) || defined (TE_PEP)
  fprintf (stream, _("\
  -mbig-obj               generate big object files\n"));
# endif
}
#if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
     || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))

/* Pick the target format to use.  */

const char *
i386_target_format (void)
{
  if (!strncmp (default_arch, "x86_64", 6))
    {
      update_code_flag (CODE_64BIT, 1);
      if (default_arch[6] == '\0')
        x86_elf_abi = X86_64_ABI;
      else
        x86_elf_abi = X86_64_X32_ABI;
    }
  else if (!strcmp (default_arch, "i386"))
    update_code_flag (CODE_32BIT, 1);
  else
    as_fatal (_("unknown architecture"));

  if (cpu_flags_all_zero (&cpu_arch_isa_flags))
    cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
  if (cpu_flags_all_zero (&cpu_arch_tune_flags))
    cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;

  switch (OUTPUT_FLAVOR)
    {
#if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
    case bfd_target_aout_flavour:
      return AOUT_TARGET_FORMAT;
#endif
#if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
# if defined (TE_PE) || defined (TE_PEP)
    case bfd_target_coff_flavour:
      if (flag_code == CODE_64BIT)
        return use_big_obj ? "pe-bigobj-x86-64" : "pe-x86-64";
      else
        return "pe-i386";
# elif defined (TE_GO32)
    case bfd_target_coff_flavour:
      return "coff-go32";
# else
    case bfd_target_coff_flavour:
      return "coff-i386";
# endif
#endif
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
    case bfd_target_elf_flavour:
      {
        const char *format;

        switch (x86_elf_abi)
          {
          default:
            format = ELF_TARGET_FORMAT;
            break;
          case X86_64_ABI:
            use_rela_relocations = 1;
            object_64bit = 1;
            format = ELF_TARGET_FORMAT64;
            break;
          case X86_64_X32_ABI:
            use_rela_relocations = 1;
            object_64bit = 1;
            disallow_64bit_reloc = 1;
            format = ELF_TARGET_FORMAT32;
            break;
          }
        if (cpu_arch_isa == PROCESSOR_L1OM)
          {
            if (x86_elf_abi != X86_64_ABI)
              as_fatal (_("Intel L1OM is 64bit only"));
            return ELF_TARGET_L1OM_FORMAT;
          }
        if (cpu_arch_isa == PROCESSOR_K1OM)
          {
            if (x86_elf_abi != X86_64_ABI)
              as_fatal (_("Intel K1OM is 64bit only"));
            return ELF_TARGET_K1OM_FORMAT;
          }
        return format;
      }
#endif
#if defined (OBJ_MACH_O)
    case bfd_target_mach_o_flavour:
      if (flag_code == CODE_64BIT)
        {
          use_rela_relocations = 1;
          object_64bit = 1;
          return "mach-o-x86-64";
        }
      else
        return "mach-o-i386";
#endif
    default:
      abort ();
      return NULL;
    }
}

#endif /* OBJ_MAYBE_ more than one */
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))

void
i386_elf_emit_arch_note (void)
{
  if (IS_ELF && cpu_arch_name != NULL)
    {
      char *p;
      asection *seg = now_seg;
      subsegT subseg = now_subseg;
      Elf_Internal_Note i_note;
      Elf_External_Note e_note;
      asection *note_secp;
      int len;

      /* Create the .note section.  */
      note_secp = subseg_new (".note", 0);
      bfd_set_section_flags (stdoutput,
                             note_secp,
                             SEC_HAS_CONTENTS | SEC_READONLY);

      /* Process the arch string.  */
      len = strlen (cpu_arch_name);

      i_note.namesz = len + 1;
      i_note.descsz = 0;
      i_note.type = NT_ARCH;
      p = frag_more (sizeof (e_note.namesz));
      md_number_to_chars (p, (valueT) i_note.namesz, sizeof (e_note.namesz));
      p = frag_more (sizeof (e_note.descsz));
      md_number_to_chars (p, (valueT) i_note.descsz, sizeof (e_note.descsz));
      p = frag_more (sizeof (e_note.type));
      md_number_to_chars (p, (valueT) i_note.type, sizeof (e_note.type));
      p = frag_more (len + 1);
      strcpy (p, cpu_arch_name);

      frag_align (2, 0, 0);

      subseg_set (seg, subseg);
    }
}
#endif
symbolS *
md_undefined_symbol (char *name)
{
  if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
      && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
      && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
      && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
    {
      if (!GOT_symbol)
        {
          if (symbol_find (name))
            as_bad (_("GOT already in symbol table"));
          GOT_symbol = symbol_new (name, undefined_section,
                                   (valueT) 0, &zero_address_frag);
        }
      return GOT_symbol;
    }
  return 0;
}
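/* For example, the i386 PIC idiom

     addl  $_GLOBAL_OFFSET_TABLE_, %ebx

   relies on this: the first reference to _GLOBAL_OFFSET_TABLE_ creates
   GOT_symbol here, and tc_gen_reloc later turns the fixup against it
   into a GOTPC relocation.  */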
/* Round up a section size to the appropriate boundary.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
  if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
    {
      /* For a.out, force the section size to be aligned.  If we don't do
         this, BFD will align it for us, but it will not write out the
         final bytes of the section.  This may be a bug in BFD, but it is
         easier to fix it here since that is how the other a.out targets
         work.  */
      int align;

      align = bfd_get_section_alignment (stdoutput, segment);
      size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
    }
#endif

  return size;
}
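/* With a section alignment of 2 (i.e. 4-byte alignment), a size of 10
   becomes ((10 + 3) & ~3) == 12; sizes already on the boundary are
   left unchanged.  */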
/* On the i386, PC-relative offsets are relative to the start of the
   next instruction.  That is, the address of the offset, plus its
   size, since the offset is always the last part of the insn.  */

long
md_pcrel_from (fixS *fixP)
{
  return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
}
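/* For instance, a 5-byte `call foo' (e8 rel32) whose opcode starts at
   address 0x100 stores rel32 = foo - 0x105: the fixup sits at 0x101,
   is 4 bytes wide, and 0x101 + 4 is the address of the next
   instruction.  */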
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  int temp;

#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  if (IS_ELF)
    obj_elf_section_change_hook ();
#endif
  temp = get_absolute_expression ();
  subseg_set (bss_section, (subsegT) temp);
  demand_empty_rest_of_line ();
}
void
i386_validate_fix (fixS *fixp)
{
  if (fixp->fx_subsy && fixp->fx_subsy == GOT_symbol)
    {
      if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
        {
          if (!object_64bit)
            abort ();
          fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
        }
      else
        {
          if (!object_64bit)
            fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
          else
            fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
        }
      fixp->fx_subsy = 0;
    }
}
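/* In other words, an operand written as `sym - _GLOBAL_OFFSET_TABLE_'
   is rewritten into a single GOT-relative relocation against `sym'
   (GOTOFF on i386, GOTPCREL or GOTOFF64 on x86-64) before the fixup
   is emitted.  */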
arelent *
tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
{
  arelent *rel;
  bfd_reloc_code_real_type code;

  switch (fixp->fx_r_type)
    {
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
    case BFD_RELOC_SIZE32:
    case BFD_RELOC_SIZE64:
      if (S_IS_DEFINED (fixp->fx_addsy)
          && !S_IS_EXTERNAL (fixp->fx_addsy))
        {
          /* Resolve size relocation against local symbol to size of
             the symbol plus addend.  */
          valueT value = S_GET_SIZE (fixp->fx_addsy) + fixp->fx_offset;
          if (fixp->fx_r_type == BFD_RELOC_SIZE32
              && !fits_in_unsigned_long (value))
            as_bad_where (fixp->fx_file, fixp->fx_line,
                          _("symbol size computation overflow"));
          fixp->fx_addsy = NULL;
          fixp->fx_subsy = NULL;
          md_apply_fix (fixp, (valueT *) &value, NULL);
          return NULL;
        }
#endif
    case BFD_RELOC_X86_64_PLT32:
    case BFD_RELOC_X86_64_PLT32_BND:
    case BFD_RELOC_X86_64_GOT32:
    case BFD_RELOC_X86_64_GOTPCREL:
    case BFD_RELOC_386_PLT32:
    case BFD_RELOC_386_GOT32:
    case BFD_RELOC_386_GOTOFF:
    case BFD_RELOC_386_GOTPC:
    case BFD_RELOC_386_TLS_GD:
    case BFD_RELOC_386_TLS_LDM:
    case BFD_RELOC_386_TLS_LDO_32:
    case BFD_RELOC_386_TLS_IE_32:
    case BFD_RELOC_386_TLS_IE:
    case BFD_RELOC_386_TLS_GOTIE:
    case BFD_RELOC_386_TLS_LE_32:
    case BFD_RELOC_386_TLS_LE:
    case BFD_RELOC_386_TLS_GOTDESC:
    case BFD_RELOC_386_TLS_DESC_CALL:
    case BFD_RELOC_X86_64_TLSGD:
    case BFD_RELOC_X86_64_TLSLD:
    case BFD_RELOC_X86_64_DTPOFF32:
    case BFD_RELOC_X86_64_DTPOFF64:
    case BFD_RELOC_X86_64_GOTTPOFF:
    case BFD_RELOC_X86_64_TPOFF32:
    case BFD_RELOC_X86_64_TPOFF64:
    case BFD_RELOC_X86_64_GOTOFF64:
    case BFD_RELOC_X86_64_GOTPC32:
    case BFD_RELOC_X86_64_GOT64:
    case BFD_RELOC_X86_64_GOTPCREL64:
    case BFD_RELOC_X86_64_GOTPC64:
    case BFD_RELOC_X86_64_GOTPLT64:
    case BFD_RELOC_X86_64_PLTOFF64:
    case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
    case BFD_RELOC_X86_64_TLSDESC_CALL:
    case BFD_RELOC_RVA:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      code = fixp->fx_r_type;
      break;
    case BFD_RELOC_X86_64_32S:
      if (!fixp->fx_pcrel)
        {
          /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32.  */
          code = fixp->fx_r_type;
          break;
        }
    default:
      if (fixp->fx_pcrel)
        {
          switch (fixp->fx_size)
            {
            default:
              as_bad_where (fixp->fx_file, fixp->fx_line,
                            _("can not do %d byte pc-relative relocation"),
                            fixp->fx_size);
              code = BFD_RELOC_32_PCREL;
              break;
            case 1: code = BFD_RELOC_8_PCREL;  break;
            case 2: code = BFD_RELOC_16_PCREL; break;
            case 4:
              code = (fixp->fx_r_type == BFD_RELOC_X86_64_PC32_BND
                      ? fixp->fx_r_type : BFD_RELOC_32_PCREL);
              break;
#ifdef BFD64
            case 8: code = BFD_RELOC_64_PCREL; break;
#endif
            }
        }
      else
        {
          switch (fixp->fx_size)
            {
            default:
              as_bad_where (fixp->fx_file, fixp->fx_line,
                            _("can not do %d byte relocation"),
                            fixp->fx_size);
              code = BFD_RELOC_32;
              break;
            case 1: code = BFD_RELOC_8;  break;
            case 2: code = BFD_RELOC_16; break;
            case 4: code = BFD_RELOC_32; break;
#ifdef BFD64
            case 8: code = BFD_RELOC_64; break;
#endif
            }
        }
      break;
    }
  if ((code == BFD_RELOC_32
       || code == BFD_RELOC_32_PCREL
       || code == BFD_RELOC_X86_64_32S)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      if (!object_64bit)
        code = BFD_RELOC_386_GOTPC;
      else
        code = BFD_RELOC_X86_64_GOTPC32;
    }
  if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_X86_64_GOTPC64;
    }
  rel = (arelent *) xmalloc (sizeof (arelent));
  rel->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
  *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);

  rel->address = fixp->fx_frag->fr_address + fixp->fx_where;

  if (!use_rela_relocations)
    {
      /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
         vtable entry to be used in the relocation's section offset.  */
      if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
        rel->address = fixp->fx_offset;
#if defined (OBJ_COFF) && defined (TE_PE)
      else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
        rel->addend = fixp->fx_addnumber
                      - (S_GET_VALUE (fixp->fx_addsy) * 2);
      else
#endif
      rel->addend = 0;
    }
  /* Use the rela in 64bit mode.  */
  else
    {
      if (disallow_64bit_reloc)
        switch (code)
          {
          case BFD_RELOC_X86_64_DTPOFF64:
          case BFD_RELOC_X86_64_TPOFF64:
          case BFD_RELOC_64_PCREL:
          case BFD_RELOC_X86_64_GOTOFF64:
          case BFD_RELOC_X86_64_GOT64:
          case BFD_RELOC_X86_64_GOTPCREL64:
          case BFD_RELOC_X86_64_GOTPC64:
          case BFD_RELOC_X86_64_GOTPLT64:
          case BFD_RELOC_X86_64_PLTOFF64:
            as_bad_where (fixp->fx_file, fixp->fx_line,
                          _("cannot represent relocation type %s in x32 mode"),
                          bfd_get_reloc_code_name (code));
            break;
          default:
            break;
          }

      if (!fixp->fx_pcrel)
        rel->addend = fixp->fx_offset;
      else
        switch (code)
          {
          case BFD_RELOC_X86_64_PLT32:
          case BFD_RELOC_X86_64_PLT32_BND:
          case BFD_RELOC_X86_64_GOT32:
          case BFD_RELOC_X86_64_GOTPCREL:
          case BFD_RELOC_X86_64_TLSGD:
          case BFD_RELOC_X86_64_TLSLD:
          case BFD_RELOC_X86_64_GOTTPOFF:
          case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
          case BFD_RELOC_X86_64_TLSDESC_CALL:
            rel->addend = fixp->fx_offset - fixp->fx_size;
            break;
          default:
            rel->addend = (section->vma
                           - fixp->fx_size
                           + fixp->fx_addnumber
                           + md_pcrel_from (fixp));
            break;
          }
    }

  rel->howto = bfd_reloc_type_lookup (stdoutput, code);
  if (rel->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
                    _("cannot represent relocation type %s"),
                    bfd_get_reloc_code_name (code));
      /* Set howto to a garbage value so that we can keep going.  */
      rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
      gas_assert (rel->howto != NULL);
    }

  return rel;
}
#include "tc-i386-intel.c"
void
tc_x86_parse_to_dw2regnum (expressionS *exp)
{
  int saved_naked_reg;
  char saved_register_dot;

  saved_naked_reg = allow_naked_reg;
  allow_naked_reg = 1;
  saved_register_dot = register_chars['.'];
  register_chars['.'] = '.';
  allow_pseudo_reg = 1;
  expression_and_evaluate (exp);
  allow_pseudo_reg = 0;
  register_chars['.'] = saved_register_dot;
  allow_naked_reg = saved_naked_reg;

  if (exp->X_op == O_register && exp->X_add_number >= 0)
    {
      if ((addressT) exp->X_add_number < i386_regtab_size)
        {
          exp->X_op = O_constant;
          exp->X_add_number = i386_regtab[exp->X_add_number]
                              .dw2_regnum[flag_code >> 1];
        }
      else
        exp->X_op = O_illegal;
    }
}
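/* The result is the DWARF register number for the current mode: for
   example, parsing "ebp" yields 5 under the 32-bit DWARF numbering,
   while "rbp" yields 6 under the x86-64 numbering.  */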
void
tc_x86_frame_initial_instructions (void)
{
  static unsigned int sp_regno[2];

  if (!sp_regno[flag_code >> 1])
    {
      char *saved_input = input_line_pointer;
      char sp[][4] = {"esp", "rsp"};
      expressionS exp;

      input_line_pointer = sp[flag_code >> 1];
      tc_x86_parse_to_dw2regnum (&exp);
      gas_assert (exp.X_op == O_constant);
      sp_regno[flag_code >> 1] = exp.X_add_number;
      input_line_pointer = saved_input;
    }

  cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
  cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
}
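/* The initial CFI state set up above corresponds to the situation just
   after a `call': the CFA is %esp + 4 (or %rsp + 8 in 64-bit mode) and
   the return address is saved at CFA - 4 (resp. CFA - 8).  */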
int
x86_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  if (x86_elf_abi == X86_64_X32_ABI)
    return 4;
#endif
  return bfd_arch_bits_per_address (stdoutput) / 8;
}
int
i386_elf_section_type (const char *str, size_t len)
{
  if (flag_code == CODE_64BIT
      && len == sizeof ("unwind") - 1
      && strncmp (str, "unwind", 6) == 0)
    return SHT_X86_64_UNWIND;

  return -1;
}
void
i386_solaris_fix_up_eh_frame (segT sec)
{
  if (flag_code == CODE_64BIT)
    elf_section_type (sec) = SHT_X86_64_UNWIND;
}
void
tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
{
  expressionS exp;

  exp.X_op = O_secrel;
  exp.X_add_symbol = symbol;
  exp.X_add_number = 0;
  emit_expr (&exp, size);
}
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
/* For ELF on x86-64, add support for SHF_X86_64_LARGE.  */

bfd_vma
x86_64_section_letter (int letter, char **ptr_msg)
{
  if (flag_code == CODE_64BIT)
    {
      if (letter == 'l')
        return SHF_X86_64_LARGE;

      *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
    }
  else
    *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
  return -1;
}

bfd_vma
x86_64_section_word (char *str, size_t len)
{
  if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
    return SHF_X86_64_LARGE;

  return -1;
}
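/* So, in 64-bit mode, a directive such as

     .section .lrodata,"al",@progbits

   (illustrative) marks the section with SHF_X86_64_LARGE via the `l'
   flag letter handled above; the spelled-out attribute word "large"
   has the same effect.  */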
static void
handle_large_common (int small ATTRIBUTE_UNUSED)
{
  if (flag_code != CODE_64BIT)
    {
      s_comm_internal (0, elf_common_parse);
      as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
    }
  else
    {
      static segT lbss_section;
      asection *saved_com_section_ptr = elf_com_section_ptr;
      asection *saved_bss_section = bss_section;

      if (lbss_section == NULL)
        {
          flagword applicable;
          segT seg = now_seg;
          subsegT subseg = now_subseg;

          /* The .lbss section is for local .largecomm symbols.  */
          lbss_section = subseg_new (".lbss", 0);
          applicable = bfd_applicable_section_flags (stdoutput);
          bfd_set_section_flags (stdoutput, lbss_section,
                                 applicable & SEC_ALLOC);
          seg_info (lbss_section)->bss = 1;

          subseg_set (seg, subseg);
        }

      elf_com_section_ptr = &_bfd_elf_large_com_section;
      bss_section = lbss_section;

      s_comm_internal (0, elf_common_parse);

      elf_com_section_ptr = saved_com_section_ptr;
      bss_section = saved_bss_section;
    }
}
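/* Typical use (only meaningful when assembling 64-bit code), purely
   illustrative:

     .largecomm big_buffer, 0x100000, 32

   which behaves like .comm but places the symbol in the large data
   model sections (.lbss / large common) instead of .bss.  */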
#endif /* OBJ_ELF || OBJ_MAYBE_ELF */