Disallow 64bit relocations in x32 mode.
[binutils.git] / gas / config / tc-i386.c
blob 74ceacab6d12aadd57a23557551a91f3a10b68cc
1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
6 This file is part of GAS, the GNU Assembler.
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GAS; see the file COPYING. If not, write to the Free
20 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
21 02110-1301, USA. */
23 /* Intel 80386 machine specific gas.
24 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
25 x86_64 support by Jan Hubicka (jh@suse.cz)
26 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
27 Bugs & suggestions are completely welcome. This is free software.
28 Please help us make it better. */
30 #include "as.h"
31 #include "safe-ctype.h"
32 #include "subsegs.h"
33 #include "dwarf2dbg.h"
34 #include "dw2gencfi.h"
35 #include "elf/x86-64.h"
36 #include "opcodes/i386-init.h"
38 #ifndef REGISTER_WARNINGS
39 #define REGISTER_WARNINGS 1
40 #endif
42 #ifndef INFER_ADDR_PREFIX
43 #define INFER_ADDR_PREFIX 1
44 #endif
46 #ifndef DEFAULT_ARCH
47 #define DEFAULT_ARCH "i386"
48 #endif
50 #ifndef INLINE
51 #if __GNUC__ >= 2
52 #define INLINE __inline__
53 #else
54 #define INLINE
55 #endif
56 #endif
58 /* Prefixes will be emitted in the order defined below.
59 WAIT_PREFIX must be the first prefix since FWAIT is really an
60 instruction, and so must come before any prefixes.
61 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
62 REP_PREFIX, LOCK_PREFIX. */
63 #define WAIT_PREFIX 0
64 #define SEG_PREFIX 1
65 #define ADDR_PREFIX 2
66 #define DATA_PREFIX 3
67 #define REP_PREFIX 4
68 #define LOCK_PREFIX 5
69 #define REX_PREFIX 6 /* must come last. */
70 #define MAX_PREFIXES 7 /* max prefixes per opcode */
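/* As a rough illustration of the slot layout above (using only the
   standard prefix byte values): an insn that carries both a %fs segment
   override and a lock prefix stores 0x64 in slot SEG_PREFIX (1) and 0xf0
   in slot LOCK_PREFIX (5), so the segment byte is emitted ahead of the
   lock byte, and a REX byte, if any, is always written last from slot 6.  */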
72 /* we define the syntax here (modulo base,index,scale syntax) */
73 #define REGISTER_PREFIX '%'
74 #define IMMEDIATE_PREFIX '$'
75 #define ABSOLUTE_PREFIX '*'
77 /* these are the instruction mnemonic suffixes in AT&T syntax or
78 memory operand size in Intel syntax. */
79 #define WORD_MNEM_SUFFIX 'w'
80 #define BYTE_MNEM_SUFFIX 'b'
81 #define SHORT_MNEM_SUFFIX 's'
82 #define LONG_MNEM_SUFFIX 'l'
83 #define QWORD_MNEM_SUFFIX 'q'
84 #define XMMWORD_MNEM_SUFFIX 'x'
85 #define YMMWORD_MNEM_SUFFIX 'y'
86 /* Intel Syntax. Use a non-ascii letter since it never appears
87 in instructions. */
88 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
90 #define END_OF_INSN '\0'
93 'templates' is for grouping together 'template' structures for opcodes
94 of the same name. This is only used for storing the insns in the grand
95 ole hash table of insns.
96 The templates themselves start at START and range up to (but not including)
97 END.
99 typedef struct
101 const insn_template *start;
102 const insn_template *end;
104 templates;
106 /* 386 operand encoding bytes: see 386 book for details of this. */
107 typedef struct
109 unsigned int regmem; /* codes register or memory operand */
110 unsigned int reg; /* codes register operand (or extended opcode) */
111 unsigned int mode; /* how to interpret regmem & reg */
113 modrm_byte;
115 /* x86-64 extension prefix. */
116 typedef int rex_byte;
118 /* 386 opcode byte to code indirect addressing. */
119 typedef struct
121 unsigned base;
122 unsigned index;
123 unsigned scale;
125 sib_byte;
127 /* x86 arch names, types and features */
128 typedef struct
130 const char *name; /* arch name */
131 unsigned int len; /* arch string length */
132 enum processor_type type; /* arch type */
133 i386_cpu_flags flags; /* cpu feature flags */
134 unsigned int skip; /* show_arch should skip this. */
135 unsigned int negated; /* turn off indicated flags. */
137 arch_entry;
139 static void update_code_flag (int, int);
140 static void set_code_flag (int);
141 static void set_16bit_gcc_code_flag (int);
142 static void set_intel_syntax (int);
143 static void set_intel_mnemonic (int);
144 static void set_allow_index_reg (int);
145 static void set_sse_check (int);
146 static void set_cpu_arch (int);
147 #ifdef TE_PE
148 static void pe_directive_secrel (int);
149 #endif
150 static void signed_cons (int);
151 static char *output_invalid (int c);
152 static int i386_finalize_immediate (segT, expressionS *, i386_operand_type,
153 const char *);
154 static int i386_finalize_displacement (segT, expressionS *, i386_operand_type,
155 const char *);
156 static int i386_att_operand (char *);
157 static int i386_intel_operand (char *, int);
158 static int i386_intel_simplify (expressionS *);
159 static int i386_intel_parse_name (const char *, expressionS *);
160 static const reg_entry *parse_register (char *, char **);
161 static char *parse_insn (char *, char *);
162 static char *parse_operands (char *, const char *);
163 static void swap_operands (void);
164 static void swap_2_operands (int, int);
165 static void optimize_imm (void);
166 static void optimize_disp (void);
167 static const insn_template *match_template (void);
168 static int check_string (void);
169 static int process_suffix (void);
170 static int check_byte_reg (void);
171 static int check_long_reg (void);
172 static int check_qword_reg (void);
173 static int check_word_reg (void);
174 static int finalize_imm (void);
175 static int process_operands (void);
176 static const seg_entry *build_modrm_byte (void);
177 static void output_insn (void);
178 static void output_imm (fragS *, offsetT);
179 static void output_disp (fragS *, offsetT);
180 #ifndef I386COFF
181 static void s_bss (int);
182 #endif
183 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
184 static void handle_large_common (int small ATTRIBUTE_UNUSED);
185 #endif
187 static const char *default_arch = DEFAULT_ARCH;
189 /* VEX prefix. */
190 typedef struct
192 /* VEX prefix is either 2 or 3 bytes. */
193 unsigned char bytes[3];
194 unsigned int length;
195 /* Destination or source register specifier. */
196 const reg_entry *register_specifier;
197 } vex_prefix;
199 /* 'md_assemble ()' gathers together information and puts it into a
200 i386_insn. */
202 union i386_op
204 expressionS *disps;
205 expressionS *imms;
206 const reg_entry *regs;
209 enum i386_error
211 operand_size_mismatch,
212 operand_type_mismatch,
213 register_type_mismatch,
214 number_of_operands_mismatch,
215 invalid_instruction_suffix,
216 bad_imm4,
217 old_gcc_only,
218 unsupported_with_intel_mnemonic,
219 unsupported_syntax,
220 unsupported
223 struct _i386_insn
225 /* TM holds the template for the insn we're currently assembling. */
226 insn_template tm;
228 /* SUFFIX holds the instruction size suffix for byte, word, dword
229 or qword, if given. */
230 char suffix;
232 /* OPERANDS gives the number of given operands. */
233 unsigned int operands;
235 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
236 of given register, displacement, memory operands and immediate
237 operands. */
238 unsigned int reg_operands, disp_operands, mem_operands, imm_operands;
240 /* TYPES [i] is the type (see above #defines) which tells us how to
241 use OP[i] for the corresponding operand. */
242 i386_operand_type types[MAX_OPERANDS];
244 /* Displacement expression, immediate expression, or register for each
245 operand. */
246 union i386_op op[MAX_OPERANDS];
248 /* Flags for operands. */
249 unsigned int flags[MAX_OPERANDS];
250 #define Operand_PCrel 1
252 /* Relocation type for operand */
253 enum bfd_reloc_code_real reloc[MAX_OPERANDS];
255 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
256 the base index byte below. */
257 const reg_entry *base_reg;
258 const reg_entry *index_reg;
259 unsigned int log2_scale_factor;
261 /* SEG gives the seg_entries of this insn. They are zero unless
262 explicit segment overrides are given. */
263 const seg_entry *seg[2];
265 /* PREFIX holds all the given prefix opcodes (usually null).
266 PREFIXES is the number of prefix opcodes. */
267 unsigned int prefixes;
268 unsigned char prefix[MAX_PREFIXES];
270 /* RM and SIB are the modrm byte and the sib byte where the
271 addressing modes of this insn are encoded. */
272 modrm_byte rm;
273 rex_byte rex;
274 sib_byte sib;
275 vex_prefix vex;
277 /* Swap operand in encoding. */
278 unsigned int swap_operand;
280 /* Force 32bit displacement in encoding. */
281 unsigned int disp32_encoding;
283 /* Error message. */
284 enum i386_error error;
287 typedef struct _i386_insn i386_insn;
289 /* List of chars besides those in app.c:symbol_chars that can start an
290 operand. Used to prevent the scrubber eating vital white-space. */
291 const char extra_symbol_chars[] = "*%-(["
292 #ifdef LEX_AT
294 #endif
295 #ifdef LEX_QM
297 #endif
300 #if (defined (TE_I386AIX) \
301 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
302 && !defined (TE_GNU) \
303 && !defined (TE_LINUX) \
304 && !defined (TE_NETWARE) \
305 && !defined (TE_FreeBSD) \
306 && !defined (TE_NetBSD)))
307 /* This array holds the chars that always start a comment. If the
308 pre-processor is disabled, these aren't very useful. The option
309 --divide will remove '/' from this list. */
310 const char *i386_comment_chars = "#/";
311 #define SVR4_COMMENT_CHARS 1
312 #define PREFIX_SEPARATOR '\\'
314 #else
315 const char *i386_comment_chars = "#";
316 #define PREFIX_SEPARATOR '/'
317 #endif
319 /* This array holds the chars that only start a comment at the beginning of
320 a line. If the line seems to have the form '# 123 filename'
321 .line and .file directives will appear in the pre-processed output.
322 Note that input_file.c hand checks for '#' at the beginning of the
323 first line of the input file. This is because the compiler outputs
324 #NO_APP at the beginning of its output.
325 Also note that comments started like this one will always work if
326 '/' isn't otherwise defined. */
327 const char line_comment_chars[] = "#/";
329 const char line_separator_chars[] = ";";
331 /* Chars that can be used to separate mant from exp in floating point
332 nums. */
333 const char EXP_CHARS[] = "eE";
335 /* Chars that mean this number is a floating point constant
336 As in 0f12.456
337 or 0d1.2345e12. */
338 const char FLT_CHARS[] = "fFdDxX";
340 /* Tables for lexical analysis. */
341 static char mnemonic_chars[256];
342 static char register_chars[256];
343 static char operand_chars[256];
344 static char identifier_chars[256];
345 static char digit_chars[256];
347 /* Lexical macros. */
348 #define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
349 #define is_operand_char(x) (operand_chars[(unsigned char) x])
350 #define is_register_char(x) (register_chars[(unsigned char) x])
351 #define is_space_char(x) ((x) == ' ')
352 #define is_identifier_char(x) (identifier_chars[(unsigned char) x])
353 #define is_digit_char(x) (digit_chars[(unsigned char) x])
355 /* All non-digit non-letter characters that may occur in an operand. */
356 static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
358 /* md_assemble() always leaves the strings it's passed unaltered. To
359 effect this we maintain a stack of saved characters that we've smashed
360 with '\0's (indicating end of strings for various sub-fields of the
361 assembler instruction). */
362 static char save_stack[32];
363 static char *save_stack_p;
364 #define END_STRING_AND_SAVE(s) \
365 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
366 #define RESTORE_END_STRING(s) \
367 do { *(s) = *--save_stack_p; } while (0)
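/* A minimal usage sketch (the pointer name `l' is hypothetical): the
   parser temporarily NUL-terminates a sub-field, works on it, and then
   puts the saved character back:
     END_STRING_AND_SAVE (l);    -- pushes *l onto save_stack, sets *l = '\0'
     ... parse the NUL-terminated piece ...
     RESTORE_END_STRING (l);     -- pops the saved character back into *l  */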
369 /* The instruction we're assembling. */
370 static i386_insn i;
372 /* Possible templates for current insn. */
373 static const templates *current_templates;
375 /* Per instruction expressionS buffers: max displacements & immediates. */
376 static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
377 static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];
379 /* Current operand we are working on. */
380 static int this_operand = -1;
382 /* We support four different modes. FLAG_CODE variable is used to distinguish
383 these. */
385 enum flag_code {
386 CODE_32BIT,
387 CODE_16BIT,
388 CODE_64BIT };
390 static enum flag_code flag_code;
391 static unsigned int object_64bit;
392 static unsigned int disallow_64bit_reloc;
393 static int use_rela_relocations = 0;
395 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
396 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
397 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
399 /* The ELF ABI to use. */
400 enum x86_elf_abi
402 I386_ABI,
403 X86_64_ABI,
404 X86_64_X32_ABI
407 static enum x86_elf_abi x86_elf_abi = I386_ABI;
408 #endif
410 /* The names used to print error messages. */
411 static const char *flag_code_names[] =
413 "32",
414 "16",
415 "64"
418 /* 1 for intel syntax,
419 0 if att syntax. */
420 static int intel_syntax = 0;
422 /* 1 for intel mnemonic,
423 0 if att mnemonic. */
424 static int intel_mnemonic = !SYSV386_COMPAT;
426 /* 1 if we support old (<= 2.8.1) versions of gcc. */
427 static int old_gcc = OLDGCC_COMPAT;
429 /* 1 if pseudo registers are permitted. */
430 static int allow_pseudo_reg = 0;
432 /* 1 if register prefix % not required. */
433 static int allow_naked_reg = 0;
435 /* 1 if pseudo index register, eiz/riz, is allowed. */
436 static int allow_index_reg = 0;
438 static enum
440 sse_check_none = 0,
441 sse_check_warning,
442 sse_check_error
444 sse_check;
446 /* Register prefix used for error message. */
447 static const char *register_prefix = "%";
449 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
450 leave, push, and pop instructions so that gcc has the same stack
451 frame as in 32 bit mode. */
452 static char stackop_size = '\0';
454 /* Non-zero to optimize code alignment. */
455 int optimize_align_code = 1;
457 /* Non-zero to quieten some warnings. */
458 static int quiet_warnings = 0;
460 /* CPU name. */
461 static const char *cpu_arch_name = NULL;
462 static char *cpu_sub_arch_name = NULL;
464 /* CPU feature flags. */
465 static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS;
467 /* If we have selected a cpu we are generating instructions for. */
468 static int cpu_arch_tune_set = 0;
470 /* Cpu we are generating instructions for. */
471 enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN;
473 /* CPU feature flags of cpu we are generating instructions for. */
474 static i386_cpu_flags cpu_arch_tune_flags;
476 /* CPU instruction set architecture used. */
477 enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN;
479 /* CPU feature flags of instruction set architecture used. */
480 i386_cpu_flags cpu_arch_isa_flags;
482 /* If set, conditional jumps are not automatically promoted to handle
483 larger than a byte offset. */
484 static unsigned int no_cond_jump_promotion = 0;
486 /* Encode SSE instructions with VEX prefix. */
487 static unsigned int sse2avx;
489 /* Encode scalar AVX instructions with specific vector length. */
490 static enum
492 vex128 = 0,
493 vex256
494 } avxscalar;
496 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
497 static symbolS *GOT_symbol;
499 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
500 unsigned int x86_dwarf2_return_column;
502 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
503 int x86_cie_data_alignment;
505 /* Interface to relax_segment.
506 There are 3 major relax states for 386 jump insns because the
507 different types of jumps add different sizes to frags when we're
508 figuring out what sort of jump to choose to reach a given label. */
510 /* Types. */
511 #define UNCOND_JUMP 0
512 #define COND_JUMP 1
513 #define COND_JUMP86 2
515 /* Sizes. */
516 #define CODE16 1
517 #define SMALL 0
518 #define SMALL16 (SMALL | CODE16)
519 #define BIG 2
520 #define BIG16 (BIG | CODE16)
522 #ifndef INLINE
523 #ifdef __GNUC__
524 #define INLINE __inline__
525 #else
526 #define INLINE
527 #endif
528 #endif
530 #define ENCODE_RELAX_STATE(type, size) \
531 ((relax_substateT) (((type) << 2) | (size)))
532 #define TYPE_FROM_RELAX_STATE(s) \
533 ((s) >> 2)
534 #define DISP_SIZE_FROM_RELAX_STATE(s) \
535 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
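/* A worked example, using only the TYPE and SIZE values defined above:
     ENCODE_RELAX_STATE (COND_JUMP, SMALL) == (1 << 2) | 0 == 4
     ENCODE_RELAX_STATE (COND_JUMP, BIG)   == (1 << 2) | 2 == 6
   TYPE_FROM_RELAX_STATE (6) recovers COND_JUMP (1), and
   DISP_SIZE_FROM_RELAX_STATE (6) yields 4 displacement bytes, matching
   the md_relax_table entries below.  */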
537 /* This table is used by relax_frag to promote short jumps to long
538 ones where necessary. SMALL (short) jumps may be promoted to BIG
539 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
540 don't allow a short jump in a 32 bit code segment to be promoted to
541 a 16 bit offset jump because it's slower (requires data size
542 prefix), and doesn't work, unless the destination is in the bottom
543 64k of the code segment (The top 16 bits of eip are zeroed). */
545 const relax_typeS md_relax_table[] =
547 /* The fields are:
548 1) most positive reach of this state,
549 2) most negative reach of this state,
550 3) how many bytes this mode will have in the variable part of the frag
551 4) which index into the table to try if we can't fit into this one. */
553 /* UNCOND_JUMP states. */
554 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)},
555 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)},
556 /* dword jmp adds 4 bytes to frag:
557 0 extra opcode bytes, 4 displacement bytes. */
558 {0, 0, 4, 0},
559 /* word jmp adds 2 bytes to frag:
560 0 extra opcode bytes, 2 displacement bytes. */
561 {0, 0, 2, 0},
563 /* COND_JUMP states. */
564 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)},
565 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)},
566 /* dword conditionals add 5 bytes to frag:
567 1 extra opcode byte, 4 displacement bytes. */
568 {0, 0, 5, 0},
569 /* word conditionals add 3 bytes to frag:
570 1 extra opcode byte, 2 displacement bytes. */
571 {0, 0, 3, 0},
573 /* COND_JUMP86 states. */
574 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)},
575 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)},
576 /* dword conditionals add 5 bytes to frag:
577 1 extra opcode byte, 4 displacement bytes. */
578 {0, 0, 5, 0},
579 /* word conditionals add 4 bytes to frag:
580 1 displacement byte and a 3 byte long branch insn. */
581 {0, 0, 4, 0}
584 static const arch_entry cpu_arch[] =
586 /* Do not replace the first two entries - i386_target_format()
587 relies on them being there in this order. */
588 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32,
589 CPU_GENERIC32_FLAGS, 0, 0 },
590 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64,
591 CPU_GENERIC64_FLAGS, 0, 0 },
592 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN,
593 CPU_NONE_FLAGS, 0, 0 },
594 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN,
595 CPU_I186_FLAGS, 0, 0 },
596 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN,
597 CPU_I286_FLAGS, 0, 0 },
598 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386,
599 CPU_I386_FLAGS, 0, 0 },
600 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486,
601 CPU_I486_FLAGS, 0, 0 },
602 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM,
603 CPU_I586_FLAGS, 0, 0 },
604 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO,
605 CPU_I686_FLAGS, 0, 0 },
606 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM,
607 CPU_I586_FLAGS, 0, 0 },
608 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO,
609 CPU_PENTIUMPRO_FLAGS, 0, 0 },
610 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO,
611 CPU_P2_FLAGS, 0, 0 },
612 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO,
613 CPU_P3_FLAGS, 0, 0 },
614 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4,
615 CPU_P4_FLAGS, 0, 0 },
616 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA,
617 CPU_CORE_FLAGS, 0, 0 },
618 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA,
619 CPU_NOCONA_FLAGS, 0, 0 },
620 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE,
621 CPU_CORE_FLAGS, 1, 0 },
622 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE,
623 CPU_CORE_FLAGS, 0, 0 },
624 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2,
625 CPU_CORE2_FLAGS, 1, 0 },
626 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2,
627 CPU_CORE2_FLAGS, 0, 0 },
628 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7,
629 CPU_COREI7_FLAGS, 0, 0 },
630 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
631 CPU_L1OM_FLAGS, 0, 0 },
632 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
633 CPU_K6_FLAGS, 0, 0 },
634 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
635 CPU_K6_2_FLAGS, 0, 0 },
636 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON,
637 CPU_ATHLON_FLAGS, 0, 0 },
638 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8,
639 CPU_K8_FLAGS, 1, 0 },
640 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8,
641 CPU_K8_FLAGS, 0, 0 },
642 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8,
643 CPU_K8_FLAGS, 0, 0 },
644 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
645 CPU_AMDFAM10_FLAGS, 0, 0 },
646 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BDVER1,
647 CPU_BDVER1_FLAGS, 0, 0 },
648 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
649 CPU_8087_FLAGS, 0, 0 },
650 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
651 CPU_287_FLAGS, 0, 0 },
652 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN,
653 CPU_387_FLAGS, 0, 0 },
654 { STRING_COMMA_LEN (".no87"), PROCESSOR_UNKNOWN,
655 CPU_ANY87_FLAGS, 0, 1 },
656 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN,
657 CPU_MMX_FLAGS, 0, 0 },
658 { STRING_COMMA_LEN (".nommx"), PROCESSOR_UNKNOWN,
659 CPU_3DNOWA_FLAGS, 0, 1 },
660 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN,
661 CPU_SSE_FLAGS, 0, 0 },
662 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN,
663 CPU_SSE2_FLAGS, 0, 0 },
664 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN,
665 CPU_SSE3_FLAGS, 0, 0 },
666 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN,
667 CPU_SSSE3_FLAGS, 0, 0 },
668 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN,
669 CPU_SSE4_1_FLAGS, 0, 0 },
670 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN,
671 CPU_SSE4_2_FLAGS, 0, 0 },
672 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN,
673 CPU_SSE4_2_FLAGS, 0, 0 },
674 { STRING_COMMA_LEN (".nosse"), PROCESSOR_UNKNOWN,
675 CPU_ANY_SSE_FLAGS, 0, 1 },
676 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
677 CPU_AVX_FLAGS, 0, 0 },
678 { STRING_COMMA_LEN (".noavx"), PROCESSOR_UNKNOWN,
679 CPU_ANY_AVX_FLAGS, 0, 1 },
680 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
681 CPU_VMX_FLAGS, 0, 0 },
682 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN,
683 CPU_SMX_FLAGS, 0, 0 },
684 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN,
685 CPU_XSAVE_FLAGS, 0, 0 },
686 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN,
687 CPU_XSAVEOPT_FLAGS, 0, 0 },
688 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN,
689 CPU_AES_FLAGS, 0, 0 },
690 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN,
691 CPU_PCLMUL_FLAGS, 0, 0 },
692 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN,
693 CPU_PCLMUL_FLAGS, 1, 0 },
694 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN,
695 CPU_FSGSBASE_FLAGS, 0, 0 },
696 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN,
697 CPU_RDRND_FLAGS, 0, 0 },
698 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN,
699 CPU_F16C_FLAGS, 0, 0 },
700 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
701 CPU_FMA_FLAGS, 0, 0 },
702 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
703 CPU_FMA4_FLAGS, 0, 0 },
704 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN,
705 CPU_XOP_FLAGS, 0, 0 },
706 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN,
707 CPU_LWP_FLAGS, 0, 0 },
708 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN,
709 CPU_MOVBE_FLAGS, 0, 0 },
710 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
711 CPU_EPT_FLAGS, 0, 0 },
712 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
713 CPU_CLFLUSH_FLAGS, 0, 0 },
714 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN,
715 CPU_NOP_FLAGS, 0, 0 },
716 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN,
717 CPU_SYSCALL_FLAGS, 0, 0 },
718 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN,
719 CPU_RDTSCP_FLAGS, 0, 0 },
720 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN,
721 CPU_3DNOW_FLAGS, 0, 0 },
722 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN,
723 CPU_3DNOWA_FLAGS, 0, 0 },
724 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN,
725 CPU_PADLOCK_FLAGS, 0, 0 },
726 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN,
727 CPU_SVME_FLAGS, 1, 0 },
728 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN,
729 CPU_SVME_FLAGS, 0, 0 },
730 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
731 CPU_SSE4A_FLAGS, 0, 0 },
732 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
733 CPU_ABM_FLAGS, 0, 0 },
734 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN,
735 CPU_BMI_FLAGS, 0, 0 },
738 #ifdef I386COFF
739 /* Like s_lcomm_internal in gas/read.c but the alignment string
740 is allowed to be optional. */
742 static symbolS *
743 pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size)
745 addressT align = 0;
747 SKIP_WHITESPACE ();
749 if (needs_align
750 && *input_line_pointer == ',')
752 align = parse_align (needs_align - 1);
754 if (align == (addressT) -1)
755 return NULL;
757 else
759 if (size >= 8)
760 align = 3;
761 else if (size >= 4)
762 align = 2;
763 else if (size >= 2)
764 align = 1;
765 else
766 align = 0;
769 bss_alloc (symbolP, size, align);
770 return symbolP;
773 static void
774 pe_lcomm (int needs_align)
776 s_comm_internal (needs_align * 2, pe_lcomm_internal);
778 #endif
780 const pseudo_typeS md_pseudo_table[] =
782 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
783 {"align", s_align_bytes, 0},
784 #else
785 {"align", s_align_ptwo, 0},
786 #endif
787 {"arch", set_cpu_arch, 0},
788 #ifndef I386COFF
789 {"bss", s_bss, 0},
790 #else
791 {"lcomm", pe_lcomm, 1},
792 #endif
793 {"ffloat", float_cons, 'f'},
794 {"dfloat", float_cons, 'd'},
795 {"tfloat", float_cons, 'x'},
796 {"value", cons, 2},
797 {"slong", signed_cons, 4},
798 {"noopt", s_ignore, 0},
799 {"optim", s_ignore, 0},
800 {"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
801 {"code16", set_code_flag, CODE_16BIT},
802 {"code32", set_code_flag, CODE_32BIT},
803 {"code64", set_code_flag, CODE_64BIT},
804 {"intel_syntax", set_intel_syntax, 1},
805 {"att_syntax", set_intel_syntax, 0},
806 {"intel_mnemonic", set_intel_mnemonic, 1},
807 {"att_mnemonic", set_intel_mnemonic, 0},
808 {"allow_index_reg", set_allow_index_reg, 1},
809 {"disallow_index_reg", set_allow_index_reg, 0},
810 {"sse_check", set_sse_check, 0},
811 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
812 {"largecomm", handle_large_common, 0},
813 #else
814 {"file", (void (*) (int)) dwarf2_directive_file, 0},
815 {"loc", dwarf2_directive_loc, 0},
816 {"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0},
817 #endif
818 #ifdef TE_PE
819 {"secrel32", pe_directive_secrel, 0},
820 #endif
821 {0, 0, 0}
824 /* For interface with expression (). */
825 extern char *input_line_pointer;
827 /* Hash table for instruction mnemonic lookup. */
828 static struct hash_control *op_hash;
830 /* Hash table for register lookup. */
831 static struct hash_control *reg_hash;
833 void
834 i386_align_code (fragS *fragP, int count)
836 /* Various efficient no-op patterns for aligning code labels.
837 Note: Don't try to assemble the instructions in the comments.
838 0L and 0w are not legal. */
839 static const char f32_1[] =
840 {0x90}; /* nop */
841 static const char f32_2[] =
842 {0x66,0x90}; /* xchg %ax,%ax */
843 static const char f32_3[] =
844 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
845 static const char f32_4[] =
846 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
847 static const char f32_5[] =
848 {0x90, /* nop */
849 0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
850 static const char f32_6[] =
851 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
852 static const char f32_7[] =
853 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
854 static const char f32_8[] =
855 {0x90, /* nop */
856 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
857 static const char f32_9[] =
858 {0x89,0xf6, /* movl %esi,%esi */
859 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
860 static const char f32_10[] =
861 {0x8d,0x76,0x00, /* leal 0(%esi),%esi */
862 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
863 static const char f32_11[] =
864 {0x8d,0x74,0x26,0x00, /* leal 0(%esi,1),%esi */
865 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
866 static const char f32_12[] =
867 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
868 0x8d,0xbf,0x00,0x00,0x00,0x00}; /* leal 0L(%edi),%edi */
869 static const char f32_13[] =
870 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
871 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
872 static const char f32_14[] =
873 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00, /* leal 0L(%esi,1),%esi */
874 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
875 static const char f16_3[] =
876 {0x8d,0x74,0x00}; /* lea 0(%esi),%esi */
877 static const char f16_4[] =
878 {0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
879 static const char f16_5[] =
880 {0x90, /* nop */
881 0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
882 static const char f16_6[] =
883 {0x89,0xf6, /* mov %si,%si */
884 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
885 static const char f16_7[] =
886 {0x8d,0x74,0x00, /* lea 0(%si),%si */
887 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
888 static const char f16_8[] =
889 {0x8d,0xb4,0x00,0x00, /* lea 0w(%si),%si */
890 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
891 static const char jump_31[] =
892 {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90, /* jmp .+31; lotsa nops */
893 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
894 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
895 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
896 static const char *const f32_patt[] = {
897 f32_1, f32_2, f32_3, f32_4, f32_5, f32_6, f32_7, f32_8,
898 f32_9, f32_10, f32_11, f32_12, f32_13, f32_14
900 static const char *const f16_patt[] = {
901 f32_1, f32_2, f16_3, f16_4, f16_5, f16_6, f16_7, f16_8
903 /* nopl (%[re]ax) */
904 static const char alt_3[] =
905 {0x0f,0x1f,0x00};
906 /* nopl 0(%[re]ax) */
907 static const char alt_4[] =
908 {0x0f,0x1f,0x40,0x00};
909 /* nopl 0(%[re]ax,%[re]ax,1) */
910 static const char alt_5[] =
911 {0x0f,0x1f,0x44,0x00,0x00};
912 /* nopw 0(%[re]ax,%[re]ax,1) */
913 static const char alt_6[] =
914 {0x66,0x0f,0x1f,0x44,0x00,0x00};
915 /* nopl 0L(%[re]ax) */
916 static const char alt_7[] =
917 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
918 /* nopl 0L(%[re]ax,%[re]ax,1) */
919 static const char alt_8[] =
920 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
921 /* nopw 0L(%[re]ax,%[re]ax,1) */
922 static const char alt_9[] =
923 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
924 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
925 static const char alt_10[] =
926 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
927 /* data16
928 nopw %cs:0L(%[re]ax,%[re]ax,1) */
929 static const char alt_long_11[] =
930 {0x66,
931 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
932 /* data16
933 data16
934 nopw %cs:0L(%[re]ax,%[re]ax,1) */
935 static const char alt_long_12[] =
936 {0x66,
937 0x66,
938 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
939 /* data16
940 data16
941 data16
942 nopw %cs:0L(%[re]ax,%[re]ax,1) */
943 static const char alt_long_13[] =
944 {0x66,
945 0x66,
946 0x66,
947 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
948 /* data16
949 data16
950 data16
951 data16
952 nopw %cs:0L(%[re]ax,%[re]ax,1) */
953 static const char alt_long_14[] =
954 {0x66,
955 0x66,
956 0x66,
957 0x66,
958 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
959 /* data16
960 data16
961 data16
962 data16
963 data16
964 nopw %cs:0L(%[re]ax,%[re]ax,1) */
965 static const char alt_long_15[] =
966 {0x66,
967 0x66,
968 0x66,
969 0x66,
970 0x66,
971 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
972 /* nopl 0(%[re]ax,%[re]ax,1)
973 nopw 0(%[re]ax,%[re]ax,1) */
974 static const char alt_short_11[] =
975 {0x0f,0x1f,0x44,0x00,0x00,
976 0x66,0x0f,0x1f,0x44,0x00,0x00};
977 /* nopw 0(%[re]ax,%[re]ax,1)
978 nopw 0(%[re]ax,%[re]ax,1) */
979 static const char alt_short_12[] =
980 {0x66,0x0f,0x1f,0x44,0x00,0x00,
981 0x66,0x0f,0x1f,0x44,0x00,0x00};
982 /* nopw 0(%[re]ax,%[re]ax,1)
983 nopl 0L(%[re]ax) */
984 static const char alt_short_13[] =
985 {0x66,0x0f,0x1f,0x44,0x00,0x00,
986 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
987 /* nopl 0L(%[re]ax)
988 nopl 0L(%[re]ax) */
989 static const char alt_short_14[] =
990 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
991 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
992 /* nopl 0L(%[re]ax)
993 nopl 0L(%[re]ax,%[re]ax,1) */
994 static const char alt_short_15[] =
995 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
996 0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
997 static const char *const alt_short_patt[] = {
998 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
999 alt_9, alt_10, alt_short_11, alt_short_12, alt_short_13,
1000 alt_short_14, alt_short_15
1002 static const char *const alt_long_patt[] = {
1003 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1004 alt_9, alt_10, alt_long_11, alt_long_12, alt_long_13,
1005 alt_long_14, alt_long_15
1008 /* Only align for at least a positive non-zero boundary. */
1009 if (count <= 0 || count > MAX_MEM_FOR_RS_ALIGN_CODE)
1010 return;
1012 /* We need to decide which NOP sequence to use for 32bit and
1013 64bit. When -mtune= is used:
1015 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1016 PROCESSOR_GENERIC32, f32_patt will be used.
1017 2. For PROCESSOR_PENTIUMPRO, PROCESSOR_PENTIUM4, PROCESSOR_NOCONA,
1018 PROCESSOR_CORE, PROCESSOR_CORE2, PROCESSOR_COREI7, and
1019 PROCESSOR_GENERIC64, alt_long_patt will be used.
1020 3. For PROCESSOR_ATHLON, PROCESSOR_K6, PROCESSOR_K8 and
1021 PROCESSOR_AMDFAM10, and PROCESSOR_BDVER1, alt_short_patt
1022 will be used.
1024 When -mtune= isn't used, alt_long_patt will be used if
1025 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1026 be used.
1028 When -march= or .arch is used, we can't use anything beyond
1029 cpu_arch_isa_flags. */
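/* For instance, with a tuning that selects alt_long_patt (say core2, per
   the list above) and count == 6, patt[count - 1] is alt_6, i.e. the
   6-byte sequence 0x66 0x0f 0x1f 0x44 0x00 0x00
   (nopw 0(%[re]ax,%[re]ax,1)).  */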
1031 if (flag_code == CODE_16BIT)
1033 if (count > 8)
1035 memcpy (fragP->fr_literal + fragP->fr_fix,
1036 jump_31, count);
1037 /* Adjust jump offset. */
1038 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1040 else
1041 memcpy (fragP->fr_literal + fragP->fr_fix,
1042 f16_patt[count - 1], count);
1044 else
1046 const char *const *patt = NULL;
1048 if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN)
1050 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1051 switch (cpu_arch_tune)
1053 case PROCESSOR_UNKNOWN:
1054 /* We use cpu_arch_isa_flags to check if we SHOULD
1055 optimize with nops. */
1056 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1057 patt = alt_long_patt;
1058 else
1059 patt = f32_patt;
1060 break;
1061 case PROCESSOR_PENTIUMPRO:
1062 case PROCESSOR_PENTIUM4:
1063 case PROCESSOR_NOCONA:
1064 case PROCESSOR_CORE:
1065 case PROCESSOR_CORE2:
1066 case PROCESSOR_COREI7:
1067 case PROCESSOR_L1OM:
1068 case PROCESSOR_GENERIC64:
1069 patt = alt_long_patt;
1070 break;
1071 case PROCESSOR_K6:
1072 case PROCESSOR_ATHLON:
1073 case PROCESSOR_K8:
1074 case PROCESSOR_AMDFAM10:
1075 case PROCESSOR_BDVER1:
1076 patt = alt_short_patt;
1077 break;
1078 case PROCESSOR_I386:
1079 case PROCESSOR_I486:
1080 case PROCESSOR_PENTIUM:
1081 case PROCESSOR_GENERIC32:
1082 patt = f32_patt;
1083 break;
1086 else
1088 switch (fragP->tc_frag_data.tune)
1090 case PROCESSOR_UNKNOWN:
1091 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1092 PROCESSOR_UNKNOWN. */
1093 abort ();
1094 break;
1096 case PROCESSOR_I386:
1097 case PROCESSOR_I486:
1098 case PROCESSOR_PENTIUM:
1099 case PROCESSOR_K6:
1100 case PROCESSOR_ATHLON:
1101 case PROCESSOR_K8:
1102 case PROCESSOR_AMDFAM10:
1103 case PROCESSOR_BDVER1:
1104 case PROCESSOR_GENERIC32:
1105 /* We use cpu_arch_isa_flags to check if we CAN optimize
1106 with nops. */
1107 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1108 patt = alt_short_patt;
1109 else
1110 patt = f32_patt;
1111 break;
1112 case PROCESSOR_PENTIUMPRO:
1113 case PROCESSOR_PENTIUM4:
1114 case PROCESSOR_NOCONA:
1115 case PROCESSOR_CORE:
1116 case PROCESSOR_CORE2:
1117 case PROCESSOR_COREI7:
1118 case PROCESSOR_L1OM:
1119 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1120 patt = alt_long_patt;
1121 else
1122 patt = f32_patt;
1123 break;
1124 case PROCESSOR_GENERIC64:
1125 patt = alt_long_patt;
1126 break;
1130 if (patt == f32_patt)
1132 /* If the padding is less than 15 bytes, we use the normal
1133 ones. Otherwise, we use a jump instruction and adjust
1134 its offset. */
1135 int limit;
1137 /* For 64bit, the limit is 3 bytes. */
1138 if (flag_code == CODE_64BIT
1139 && fragP->tc_frag_data.isa_flags.bitfield.cpulm)
1140 limit = 3;
1141 else
1142 limit = 15;
1143 if (count < limit)
1144 memcpy (fragP->fr_literal + fragP->fr_fix,
1145 patt[count - 1], count);
1146 else
1148 memcpy (fragP->fr_literal + fragP->fr_fix,
1149 jump_31, count);
1150 /* Adjust jump offset. */
1151 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1154 else
1156 /* Maximum length of an instruction is 15 bytes. If the
1157 padding is greater than 15 bytes and we don't use a jump,
1158 we have to break it into smaller pieces. */
1159 int padding = count;
1160 while (padding > 15)
1162 padding -= 15;
1163 memcpy (fragP->fr_literal + fragP->fr_fix + padding,
1164 patt [14], 15);
1167 if (padding)
1168 memcpy (fragP->fr_literal + fragP->fr_fix,
1169 patt [padding - 1], padding);
1172 fragP->fr_var = count;
1175 static INLINE int
1176 operand_type_all_zero (const union i386_operand_type *x)
1178 switch (ARRAY_SIZE(x->array))
1180 case 3:
1181 if (x->array[2])
1182 return 0;
1183 case 2:
1184 if (x->array[1])
1185 return 0;
1186 case 1:
1187 return !x->array[0];
1188 default:
1189 abort ();
1193 static INLINE void
1194 operand_type_set (union i386_operand_type *x, unsigned int v)
1196 switch (ARRAY_SIZE(x->array))
1198 case 3:
1199 x->array[2] = v;
1200 case 2:
1201 x->array[1] = v;
1202 case 1:
1203 x->array[0] = v;
1204 break;
1205 default:
1206 abort ();
1210 static INLINE int
1211 operand_type_equal (const union i386_operand_type *x,
1212 const union i386_operand_type *y)
1214 switch (ARRAY_SIZE(x->array))
1216 case 3:
1217 if (x->array[2] != y->array[2])
1218 return 0;
1219 case 2:
1220 if (x->array[1] != y->array[1])
1221 return 0;
1222 case 1:
1223 return x->array[0] == y->array[0];
1224 break;
1225 default:
1226 abort ();
1230 static INLINE int
1231 cpu_flags_all_zero (const union i386_cpu_flags *x)
1233 switch (ARRAY_SIZE(x->array))
1235 case 3:
1236 if (x->array[2])
1237 return 0;
1238 case 2:
1239 if (x->array[1])
1240 return 0;
1241 case 1:
1242 return !x->array[0];
1243 default:
1244 abort ();
1248 static INLINE void
1249 cpu_flags_set (union i386_cpu_flags *x, unsigned int v)
1251 switch (ARRAY_SIZE(x->array))
1253 case 3:
1254 x->array[2] = v;
1255 case 2:
1256 x->array[1] = v;
1257 case 1:
1258 x->array[0] = v;
1259 break;
1260 default:
1261 abort ();
1265 static INLINE int
1266 cpu_flags_equal (const union i386_cpu_flags *x,
1267 const union i386_cpu_flags *y)
1269 switch (ARRAY_SIZE(x->array))
1271 case 3:
1272 if (x->array[2] != y->array[2])
1273 return 0;
1274 case 2:
1275 if (x->array[1] != y->array[1])
1276 return 0;
1277 case 1:
1278 return x->array[0] == y->array[0];
1279 break;
1280 default:
1281 abort ();
1285 static INLINE int
1286 cpu_flags_check_cpu64 (i386_cpu_flags f)
1288 return !((flag_code == CODE_64BIT && f.bitfield.cpuno64)
1289 || (flag_code != CODE_64BIT && f.bitfield.cpu64));
1292 static INLINE i386_cpu_flags
1293 cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y)
1295 switch (ARRAY_SIZE (x.array))
1297 case 3:
1298 x.array [2] &= y.array [2];
1299 case 2:
1300 x.array [1] &= y.array [1];
1301 case 1:
1302 x.array [0] &= y.array [0];
1303 break;
1304 default:
1305 abort ();
1307 return x;
1310 static INLINE i386_cpu_flags
1311 cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y)
1313 switch (ARRAY_SIZE (x.array))
1315 case 3:
1316 x.array [2] |= y.array [2];
1317 case 2:
1318 x.array [1] |= y.array [1];
1319 case 1:
1320 x.array [0] |= y.array [0];
1321 break;
1322 default:
1323 abort ();
1325 return x;
1328 static INLINE i386_cpu_flags
1329 cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
1331 switch (ARRAY_SIZE (x.array))
1333 case 3:
1334 x.array [2] &= ~y.array [2];
1335 case 2:
1336 x.array [1] &= ~y.array [1];
1337 case 1:
1338 x.array [0] &= ~y.array [0];
1339 break;
1340 default:
1341 abort ();
1343 return x;
1346 #define CPU_FLAGS_ARCH_MATCH 0x1
1347 #define CPU_FLAGS_64BIT_MATCH 0x2
1348 #define CPU_FLAGS_AES_MATCH 0x4
1349 #define CPU_FLAGS_PCLMUL_MATCH 0x8
1350 #define CPU_FLAGS_AVX_MATCH 0x10
1352 #define CPU_FLAGS_32BIT_MATCH \
1353 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
1354 | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
1355 #define CPU_FLAGS_PERFECT_MATCH \
1356 (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
1358 /* Return CPU flags match bits. */
1360 static int
1361 cpu_flags_match (const insn_template *t)
1363 i386_cpu_flags x = t->cpu_flags;
1364 int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;
1366 x.bitfield.cpu64 = 0;
1367 x.bitfield.cpuno64 = 0;
1369 if (cpu_flags_all_zero (&x))
1371 /* This instruction is available on all archs. */
1372 match |= CPU_FLAGS_32BIT_MATCH;
1374 else
1376 /* This instruction is available only on some archs. */
1377 i386_cpu_flags cpu = cpu_arch_flags;
1379 cpu.bitfield.cpu64 = 0;
1380 cpu.bitfield.cpuno64 = 0;
1381 cpu = cpu_flags_and (x, cpu);
1382 if (!cpu_flags_all_zero (&cpu))
1384 if (x.bitfield.cpuavx)
1386 /* We only need to check AES/PCLMUL/SSE2AVX with AVX. */
1387 if (cpu.bitfield.cpuavx)
1389 /* Check SSE2AVX. */
1390 if (!t->opcode_modifier.sse2avx || sse2avx)
1392 match |= (CPU_FLAGS_ARCH_MATCH
1393 | CPU_FLAGS_AVX_MATCH);
1394 /* Check AES. */
1395 if (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
1396 match |= CPU_FLAGS_AES_MATCH;
1397 /* Check PCLMUL. */
1398 if (!x.bitfield.cpupclmul
1399 || cpu.bitfield.cpupclmul)
1400 match |= CPU_FLAGS_PCLMUL_MATCH;
1403 else
1404 match |= CPU_FLAGS_ARCH_MATCH;
1406 else
1407 match |= CPU_FLAGS_32BIT_MATCH;
1410 return match;
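/* For example, a template whose cpu_flags are all zero apart from the
   cpu64/cpuno64 bits matches everywhere: the 64-bit check above yields
   CPU_FLAGS_64BIT_MATCH (when not excluded for the current flag_code),
   and the all-zero path adds CPU_FLAGS_32BIT_MATCH, giving
   CPU_FLAGS_PERFECT_MATCH.  */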
1413 static INLINE i386_operand_type
1414 operand_type_and (i386_operand_type x, i386_operand_type y)
1416 switch (ARRAY_SIZE (x.array))
1418 case 3:
1419 x.array [2] &= y.array [2];
1420 case 2:
1421 x.array [1] &= y.array [1];
1422 case 1:
1423 x.array [0] &= y.array [0];
1424 break;
1425 default:
1426 abort ();
1428 return x;
1431 static INLINE i386_operand_type
1432 operand_type_or (i386_operand_type x, i386_operand_type y)
1434 switch (ARRAY_SIZE (x.array))
1436 case 3:
1437 x.array [2] |= y.array [2];
1438 case 2:
1439 x.array [1] |= y.array [1];
1440 case 1:
1441 x.array [0] |= y.array [0];
1442 break;
1443 default:
1444 abort ();
1446 return x;
1449 static INLINE i386_operand_type
1450 operand_type_xor (i386_operand_type x, i386_operand_type y)
1452 switch (ARRAY_SIZE (x.array))
1454 case 3:
1455 x.array [2] ^= y.array [2];
1456 case 2:
1457 x.array [1] ^= y.array [1];
1458 case 1:
1459 x.array [0] ^= y.array [0];
1460 break;
1461 default:
1462 abort ();
1464 return x;
1467 static const i386_operand_type acc32 = OPERAND_TYPE_ACC32;
1468 static const i386_operand_type acc64 = OPERAND_TYPE_ACC64;
1469 static const i386_operand_type control = OPERAND_TYPE_CONTROL;
1470 static const i386_operand_type inoutportreg
1471 = OPERAND_TYPE_INOUTPORTREG;
1472 static const i386_operand_type reg16_inoutportreg
1473 = OPERAND_TYPE_REG16_INOUTPORTREG;
1474 static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
1475 static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
1476 static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
1477 static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
1478 static const i386_operand_type anydisp
1479 = OPERAND_TYPE_ANYDISP;
1480 static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
1481 static const i386_operand_type regymm = OPERAND_TYPE_REGYMM;
1482 static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
1483 static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
1484 static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
1485 static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
1486 static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
1487 static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
1488 static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
1489 static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
1490 static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
1491 static const i386_operand_type vec_imm4 = OPERAND_TYPE_VEC_IMM4;
1493 enum operand_type
1495 reg,
1496 imm,
1497 disp,
1498 anymem
1501 static INLINE int
1502 operand_type_check (i386_operand_type t, enum operand_type c)
1504 switch (c)
1506 case reg:
1507 return (t.bitfield.reg8
1508 || t.bitfield.reg16
1509 || t.bitfield.reg32
1510 || t.bitfield.reg64);
1512 case imm:
1513 return (t.bitfield.imm8
1514 || t.bitfield.imm8s
1515 || t.bitfield.imm16
1516 || t.bitfield.imm32
1517 || t.bitfield.imm32s
1518 || t.bitfield.imm64);
1520 case disp:
1521 return (t.bitfield.disp8
1522 || t.bitfield.disp16
1523 || t.bitfield.disp32
1524 || t.bitfield.disp32s
1525 || t.bitfield.disp64);
1527 case anymem:
1528 return (t.bitfield.disp8
1529 || t.bitfield.disp16
1530 || t.bitfield.disp32
1531 || t.bitfield.disp32s
1532 || t.bitfield.disp64
1533 || t.bitfield.baseindex);
1535 default:
1536 abort ();
1539 return 0;
1542 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
1543 operand J for instruction template T. */
1545 static INLINE int
1546 match_reg_size (const insn_template *t, unsigned int j)
1548 return !((i.types[j].bitfield.byte
1549 && !t->operand_types[j].bitfield.byte)
1550 || (i.types[j].bitfield.word
1551 && !t->operand_types[j].bitfield.word)
1552 || (i.types[j].bitfield.dword
1553 && !t->operand_types[j].bitfield.dword)
1554 || (i.types[j].bitfield.qword
1555 && !t->operand_types[j].bitfield.qword));
1558 /* Return 1 if there is no conflict in any size on operand J for
1559 instruction template T. */
1561 static INLINE int
1562 match_mem_size (const insn_template *t, unsigned int j)
1564 return (match_reg_size (t, j)
1565 && !((i.types[j].bitfield.unspecified
1566 && !t->operand_types[j].bitfield.unspecified)
1567 || (i.types[j].bitfield.fword
1568 && !t->operand_types[j].bitfield.fword)
1569 || (i.types[j].bitfield.tbyte
1570 && !t->operand_types[j].bitfield.tbyte)
1571 || (i.types[j].bitfield.xmmword
1572 && !t->operand_types[j].bitfield.xmmword)
1573 || (i.types[j].bitfield.ymmword
1574 && !t->operand_types[j].bitfield.ymmword)));
1577 /* Return 1 if there is no size conflict on any operands for
1578 instruction template T. */
1580 static INLINE int
1581 operand_size_match (const insn_template *t)
1583 unsigned int j;
1584 int match = 1;
1586 /* Don't check jump instructions. */
1587 if (t->opcode_modifier.jump
1588 || t->opcode_modifier.jumpbyte
1589 || t->opcode_modifier.jumpdword
1590 || t->opcode_modifier.jumpintersegment)
1591 return match;
1593 /* Check memory and accumulator operand size. */
1594 for (j = 0; j < i.operands; j++)
1596 if (t->operand_types[j].bitfield.anysize)
1597 continue;
1599 if (t->operand_types[j].bitfield.acc && !match_reg_size (t, j))
1601 match = 0;
1602 break;
1605 if (i.types[j].bitfield.mem && !match_mem_size (t, j))
1607 match = 0;
1608 break;
1612 if (match)
1613 return match;
1614 else if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
1616 mismatch:
1617 i.error = operand_size_mismatch;
1618 return 0;
1621 /* Check reverse. */
1622 gas_assert (i.operands == 2);
1624 match = 1;
1625 for (j = 0; j < 2; j++)
1627 if (t->operand_types[j].bitfield.acc
1628 && !match_reg_size (t, j ? 0 : 1))
1629 goto mismatch;
1631 if (i.types[j].bitfield.mem
1632 && !match_mem_size (t, j ? 0 : 1))
1633 goto mismatch;
1636 return match;
1639 static INLINE int
1640 operand_type_match (i386_operand_type overlap,
1641 i386_operand_type given)
1643 i386_operand_type temp = overlap;
1645 temp.bitfield.jumpabsolute = 0;
1646 temp.bitfield.unspecified = 0;
1647 temp.bitfield.byte = 0;
1648 temp.bitfield.word = 0;
1649 temp.bitfield.dword = 0;
1650 temp.bitfield.fword = 0;
1651 temp.bitfield.qword = 0;
1652 temp.bitfield.tbyte = 0;
1653 temp.bitfield.xmmword = 0;
1654 temp.bitfield.ymmword = 0;
1655 if (operand_type_all_zero (&temp))
1656 goto mismatch;
1658 if (given.bitfield.baseindex == overlap.bitfield.baseindex
1659 && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute)
1660 return 1;
1662 mismatch:
1663 i.error = operand_type_mismatch;
1664 return 0;
1667 /* If given types g0 and g1 are registers they must be of the same type
1668 unless the expected operand type register overlap is null.
1669 Note that Acc in a template matches every size of reg. */
1671 static INLINE int
1672 operand_type_register_match (i386_operand_type m0,
1673 i386_operand_type g0,
1674 i386_operand_type t0,
1675 i386_operand_type m1,
1676 i386_operand_type g1,
1677 i386_operand_type t1)
1679 if (!operand_type_check (g0, reg))
1680 return 1;
1682 if (!operand_type_check (g1, reg))
1683 return 1;
1685 if (g0.bitfield.reg8 == g1.bitfield.reg8
1686 && g0.bitfield.reg16 == g1.bitfield.reg16
1687 && g0.bitfield.reg32 == g1.bitfield.reg32
1688 && g0.bitfield.reg64 == g1.bitfield.reg64)
1689 return 1;
1691 if (m0.bitfield.acc)
1693 t0.bitfield.reg8 = 1;
1694 t0.bitfield.reg16 = 1;
1695 t0.bitfield.reg32 = 1;
1696 t0.bitfield.reg64 = 1;
1699 if (m1.bitfield.acc)
1701 t1.bitfield.reg8 = 1;
1702 t1.bitfield.reg16 = 1;
1703 t1.bitfield.reg32 = 1;
1704 t1.bitfield.reg64 = 1;
1707 if (!(t0.bitfield.reg8 & t1.bitfield.reg8)
1708 && !(t0.bitfield.reg16 & t1.bitfield.reg16)
1709 && !(t0.bitfield.reg32 & t1.bitfield.reg32)
1710 && !(t0.bitfield.reg64 & t1.bitfield.reg64))
1711 return 1;
1713 i.error = register_type_mismatch;
1715 return 0;
1718 static INLINE unsigned int
1719 mode_from_disp_size (i386_operand_type t)
1721 if (t.bitfield.disp8)
1722 return 1;
1723 else if (t.bitfield.disp16
1724 || t.bitfield.disp32
1725 || t.bitfield.disp32s)
1726 return 2;
1727 else
1728 return 0;
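/* The return values line up with the ModRM mod field used when building
   the modrm byte: 1 selects an 8-bit displacement, 2 a 16/32-bit one, and
   0 means no displacement size was requested.  */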
1731 static INLINE int
1732 fits_in_signed_byte (offsetT num)
1734 return (num >= -128) && (num <= 127);
1737 static INLINE int
1738 fits_in_unsigned_byte (offsetT num)
1740 return (num & 0xff) == num;
1743 static INLINE int
1744 fits_in_unsigned_word (offsetT num)
1746 return (num & 0xffff) == num;
1749 static INLINE int
1750 fits_in_signed_word (offsetT num)
1752 return (-32768 <= num) && (num <= 32767);
1755 static INLINE int
1756 fits_in_signed_long (offsetT num ATTRIBUTE_UNUSED)
1758 #ifndef BFD64
1759 return 1;
1760 #else
1761 return (!(((offsetT) -1 << 31) & num)
1762 || (((offsetT) -1 << 31) & num) == ((offsetT) -1 << 31));
1763 #endif
1764 } /* fits_in_signed_long() */
1766 static INLINE int
1767 fits_in_unsigned_long (offsetT num ATTRIBUTE_UNUSED)
1769 #ifndef BFD64
1770 return 1;
1771 #else
1772 return (num & (((offsetT) 2 << 31) - 1)) == num;
1773 #endif
1774 } /* fits_in_unsigned_long() */
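/* Example values (assuming BFD64, i.e. a 64-bit offsetT):
     fits_in_signed_long (0x7fffffff)              -> 1
     fits_in_signed_long ((offsetT) 0xffffffff)    -> 0
     fits_in_unsigned_long ((offsetT) 0xffffffff)  -> 1
     fits_in_signed_long (-1)                      -> 1, while
     fits_in_unsigned_long (-1)                    -> 0.  */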
1776 static INLINE int
1777 fits_in_imm4 (offsetT num)
1779 return (num & 0xf) == num;
1782 static i386_operand_type
1783 smallest_imm_type (offsetT num)
1785 i386_operand_type t;
1787 operand_type_set (&t, 0);
1788 t.bitfield.imm64 = 1;
1790 if (cpu_arch_tune != PROCESSOR_I486 && num == 1)
1792 /* This code is disabled on the 486 because all the Imm1 forms
1793 in the opcode table are slower on the i486. They're the
1794 versions with the implicitly specified single-position
1795 displacement, which has another syntax if you really want to
1796 use that form. */
1797 t.bitfield.imm1 = 1;
1798 t.bitfield.imm8 = 1;
1799 t.bitfield.imm8s = 1;
1800 t.bitfield.imm16 = 1;
1801 t.bitfield.imm32 = 1;
1802 t.bitfield.imm32s = 1;
1804 else if (fits_in_signed_byte (num))
1806 t.bitfield.imm8 = 1;
1807 t.bitfield.imm8s = 1;
1808 t.bitfield.imm16 = 1;
1809 t.bitfield.imm32 = 1;
1810 t.bitfield.imm32s = 1;
1812 else if (fits_in_unsigned_byte (num))
1814 t.bitfield.imm8 = 1;
1815 t.bitfield.imm16 = 1;
1816 t.bitfield.imm32 = 1;
1817 t.bitfield.imm32s = 1;
1819 else if (fits_in_signed_word (num) || fits_in_unsigned_word (num))
1821 t.bitfield.imm16 = 1;
1822 t.bitfield.imm32 = 1;
1823 t.bitfield.imm32s = 1;
1825 else if (fits_in_signed_long (num))
1827 t.bitfield.imm32 = 1;
1828 t.bitfield.imm32s = 1;
1830 else if (fits_in_unsigned_long (num))
1831 t.bitfield.imm32 = 1;
1833 return t;
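/* For example, smallest_imm_type (200) leaves imm64 set and, since 200
   fits in an unsigned byte but not a signed one, additionally sets imm8,
   imm16, imm32 and imm32s (but not imm8s or imm1).  */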
1836 static offsetT
1837 offset_in_range (offsetT val, int size)
1839 addressT mask;
1841 switch (size)
1843 case 1: mask = ((addressT) 1 << 8) - 1; break;
1844 case 2: mask = ((addressT) 1 << 16) - 1; break;
1845 case 4: mask = ((addressT) 2 << 31) - 1; break;
1846 #ifdef BFD64
1847 case 8: mask = ((addressT) 2 << 63) - 1; break;
1848 #endif
1849 default: abort ();
1852 #ifdef BFD64
1853 /* If BFD64, sign extend val for 32bit address mode. */
1854 if (flag_code != CODE_64BIT
1855 || i.prefix[ADDR_PREFIX])
1856 if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
1857 val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
1858 #endif
1860 if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
1862 char buf1[40], buf2[40];
1864 sprint_value (buf1, val);
1865 sprint_value (buf2, val & mask);
1866 as_warn (_("%s shortened to %s"), buf1, buf2);
1868 return val & mask;
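/* For example, offset_in_range (0x12345, 2) masks with 0xffff, warns that
   the value was shortened (via the as_warn above), and returns 0x2345;
   the exact message text depends on how sprint_value renders the
   numbers.  */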
1871 enum PREFIX_GROUP
1873 PREFIX_EXIST = 0,
1874 PREFIX_LOCK,
1875 PREFIX_REP,
1876 PREFIX_OTHER
1879 /* Returns
1880 a. PREFIX_EXIST if attempting to add a prefix where one from the
1881 same class already exists.
1882 b. PREFIX_LOCK if lock prefix is added.
1883 c. PREFIX_REP if rep/repne prefix is added.
1884 d. PREFIX_OTHER if other prefix is added.
1887 static enum PREFIX_GROUP
1888 add_prefix (unsigned int prefix)
1890 enum PREFIX_GROUP ret = PREFIX_OTHER;
1891 unsigned int q;
1893 if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
1894 && flag_code == CODE_64BIT)
1896 if ((i.prefix[REX_PREFIX] & prefix & REX_W)
1897 || ((i.prefix[REX_PREFIX] & (REX_R | REX_X | REX_B))
1898 && (prefix & (REX_R | REX_X | REX_B))))
1899 ret = PREFIX_EXIST;
1900 q = REX_PREFIX;
1902 else
1904 switch (prefix)
1906 default:
1907 abort ();
1909 case CS_PREFIX_OPCODE:
1910 case DS_PREFIX_OPCODE:
1911 case ES_PREFIX_OPCODE:
1912 case FS_PREFIX_OPCODE:
1913 case GS_PREFIX_OPCODE:
1914 case SS_PREFIX_OPCODE:
1915 q = SEG_PREFIX;
1916 break;
1918 case REPNE_PREFIX_OPCODE:
1919 case REPE_PREFIX_OPCODE:
1920 q = REP_PREFIX;
1921 ret = PREFIX_REP;
1922 break;
1924 case LOCK_PREFIX_OPCODE:
1925 q = LOCK_PREFIX;
1926 ret = PREFIX_LOCK;
1927 break;
1929 case FWAIT_OPCODE:
1930 q = WAIT_PREFIX;
1931 break;
1933 case ADDR_PREFIX_OPCODE:
1934 q = ADDR_PREFIX;
1935 break;
1937 case DATA_PREFIX_OPCODE:
1938 q = DATA_PREFIX;
1939 break;
1941 if (i.prefix[q] != 0)
1942 ret = PREFIX_EXIST;
1945 if (ret)
1947 if (!i.prefix[q])
1948 ++i.prefixes;
1949 i.prefix[q] |= prefix;
1951 else
1952 as_bad (_("same type of prefix used twice"));
1954 return ret;
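/* For example, calling add_prefix (DATA_PREFIX_OPCODE) twice on the same
   insn stores 0x66 in the DATA_PREFIX slot the first time and returns
   PREFIX_OTHER; the second call finds the slot occupied, reports
   "same type of prefix used twice" and returns PREFIX_EXIST.  */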
1957 static void
1958 update_code_flag (int value, int check)
1960 PRINTF_LIKE ((*as_error));
1962 flag_code = (enum flag_code) value;
1963 if (flag_code == CODE_64BIT)
1965 cpu_arch_flags.bitfield.cpu64 = 1;
1966 cpu_arch_flags.bitfield.cpuno64 = 0;
1968 else
1970 cpu_arch_flags.bitfield.cpu64 = 0;
1971 cpu_arch_flags.bitfield.cpuno64 = 1;
1973 if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm)
1975 if (check)
1976 as_error = as_fatal;
1977 else
1978 as_error = as_bad;
1979 (*as_error) (_("64bit mode not supported on `%s'."),
1980 cpu_arch_name ? cpu_arch_name : default_arch);
1982 if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
1984 if (check)
1985 as_error = as_fatal;
1986 else
1987 as_error = as_bad;
1988 (*as_error) (_("32bit mode not supported on `%s'."),
1989 cpu_arch_name ? cpu_arch_name : default_arch);
1991 stackop_size = '\0';
1994 static void
1995 set_code_flag (int value)
1997 update_code_flag (value, 0);
2000 static void
2001 set_16bit_gcc_code_flag (int new_code_flag)
2003 flag_code = (enum flag_code) new_code_flag;
2004 if (flag_code != CODE_16BIT)
2005 abort ();
2006 cpu_arch_flags.bitfield.cpu64 = 0;
2007 cpu_arch_flags.bitfield.cpuno64 = 1;
2008 stackop_size = LONG_MNEM_SUFFIX;
2011 static void
2012 set_intel_syntax (int syntax_flag)
2014 /* Find out if register prefixing is specified. */
2015 int ask_naked_reg = 0;
2017 SKIP_WHITESPACE ();
2018 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2020 char *string = input_line_pointer;
2021 int e = get_symbol_end ();
2023 if (strcmp (string, "prefix") == 0)
2024 ask_naked_reg = 1;
2025 else if (strcmp (string, "noprefix") == 0)
2026 ask_naked_reg = -1;
2027 else
2028 as_bad (_("bad argument to syntax directive."));
2029 *input_line_pointer = e;
2031 demand_empty_rest_of_line ();
2033 intel_syntax = syntax_flag;
2035 if (ask_naked_reg == 0)
2036 allow_naked_reg = (intel_syntax
2037 && (bfd_get_symbol_leading_char (stdoutput) != '\0'));
2038 else
2039 allow_naked_reg = (ask_naked_reg < 0);
2041 expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);
2043 identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
2044 identifier_chars['$'] = intel_syntax ? '$' : 0;
2045 register_prefix = allow_naked_reg ? "" : "%";
2048 static void
2049 set_intel_mnemonic (int mnemonic_flag)
2051 intel_mnemonic = mnemonic_flag;
2054 static void
2055 set_allow_index_reg (int flag)
2057 allow_index_reg = flag;
2060 static void
2061 set_sse_check (int dummy ATTRIBUTE_UNUSED)
2063 SKIP_WHITESPACE ();
2065 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2067 char *string = input_line_pointer;
2068 int e = get_symbol_end ();
2070 if (strcmp (string, "none") == 0)
2071 sse_check = sse_check_none;
2072 else if (strcmp (string, "warning") == 0)
2073 sse_check = sse_check_warning;
2074 else if (strcmp (string, "error") == 0)
2075 sse_check = sse_check_error;
2076 else
2077 as_bad (_("bad argument to sse_check directive."));
2078 *input_line_pointer = e;
2080 else
2081 as_bad (_("missing argument for sse_check directive"));
2083 demand_empty_rest_of_line ();
2086 static void
2087 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
2088 i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
2090 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2091 static const char *arch;
2093 /* Intel L1OM is only supported on ELF. */
2094 if (!IS_ELF)
2095 return;
2097 if (!arch)
2099 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2100 use default_arch. */
2101 arch = cpu_arch_name;
2102 if (!arch)
2103 arch = default_arch;
2106 /* If we are targeting Intel L1OM, we must enable it. */
2107 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
2108 || new_flag.bitfield.cpul1om)
2109 return;
2111 as_bad (_("`%s' is not supported on `%s'"), name, arch);
2112 #endif
2115 static void
2116 set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
2118 SKIP_WHITESPACE ();
2120 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2122 char *string = input_line_pointer;
2123 int e = get_symbol_end ();
2124 unsigned int j;
2125 i386_cpu_flags flags;
2127 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
2129 if (strcmp (string, cpu_arch[j].name) == 0)
2131 check_cpu_arch_compatible (string, cpu_arch[j].flags);
2133 if (*string != '.')
2135 cpu_arch_name = cpu_arch[j].name;
2136 cpu_sub_arch_name = NULL;
2137 cpu_arch_flags = cpu_arch[j].flags;
2138 if (flag_code == CODE_64BIT)
2140 cpu_arch_flags.bitfield.cpu64 = 1;
2141 cpu_arch_flags.bitfield.cpuno64 = 0;
2143 else
2145 cpu_arch_flags.bitfield.cpu64 = 0;
2146 cpu_arch_flags.bitfield.cpuno64 = 1;
2148 cpu_arch_isa = cpu_arch[j].type;
2149 cpu_arch_isa_flags = cpu_arch[j].flags;
2150 if (!cpu_arch_tune_set)
2152 cpu_arch_tune = cpu_arch_isa;
2153 cpu_arch_tune_flags = cpu_arch_isa_flags;
2155 break;
2158 if (!cpu_arch[j].negated)
2159 flags = cpu_flags_or (cpu_arch_flags,
2160 cpu_arch[j].flags);
2161 else
2162 flags = cpu_flags_and_not (cpu_arch_flags,
2163 cpu_arch[j].flags);
2164 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
2166 if (cpu_sub_arch_name)
2168 char *name = cpu_sub_arch_name;
2169 cpu_sub_arch_name = concat (name,
2170 cpu_arch[j].name,
2171 (const char *) NULL);
2172 free (name);
2174 else
2175 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
2176 cpu_arch_flags = flags;
2178 *input_line_pointer = e;
2179 demand_empty_rest_of_line ();
2180 return;
2183 if (j >= ARRAY_SIZE (cpu_arch))
2184 as_bad (_("no such architecture: `%s'"), string);
2186 *input_line_pointer = e;
2188 else
2189 as_bad (_("missing cpu architecture"));
2191 no_cond_jump_promotion = 0;
2192 if (*input_line_pointer == ','
2193 && !is_end_of_line[(unsigned char) input_line_pointer[1]])
2195 char *string = ++input_line_pointer;
2196 int e = get_symbol_end ();
2198 if (strcmp (string, "nojumps") == 0)
2199 no_cond_jump_promotion = 1;
2200 else if (strcmp (string, "jumps") == 0)
2202 else
2203 as_bad (_("no such architecture modifier: `%s'"), string);
2205 *input_line_pointer = e;
2208 demand_empty_rest_of_line ();
2211 enum bfd_architecture
2212 i386_arch (void)
2214 if (cpu_arch_isa == PROCESSOR_L1OM)
2216 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2217 || flag_code != CODE_64BIT)
2218 as_fatal (_("Intel L1OM is 64bit ELF only"));
2219 return bfd_arch_l1om;
2221 else
2222 return bfd_arch_i386;
2225 unsigned long
2226 i386_mach ()
2228 if (!strncmp (default_arch, "x86_64", 6))
2230 if (cpu_arch_isa == PROCESSOR_L1OM)
2232 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2233 || default_arch[6] != '\0')
2234 as_fatal (_("Intel L1OM is 64bit ELF only"));
2235 return bfd_mach_l1om;
2237 else if (default_arch[6] == '\0')
2238 return bfd_mach_x86_64;
2239 else
2240 return bfd_mach_x64_32;
2242 else if (!strcmp (default_arch, "i386"))
2243 return bfd_mach_i386_i386;
2244 else
2245 as_fatal (_("Unknown architecture"));
2248 void
2249 md_begin ()
2251 const char *hash_err;
2253 /* Initialize op_hash hash table. */
2254 op_hash = hash_new ();
2257 const insn_template *optab;
2258 templates *core_optab;
2260 /* Setup for loop. */
2261 optab = i386_optab;
2262 core_optab = (templates *) xmalloc (sizeof (templates));
2263 core_optab->start = optab;
2265 while (1)
2267 ++optab;
2268 if (optab->name == NULL
2269 || strcmp (optab->name, (optab - 1)->name) != 0)
2271 /* different name --> ship out current template list;
2272 add to hash table; & begin anew. */
2273 core_optab->end = optab;
2274 hash_err = hash_insert (op_hash,
2275 (optab - 1)->name,
2276 (void *) core_optab);
2277 if (hash_err)
2279 as_fatal (_("Internal Error: Can't hash %s: %s"),
2280 (optab - 1)->name,
2281 hash_err);
2283 if (optab->name == NULL)
2284 break;
2285 core_optab = (templates *) xmalloc (sizeof (templates));
2286 core_optab->start = optab;
2291 /* Initialize reg_hash hash table. */
2292 reg_hash = hash_new ();
2294 const reg_entry *regtab;
2295 unsigned int regtab_size = i386_regtab_size;
2297 for (regtab = i386_regtab; regtab_size--; regtab++)
2299 hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
2300 if (hash_err)
2301 as_fatal (_("Internal Error: Can't hash %s: %s"),
2302 regtab->reg_name,
2303 hash_err);
2307 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
2309 int c;
2310 char *p;
2312 for (c = 0; c < 256; c++)
2314 if (ISDIGIT (c))
2316 digit_chars[c] = c;
2317 mnemonic_chars[c] = c;
2318 register_chars[c] = c;
2319 operand_chars[c] = c;
2321 else if (ISLOWER (c))
2323 mnemonic_chars[c] = c;
2324 register_chars[c] = c;
2325 operand_chars[c] = c;
2327 else if (ISUPPER (c))
2329 mnemonic_chars[c] = TOLOWER (c);
2330 register_chars[c] = mnemonic_chars[c];
2331 operand_chars[c] = c;
2334 if (ISALPHA (c) || ISDIGIT (c))
2335 identifier_chars[c] = c;
2336 else if (c >= 128)
2338 identifier_chars[c] = c;
2339 operand_chars[c] = c;
2343 #ifdef LEX_AT
2344 identifier_chars['@'] = '@';
2345 #endif
2346 #ifdef LEX_QM
2347 identifier_chars['?'] = '?';
2348 operand_chars['?'] = '?';
2349 #endif
2350 digit_chars['-'] = '-';
2351 mnemonic_chars['_'] = '_';
2352 mnemonic_chars['-'] = '-';
2353 mnemonic_chars['.'] = '.';
2354 identifier_chars['_'] = '_';
2355 identifier_chars['.'] = '.';
2357 for (p = operand_special_chars; *p != '\0'; p++)
2358 operand_chars[(unsigned char) *p] = *p;
2361 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2362 if (IS_ELF)
2364 record_alignment (text_section, 2);
2365 record_alignment (data_section, 2);
2366 record_alignment (bss_section, 2);
2368 #endif
2370 if (flag_code == CODE_64BIT)
2372 x86_dwarf2_return_column = 16;
2373 x86_cie_data_alignment = -8;
2375 else
2377 x86_dwarf2_return_column = 8;
2378 x86_cie_data_alignment = -4;
2382 void
2383 i386_print_statistics (FILE *file)
2385 hash_print_statistics (file, "i386 opcode", op_hash);
2386 hash_print_statistics (file, "i386 register", reg_hash);
2389 #ifdef DEBUG386
2391 /* Debugging routines for md_assemble. */
2392 static void pte (insn_template *);
2393 static void pt (i386_operand_type);
2394 static void pe (expressionS *);
2395 static void ps (symbolS *);
2397 static void
2398 pi (char *line, i386_insn *x)
2400 unsigned int j;
2402 fprintf (stdout, "%s: template ", line);
2403 pte (&x->tm);
2404 fprintf (stdout, " address: base %s index %s scale %x\n",
2405 x->base_reg ? x->base_reg->reg_name : "none",
2406 x->index_reg ? x->index_reg->reg_name : "none",
2407 x->log2_scale_factor);
2408 fprintf (stdout, " modrm: mode %x reg %x reg/mem %x\n",
2409 x->rm.mode, x->rm.reg, x->rm.regmem);
2410 fprintf (stdout, " sib: base %x index %x scale %x\n",
2411 x->sib.base, x->sib.index, x->sib.scale);
2412 fprintf (stdout, " rex: 64bit %x extX %x extY %x extZ %x\n",
2413 (x->rex & REX_W) != 0,
2414 (x->rex & REX_R) != 0,
2415 (x->rex & REX_X) != 0,
2416 (x->rex & REX_B) != 0);
2417 for (j = 0; j < x->operands; j++)
2419 fprintf (stdout, " #%d: ", j + 1);
2420 pt (x->types[j]);
2421 fprintf (stdout, "\n");
2422 if (x->types[j].bitfield.reg8
2423 || x->types[j].bitfield.reg16
2424 || x->types[j].bitfield.reg32
2425 || x->types[j].bitfield.reg64
2426 || x->types[j].bitfield.regmmx
2427 || x->types[j].bitfield.regxmm
2428 || x->types[j].bitfield.regymm
2429 || x->types[j].bitfield.sreg2
2430 || x->types[j].bitfield.sreg3
2431 || x->types[j].bitfield.control
2432 || x->types[j].bitfield.debug
2433 || x->types[j].bitfield.test)
2434 fprintf (stdout, "%s\n", x->op[j].regs->reg_name);
2435 if (operand_type_check (x->types[j], imm))
2436 pe (x->op[j].imms);
2437 if (operand_type_check (x->types[j], disp))
2438 pe (x->op[j].disps);
2442 static void
2443 pte (insn_template *t)
2445 unsigned int j;
2446 fprintf (stdout, " %d operands ", t->operands);
2447 fprintf (stdout, "opcode %x ", t->base_opcode);
2448 if (t->extension_opcode != None)
2449 fprintf (stdout, "ext %x ", t->extension_opcode);
2450 if (t->opcode_modifier.d)
2451 fprintf (stdout, "D");
2452 if (t->opcode_modifier.w)
2453 fprintf (stdout, "W");
2454 fprintf (stdout, "\n");
2455 for (j = 0; j < t->operands; j++)
2457 fprintf (stdout, " #%d type ", j + 1);
2458 pt (t->operand_types[j]);
2459 fprintf (stdout, "\n");
2463 static void
2464 pe (expressionS *e)
2466 fprintf (stdout, " operation %d\n", e->X_op);
2467 fprintf (stdout, " add_number %ld (%lx)\n",
2468 (long) e->X_add_number, (long) e->X_add_number);
2469 if (e->X_add_symbol)
2471 fprintf (stdout, " add_symbol ");
2472 ps (e->X_add_symbol);
2473 fprintf (stdout, "\n");
2475 if (e->X_op_symbol)
2477 fprintf (stdout, " op_symbol ");
2478 ps (e->X_op_symbol);
2479 fprintf (stdout, "\n");
2483 static void
2484 ps (symbolS *s)
2486 fprintf (stdout, "%s type %s%s",
2487 S_GET_NAME (s),
2488 S_IS_EXTERNAL (s) ? "EXTERNAL " : "",
2489 segment_name (S_GET_SEGMENT (s)));
2492 static struct type_name
2494 i386_operand_type mask;
2495 const char *name;
2497 const type_names[] =
2499 { OPERAND_TYPE_REG8, "r8" },
2500 { OPERAND_TYPE_REG16, "r16" },
2501 { OPERAND_TYPE_REG32, "r32" },
2502 { OPERAND_TYPE_REG64, "r64" },
2503 { OPERAND_TYPE_IMM8, "i8" },
2504 { OPERAND_TYPE_IMM8, "i8s" },
2505 { OPERAND_TYPE_IMM16, "i16" },
2506 { OPERAND_TYPE_IMM32, "i32" },
2507 { OPERAND_TYPE_IMM32S, "i32s" },
2508 { OPERAND_TYPE_IMM64, "i64" },
2509 { OPERAND_TYPE_IMM1, "i1" },
2510 { OPERAND_TYPE_BASEINDEX, "BaseIndex" },
2511 { OPERAND_TYPE_DISP8, "d8" },
2512 { OPERAND_TYPE_DISP16, "d16" },
2513 { OPERAND_TYPE_DISP32, "d32" },
2514 { OPERAND_TYPE_DISP32S, "d32s" },
2515 { OPERAND_TYPE_DISP64, "d64" },
2516 { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
2517 { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
2518 { OPERAND_TYPE_CONTROL, "control reg" },
2519 { OPERAND_TYPE_TEST, "test reg" },
2520 { OPERAND_TYPE_DEBUG, "debug reg" },
2521 { OPERAND_TYPE_FLOATREG, "FReg" },
2522 { OPERAND_TYPE_FLOATACC, "FAcc" },
2523 { OPERAND_TYPE_SREG2, "SReg2" },
2524 { OPERAND_TYPE_SREG3, "SReg3" },
2525 { OPERAND_TYPE_ACC, "Acc" },
2526 { OPERAND_TYPE_JUMPABSOLUTE, "Jump Absolute" },
2527 { OPERAND_TYPE_REGMMX, "rMMX" },
2528 { OPERAND_TYPE_REGXMM, "rXMM" },
2529 { OPERAND_TYPE_REGYMM, "rYMM" },
2530 { OPERAND_TYPE_ESSEG, "es" },
2533 static void
2534 pt (i386_operand_type t)
2536 unsigned int j;
2537 i386_operand_type a;
2539 for (j = 0; j < ARRAY_SIZE (type_names); j++)
2541 a = operand_type_and (t, type_names[j].mask);
2542 if (!operand_type_all_zero (&a))
2543 fprintf (stdout, "%s, ", type_names[j].name);
2545 fflush (stdout);
2548 #endif /* DEBUG386 */
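/* Map a fixup of SIZE bytes (pc-relative if PCREL, signed if SIGN > 0)
   onto a BFD relocation type.  When OTHER is not NO_RELOC it names an
   explicitly requested relocation; for 8-byte fields several 32-bit
   GOT/TLS requests are widened to their 64-bit forms, and the request is
   checked against its howto (size, pc-relativeness, signedness) before
   being returned.  */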
2550 static bfd_reloc_code_real_type
2551 reloc (unsigned int size,
2552 int pcrel,
2553 int sign,
2554 bfd_reloc_code_real_type other)
2556 if (other != NO_RELOC)
2558 reloc_howto_type *rel;
2560 if (size == 8)
2561 switch (other)
2563 case BFD_RELOC_X86_64_GOT32:
2564 return BFD_RELOC_X86_64_GOT64;
2565 break;
2566 case BFD_RELOC_X86_64_PLTOFF64:
2567 return BFD_RELOC_X86_64_PLTOFF64;
2568 break;
2569 case BFD_RELOC_X86_64_GOTPC32:
2570 other = BFD_RELOC_X86_64_GOTPC64;
2571 break;
2572 case BFD_RELOC_X86_64_GOTPCREL:
2573 other = BFD_RELOC_X86_64_GOTPCREL64;
2574 break;
2575 case BFD_RELOC_X86_64_TPOFF32:
2576 other = BFD_RELOC_X86_64_TPOFF64;
2577 break;
2578 case BFD_RELOC_X86_64_DTPOFF32:
2579 other = BFD_RELOC_X86_64_DTPOFF64;
2580 break;
2581 default:
2582 break;
2585 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
2586 if (size == 4 && flag_code != CODE_64BIT)
2587 sign = -1;
2589 rel = bfd_reloc_type_lookup (stdoutput, other);
2590 if (!rel)
2591 as_bad (_("unknown relocation (%u)"), other);
2592 else if (size != bfd_get_reloc_size (rel))
2593 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
2594 bfd_get_reloc_size (rel),
2595 size);
2596 else if (pcrel && !rel->pc_relative)
2597 as_bad (_("non-pc-relative relocation for pc-relative field"));
2598 else if ((rel->complain_on_overflow == complain_overflow_signed
2599 && !sign)
2600 || (rel->complain_on_overflow == complain_overflow_unsigned
2601 && sign > 0))
2602 as_bad (_("relocated field and relocation type differ in signedness"));
2603 else
2604 return other;
2605 return NO_RELOC;
2608 if (pcrel)
2610 if (!sign)
2611 as_bad (_("there are no unsigned pc-relative relocations"));
2612 switch (size)
2614 case 1: return BFD_RELOC_8_PCREL;
2615 case 2: return BFD_RELOC_16_PCREL;
2616 case 4: return BFD_RELOC_32_PCREL;
2617 case 8: return BFD_RELOC_64_PCREL;
2619 as_bad (_("cannot do %u byte pc-relative relocation"), size);
2621 else
2623 if (sign > 0)
2624 switch (size)
2626 case 4: return BFD_RELOC_X86_64_32S;
2628 else
2629 switch (size)
2631 case 1: return BFD_RELOC_8;
2632 case 2: return BFD_RELOC_16;
2633 case 4: return BFD_RELOC_32;
2634 case 8: return BFD_RELOC_64;
2636 as_bad (_("cannot do %s %u byte relocation"),
2637 sign > 0 ? "signed" : "unsigned", size);
2640 return NO_RELOC;
2643 /* Here we decide which fixups can be adjusted to make them relative to
2644 the beginning of the section instead of the symbol. Basically we need
2645 to make sure that the dynamic relocations are done correctly, so in
2646 some cases we force the original symbol to be used. */
2649 tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED)
2651 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2652 if (!IS_ELF)
2653 return 1;
2655 /* Don't adjust pc-relative references to merge sections in 64-bit
2656 mode. */
2657 if (use_rela_relocations
2658 && (S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_MERGE) != 0
2659 && fixP->fx_pcrel)
2660 return 0;
2662 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
2663 and changed later by validate_fix. */
2664 if (GOT_symbol && fixP->fx_subsy == GOT_symbol
2665 && fixP->fx_r_type == BFD_RELOC_32_PCREL)
2666 return 0;
2668 /* adjust_reloc_syms doesn't know about the GOT. */
2669 if (fixP->fx_r_type == BFD_RELOC_386_GOTOFF
2670 || fixP->fx_r_type == BFD_RELOC_386_PLT32
2671 || fixP->fx_r_type == BFD_RELOC_386_GOT32
2672 || fixP->fx_r_type == BFD_RELOC_386_TLS_GD
2673 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM
2674 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32
2675 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE_32
2676 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE
2677 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTIE
2678 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE_32
2679 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE
2680 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
2681 || fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
2682 || fixP->fx_r_type == BFD_RELOC_X86_64_PLT32
2683 || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
2684 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
2685 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD
2686 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD
2687 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32
2688 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF64
2689 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTTPOFF
2690 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
2691 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
2692 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
2693 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
2694 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
2695 || fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
2696 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
2697 return 0;
2698 #endif
2699 return 1;
2702 static int
2703 intel_float_operand (const char *mnemonic)
2705 /* Note that the value returned is meaningful only for opcodes with (memory)
2706 operands, hence the code here is free to improperly handle opcodes that
2707 have no operands (for better performance and smaller code). */
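/* Return value summary: 0 for non-FPU mnemonics (including fxsave and
   fxrstor), 2 for integer forms such as fiadd or fild, 3 for control and
   state ops such as fldcw, fnstsw, frstor and fsave, and 1 for ordinary
   floating point operations.  */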
2709 if (mnemonic[0] != 'f')
2710 return 0; /* non-math */
2712 switch (mnemonic[1])
2714 /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
2715 the fs segment override prefix not currently handled because no
2716 call path can make opcodes without operands get here */
2717 case 'i':
2718 return 2 /* integer op */;
2719 case 'l':
2720 if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
2721 return 3; /* fldcw/fldenv */
2722 break;
2723 case 'n':
2724 if (mnemonic[2] != 'o' /* fnop */)
2725 return 3; /* non-waiting control op */
2726 break;
2727 case 'r':
2728 if (mnemonic[2] == 's')
2729 return 3; /* frstor/frstpm */
2730 break;
2731 case 's':
2732 if (mnemonic[2] == 'a')
2733 return 3; /* fsave */
2734 if (mnemonic[2] == 't')
2736 switch (mnemonic[3])
2738 case 'c': /* fstcw */
2739 case 'd': /* fstdw */
2740 case 'e': /* fstenv */
2741 case 's': /* fsts[gw] */
2742 return 3;
2745 break;
2746 case 'x':
2747 if (mnemonic[2] == 'r' || mnemonic[2] == 's')
2748 return 0; /* fxsave/fxrstor are not really math ops */
2749 break;
2752 return 1;
2755 /* Build the VEX prefix. */
2757 static void
2758 build_vex_prefix (const insn_template *t)
2760 unsigned int register_specifier;
2761 unsigned int implied_prefix;
2762 unsigned int vector_length;
2764 /* Check register specifier. */
2765 if (i.vex.register_specifier)
2767 register_specifier = i.vex.register_specifier->reg_num;
2768 if ((i.vex.register_specifier->reg_flags & RegRex))
2769 register_specifier += 8;
2770 register_specifier = ~register_specifier & 0xf;
2772 else
2773 register_specifier = 0xf;
2775 /* Use 2-byte VEX prefix by swapping destination and source
2776 operand. */
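/* Rationale: swapping moves the extended register from the modrm.rm
   (REX.B) position into modrm.reg (REX.R); with only REX.R left, the
   shorter 2-byte VEX form below can still encode the instruction.  */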
2777 if (!i.swap_operand
2778 && i.operands == i.reg_operands
2779 && i.tm.opcode_modifier.vexopcode == VEX0F
2780 && i.tm.opcode_modifier.s
2781 && i.rex == REX_B)
2783 unsigned int xchg = i.operands - 1;
2784 union i386_op temp_op;
2785 i386_operand_type temp_type;
2787 temp_type = i.types[xchg];
2788 i.types[xchg] = i.types[0];
2789 i.types[0] = temp_type;
2790 temp_op = i.op[xchg];
2791 i.op[xchg] = i.op[0];
2792 i.op[0] = temp_op;
2794 gas_assert (i.rm.mode == 3);
2796 i.rex = REX_R;
2797 xchg = i.rm.regmem;
2798 i.rm.regmem = i.rm.reg;
2799 i.rm.reg = xchg;
2801 /* Use the next insn. */
2802 i.tm = t[1];
2805 if (i.tm.opcode_modifier.vex == VEXScalar)
2806 vector_length = avxscalar;
2807 else
2808 vector_length = i.tm.opcode_modifier.vex == VEX256 ? 1 : 0;
2810 switch ((i.tm.base_opcode >> 8) & 0xff)
2812 case 0:
2813 implied_prefix = 0;
2814 break;
2815 case DATA_PREFIX_OPCODE:
2816 implied_prefix = 1;
2817 break;
2818 case REPE_PREFIX_OPCODE:
2819 implied_prefix = 2;
2820 break;
2821 case REPNE_PREFIX_OPCODE:
2822 implied_prefix = 3;
2823 break;
2824 default:
2825 abort ();
2828 /* Use 2-byte VEX prefix if possible. */
2829 if (i.tm.opcode_modifier.vexopcode == VEX0F
2830 && i.tm.opcode_modifier.vexw != VEXW1
2831 && (i.rex & (REX_W | REX_X | REX_B)) == 0)
2833 /* 2-byte VEX prefix. */
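/* Layout built below: byte 0 is 0xc5; byte 1 is R.vvvv.L.pp, where R is
   the inverted REX.R bit, vvvv the (already inverted) register specifier,
   L the vector length and pp the implied SIMD prefix.  */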
2834 unsigned int r;
2836 i.vex.length = 2;
2837 i.vex.bytes[0] = 0xc5;
2839 /* Check the REX.R bit. */
2840 r = (i.rex & REX_R) ? 0 : 1;
2841 i.vex.bytes[1] = (r << 7
2842 | register_specifier << 3
2843 | vector_length << 2
2844 | implied_prefix);
2846 else
2848 /* 3-byte VEX prefix. */
2849 unsigned int m, w;
2851 i.vex.length = 3;
2853 switch (i.tm.opcode_modifier.vexopcode)
2855 case VEX0F:
2856 m = 0x1;
2857 i.vex.bytes[0] = 0xc4;
2858 break;
2859 case VEX0F38:
2860 m = 0x2;
2861 i.vex.bytes[0] = 0xc4;
2862 break;
2863 case VEX0F3A:
2864 m = 0x3;
2865 i.vex.bytes[0] = 0xc4;
2866 break;
2867 case XOP08:
2868 m = 0x8;
2869 i.vex.bytes[0] = 0x8f;
2870 break;
2871 case XOP09:
2872 m = 0x9;
2873 i.vex.bytes[0] = 0x8f;
2874 break;
2875 case XOP0A:
2876 m = 0xa;
2877 i.vex.bytes[0] = 0x8f;
2878 break;
2879 default:
2880 abort ();
2883 /* The high 3 bits of the second VEX byte are 1's complement
2884 of RXB bits from REX. */
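/* Layout built below: byte 0 is 0xc4 (VEX) or 0x8f (XOP); byte 1 is
   R.X.B.mmmmm with the inverted REX bits and the opcode map; byte 2 is
   W.vvvv.L.pp.  */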
2885 i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
2887 /* Check the REX.W bit. */
2888 w = (i.rex & REX_W) ? 1 : 0;
2889 if (i.tm.opcode_modifier.vexw)
2891 if (w)
2892 abort ();
2894 if (i.tm.opcode_modifier.vexw == VEXW1)
2895 w = 1;
2898 i.vex.bytes[2] = (w << 7
2899 | register_specifier << 3
2900 | vector_length << 2
2901 | implied_prefix);
2905 static void
2906 process_immext (void)
2908 expressionS *exp;
2910 if (i.tm.cpu_flags.bitfield.cpusse3 && i.operands > 0)
2912 /* SSE3 Instructions have the fixed operands with an opcode
2913 suffix which is coded in the same place as an 8-bit immediate
2914 field would be. Here we check those operands and remove them
2915 afterwards. */
2916 unsigned int x;
2918 for (x = 0; x < i.operands; x++)
2919 if (i.op[x].regs->reg_num != x)
2920 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
2921 register_prefix, i.op[x].regs->reg_name, x + 1,
2922 i.tm.name);
2924 i.operands = 0;
2927 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
2928 which is coded in the same place as an 8-bit immediate field
2929 would be. Here we fake an 8-bit immediate operand from the
2930 opcode suffix stored in tm.extension_opcode.
2932 AVX instructions also use this encoding, for some of
2933 3 argument instructions. */
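/* For instance, AMD 3DNow! insns are encoded as 0f 0f /r followed by a
   one-byte opcode suffix; faking that suffix as an Imm8 operand lets the
   normal immediate-output path emit it.  */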
2935 gas_assert (i.imm_operands == 0
2936 && (i.operands <= 2
2937 || (i.tm.opcode_modifier.vex
2938 && i.operands <= 4)));
2940 exp = &im_expressions[i.imm_operands++];
2941 i.op[i.operands].imms = exp;
2942 i.types[i.operands] = imm8;
2943 i.operands++;
2944 exp->X_op = O_constant;
2945 exp->X_add_number = i.tm.extension_opcode;
2946 i.tm.extension_opcode = None;
2949 /* This is the guts of the machine-dependent assembler. LINE points to a
2950 machine dependent instruction. This function is supposed to emit
2951 the frags/bytes it assembles to. */
2953 void
2954 md_assemble (char *line)
2956 unsigned int j;
2957 char mnemonic[MAX_MNEM_SIZE];
2958 const insn_template *t;
2960 /* Initialize globals. */
2961 memset (&i, '\0', sizeof (i));
2962 for (j = 0; j < MAX_OPERANDS; j++)
2963 i.reloc[j] = NO_RELOC;
2964 memset (disp_expressions, '\0', sizeof (disp_expressions));
2965 memset (im_expressions, '\0', sizeof (im_expressions));
2966 save_stack_p = save_stack;
2968 /* First parse an instruction mnemonic & call i386_operand for the operands.
2969 We assume that the scrubber has arranged it so that line[0] is the valid
2970 start of a (possibly prefixed) mnemonic. */
2972 line = parse_insn (line, mnemonic);
2973 if (line == NULL)
2974 return;
2976 line = parse_operands (line, mnemonic);
2977 this_operand = -1;
2978 if (line == NULL)
2979 return;
2981 /* Now we've parsed the mnemonic into a set of templates, and have the
2982 operands at hand. */
2984 /* All intel opcodes have reversed operands except for "bound" and
2985 "enter". We also don't reverse intersegment "jmp" and "call"
2986 instructions with 2 immediate operands so that the immediate segment
2987 precedes the offset, as it does when in AT&T mode. */
2988 if (intel_syntax
2989 && i.operands > 1
2990 && (strcmp (mnemonic, "bound") != 0)
2991 && (strcmp (mnemonic, "invlpga") != 0)
2992 && !(operand_type_check (i.types[0], imm)
2993 && operand_type_check (i.types[1], imm)))
2994 swap_operands ();
2996 /* The order of the immediates should be reversed
2997 for the two-immediate extrq and insertq instructions. */
2998 if (i.imm_operands == 2
2999 && (strcmp (mnemonic, "extrq") == 0
3000 || strcmp (mnemonic, "insertq") == 0))
3001 swap_2_operands (0, 1);
3003 if (i.imm_operands)
3004 optimize_imm ();
3006 /* Don't optimize displacement for movabs since it only takes 64bit
3007 displacement. */
3008 if (i.disp_operands
3009 && !i.disp32_encoding
3010 && (flag_code != CODE_64BIT
3011 || strcmp (mnemonic, "movabs") != 0))
3012 optimize_disp ();
3014 /* Next, we find a template that matches the given insn,
3015 making sure the overlap of the given operands types is consistent
3016 with the template operand types. */
3018 if (!(t = match_template ()))
3019 return;
3021 if (sse_check != sse_check_none
3022 && !i.tm.opcode_modifier.noavx
3023 && (i.tm.cpu_flags.bitfield.cpusse
3024 || i.tm.cpu_flags.bitfield.cpusse2
3025 || i.tm.cpu_flags.bitfield.cpusse3
3026 || i.tm.cpu_flags.bitfield.cpussse3
3027 || i.tm.cpu_flags.bitfield.cpusse4_1
3028 || i.tm.cpu_flags.bitfield.cpusse4_2))
3030 (sse_check == sse_check_warning
3031 ? as_warn
3032 : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
3035 /* Zap movzx and movsx suffix. The suffix has been set from
3036 "word ptr" or "byte ptr" on the source operand in Intel syntax
3037 or extracted from mnemonic in AT&T syntax. But we'll use
3038 the destination register to choose the suffix for encoding. */
3039 if ((i.tm.base_opcode & ~9) == 0x0fb6)
3041 /* In Intel syntax, there must be a suffix. In AT&T syntax, if
3042 there is no suffix, the default will be byte extension. */
3043 if (i.reg_operands != 2
3044 && !i.suffix
3045 && intel_syntax)
3046 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
3048 i.suffix = 0;
3051 if (i.tm.opcode_modifier.fwait)
3052 if (!add_prefix (FWAIT_OPCODE))
3053 return;
3055 /* Check for lock without a lockable instruction. Destination operand
3056 must be memory unless it is xchg (0x86). */
3057 if (i.prefix[LOCK_PREFIX]
3058 && (!i.tm.opcode_modifier.islockable
3059 || i.mem_operands == 0
3060 || (i.tm.base_opcode != 0x86
3061 && !operand_type_check (i.types[i.operands - 1], anymem))))
3063 as_bad (_("expecting lockable instruction after `lock'"));
3064 return;
3067 /* Check string instruction segment overrides. */
3068 if (i.tm.opcode_modifier.isstring && i.mem_operands != 0)
3070 if (!check_string ())
3071 return;
3072 i.disp_operands = 0;
3075 if (!process_suffix ())
3076 return;
3078 /* Update operand types. */
3079 for (j = 0; j < i.operands; j++)
3080 i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);
3082 /* Make still unresolved immediate matches conform to size of immediate
3083 given in i.suffix. */
3084 if (!finalize_imm ())
3085 return;
3087 if (i.types[0].bitfield.imm1)
3088 i.imm_operands = 0; /* kludge for shift insns. */
3090 /* We only need to check those implicit registers for instructions
3091 with 3 operands or less. */
3092 if (i.operands <= 3)
3093 for (j = 0; j < i.operands; j++)
3094 if (i.types[j].bitfield.inoutportreg
3095 || i.types[j].bitfield.shiftcount
3096 || i.types[j].bitfield.acc
3097 || i.types[j].bitfield.floatacc)
3098 i.reg_operands--;
3100 /* ImmExt should be processed after SSE2AVX. */
3101 if (!i.tm.opcode_modifier.sse2avx
3102 && i.tm.opcode_modifier.immext)
3103 process_immext ();
3105 /* For insns with operands there are more diddles to do to the opcode. */
3106 if (i.operands)
3108 if (!process_operands ())
3109 return;
3111 else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
3113 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
3114 as_warn (_("translating to `%sp'"), i.tm.name);
3117 if (i.tm.opcode_modifier.vex)
3118 build_vex_prefix (t);
3120 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
3121 instructions may define INT_OPCODE as well, so avoid this corner
3122 case for those instructions that use MODRM. */
3123 if (i.tm.base_opcode == INT_OPCODE
3124 && !i.tm.opcode_modifier.modrm
3125 && i.op[0].imms->X_add_number == 3)
3127 i.tm.base_opcode = INT3_OPCODE;
3128 i.imm_operands = 0;
3131 if ((i.tm.opcode_modifier.jump
3132 || i.tm.opcode_modifier.jumpbyte
3133 || i.tm.opcode_modifier.jumpdword)
3134 && i.op[0].disps->X_op == O_constant)
3136 /* Convert "jmp constant" (and "call constant") to a jump (call) to
3137 the absolute address given by the constant. Since ix86 jumps and
3138 calls are pc relative, we need to generate a reloc. */
3139 i.op[0].disps->X_add_symbol = &abs_symbol;
3140 i.op[0].disps->X_op = O_symbol;
3143 if (i.tm.opcode_modifier.rex64)
3144 i.rex |= REX_W;
3146 /* For 8 bit registers we need an empty rex prefix. Also if the
3147 instruction already has a prefix, we need to convert old
3148 registers to new ones. */
3150 if ((i.types[0].bitfield.reg8
3151 && (i.op[0].regs->reg_flags & RegRex64) != 0)
3152 || (i.types[1].bitfield.reg8
3153 && (i.op[1].regs->reg_flags & RegRex64) != 0)
3154 || ((i.types[0].bitfield.reg8
3155 || i.types[1].bitfield.reg8)
3156 && i.rex != 0))
3158 int x;
3160 i.rex |= REX_OPCODE;
3161 for (x = 0; x < 2; x++)
3163 /* Look for 8 bit operand that uses old registers. */
3164 if (i.types[x].bitfield.reg8
3165 && (i.op[x].regs->reg_flags & RegRex64) == 0)
3167 /* In case it is "hi" register, give up. */
3168 if (i.op[x].regs->reg_num > 3)
3169 as_bad (_("can't encode register '%s%s' in an "
3170 "instruction requiring REX prefix."),
3171 register_prefix, i.op[x].regs->reg_name);
3173 /* Otherwise it is equivalent to the extended register.
3174 Since the encoding doesn't change this is merely
3175 cosmetic cleanup for debug output. */
3177 i.op[x].regs = i.op[x].regs + 8;
3182 if (i.rex != 0)
3183 add_prefix (REX_OPCODE | i.rex);
3185 /* We are ready to output the insn. */
3186 output_insn ();
3189 static char *
3190 parse_insn (char *line, char *mnemonic)
3192 char *l = line;
3193 char *token_start = l;
3194 char *mnem_p;
3195 int supported;
3196 const insn_template *t;
3197 char *dot_p = NULL;
3199 /* Non-zero if we found a prefix only acceptable with string insns. */
3200 const char *expecting_string_instruction = NULL;
3202 while (1)
3204 mnem_p = mnemonic;
3205 while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
3207 if (*mnem_p == '.')
3208 dot_p = mnem_p;
3209 mnem_p++;
3210 if (mnem_p >= mnemonic + MAX_MNEM_SIZE)
3212 as_bad (_("no such instruction: `%s'"), token_start);
3213 return NULL;
3215 l++;
3217 if (!is_space_char (*l)
3218 && *l != END_OF_INSN
3219 && (intel_syntax
3220 || (*l != PREFIX_SEPARATOR
3221 && *l != ',')))
3223 as_bad (_("invalid character %s in mnemonic"),
3224 output_invalid (*l));
3225 return NULL;
3227 if (token_start == l)
3229 if (!intel_syntax && *l == PREFIX_SEPARATOR)
3230 as_bad (_("expecting prefix; got nothing"));
3231 else
3232 as_bad (_("expecting mnemonic; got nothing"));
3233 return NULL;
3236 /* Look up instruction (or prefix) via hash table. */
3237 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3239 if (*l != END_OF_INSN
3240 && (!is_space_char (*l) || l[1] != END_OF_INSN)
3241 && current_templates
3242 && current_templates->start->opcode_modifier.isprefix)
3244 if (!cpu_flags_check_cpu64 (current_templates->start->cpu_flags))
3246 as_bad ((flag_code != CODE_64BIT
3247 ? _("`%s' is only supported in 64-bit mode")
3248 : _("`%s' is not supported in 64-bit mode")),
3249 current_templates->start->name);
3250 return NULL;
3252 /* If we are in 16-bit mode, do not allow addr16 or data16.
3253 Similarly, in 32-bit mode, do not allow addr32 or data32. */
3254 if ((current_templates->start->opcode_modifier.size16
3255 || current_templates->start->opcode_modifier.size32)
3256 && flag_code != CODE_64BIT
3257 && (current_templates->start->opcode_modifier.size32
3258 ^ (flag_code == CODE_16BIT)))
3260 as_bad (_("redundant %s prefix"),
3261 current_templates->start->name);
3262 return NULL;
3264 /* Add prefix, checking for repeated prefixes. */
3265 switch (add_prefix (current_templates->start->base_opcode))
3267 case PREFIX_EXIST:
3268 return NULL;
3269 case PREFIX_REP:
3270 expecting_string_instruction = current_templates->start->name;
3271 break;
3272 default:
3273 break;
3275 /* Skip past PREFIX_SEPARATOR and reset token_start. */
3276 token_start = ++l;
3278 else
3279 break;
3282 if (!current_templates)
3284 /* Check if we should swap operand or force 32bit displacement in
3285 encoding. */
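/* Illustrative: a trailing ".s" (e.g. "movapd.s") requests the
   swapped-operand encoding and a trailing ".d32" (e.g. "jmp.d32") forces
   a 32-bit displacement; the pseudo-suffix is stripped below before the
   template lookup is retried.  */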
3286 if (mnem_p - 2 == dot_p && dot_p[1] == 's')
3287 i.swap_operand = 1;
3288 else if (mnem_p - 4 == dot_p
3289 && dot_p[1] == 'd'
3290 && dot_p[2] == '3'
3291 && dot_p[3] == '2')
3292 i.disp32_encoding = 1;
3293 else
3294 goto check_suffix;
3295 mnem_p = dot_p;
3296 *dot_p = '\0';
3297 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3300 if (!current_templates)
3302 check_suffix:
3303 /* See if we can get a match by trimming off a suffix. */
3304 switch (mnem_p[-1])
3306 case WORD_MNEM_SUFFIX:
3307 if (intel_syntax && (intel_float_operand (mnemonic) & 2))
3308 i.suffix = SHORT_MNEM_SUFFIX;
3309 else
3310 case BYTE_MNEM_SUFFIX:
3311 case QWORD_MNEM_SUFFIX:
3312 i.suffix = mnem_p[-1];
3313 mnem_p[-1] = '\0';
3314 current_templates = (const templates *) hash_find (op_hash,
3315 mnemonic);
3316 break;
3317 case SHORT_MNEM_SUFFIX:
3318 case LONG_MNEM_SUFFIX:
3319 if (!intel_syntax)
3321 i.suffix = mnem_p[-1];
3322 mnem_p[-1] = '\0';
3323 current_templates = (const templates *) hash_find (op_hash,
3324 mnemonic);
3326 break;
3328 /* Intel Syntax. */
3329 case 'd':
3330 if (intel_syntax)
3332 if (intel_float_operand (mnemonic) == 1)
3333 i.suffix = SHORT_MNEM_SUFFIX;
3334 else
3335 i.suffix = LONG_MNEM_SUFFIX;
3336 mnem_p[-1] = '\0';
3337 current_templates = (const templates *) hash_find (op_hash,
3338 mnemonic);
3340 break;
3342 if (!current_templates)
3344 as_bad (_("no such instruction: `%s'"), token_start);
3345 return NULL;
3349 if (current_templates->start->opcode_modifier.jump
3350 || current_templates->start->opcode_modifier.jumpbyte)
3352 /* Check for a branch hint. We allow ",pt" and ",pn" for
3353 predict taken and predict not taken respectively.
3354 I'm not sure that branch hints actually do anything on loop
3355 and jcxz insns (JumpByte) for current Pentium4 chips. They
3356 may work in the future and it doesn't hurt to accept them
3357 now. */
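/* Illustrative: "jne,pt target" adds a DS segment prefix (predict taken)
   and "jne,pn target" a CS prefix (predict not taken), matching the
   add_prefix calls below.  */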
3358 if (l[0] == ',' && l[1] == 'p')
3360 if (l[2] == 't')
3362 if (!add_prefix (DS_PREFIX_OPCODE))
3363 return NULL;
3364 l += 3;
3366 else if (l[2] == 'n')
3368 if (!add_prefix (CS_PREFIX_OPCODE))
3369 return NULL;
3370 l += 3;
3374 /* Any other comma loses. */
3375 if (*l == ',')
3377 as_bad (_("invalid character %s in mnemonic"),
3378 output_invalid (*l));
3379 return NULL;
3382 /* Check if instruction is supported on specified architecture. */
3383 supported = 0;
3384 for (t = current_templates->start; t < current_templates->end; ++t)
3386 supported |= cpu_flags_match (t);
3387 if (supported == CPU_FLAGS_PERFECT_MATCH)
3388 goto skip;
3391 if (!(supported & CPU_FLAGS_64BIT_MATCH))
3393 as_bad (flag_code == CODE_64BIT
3394 ? _("`%s' is not supported in 64-bit mode")
3395 : _("`%s' is only supported in 64-bit mode"),
3396 current_templates->start->name);
3397 return NULL;
3399 if (supported != CPU_FLAGS_PERFECT_MATCH)
3401 as_bad (_("`%s' is not supported on `%s%s'"),
3402 current_templates->start->name,
3403 cpu_arch_name ? cpu_arch_name : default_arch,
3404 cpu_sub_arch_name ? cpu_sub_arch_name : "");
3405 return NULL;
3408 skip:
3409 if (!cpu_arch_flags.bitfield.cpui386
3410 && (flag_code != CODE_16BIT))
3412 as_warn (_("use .code16 to ensure correct addressing mode"));
3415 /* Check for rep/repne without a string instruction. */
3416 if (expecting_string_instruction)
3418 static templates override;
3420 for (t = current_templates->start; t < current_templates->end; ++t)
3421 if (t->opcode_modifier.isstring)
3422 break;
3423 if (t >= current_templates->end)
3425 as_bad (_("expecting string instruction after `%s'"),
3426 expecting_string_instruction);
3427 return NULL;
3429 for (override.start = t; t < current_templates->end; ++t)
3430 if (!t->opcode_modifier.isstring)
3431 break;
3432 override.end = t;
3433 current_templates = &override;
3436 return l;
3439 static char *
3440 parse_operands (char *l, const char *mnemonic)
3442 char *token_start;
3444 /* 1 if operand is pending after ','. */
3445 unsigned int expecting_operand = 0;
3447 /* Non-zero if operand parens not balanced. */
3448 unsigned int paren_not_balanced;
3450 while (*l != END_OF_INSN)
3452 /* Skip optional white space before operand. */
3453 if (is_space_char (*l))
3454 ++l;
3455 if (!is_operand_char (*l) && *l != END_OF_INSN)
3457 as_bad (_("invalid character %s before operand %d"),
3458 output_invalid (*l),
3459 i.operands + 1);
3460 return NULL;
3462 token_start = l; /* after white space */
3463 paren_not_balanced = 0;
3464 while (paren_not_balanced || *l != ',')
3466 if (*l == END_OF_INSN)
3468 if (paren_not_balanced)
3470 if (!intel_syntax)
3471 as_bad (_("unbalanced parenthesis in operand %d."),
3472 i.operands + 1);
3473 else
3474 as_bad (_("unbalanced brackets in operand %d."),
3475 i.operands + 1);
3476 return NULL;
3478 else
3479 break; /* we are done */
3481 else if (!is_operand_char (*l) && !is_space_char (*l))
3483 as_bad (_("invalid character %s in operand %d"),
3484 output_invalid (*l),
3485 i.operands + 1);
3486 return NULL;
3488 if (!intel_syntax)
3490 if (*l == '(')
3491 ++paren_not_balanced;
3492 if (*l == ')')
3493 --paren_not_balanced;
3495 else
3497 if (*l == '[')
3498 ++paren_not_balanced;
3499 if (*l == ']')
3500 --paren_not_balanced;
3502 l++;
3504 if (l != token_start)
3505 { /* Yes, we've read in another operand. */
3506 unsigned int operand_ok;
3507 this_operand = i.operands++;
3508 i.types[this_operand].bitfield.unspecified = 1;
3509 if (i.operands > MAX_OPERANDS)
3511 as_bad (_("spurious operands; (%d operands/instruction max)"),
3512 MAX_OPERANDS);
3513 return NULL;
3515 /* Now parse operand adding info to 'i' as we go along. */
3516 END_STRING_AND_SAVE (l);
3518 if (intel_syntax)
3519 operand_ok =
3520 i386_intel_operand (token_start,
3521 intel_float_operand (mnemonic));
3522 else
3523 operand_ok = i386_att_operand (token_start);
3525 RESTORE_END_STRING (l);
3526 if (!operand_ok)
3527 return NULL;
3529 else
3531 if (expecting_operand)
3533 expecting_operand_after_comma:
3534 as_bad (_("expecting operand after ','; got nothing"));
3535 return NULL;
3537 if (*l == ',')
3539 as_bad (_("expecting operand before ','; got nothing"));
3540 return NULL;
3544 /* Now *l must be either ',' or END_OF_INSN. */
3545 if (*l == ',')
3547 if (*++l == END_OF_INSN)
3549 /* Nothing follows the comma; complain. */
3550 goto expecting_operand_after_comma;
3552 expecting_operand = 1;
3555 return l;
3558 static void
3559 swap_2_operands (int xchg1, int xchg2)
3561 union i386_op temp_op;
3562 i386_operand_type temp_type;
3563 enum bfd_reloc_code_real temp_reloc;
3565 temp_type = i.types[xchg2];
3566 i.types[xchg2] = i.types[xchg1];
3567 i.types[xchg1] = temp_type;
3568 temp_op = i.op[xchg2];
3569 i.op[xchg2] = i.op[xchg1];
3570 i.op[xchg1] = temp_op;
3571 temp_reloc = i.reloc[xchg2];
3572 i.reloc[xchg2] = i.reloc[xchg1];
3573 i.reloc[xchg1] = temp_reloc;
3576 static void
3577 swap_operands (void)
3579 switch (i.operands)
3581 case 5:
3582 case 4:
3583 swap_2_operands (1, i.operands - 2);
3584 case 3:
3585 case 2:
3586 swap_2_operands (0, i.operands - 1);
3587 break;
3588 default:
3589 abort ();
3592 if (i.mem_operands == 2)
3594 const seg_entry *temp_seg;
3595 temp_seg = i.seg[0];
3596 i.seg[0] = i.seg[1];
3597 i.seg[1] = temp_seg;
3601 /* Try to ensure constant immediates are represented in the smallest
3602 opcode possible. */
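/* Illustrative: for "addl $1, %eax" the constant is additionally flagged
   as Imm8S, so template matching may later select the sign-extended
   8-bit immediate form of the opcode.  */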
3603 static void
3604 optimize_imm (void)
3606 char guess_suffix = 0;
3607 int op;
3609 if (i.suffix)
3610 guess_suffix = i.suffix;
3611 else if (i.reg_operands)
3613 /* Figure out a suffix from the last register operand specified.
3614 We can't do this properly yet, i.e. excluding InOutPortReg,
3615 but the following works for instructions with immediates.
3616 In any case, we can't set i.suffix yet. */
3617 for (op = i.operands; --op >= 0;)
3618 if (i.types[op].bitfield.reg8)
3620 guess_suffix = BYTE_MNEM_SUFFIX;
3621 break;
3623 else if (i.types[op].bitfield.reg16)
3625 guess_suffix = WORD_MNEM_SUFFIX;
3626 break;
3628 else if (i.types[op].bitfield.reg32)
3630 guess_suffix = LONG_MNEM_SUFFIX;
3631 break;
3633 else if (i.types[op].bitfield.reg64)
3635 guess_suffix = QWORD_MNEM_SUFFIX;
3636 break;
3639 else if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
3640 guess_suffix = WORD_MNEM_SUFFIX;
3642 for (op = i.operands; --op >= 0;)
3643 if (operand_type_check (i.types[op], imm))
3645 switch (i.op[op].imms->X_op)
3647 case O_constant:
3648 /* If a suffix is given, this operand may be shortened. */
3649 switch (guess_suffix)
3651 case LONG_MNEM_SUFFIX:
3652 i.types[op].bitfield.imm32 = 1;
3653 i.types[op].bitfield.imm64 = 1;
3654 break;
3655 case WORD_MNEM_SUFFIX:
3656 i.types[op].bitfield.imm16 = 1;
3657 i.types[op].bitfield.imm32 = 1;
3658 i.types[op].bitfield.imm32s = 1;
3659 i.types[op].bitfield.imm64 = 1;
3660 break;
3661 case BYTE_MNEM_SUFFIX:
3662 i.types[op].bitfield.imm8 = 1;
3663 i.types[op].bitfield.imm8s = 1;
3664 i.types[op].bitfield.imm16 = 1;
3665 i.types[op].bitfield.imm32 = 1;
3666 i.types[op].bitfield.imm32s = 1;
3667 i.types[op].bitfield.imm64 = 1;
3668 break;
3671 /* If this operand is at most 16 bits, convert it
3672 to a signed 16 bit number before trying to see
3673 whether it will fit in an even smaller size.
3674 This allows a 16-bit operand such as $0xffe0 to
3675 be recognised as within Imm8S range. */
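/* e.g. 0xffe0: (0xffe0 ^ 0x8000) - 0x8000 == -32, which then also
   qualifies as Imm8S.  */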
3676 if ((i.types[op].bitfield.imm16)
3677 && (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
3679 i.op[op].imms->X_add_number =
3680 (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
3682 if ((i.types[op].bitfield.imm32)
3683 && ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1))
3684 == 0))
3686 i.op[op].imms->X_add_number = ((i.op[op].imms->X_add_number
3687 ^ ((offsetT) 1 << 31))
3688 - ((offsetT) 1 << 31));
3690 i.types[op]
3691 = operand_type_or (i.types[op],
3692 smallest_imm_type (i.op[op].imms->X_add_number));
3694 /* We must avoid matching of Imm32 templates when 64bit
3695 only immediate is available. */
3696 if (guess_suffix == QWORD_MNEM_SUFFIX)
3697 i.types[op].bitfield.imm32 = 0;
3698 break;
3700 case O_absent:
3701 case O_register:
3702 abort ();
3704 /* Symbols and expressions. */
3705 default:
3706 /* Convert symbolic operand to proper sizes for matching, but don't
3707 prevent matching a set of insns that only supports sizes other
3708 than those matching the insn suffix. */
3710 i386_operand_type mask, allowed;
3711 const insn_template *t;
3713 operand_type_set (&mask, 0);
3714 operand_type_set (&allowed, 0);
3716 for (t = current_templates->start;
3717 t < current_templates->end;
3718 ++t)
3719 allowed = operand_type_or (allowed,
3720 t->operand_types[op]);
3721 switch (guess_suffix)
3723 case QWORD_MNEM_SUFFIX:
3724 mask.bitfield.imm64 = 1;
3725 mask.bitfield.imm32s = 1;
3726 break;
3727 case LONG_MNEM_SUFFIX:
3728 mask.bitfield.imm32 = 1;
3729 break;
3730 case WORD_MNEM_SUFFIX:
3731 mask.bitfield.imm16 = 1;
3732 break;
3733 case BYTE_MNEM_SUFFIX:
3734 mask.bitfield.imm8 = 1;
3735 break;
3736 default:
3737 break;
3739 allowed = operand_type_and (mask, allowed);
3740 if (!operand_type_all_zero (&allowed))
3741 i.types[op] = operand_type_and (i.types[op], mask);
3743 break;
3748 /* Try to use the smallest displacement type too. */
3749 static void
3750 optimize_disp (void)
3752 int op;
3754 for (op = i.operands; --op >= 0;)
3755 if (operand_type_check (i.types[op], disp))
3757 if (i.op[op].disps->X_op == O_constant)
3759 offsetT op_disp = i.op[op].disps->X_add_number;
3761 if (i.types[op].bitfield.disp16
3762 && (op_disp & ~(offsetT) 0xffff) == 0)
3764 /* If this operand is at most 16 bits, convert
3765 to a signed 16 bit number and don't use 64bit
3766 displacement. */
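/* e.g. a 16-bit displacement of 0xfffc becomes -4 here and may later be
   emitted as an 8-bit displacement.  */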
3767 op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000);
3768 i.types[op].bitfield.disp64 = 0;
3770 if (i.types[op].bitfield.disp32
3771 && (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
3773 /* If this operand is at most 32 bits, convert
3774 to a signed 32 bit number and don't use 64bit
3775 displacement. */
3776 op_disp &= (((offsetT) 2 << 31) - 1);
3777 op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
3778 i.types[op].bitfield.disp64 = 0;
3780 if (!op_disp && i.types[op].bitfield.baseindex)
3782 i.types[op].bitfield.disp8 = 0;
3783 i.types[op].bitfield.disp16 = 0;
3784 i.types[op].bitfield.disp32 = 0;
3785 i.types[op].bitfield.disp32s = 0;
3786 i.types[op].bitfield.disp64 = 0;
3787 i.op[op].disps = 0;
3788 i.disp_operands--;
3790 else if (flag_code == CODE_64BIT)
3792 if (fits_in_signed_long (op_disp))
3794 i.types[op].bitfield.disp64 = 0;
3795 i.types[op].bitfield.disp32s = 1;
3797 if (i.prefix[ADDR_PREFIX]
3798 && fits_in_unsigned_long (op_disp))
3799 i.types[op].bitfield.disp32 = 1;
3801 if ((i.types[op].bitfield.disp32
3802 || i.types[op].bitfield.disp32s
3803 || i.types[op].bitfield.disp16)
3804 && fits_in_signed_byte (op_disp))
3805 i.types[op].bitfield.disp8 = 1;
3807 else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
3808 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL)
3810 fix_new_exp (frag_now, frag_more (0) - frag_now->fr_literal, 0,
3811 i.op[op].disps, 0, i.reloc[op]);
3812 i.types[op].bitfield.disp8 = 0;
3813 i.types[op].bitfield.disp16 = 0;
3814 i.types[op].bitfield.disp32 = 0;
3815 i.types[op].bitfield.disp32s = 0;
3816 i.types[op].bitfield.disp64 = 0;
3818 else
3819 /* We only support 64bit displacement on constants. */
3820 i.types[op].bitfield.disp64 = 0;
3824 /* Check if operands are valid for the instruction. Update VEX
3825 operand types. */
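/* Only a leading Vec_Imm4 operand is validated here: it must be an
   absolute constant that fits in 4 bits, and its type is then narrowed so
   that update_imm does not complain about a stray Imm8.  */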
3827 static int
3828 VEX_check_operands (const insn_template *t)
3830 if (!t->opcode_modifier.vex)
3831 return 0;
3833 /* Only check VEX_Imm4, which must be the first operand. */
3834 if (t->operand_types[0].bitfield.vec_imm4)
3836 if (i.op[0].imms->X_op != O_constant
3837 || !fits_in_imm4 (i.op[0].imms->X_add_number))
3839 i.error = bad_imm4;
3840 return 1;
3843 /* Turn off Imm8 so that update_imm won't complain. */
3844 i.types[0] = vec_imm4;
3847 return 0;
3850 static const insn_template *
3851 match_template (void)
3853 /* Points to template once we've found it. */
3854 const insn_template *t;
3855 i386_operand_type overlap0, overlap1, overlap2, overlap3;
3856 i386_operand_type overlap4;
3857 unsigned int found_reverse_match;
3858 i386_opcode_modifier suffix_check;
3859 i386_operand_type operand_types [MAX_OPERANDS];
3860 int addr_prefix_disp;
3861 unsigned int j;
3862 unsigned int found_cpu_match;
3863 unsigned int check_register;
3865 #if MAX_OPERANDS != 5
3866 # error "MAX_OPERANDS must be 5."
3867 #endif
3869 found_reverse_match = 0;
3870 addr_prefix_disp = -1;
3872 memset (&suffix_check, 0, sizeof (suffix_check));
3873 if (i.suffix == BYTE_MNEM_SUFFIX)
3874 suffix_check.no_bsuf = 1;
3875 else if (i.suffix == WORD_MNEM_SUFFIX)
3876 suffix_check.no_wsuf = 1;
3877 else if (i.suffix == SHORT_MNEM_SUFFIX)
3878 suffix_check.no_ssuf = 1;
3879 else if (i.suffix == LONG_MNEM_SUFFIX)
3880 suffix_check.no_lsuf = 1;
3881 else if (i.suffix == QWORD_MNEM_SUFFIX)
3882 suffix_check.no_qsuf = 1;
3883 else if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
3884 suffix_check.no_ldsuf = 1;
3886 /* Must have right number of operands. */
3887 i.error = number_of_operands_mismatch;
3889 for (t = current_templates->start; t < current_templates->end; t++)
3891 addr_prefix_disp = -1;
3893 if (i.operands != t->operands)
3894 continue;
3896 /* Check processor support. */
3897 i.error = unsupported;
3898 found_cpu_match = (cpu_flags_match (t)
3899 == CPU_FLAGS_PERFECT_MATCH);
3900 if (!found_cpu_match)
3901 continue;
3903 /* Check old gcc support. */
3904 i.error = old_gcc_only;
3905 if (!old_gcc && t->opcode_modifier.oldgcc)
3906 continue;
3908 /* Check AT&T mnemonic. */
3909 i.error = unsupported_with_intel_mnemonic;
3910 if (intel_mnemonic && t->opcode_modifier.attmnemonic)
3911 continue;
3913 /* Check AT&T/Intel syntax. */
3914 i.error = unsupported_syntax;
3915 if ((intel_syntax && t->opcode_modifier.attsyntax)
3916 || (!intel_syntax && t->opcode_modifier.intelsyntax))
3917 continue;
3919 /* Check the suffix, except for some instructions in intel mode. */
3920 i.error = invalid_instruction_suffix;
3921 if ((!intel_syntax || !t->opcode_modifier.ignoresize)
3922 && ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
3923 || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
3924 || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
3925 || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
3926 || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
3927 || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf)))
3928 continue;
3930 if (!operand_size_match (t))
3931 continue;
3933 for (j = 0; j < MAX_OPERANDS; j++)
3934 operand_types[j] = t->operand_types[j];
3936 /* In general, don't allow 64-bit operands in 32-bit mode. */
3937 if (i.suffix == QWORD_MNEM_SUFFIX
3938 && flag_code != CODE_64BIT
3939 && (intel_syntax
3940 ? (!t->opcode_modifier.ignoresize
3941 && !intel_float_operand (t->name))
3942 : intel_float_operand (t->name) != 2)
3943 && ((!operand_types[0].bitfield.regmmx
3944 && !operand_types[0].bitfield.regxmm
3945 && !operand_types[0].bitfield.regymm)
3946 || (!operand_types[t->operands > 1].bitfield.regmmx
3947 && !!operand_types[t->operands > 1].bitfield.regxmm
3948 && !!operand_types[t->operands > 1].bitfield.regymm))
3949 && (t->base_opcode != 0x0fc7
3950 || t->extension_opcode != 1 /* cmpxchg8b */))
3951 continue;
3953 /* In general, don't allow 32-bit operands on pre-386. */
3954 else if (i.suffix == LONG_MNEM_SUFFIX
3955 && !cpu_arch_flags.bitfield.cpui386
3956 && (intel_syntax
3957 ? (!t->opcode_modifier.ignoresize
3958 && !intel_float_operand (t->name))
3959 : intel_float_operand (t->name) != 2)
3960 && ((!operand_types[0].bitfield.regmmx
3961 && !operand_types[0].bitfield.regxmm)
3962 || (!operand_types[t->operands > 1].bitfield.regmmx
3963 && !!operand_types[t->operands > 1].bitfield.regxmm)))
3964 continue;
3966 /* Do not verify operands when there are none. */
3967 else
3969 if (!t->operands)
3970 /* We've found a match; break out of loop. */
3971 break;
3974 /* An address size prefix converts the displacement operand: Disp64 -> Disp32
3975 in 64-bit mode, Disp32 -> Disp16 in 32-bit mode, Disp16 -> Disp32 in 16-bit mode. */
3976 if (i.prefix[ADDR_PREFIX] != 0)
3978 /* There should be only one Disp operand. */
3979 switch (flag_code)
3981 case CODE_16BIT:
3982 for (j = 0; j < MAX_OPERANDS; j++)
3984 if (operand_types[j].bitfield.disp16)
3986 addr_prefix_disp = j;
3987 operand_types[j].bitfield.disp32 = 1;
3988 operand_types[j].bitfield.disp16 = 0;
3989 break;
3992 break;
3993 case CODE_32BIT:
3994 for (j = 0; j < MAX_OPERANDS; j++)
3996 if (operand_types[j].bitfield.disp32)
3998 addr_prefix_disp = j;
3999 operand_types[j].bitfield.disp32 = 0;
4000 operand_types[j].bitfield.disp16 = 1;
4001 break;
4004 break;
4005 case CODE_64BIT:
4006 for (j = 0; j < MAX_OPERANDS; j++)
4008 if (operand_types[j].bitfield.disp64)
4010 addr_prefix_disp = j;
4011 operand_types[j].bitfield.disp64 = 0;
4012 operand_types[j].bitfield.disp32 = 1;
4013 break;
4016 break;
4020 /* We check register size if needed. */
4021 check_register = t->opcode_modifier.checkregsize;
4022 overlap0 = operand_type_and (i.types[0], operand_types[0]);
4023 switch (t->operands)
4025 case 1:
4026 if (!operand_type_match (overlap0, i.types[0]))
4027 continue;
4028 break;
4029 case 2:
4030 /* xchg %eax, %eax is a special case. It is an alias for nop
4031 only in 32bit mode and we can use opcode 0x90. In 64bit
4032 mode, we can't use 0x90 for xchg %eax, %eax since it should
4033 zero-extend %eax to %rax. */
4034 if (flag_code == CODE_64BIT
4035 && t->base_opcode == 0x90
4036 && operand_type_equal (&i.types [0], &acc32)
4037 && operand_type_equal (&i.types [1], &acc32))
4038 continue;
4039 if (i.swap_operand)
4041 /* If we swap operand in encoding, we either match
4042 the next one or reverse direction of operands. */
4043 if (t->opcode_modifier.s)
4044 continue;
4045 else if (t->opcode_modifier.d)
4046 goto check_reverse;
4049 case 3:
4050 /* If we swap operand in encoding, we match the next one. */
4051 if (i.swap_operand && t->opcode_modifier.s)
4052 continue;
4053 case 4:
4054 case 5:
4055 overlap1 = operand_type_and (i.types[1], operand_types[1]);
4056 if (!operand_type_match (overlap0, i.types[0])
4057 || !operand_type_match (overlap1, i.types[1])
4058 || (check_register
4059 && !operand_type_register_match (overlap0, i.types[0],
4060 operand_types[0],
4061 overlap1, i.types[1],
4062 operand_types[1])))
4064 /* Check if other direction is valid ... */
4065 if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
4066 continue;
4068 check_reverse:
4069 /* Try reversing direction of operands. */
4070 overlap0 = operand_type_and (i.types[0], operand_types[1]);
4071 overlap1 = operand_type_and (i.types[1], operand_types[0]);
4072 if (!operand_type_match (overlap0, i.types[0])
4073 || !operand_type_match (overlap1, i.types[1])
4074 || (check_register
4075 && !operand_type_register_match (overlap0,
4076 i.types[0],
4077 operand_types[1],
4078 overlap1,
4079 i.types[1],
4080 operand_types[0])))
4082 /* Does not match either direction. */
4083 continue;
4085 /* found_reverse_match holds which of D or FloatDR
4086 we've found. */
4087 if (t->opcode_modifier.d)
4088 found_reverse_match = Opcode_D;
4089 else if (t->opcode_modifier.floatd)
4090 found_reverse_match = Opcode_FloatD;
4091 else
4092 found_reverse_match = 0;
4093 if (t->opcode_modifier.floatr)
4094 found_reverse_match |= Opcode_FloatR;
4096 else
4098 /* Found a forward 2 operand match here. */
4099 switch (t->operands)
4101 case 5:
4102 overlap4 = operand_type_and (i.types[4],
4103 operand_types[4]);
4104 case 4:
4105 overlap3 = operand_type_and (i.types[3],
4106 operand_types[3]);
4107 case 3:
4108 overlap2 = operand_type_and (i.types[2],
4109 operand_types[2]);
4110 break;
4113 switch (t->operands)
4115 case 5:
4116 if (!operand_type_match (overlap4, i.types[4])
4117 || !operand_type_register_match (overlap3,
4118 i.types[3],
4119 operand_types[3],
4120 overlap4,
4121 i.types[4],
4122 operand_types[4]))
4123 continue;
4124 case 4:
4125 if (!operand_type_match (overlap3, i.types[3])
4126 || (check_register
4127 && !operand_type_register_match (overlap2,
4128 i.types[2],
4129 operand_types[2],
4130 overlap3,
4131 i.types[3],
4132 operand_types[3])))
4133 continue;
4134 case 3:
4135 /* Here we make use of the fact that there are no
4136 reverse match 3 operand instructions, and all 3
4137 operand instructions only need to be checked for
4138 register consistency between operands 2 and 3. */
4139 if (!operand_type_match (overlap2, i.types[2])
4140 || (check_register
4141 && !operand_type_register_match (overlap1,
4142 i.types[1],
4143 operand_types[1],
4144 overlap2,
4145 i.types[2],
4146 operand_types[2])))
4147 continue;
4148 break;
4151 /* Found either forward/reverse 2, 3 or 4 operand match here:
4152 slip through to break. */
4154 if (!found_cpu_match)
4156 found_reverse_match = 0;
4157 continue;
4160 /* Check if VEX operands are valid. */
4161 if (VEX_check_operands (t))
4162 continue;
4164 /* We've found a match; break out of loop. */
4165 break;
4168 if (t == current_templates->end)
4170 /* We found no match. */
4171 const char *err_msg;
4172 switch (i.error)
4174 default:
4175 abort ();
4176 case operand_size_mismatch:
4177 err_msg = _("operand size mismatch");
4178 break;
4179 case operand_type_mismatch:
4180 err_msg = _("operand type mismatch");
4181 break;
4182 case register_type_mismatch:
4183 err_msg = _("register type mismatch");
4184 break;
4185 case number_of_operands_mismatch:
4186 err_msg = _("number of operands mismatch");
4187 break;
4188 case invalid_instruction_suffix:
4189 err_msg = _("invalid instruction suffix");
4190 break;
4191 case bad_imm4:
4192 err_msg = _("Imm4 isn't the first operand");
4193 break;
4194 case old_gcc_only:
4195 err_msg = _("only supported with old gcc");
4196 break;
4197 case unsupported_with_intel_mnemonic:
4198 err_msg = _("unsupported with Intel mnemonic");
4199 break;
4200 case unsupported_syntax:
4201 err_msg = _("unsupported syntax");
4202 break;
4203 case unsupported:
4204 err_msg = _("unsupported");
4205 break;
4207 as_bad (_("%s for `%s'"), err_msg,
4208 current_templates->start->name);
4209 return NULL;
4212 if (!quiet_warnings)
4214 if (!intel_syntax
4215 && (i.types[0].bitfield.jumpabsolute
4216 != operand_types[0].bitfield.jumpabsolute))
4218 as_warn (_("indirect %s without `*'"), t->name);
4221 if (t->opcode_modifier.isprefix
4222 && t->opcode_modifier.ignoresize)
4224 /* Warn them that a data or address size prefix doesn't
4225 affect assembly of the next line of code. */
4226 as_warn (_("stand-alone `%s' prefix"), t->name);
4230 /* Copy the template we found. */
4231 i.tm = *t;
4233 if (addr_prefix_disp != -1)
4234 i.tm.operand_types[addr_prefix_disp]
4235 = operand_types[addr_prefix_disp];
4237 if (found_reverse_match)
4239 /* If we found a reverse match we must alter the opcode
4240 direction bit. found_reverse_match holds bits to change
4241 (different for int & float insns). */
4243 i.tm.base_opcode ^= found_reverse_match;
4245 i.tm.operand_types[0] = operand_types[1];
4246 i.tm.operand_types[1] = operand_types[0];
4249 return t;
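/* As a rough illustration of the reverse-match path above: for the
   integer templates Opcode_D is the 0x02 direction bit, so an operand
   order such as `add (%ecx),%eax', which only matches the `add'
   template with its operands swapped, gets its base opcode flipped
   from 0x01 to 0x03 here.  */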
4252 static int
4253 check_string (void)
4255 int mem_op = operand_type_check (i.types[0], anymem) ? 0 : 1;
4256 if (i.tm.operand_types[mem_op].bitfield.esseg)
4258 if (i.seg[0] != NULL && i.seg[0] != &es)
4260 as_bad (_("`%s' operand %d must use `%ses' segment"),
4261 i.tm.name,
4262 mem_op + 1,
4263 register_prefix);
4264 return 0;
4266 /* There's only ever one segment override allowed per instruction.
4267 This instruction possibly has a legal segment override on the
4268 second operand, so copy the segment to where non-string
4269 instructions store it, allowing common code. */
4270 i.seg[0] = i.seg[1];
4272 else if (i.tm.operand_types[mem_op + 1].bitfield.esseg)
4274 if (i.seg[1] != NULL && i.seg[1] != &es)
4276 as_bad (_("`%s' operand %d must use `%ses' segment"),
4277 i.tm.name,
4278 mem_op + 2,
4279 register_prefix);
4280 return 0;
4283 return 1;
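/* For example, `movsl (%esi),%fs:(%edi)' is rejected by the check
   above because operand 2 of `movs' always uses %es, whereas an
   override on the source operand, as in `movsl %fs:(%esi),(%edi)',
   is legal and is the override left in i.seg[0] for the common
   prefix handling later on.  */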
4286 static int
4287 process_suffix (void)
4289 /* If matched instruction specifies an explicit instruction mnemonic
4290 suffix, use it. */
4291 if (i.tm.opcode_modifier.size16)
4292 i.suffix = WORD_MNEM_SUFFIX;
4293 else if (i.tm.opcode_modifier.size32)
4294 i.suffix = LONG_MNEM_SUFFIX;
4295 else if (i.tm.opcode_modifier.size64)
4296 i.suffix = QWORD_MNEM_SUFFIX;
4297 else if (i.reg_operands)
4299 /* If there's no instruction mnemonic suffix we try to invent one
4300 based on register operands. */
4301 if (!i.suffix)
4303 /* We take i.suffix from the last register operand specified.
4304 The destination register type is more significant than the source
4305 register type. crc32 in SSE4.2 prefers source register
4306 type. */
4307 if (i.tm.base_opcode == 0xf20f38f1)
4309 if (i.types[0].bitfield.reg16)
4310 i.suffix = WORD_MNEM_SUFFIX;
4311 else if (i.types[0].bitfield.reg32)
4312 i.suffix = LONG_MNEM_SUFFIX;
4313 else if (i.types[0].bitfield.reg64)
4314 i.suffix = QWORD_MNEM_SUFFIX;
4316 else if (i.tm.base_opcode == 0xf20f38f0)
4318 if (i.types[0].bitfield.reg8)
4319 i.suffix = BYTE_MNEM_SUFFIX;
4322 if (!i.suffix)
4324 int op;
4326 if (i.tm.base_opcode == 0xf20f38f1
4327 || i.tm.base_opcode == 0xf20f38f0)
4329 /* We have to know the operand size for crc32. */
4330 as_bad (_("ambiguous memory operand size for `%s'"),
4331 i.tm.name);
4332 return 0;
4335 for (op = i.operands; --op >= 0;)
4336 if (!i.tm.operand_types[op].bitfield.inoutportreg)
4338 if (i.types[op].bitfield.reg8)
4340 i.suffix = BYTE_MNEM_SUFFIX;
4341 break;
4343 else if (i.types[op].bitfield.reg16)
4345 i.suffix = WORD_MNEM_SUFFIX;
4346 break;
4348 else if (i.types[op].bitfield.reg32)
4350 i.suffix = LONG_MNEM_SUFFIX;
4351 break;
4353 else if (i.types[op].bitfield.reg64)
4355 i.suffix = QWORD_MNEM_SUFFIX;
4356 break;
4361 else if (i.suffix == BYTE_MNEM_SUFFIX)
4363 if (intel_syntax
4364 && i.tm.opcode_modifier.ignoresize
4365 && i.tm.opcode_modifier.no_bsuf)
4366 i.suffix = 0;
4367 else if (!check_byte_reg ())
4368 return 0;
4370 else if (i.suffix == LONG_MNEM_SUFFIX)
4372 if (intel_syntax
4373 && i.tm.opcode_modifier.ignoresize
4374 && i.tm.opcode_modifier.no_lsuf)
4375 i.suffix = 0;
4376 else if (!check_long_reg ())
4377 return 0;
4379 else if (i.suffix == QWORD_MNEM_SUFFIX)
4381 if (intel_syntax
4382 && i.tm.opcode_modifier.ignoresize
4383 && i.tm.opcode_modifier.no_qsuf)
4384 i.suffix = 0;
4385 else if (!check_qword_reg ())
4386 return 0;
4388 else if (i.suffix == WORD_MNEM_SUFFIX)
4390 if (intel_syntax
4391 && i.tm.opcode_modifier.ignoresize
4392 && i.tm.opcode_modifier.no_wsuf)
4393 i.suffix = 0;
4394 else if (!check_word_reg ())
4395 return 0;
4397 else if (i.suffix == XMMWORD_MNEM_SUFFIX
4398 || i.suffix == YMMWORD_MNEM_SUFFIX)
4400 /* Skip if the instruction has x/y suffix. match_template
4401 should check if it is a valid suffix. */
4403 else if (intel_syntax && i.tm.opcode_modifier.ignoresize)
4404 /* Do nothing if the instruction is going to ignore the prefix. */
4406 else
4407 abort ();
4409 else if (i.tm.opcode_modifier.defaultsize
4410 && !i.suffix
4411 /* exclude fldenv/frstor/fsave/fstenv */
4412 && i.tm.opcode_modifier.no_ssuf)
4414 i.suffix = stackop_size;
4416 else if (intel_syntax
4417 && !i.suffix
4418 && (i.tm.operand_types[0].bitfield.jumpabsolute
4419 || i.tm.opcode_modifier.jumpbyte
4420 || i.tm.opcode_modifier.jumpintersegment
4421 || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
4422 && i.tm.extension_opcode <= 3)))
4424 switch (flag_code)
4426 case CODE_64BIT:
4427 if (!i.tm.opcode_modifier.no_qsuf)
4429 i.suffix = QWORD_MNEM_SUFFIX;
4430 break;
4432 case CODE_32BIT:
4433 if (!i.tm.opcode_modifier.no_lsuf)
4434 i.suffix = LONG_MNEM_SUFFIX;
4435 break;
4436 case CODE_16BIT:
4437 if (!i.tm.opcode_modifier.no_wsuf)
4438 i.suffix = WORD_MNEM_SUFFIX;
4439 break;
4443 if (!i.suffix)
4445 if (!intel_syntax)
4447 if (i.tm.opcode_modifier.w)
4449 as_bad (_("no instruction mnemonic suffix given and "
4450 "no register operands; can't size instruction"));
4451 return 0;
4454 else
4456 unsigned int suffixes;
4458 suffixes = !i.tm.opcode_modifier.no_bsuf;
4459 if (!i.tm.opcode_modifier.no_wsuf)
4460 suffixes |= 1 << 1;
4461 if (!i.tm.opcode_modifier.no_lsuf)
4462 suffixes |= 1 << 2;
4463 if (!i.tm.opcode_modifier.no_ldsuf)
4464 suffixes |= 1 << 3;
4465 if (!i.tm.opcode_modifier.no_ssuf)
4466 suffixes |= 1 << 4;
4467 if (!i.tm.opcode_modifier.no_qsuf)
4468 suffixes |= 1 << 5;
4470 /* There is more than one possible suffix. */
4471 if (i.tm.opcode_modifier.w
4472 || ((suffixes & (suffixes - 1))
4473 && !i.tm.opcode_modifier.defaultsize
4474 && !i.tm.opcode_modifier.ignoresize))
4476 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
4477 return 0;
4482 /* Change the opcode based on the operand size given by i.suffix;
4483 We don't need to change things for byte insns. */
4485 if (i.suffix
4486 && i.suffix != BYTE_MNEM_SUFFIX
4487 && i.suffix != XMMWORD_MNEM_SUFFIX
4488 && i.suffix != YMMWORD_MNEM_SUFFIX)
4490 /* It's not a byte, select word/dword operation. */
4491 if (i.tm.opcode_modifier.w)
4493 if (i.tm.opcode_modifier.shortform)
4494 i.tm.base_opcode |= 8;
4495 else
4496 i.tm.base_opcode |= 1;
4499 /* Now select between word & dword operations via the operand
4500 size prefix, except for instructions that will ignore this
4501 prefix anyway. */
4502 if (i.tm.opcode_modifier.addrprefixop0)
4504 /* The address size override prefix changes the size of the
4505 first operand. */
4506 if ((flag_code == CODE_32BIT
4507 && i.op->regs[0].reg_type.bitfield.reg16)
4508 || (flag_code != CODE_32BIT
4509 && i.op->regs[0].reg_type.bitfield.reg32))
4510 if (!add_prefix (ADDR_PREFIX_OPCODE))
4511 return 0;
4513 else if (i.suffix != QWORD_MNEM_SUFFIX
4514 && i.suffix != LONG_DOUBLE_MNEM_SUFFIX
4515 && !i.tm.opcode_modifier.ignoresize
4516 && !i.tm.opcode_modifier.floatmf
4517 && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
4518 || (flag_code == CODE_64BIT
4519 && i.tm.opcode_modifier.jumpbyte)))
4521 unsigned int prefix = DATA_PREFIX_OPCODE;
4523 if (i.tm.opcode_modifier.jumpbyte) /* jcxz, loop */
4524 prefix = ADDR_PREFIX_OPCODE;
4526 if (!add_prefix (prefix))
4527 return 0;
4530 /* Set mode64 for an operand. */
4531 if (i.suffix == QWORD_MNEM_SUFFIX
4532 && flag_code == CODE_64BIT
4533 && !i.tm.opcode_modifier.norex64)
4535 /* Special case for xchg %rax,%rax. It is a NOP and doesn't
4536 need rex64. cmpxchg8b is also a special case. */
4537 if (! (i.operands == 2
4538 && i.tm.base_opcode == 0x90
4539 && i.tm.extension_opcode == None
4540 && operand_type_equal (&i.types [0], &acc64)
4541 && operand_type_equal (&i.types [1], &acc64))
4542 && ! (i.operands == 1
4543 && i.tm.base_opcode == 0xfc7
4544 && i.tm.extension_opcode == 1
4545 && !operand_type_check (i.types [0], reg)
4546 && operand_type_check (i.types [0], anymem)))
4547 i.rex |= REX_W;
4550 /* Size floating point instruction. */
4551 if (i.suffix == LONG_MNEM_SUFFIX)
4552 if (i.tm.opcode_modifier.floatmf)
4553 i.tm.base_opcode ^= 4;
4556 return 1;
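/* Roughly, this is where `mov %ax,%bx' assembled as 32-bit code picks
   up its implied `w' suffix from the register operands and gains the
   0x66 operand-size prefix, while a suffix-less `mov $1,(%ebx)' with
   no register operand to infer from is rejected with the "can't size
   instruction" error above.  */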
4559 static int
4560 check_byte_reg (void)
4562 int op;
4564 for (op = i.operands; --op >= 0;)
4566 /* If this is an eight bit register, it's OK. If it's the 16 or
4567 32 bit version of an eight bit register, we will just use the
4568 low portion, and that's OK too. */
4569 if (i.types[op].bitfield.reg8)
4570 continue;
4572 /* crc32 doesn't generate this warning. */
4573 if (i.tm.base_opcode == 0xf20f38f0)
4574 continue;
4576 if ((i.types[op].bitfield.reg16
4577 || i.types[op].bitfield.reg32
4578 || i.types[op].bitfield.reg64)
4579 && i.op[op].regs->reg_num < 4)
4581 /* Prohibit these changes in the 64bit mode, since the
4582 lowering is more complicated. */
4583 if (flag_code == CODE_64BIT
4584 && !i.tm.operand_types[op].bitfield.inoutportreg)
4586 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4587 register_prefix, i.op[op].regs->reg_name,
4588 i.suffix);
4589 return 0;
4591 #if REGISTER_WARNINGS
4592 if (!quiet_warnings
4593 && !i.tm.operand_types[op].bitfield.inoutportreg)
4594 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4595 register_prefix,
4596 (i.op[op].regs + (i.types[op].bitfield.reg16
4597 ? REGNAM_AL - REGNAM_AX
4598 : REGNAM_AL - REGNAM_EAX))->reg_name,
4599 register_prefix,
4600 i.op[op].regs->reg_name,
4601 i.suffix);
4602 #endif
4603 continue;
4605 /* Any other register is bad. */
4606 if (i.types[op].bitfield.reg16
4607 || i.types[op].bitfield.reg32
4608 || i.types[op].bitfield.reg64
4609 || i.types[op].bitfield.regmmx
4610 || i.types[op].bitfield.regxmm
4611 || i.types[op].bitfield.regymm
4612 || i.types[op].bitfield.sreg2
4613 || i.types[op].bitfield.sreg3
4614 || i.types[op].bitfield.control
4615 || i.types[op].bitfield.debug
4616 || i.types[op].bitfield.test
4617 || i.types[op].bitfield.floatreg
4618 || i.types[op].bitfield.floatacc)
4620 as_bad (_("`%s%s' not allowed with `%s%c'"),
4621 register_prefix,
4622 i.op[op].regs->reg_name,
4623 i.tm.name,
4624 i.suffix);
4625 return 0;
4628 return 1;
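/* For instance, `movb %ax,(%ebx)' passes through here: outside 64-bit
   mode it is accepted with a warning that `%al' is used instead of
   `%ax' because of the `b' suffix, while in 64-bit mode the same mix
   of register and suffix is an error.  */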
4631 static int
4632 check_long_reg (void)
4634 int op;
4636 for (op = i.operands; --op >= 0;)
4637 /* Reject eight bit registers, except where the template requires
4638 them. (eg. movzb) */
4639 if (i.types[op].bitfield.reg8
4640 && (i.tm.operand_types[op].bitfield.reg16
4641 || i.tm.operand_types[op].bitfield.reg32
4642 || i.tm.operand_types[op].bitfield.acc))
4644 as_bad (_("`%s%s' not allowed with `%s%c'"),
4645 register_prefix,
4646 i.op[op].regs->reg_name,
4647 i.tm.name,
4648 i.suffix);
4649 return 0;
4651 /* Warn if the e prefix on a general reg is missing. */
4652 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4653 && i.types[op].bitfield.reg16
4654 && (i.tm.operand_types[op].bitfield.reg32
4655 || i.tm.operand_types[op].bitfield.acc))
4657 /* Prohibit these changes in the 64bit mode, since the
4658 lowering is more complicated. */
4659 if (flag_code == CODE_64BIT)
4661 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4662 register_prefix, i.op[op].regs->reg_name,
4663 i.suffix);
4664 return 0;
4666 #if REGISTER_WARNINGS
4667 else
4668 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4669 register_prefix,
4670 (i.op[op].regs + REGNAM_EAX - REGNAM_AX)->reg_name,
4671 register_prefix,
4672 i.op[op].regs->reg_name,
4673 i.suffix);
4674 #endif
4676 /* Warn if the r prefix on a general reg is present. */
4677 else if (i.types[op].bitfield.reg64
4678 && (i.tm.operand_types[op].bitfield.reg32
4679 || i.tm.operand_types[op].bitfield.acc))
4681 if (intel_syntax
4682 && i.tm.opcode_modifier.toqword
4683 && !i.types[0].bitfield.regxmm)
4685 /* Convert to QWORD. We want REX byte. */
4686 i.suffix = QWORD_MNEM_SUFFIX;
4688 else
4690 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4691 register_prefix, i.op[op].regs->reg_name,
4692 i.suffix);
4693 return 0;
4696 return 1;
4699 static int
4700 check_qword_reg (void)
4702 int op;
4704 for (op = i.operands; --op >= 0; )
4705 /* Reject eight bit registers, except where the template requires
4706 them. (eg. movzb) */
4707 if (i.types[op].bitfield.reg8
4708 && (i.tm.operand_types[op].bitfield.reg16
4709 || i.tm.operand_types[op].bitfield.reg32
4710 || i.tm.operand_types[op].bitfield.acc))
4712 as_bad (_("`%s%s' not allowed with `%s%c'"),
4713 register_prefix,
4714 i.op[op].regs->reg_name,
4715 i.tm.name,
4716 i.suffix);
4717 return 0;
4719 /* Warn if the r prefix on a general reg is missing. */
4720 else if ((i.types[op].bitfield.reg16
4721 || i.types[op].bitfield.reg32)
4722 && (i.tm.operand_types[op].bitfield.reg32
4723 || i.tm.operand_types[op].bitfield.acc))
4725 /* Convert to DWORD only for Intel syntax `todword'
4726 templates; otherwise a 16/32-bit register with the
`q' suffix is an error. */
4727 if (intel_syntax
4728 && i.tm.opcode_modifier.todword
4729 && !i.types[0].bitfield.regxmm)
4731 /* Convert to DWORD. We don't want REX byte. */
4732 i.suffix = LONG_MNEM_SUFFIX;
4734 else
4736 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4737 register_prefix, i.op[op].regs->reg_name,
4738 i.suffix);
4739 return 0;
4742 return 1;
4745 static int
4746 check_word_reg (void)
4748 int op;
4749 for (op = i.operands; --op >= 0;)
4750 /* Reject eight bit registers, except where the template requires
4751 them. (eg. movzb) */
4752 if (i.types[op].bitfield.reg8
4753 && (i.tm.operand_types[op].bitfield.reg16
4754 || i.tm.operand_types[op].bitfield.reg32
4755 || i.tm.operand_types[op].bitfield.acc))
4757 as_bad (_("`%s%s' not allowed with `%s%c'"),
4758 register_prefix,
4759 i.op[op].regs->reg_name,
4760 i.tm.name,
4761 i.suffix);
4762 return 0;
4764 /* Warn if the e prefix on a general reg is present. */
4765 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4766 && i.types[op].bitfield.reg32
4767 && (i.tm.operand_types[op].bitfield.reg16
4768 || i.tm.operand_types[op].bitfield.acc))
4770 /* Prohibit these changes in the 64bit mode, since the
4771 lowering is more complicated. */
4772 if (flag_code == CODE_64BIT)
4774 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4775 register_prefix, i.op[op].regs->reg_name,
4776 i.suffix);
4777 return 0;
4779 else
4780 #if REGISTER_WARNINGS
4781 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4782 register_prefix,
4783 (i.op[op].regs + REGNAM_AX - REGNAM_EAX)->reg_name,
4784 register_prefix,
4785 i.op[op].regs->reg_name,
4786 i.suffix);
4787 #endif
4789 return 1;
4792 static int
4793 update_imm (unsigned int j)
4795 i386_operand_type overlap = i.types[j];
4796 if ((overlap.bitfield.imm8
4797 || overlap.bitfield.imm8s
4798 || overlap.bitfield.imm16
4799 || overlap.bitfield.imm32
4800 || overlap.bitfield.imm32s
4801 || overlap.bitfield.imm64)
4802 && !operand_type_equal (&overlap, &imm8)
4803 && !operand_type_equal (&overlap, &imm8s)
4804 && !operand_type_equal (&overlap, &imm16)
4805 && !operand_type_equal (&overlap, &imm32)
4806 && !operand_type_equal (&overlap, &imm32s)
4807 && !operand_type_equal (&overlap, &imm64))
4809 if (i.suffix)
4811 i386_operand_type temp;
4813 operand_type_set (&temp, 0);
4814 if (i.suffix == BYTE_MNEM_SUFFIX)
4816 temp.bitfield.imm8 = overlap.bitfield.imm8;
4817 temp.bitfield.imm8s = overlap.bitfield.imm8s;
4819 else if (i.suffix == WORD_MNEM_SUFFIX)
4820 temp.bitfield.imm16 = overlap.bitfield.imm16;
4821 else if (i.suffix == QWORD_MNEM_SUFFIX)
4823 temp.bitfield.imm64 = overlap.bitfield.imm64;
4824 temp.bitfield.imm32s = overlap.bitfield.imm32s;
4826 else
4827 temp.bitfield.imm32 = overlap.bitfield.imm32;
4828 overlap = temp;
4830 else if (operand_type_equal (&overlap, &imm16_32_32s)
4831 || operand_type_equal (&overlap, &imm16_32)
4832 || operand_type_equal (&overlap, &imm16_32s))
4834 if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
4835 overlap = imm16;
4836 else
4837 overlap = imm32s;
4839 if (!operand_type_equal (&overlap, &imm8)
4840 && !operand_type_equal (&overlap, &imm8s)
4841 && !operand_type_equal (&overlap, &imm16)
4842 && !operand_type_equal (&overlap, &imm32)
4843 && !operand_type_equal (&overlap, &imm32s)
4844 && !operand_type_equal (&overlap, &imm64))
4846 as_bad (_("no instruction mnemonic suffix given; "
4847 "can't determine immediate size"));
4848 return 0;
4851 i.types[j] = overlap;
4853 return 1;
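/* An illustrative case: for `pushw $0x1234' in 32-bit code the `w'
   suffix narrows the template's Imm16|Imm32 overlap to Imm16 here, so
   a two byte immediate (behind the 0x66 prefix) is emitted rather
   than a four byte one.  */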
4856 static int
4857 finalize_imm (void)
4859 unsigned int j, n;
4861 /* Update the first 2 immediate operands. */
4862 n = i.operands > 2 ? 2 : i.operands;
4863 if (n)
4865 for (j = 0; j < n; j++)
4866 if (update_imm (j) == 0)
4867 return 0;
4869 /* The 3rd operand can't be an immediate operand. */
4870 gas_assert (operand_type_check (i.types[2], imm) == 0);
4873 return 1;
4876 static int
4877 bad_implicit_operand (int xmm)
4879 const char *ireg = xmm ? "xmm0" : "ymm0";
4881 if (intel_syntax)
4882 as_bad (_("the last operand of `%s' must be `%s%s'"),
4883 i.tm.name, register_prefix, ireg);
4884 else
4885 as_bad (_("the first operand of `%s' must be `%s%s'"),
4886 i.tm.name, register_prefix, ireg);
4887 return 0;
4890 static int
4891 process_operands (void)
4893 /* Default segment register this instruction will use for memory
4894 accesses. 0 means unknown. This is only for optimizing out
4895 unnecessary segment overrides. */
4896 const seg_entry *default_seg = 0;
4898 if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
4900 unsigned int dupl = i.operands;
4901 unsigned int dest = dupl - 1;
4902 unsigned int j;
4904 /* The destination must be an xmm register. */
4905 gas_assert (i.reg_operands
4906 && MAX_OPERANDS > dupl
4907 && operand_type_equal (&i.types[dest], &regxmm));
4909 if (i.tm.opcode_modifier.firstxmm0)
4911 /* The first operand is implicit and must be xmm0. */
4912 gas_assert (operand_type_equal (&i.types[0], &regxmm));
4913 if (i.op[0].regs->reg_num != 0)
4914 return bad_implicit_operand (1);
4916 if (i.tm.opcode_modifier.vexsources == VEX3SOURCES)
4918 /* Keep xmm0 for instructions with VEX prefix and 3
4919 sources. */
4920 goto duplicate;
4922 else
4924 /* We remove the first xmm0 and keep the number of
4925 operands unchanged, which in fact duplicates the
4926 destination. */
4927 for (j = 1; j < i.operands; j++)
4929 i.op[j - 1] = i.op[j];
4930 i.types[j - 1] = i.types[j];
4931 i.tm.operand_types[j - 1] = i.tm.operand_types[j];
4935 else if (i.tm.opcode_modifier.implicit1stxmm0)
4937 gas_assert ((MAX_OPERANDS - 1) > dupl
4938 && (i.tm.opcode_modifier.vexsources
4939 == VEX3SOURCES));
4941 /* Add the implicit xmm0 for instructions with VEX prefix
4942 and 3 sources. */
4943 for (j = i.operands; j > 0; j--)
4945 i.op[j] = i.op[j - 1];
4946 i.types[j] = i.types[j - 1];
4947 i.tm.operand_types[j] = i.tm.operand_types[j - 1];
4949 i.op[0].regs
4950 = (const reg_entry *) hash_find (reg_hash, "xmm0");
4951 i.types[0] = regxmm;
4952 i.tm.operand_types[0] = regxmm;
4954 i.operands += 2;
4955 i.reg_operands += 2;
4956 i.tm.operands += 2;
4958 dupl++;
4959 dest++;
4960 i.op[dupl] = i.op[dest];
4961 i.types[dupl] = i.types[dest];
4962 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
4964 else
4966 duplicate:
4967 i.operands++;
4968 i.reg_operands++;
4969 i.tm.operands++;
4971 i.op[dupl] = i.op[dest];
4972 i.types[dupl] = i.types[dest];
4973 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
4976 if (i.tm.opcode_modifier.immext)
4977 process_immext ();
4979 else if (i.tm.opcode_modifier.firstxmm0)
4981 unsigned int j;
4983 /* The first operand is implicit and must be xmm0/ymm0. */
4984 gas_assert (i.reg_operands
4985 && (operand_type_equal (&i.types[0], &regxmm)
4986 || operand_type_equal (&i.types[0], &regymm)));
4987 if (i.op[0].regs->reg_num != 0)
4988 return bad_implicit_operand (i.types[0].bitfield.regxmm);
4990 for (j = 1; j < i.operands; j++)
4992 i.op[j - 1] = i.op[j];
4993 i.types[j - 1] = i.types[j];
4995 /* We need to adjust fields in i.tm since they are used by
4996 build_modrm_byte. */
4997 i.tm.operand_types [j - 1] = i.tm.operand_types [j];
5000 i.operands--;
5001 i.reg_operands--;
5002 i.tm.operands--;
5004 else if (i.tm.opcode_modifier.regkludge)
5006 /* The imul $imm, %reg instruction is converted into
5007 imul $imm, %reg, %reg, and the clr %reg instruction
5008 is converted into xor %reg, %reg. */
5010 unsigned int first_reg_op;
5012 if (operand_type_check (i.types[0], reg))
5013 first_reg_op = 0;
5014 else
5015 first_reg_op = 1;
5016 /* Pretend we saw the extra register operand. */
5017 gas_assert (i.reg_operands == 1
5018 && i.op[first_reg_op + 1].regs == 0);
5019 i.op[first_reg_op + 1].regs = i.op[first_reg_op].regs;
5020 i.types[first_reg_op + 1] = i.types[first_reg_op];
5021 i.operands++;
5022 i.reg_operands++;
5025 if (i.tm.opcode_modifier.shortform)
5027 if (i.types[0].bitfield.sreg2
5028 || i.types[0].bitfield.sreg3)
5030 if (i.tm.base_opcode == POP_SEG_SHORT
5031 && i.op[0].regs->reg_num == 1)
5033 as_bad (_("you can't `pop %scs'"), register_prefix);
5034 return 0;
5036 i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
5037 if ((i.op[0].regs->reg_flags & RegRex) != 0)
5038 i.rex |= REX_B;
5040 else
5042 /* The register or float register operand is in operand
5043 0 or 1. */
5044 unsigned int op;
5046 if (i.types[0].bitfield.floatreg
5047 || operand_type_check (i.types[0], reg))
5048 op = 0;
5049 else
5050 op = 1;
5051 /* Register goes in low 3 bits of opcode. */
5052 i.tm.base_opcode |= i.op[op].regs->reg_num;
5053 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5054 i.rex |= REX_B;
5055 if (!quiet_warnings && i.tm.opcode_modifier.ugh)
5057 /* Warn about some common errors, but press on regardless.
5058 The first case can be generated by gcc (<= 2.8.1). */
5059 if (i.operands == 2)
5061 /* Reversed arguments on faddp, fsubp, etc. */
5062 as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
5063 register_prefix, i.op[!intel_syntax].regs->reg_name,
5064 register_prefix, i.op[intel_syntax].regs->reg_name);
5066 else
5068 /* Extraneous `l' suffix on fp insn. */
5069 as_warn (_("translating to `%s %s%s'"), i.tm.name,
5070 register_prefix, i.op[0].regs->reg_name);
5075 else if (i.tm.opcode_modifier.modrm)
5077 /* The opcode is completed (modulo i.tm.extension_opcode which
5078 must be put into the modrm byte). Now, we make the modrm and
5079 index base bytes based on all the info we've collected. */
5081 default_seg = build_modrm_byte ();
5083 else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
5085 default_seg = &ds;
5087 else if (i.tm.opcode_modifier.isstring)
5089 /* For the string instructions that allow a segment override
5090 on one of their operands, the default segment is ds. */
5091 default_seg = &ds;
5094 if (i.tm.base_opcode == 0x8d /* lea */
5095 && i.seg[0]
5096 && !quiet_warnings)
5097 as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);
5099 /* If a segment was explicitly specified, and the specified segment
5100 is not the default, use an opcode prefix to select it. If we
5101 never figured out what the default segment is, then default_seg
5102 will be zero at this point, and the specified segment prefix will
5103 always be used. */
5104 if ((i.seg[0]) && (i.seg[0] != default_seg))
5106 if (!add_prefix (i.seg[0]->seg_prefix))
5107 return 0;
5109 return 1;
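/* As an example of the override handling just above: for
   `movl %fs:(%eax),%ebx' the 0x64 segment prefix is emitted, but for
   `movl %ds:(%eax),%ebx' the override matches the default segment
   computed by build_modrm_byte and no prefix byte is added.  */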
5112 static const seg_entry *
5113 build_modrm_byte (void)
5115 const seg_entry *default_seg = 0;
5116 unsigned int source, dest;
5117 int vex_3_sources;
5119 /* The first operand of instructions with VEX prefix and 3 sources
5120 must be VEX_Imm4. */
5121 vex_3_sources = i.tm.opcode_modifier.vexsources == VEX3SOURCES;
5122 if (vex_3_sources)
5124 unsigned int nds, reg_slot;
5125 expressionS *exp;
5127 if (i.tm.opcode_modifier.veximmext
5128 && i.tm.opcode_modifier.immext)
5130 dest = i.operands - 2;
5131 gas_assert (dest == 3);
5133 else
5134 dest = i.operands - 1;
5135 nds = dest - 1;
5137 /* There are 2 kinds of instructions:
5138 1. 5 operands: 4 register operands or 3 register operands
5139 plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
5140 VexW0 or VexW1. The destination must be either XMM or YMM
5141 register.
5142 2. 4 operands: 4 register operands or 3 register operands
5143 plus 1 memory operand, VexXDS, and VexImmExt */
5144 gas_assert ((i.reg_operands == 4
5145 || (i.reg_operands == 3 && i.mem_operands == 1))
5146 && i.tm.opcode_modifier.vexvvvv == VEXXDS
5147 && (i.tm.opcode_modifier.veximmext
5148 || (i.imm_operands == 1
5149 && i.types[0].bitfield.vec_imm4
5150 && (i.tm.opcode_modifier.vexw == VEXW0
5151 || i.tm.opcode_modifier.vexw == VEXW1)
5152 && (operand_type_equal (&i.tm.operand_types[dest], &regxmm)
5153 || operand_type_equal (&i.tm.operand_types[dest], &regymm)))));
5155 if (i.imm_operands == 0)
5157 /* When there is no immediate operand, generate an 8bit
5158 immediate operand to encode the first operand. */
5159 exp = &im_expressions[i.imm_operands++];
5160 i.op[i.operands].imms = exp;
5161 i.types[i.operands] = imm8;
5162 i.operands++;
5163 /* If VexW1 is set, the first operand is the source and
5164 the second operand is encoded in the immediate operand. */
5165 if (i.tm.opcode_modifier.vexw == VEXW1)
5167 source = 0;
5168 reg_slot = 1;
5170 else
5172 source = 1;
5173 reg_slot = 0;
5176 /* FMA swaps REG and NDS. */
5177 if (i.tm.cpu_flags.bitfield.cpufma)
5179 unsigned int tmp;
5180 tmp = reg_slot;
5181 reg_slot = nds;
5182 nds = tmp;
5185 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5186 &regxmm)
5187 || operand_type_equal (&i.tm.operand_types[reg_slot],
5188 &regymm));
5189 exp->X_op = O_constant;
5190 exp->X_add_number
5191 = ((i.op[reg_slot].regs->reg_num
5192 + ((i.op[reg_slot].regs->reg_flags & RegRex) ? 8 : 0))
5193 << 4);
5195 else
5197 unsigned int imm_slot;
5199 if (i.tm.opcode_modifier.vexw == VEXW0)
5201 /* If VexW0 is set, the third operand is the source and
5202 the second operand is encoded in the immediate
5203 operand. */
5204 source = 2;
5205 reg_slot = 1;
5207 else
5209 /* VexW1 is set, the second operand is the source and
5210 the third operand is encoded in the immediate
5211 operand. */
5212 source = 1;
5213 reg_slot = 2;
5216 if (i.tm.opcode_modifier.immext)
5218 /* When ImmExt is set, the immediate byte is the last
5219 operand. */
5220 imm_slot = i.operands - 1;
5221 source--;
5222 reg_slot--;
5224 else
5226 imm_slot = 0;
5228 /* Turn on Imm8 so that output_imm will generate it. */
5229 i.types[imm_slot].bitfield.imm8 = 1;
5232 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5233 &regxmm)
5234 || operand_type_equal (&i.tm.operand_types[reg_slot],
5235 &regymm));
5236 i.op[imm_slot].imms->X_add_number
5237 |= ((i.op[reg_slot].regs->reg_num
5238 + ((i.op[reg_slot].regs->reg_flags & RegRex) ? 8 : 0))
5239 << 4);
5242 gas_assert (operand_type_equal (&i.tm.operand_types[nds], &regxmm)
5243 || operand_type_equal (&i.tm.operand_types[nds],
5244 &regymm));
5245 i.vex.register_specifier = i.op[nds].regs;
5247 else
5248 source = dest = 0;
5250 /* i.reg_operands MUST be the number of real register operands;
5251 implicit registers do not count. If there are 3 register
5252 operands, it must be an instruction with VexNDS. For an
5253 instruction with VexNDD, the destination register is encoded
5254 in the VEX prefix. If there are 4 register operands, it must be
5255 an instruction with a VEX prefix and 3 sources. */
5256 if (i.mem_operands == 0
5257 && ((i.reg_operands == 2
5258 && i.tm.opcode_modifier.vexvvvv <= VEXXDS)
5259 || (i.reg_operands == 3
5260 && i.tm.opcode_modifier.vexvvvv == VEXXDS)
5261 || (i.reg_operands == 4 && vex_3_sources)))
5263 switch (i.operands)
5265 case 2:
5266 source = 0;
5267 break;
5268 case 3:
5269 /* When there are 3 operands, one of them may be immediate,
5270 which may be the first or the last operand. Otherwise,
5271 the first operand must be shift count register (cl) or it
5272 is an instruction with VexNDS. */
5273 gas_assert (i.imm_operands == 1
5274 || (i.imm_operands == 0
5275 && (i.tm.opcode_modifier.vexvvvv == VEXXDS
5276 || i.types[0].bitfield.shiftcount)));
5277 if (operand_type_check (i.types[0], imm)
5278 || i.types[0].bitfield.shiftcount)
5279 source = 1;
5280 else
5281 source = 0;
5282 break;
5283 case 4:
5284 /* When there are 4 operands, the first two must be 8bit
5285 immediate operands. The source operand will be the 3rd
5286 one.
5288 For instructions with VexNDS, if the first operand is
5289 an imm8, the source operand is the 2nd one. If the last
5290 operand is imm8, the source operand is the first one. */
5291 gas_assert ((i.imm_operands == 2
5292 && i.types[0].bitfield.imm8
5293 && i.types[1].bitfield.imm8)
5294 || (i.tm.opcode_modifier.vexvvvv == VEXXDS
5295 && i.imm_operands == 1
5296 && (i.types[0].bitfield.imm8
5297 || i.types[i.operands - 1].bitfield.imm8)));
5298 if (i.imm_operands == 2)
5299 source = 2;
5300 else
5302 if (i.types[0].bitfield.imm8)
5303 source = 1;
5304 else
5305 source = 0;
5307 break;
5308 case 5:
5309 break;
5310 default:
5311 abort ();
5314 if (!vex_3_sources)
5316 dest = source + 1;
5318 if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5320 /* For instructions with VexNDS, the register-only
5321 source operand must be 32/64bit integer, XMM or
5322 YMM register. It is encoded in VEX prefix. We
5323 need to clear RegMem bit before calling
5324 operand_type_equal. */
5326 i386_operand_type op;
5327 unsigned int vvvv;
5329 /* Check register-only source operand when two source
5330 operands are swapped. */
5331 if (!i.tm.operand_types[source].bitfield.baseindex
5332 && i.tm.operand_types[dest].bitfield.baseindex)
5334 vvvv = source;
5335 source = dest;
5337 else
5338 vvvv = dest;
5340 op = i.tm.operand_types[vvvv];
5341 op.bitfield.regmem = 0;
5342 if ((dest + 1) >= i.operands
5343 || (op.bitfield.reg32 != 1
5344 && op.bitfield.reg64 != 1
5345 && !operand_type_equal (&op, &regxmm)
5346 && !operand_type_equal (&op, &regymm)))
5347 abort ();
5348 i.vex.register_specifier = i.op[vvvv].regs;
5349 dest++;
5353 i.rm.mode = 3;
5354 /* One of the register operands will be encoded in the i.tm.reg
5355 field, the other in the combined i.tm.mode and i.tm.regmem
5356 fields. If no form of this instruction supports a memory
5357 destination operand, then we assume the source operand may
5358 sometimes be a memory operand and so we need to store the
5359 destination in the i.rm.reg field. */
5360 if (!i.tm.operand_types[dest].bitfield.regmem
5361 && operand_type_check (i.tm.operand_types[dest], anymem) == 0)
5363 i.rm.reg = i.op[dest].regs->reg_num;
5364 i.rm.regmem = i.op[source].regs->reg_num;
5365 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5366 i.rex |= REX_R;
5367 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5368 i.rex |= REX_B;
5370 else
5372 i.rm.reg = i.op[source].regs->reg_num;
5373 i.rm.regmem = i.op[dest].regs->reg_num;
5374 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5375 i.rex |= REX_B;
5376 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5377 i.rex |= REX_R;
5379 if (flag_code != CODE_64BIT && (i.rex & (REX_R | REX_B)))
5381 if (!i.types[0].bitfield.control
5382 && !i.types[1].bitfield.control)
5383 abort ();
5384 i.rex &= ~(REX_R | REX_B);
5385 add_prefix (LOCK_PREFIX_OPCODE);
5388 else
5389 { /* If it's not 2 reg operands... */
5390 unsigned int mem;
5392 if (i.mem_operands)
5394 unsigned int fake_zero_displacement = 0;
5395 unsigned int op;
5397 for (op = 0; op < i.operands; op++)
5398 if (operand_type_check (i.types[op], anymem))
5399 break;
5400 gas_assert (op < i.operands);
5402 default_seg = &ds;
5404 if (i.base_reg == 0)
5406 i.rm.mode = 0;
5407 if (!i.disp_operands)
5408 fake_zero_displacement = 1;
5409 if (i.index_reg == 0)
5411 /* Operand is just <disp> */
5412 if (flag_code == CODE_64BIT)
5414 /* In 64bit mode the plain disp32 form means RIP
5415 relative addressing, so 32bit absolute
5416 addressing has to be encoded via one of the
5417 redundant SIB forms. */
5418 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5419 i.sib.base = NO_BASE_REGISTER;
5420 i.sib.index = NO_INDEX_REGISTER;
5421 i.types[op] = ((i.prefix[ADDR_PREFIX] == 0)
5422 ? disp32s : disp32);
5424 else if ((flag_code == CODE_16BIT)
5425 ^ (i.prefix[ADDR_PREFIX] != 0))
5427 i.rm.regmem = NO_BASE_REGISTER_16;
5428 i.types[op] = disp16;
5430 else
5432 i.rm.regmem = NO_BASE_REGISTER;
5433 i.types[op] = disp32;
5436 else /* !i.base_reg && i.index_reg */
5438 if (i.index_reg->reg_num == RegEiz
5439 || i.index_reg->reg_num == RegRiz)
5440 i.sib.index = NO_INDEX_REGISTER;
5441 else
5442 i.sib.index = i.index_reg->reg_num;
5443 i.sib.base = NO_BASE_REGISTER;
5444 i.sib.scale = i.log2_scale_factor;
5445 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5446 i.types[op].bitfield.disp8 = 0;
5447 i.types[op].bitfield.disp16 = 0;
5448 i.types[op].bitfield.disp64 = 0;
5449 if (flag_code != CODE_64BIT)
5451 /* Must be 32 bit */
5452 i.types[op].bitfield.disp32 = 1;
5453 i.types[op].bitfield.disp32s = 0;
5455 else
5457 i.types[op].bitfield.disp32 = 0;
5458 i.types[op].bitfield.disp32s = 1;
5460 if ((i.index_reg->reg_flags & RegRex) != 0)
5461 i.rex |= REX_X;
5464 /* RIP addressing for 64bit mode. */
5465 else if (i.base_reg->reg_num == RegRip ||
5466 i.base_reg->reg_num == RegEip)
5468 i.rm.regmem = NO_BASE_REGISTER;
5469 i.types[op].bitfield.disp8 = 0;
5470 i.types[op].bitfield.disp16 = 0;
5471 i.types[op].bitfield.disp32 = 0;
5472 i.types[op].bitfield.disp32s = 1;
5473 i.types[op].bitfield.disp64 = 0;
5474 i.flags[op] |= Operand_PCrel;
5475 if (! i.disp_operands)
5476 fake_zero_displacement = 1;
5478 else if (i.base_reg->reg_type.bitfield.reg16)
5480 switch (i.base_reg->reg_num)
5482 case 3: /* (%bx) */
5483 if (i.index_reg == 0)
5484 i.rm.regmem = 7;
5485 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
5486 i.rm.regmem = i.index_reg->reg_num - 6;
5487 break;
5488 case 5: /* (%bp) */
5489 default_seg = &ss;
5490 if (i.index_reg == 0)
5492 i.rm.regmem = 6;
5493 if (operand_type_check (i.types[op], disp) == 0)
5495 /* fake (%bp) into 0(%bp) */
5496 i.types[op].bitfield.disp8 = 1;
5497 fake_zero_displacement = 1;
5500 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
5501 i.rm.regmem = i.index_reg->reg_num - 6 + 2;
5502 break;
5503 default: /* (%si) -> 4 or (%di) -> 5 */
5504 i.rm.regmem = i.base_reg->reg_num - 6 + 4;
5506 i.rm.mode = mode_from_disp_size (i.types[op]);
5508 else /* i.base_reg and 32/64 bit mode */
5510 if (flag_code == CODE_64BIT
5511 && operand_type_check (i.types[op], disp))
5513 i386_operand_type temp;
5514 operand_type_set (&temp, 0);
5515 temp.bitfield.disp8 = i.types[op].bitfield.disp8;
5516 i.types[op] = temp;
5517 if (i.prefix[ADDR_PREFIX] == 0)
5518 i.types[op].bitfield.disp32s = 1;
5519 else
5520 i.types[op].bitfield.disp32 = 1;
5523 i.rm.regmem = i.base_reg->reg_num;
5524 if ((i.base_reg->reg_flags & RegRex) != 0)
5525 i.rex |= REX_B;
5526 i.sib.base = i.base_reg->reg_num;
5527 /* x86-64 ignores REX prefix bit here to avoid decoder
5528 complications. */
5529 if ((i.base_reg->reg_num & 7) == EBP_REG_NUM)
5531 default_seg = &ss;
5532 if (i.disp_operands == 0)
5534 fake_zero_displacement = 1;
5535 i.types[op].bitfield.disp8 = 1;
5538 else if (i.base_reg->reg_num == ESP_REG_NUM)
5540 default_seg = &ss;
5542 i.sib.scale = i.log2_scale_factor;
5543 if (i.index_reg == 0)
5545 /* <disp>(%esp) becomes two byte modrm with no index
5546 register. We've already stored the code for esp
5547 in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
5548 Any base register besides %esp will not use the
5549 extra modrm byte. */
5550 i.sib.index = NO_INDEX_REGISTER;
5552 else
5554 if (i.index_reg->reg_num == RegEiz
5555 || i.index_reg->reg_num == RegRiz)
5556 i.sib.index = NO_INDEX_REGISTER;
5557 else
5558 i.sib.index = i.index_reg->reg_num;
5559 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5560 if ((i.index_reg->reg_flags & RegRex) != 0)
5561 i.rex |= REX_X;
5564 if (i.disp_operands
5565 && (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
5566 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
5567 i.rm.mode = 0;
5568 else
5569 i.rm.mode = mode_from_disp_size (i.types[op]);
5572 if (fake_zero_displacement)
5574 /* Fakes a zero displacement assuming that i.types[op]
5575 holds the correct displacement size. */
5576 expressionS *exp;
5578 gas_assert (i.op[op].disps == 0);
5579 exp = &disp_expressions[i.disp_operands++];
5580 i.op[op].disps = exp;
5581 exp->X_op = O_constant;
5582 exp->X_add_number = 0;
5583 exp->X_add_symbol = (symbolS *) 0;
5584 exp->X_op_symbol = (symbolS *) 0;
5587 mem = op;
5589 else
5590 mem = ~0;
5592 if (i.tm.opcode_modifier.vexsources == XOP2SOURCES)
5594 if (operand_type_check (i.types[0], imm))
5595 i.vex.register_specifier = NULL;
5596 else
5598 /* VEX.vvvv encodes one of the sources when the first
5599 operand is not an immediate. */
5600 if (i.tm.opcode_modifier.vexw == VEXW0)
5601 i.vex.register_specifier = i.op[0].regs;
5602 else
5603 i.vex.register_specifier = i.op[1].regs;
5606 /* Destination is a XMM register encoded in the ModRM.reg
5607 and VEX.R bit. */
5608 i.rm.reg = i.op[2].regs->reg_num;
5609 if ((i.op[2].regs->reg_flags & RegRex) != 0)
5610 i.rex |= REX_R;
5612 /* ModRM.rm and VEX.B encodes the other source. */
5613 if (!i.mem_operands)
5615 i.rm.mode = 3;
5617 if (i.tm.opcode_modifier.vexw == VEXW0)
5618 i.rm.regmem = i.op[1].regs->reg_num;
5619 else
5620 i.rm.regmem = i.op[0].regs->reg_num;
5622 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5623 i.rex |= REX_B;
5626 else if (i.tm.opcode_modifier.vexvvvv == VEXLWP)
5628 i.vex.register_specifier = i.op[2].regs;
5629 if (!i.mem_operands)
5631 i.rm.mode = 3;
5632 i.rm.regmem = i.op[1].regs->reg_num;
5633 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5634 i.rex |= REX_B;
5637 /* Fill in i.rm.reg or i.rm.regmem field with register operand
5638 (if any) based on i.tm.extension_opcode. Again, we must be
5639 careful to make sure that segment/control/debug/test/MMX
5640 registers are coded into the i.rm.reg field. */
5641 else if (i.reg_operands)
5643 unsigned int op;
5644 unsigned int vex_reg = ~0;
5646 for (op = 0; op < i.operands; op++)
5647 if (i.types[op].bitfield.reg8
5648 || i.types[op].bitfield.reg16
5649 || i.types[op].bitfield.reg32
5650 || i.types[op].bitfield.reg64
5651 || i.types[op].bitfield.regmmx
5652 || i.types[op].bitfield.regxmm
5653 || i.types[op].bitfield.regymm
5654 || i.types[op].bitfield.sreg2
5655 || i.types[op].bitfield.sreg3
5656 || i.types[op].bitfield.control
5657 || i.types[op].bitfield.debug
5658 || i.types[op].bitfield.test)
5659 break;
5661 if (vex_3_sources)
5662 op = dest;
5663 else if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5665 /* For instructions with VexNDS, the register-only
5666 source operand is encoded in VEX prefix. */
5667 gas_assert (mem != (unsigned int) ~0);
5669 if (op > mem)
5671 vex_reg = op++;
5672 gas_assert (op < i.operands);
5674 else
5676 /* Check register-only source operand when two source
5677 operands are swapped. */
5678 if (!i.tm.operand_types[op].bitfield.baseindex
5679 && i.tm.operand_types[op + 1].bitfield.baseindex)
5681 vex_reg = op;
5682 op += 2;
5683 gas_assert (mem == (vex_reg + 1)
5684 && op < i.operands);
5686 else
5688 vex_reg = op + 1;
5689 gas_assert (vex_reg < i.operands);
5693 else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
5695 /* For instructions with VexNDD, the register destination
5696 is encoded in VEX prefix. */
5697 if (i.mem_operands == 0)
5699 /* There is no memory operand. */
5700 gas_assert ((op + 2) == i.operands);
5701 vex_reg = op + 1;
5703 else
5705 /* There are only 2 operands. */
5706 gas_assert (op < 2 && i.operands == 2);
5707 vex_reg = 1;
5710 else
5711 gas_assert (op < i.operands);
5713 if (vex_reg != (unsigned int) ~0)
5715 i386_operand_type *type = &i.tm.operand_types[vex_reg];
5717 if (type->bitfield.reg32 != 1
5718 && type->bitfield.reg64 != 1
5719 && !operand_type_equal (type, &regxmm)
5720 && !operand_type_equal (type, &regymm))
5721 abort ();
5723 i.vex.register_specifier = i.op[vex_reg].regs;
5726 /* Don't set OP operand twice. */
5727 if (vex_reg != op)
5729 /* If there is an extension opcode to put here, the
5730 register number must be put into the regmem field. */
5731 if (i.tm.extension_opcode != None)
5733 i.rm.regmem = i.op[op].regs->reg_num;
5734 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5735 i.rex |= REX_B;
5737 else
5739 i.rm.reg = i.op[op].regs->reg_num;
5740 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5741 i.rex |= REX_R;
5745 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
5746 must set it to 3 to indicate this is a register operand
5747 in the regmem field. */
5748 if (!i.mem_operands)
5749 i.rm.mode = 3;
5752 /* Fill in i.rm.reg field with extension opcode (if any). */
5753 if (i.tm.extension_opcode != None)
5754 i.rm.reg = i.tm.extension_opcode;
5756 return default_seg;
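/* A worked example of what this routine produces: for
   `movl 4(%esp,%ebx,2),%eax' the base register %esp forces the SIB
   escape, giving ModRM 0x44 (mod=01 for disp8, reg=eax, rm=100) and
   SIB 0x5c (ss=01 for *2, index=%ebx, base=%esp), so the instruction
   is encoded as 8b 44 5c 04.  */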
5759 static void
5760 output_branch (void)
5762 char *p;
5763 int size;
5764 int code16;
5765 int prefix;
5766 relax_substateT subtype;
5767 symbolS *sym;
5768 offsetT off;
5770 code16 = flag_code == CODE_16BIT ? CODE16 : 0;
5771 size = i.disp32_encoding ? BIG : SMALL;
5773 prefix = 0;
5774 if (i.prefix[DATA_PREFIX] != 0)
5776 prefix = 1;
5777 i.prefixes -= 1;
5778 code16 ^= CODE16;
5780 /* Pentium4 branch hints. */
5781 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
5782 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
5784 prefix++;
5785 i.prefixes--;
5787 if (i.prefix[REX_PREFIX] != 0)
5789 prefix++;
5790 i.prefixes--;
5793 if (i.prefixes != 0 && !intel_syntax)
5794 as_warn (_("skipping prefixes on this instruction"));
5796 /* It's always a symbol; End frag & setup for relax.
5797 Make sure there is enough room in this frag for the largest
5798 instruction we may generate in md_convert_frag. This is 2
5799 bytes for the opcode and room for the prefix and largest
5800 displacement. */
5801 frag_grow (prefix + 2 + 4);
5802 /* Prefix and 1 opcode byte go in fr_fix. */
5803 p = frag_more (prefix + 1);
5804 if (i.prefix[DATA_PREFIX] != 0)
5805 *p++ = DATA_PREFIX_OPCODE;
5806 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
5807 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
5808 *p++ = i.prefix[SEG_PREFIX];
5809 if (i.prefix[REX_PREFIX] != 0)
5810 *p++ = i.prefix[REX_PREFIX];
5811 *p = i.tm.base_opcode;
5813 if ((unsigned char) *p == JUMP_PC_RELATIVE)
5814 subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, size);
5815 else if (cpu_arch_flags.bitfield.cpui386)
5816 subtype = ENCODE_RELAX_STATE (COND_JUMP, size);
5817 else
5818 subtype = ENCODE_RELAX_STATE (COND_JUMP86, size);
5819 subtype |= code16;
5821 sym = i.op[0].disps->X_add_symbol;
5822 off = i.op[0].disps->X_add_number;
5824 if (i.op[0].disps->X_op != O_constant
5825 && i.op[0].disps->X_op != O_symbol)
5827 /* Handle complex expressions. */
5828 sym = make_expr_symbol (i.op[0].disps);
5829 off = 0;
5832 /* 1 possible extra opcode + 4 byte displacement go in var part.
5833 Pass reloc in fr_var. */
5834 frag_var (rs_machine_dependent, 5, i.reloc[0], subtype, sym, off, p);
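/* Roughly speaking, a conditional `jne target' leaves here as the
   short 0x75 opcode plus a relaxable variable part; if relaxation
   later finds the target out of signed-byte range, md_convert_frag
   rewrites it as the six byte 0f 85 rel32 form (e9 rel32 for the
   unconditional jump).  */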
5837 static void
5838 output_jump (void)
5840 char *p;
5841 int size;
5842 fixS *fixP;
5844 if (i.tm.opcode_modifier.jumpbyte)
5846 /* This is a loop or jecxz type instruction. */
5847 size = 1;
5848 if (i.prefix[ADDR_PREFIX] != 0)
5850 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
5851 i.prefixes -= 1;
5853 /* Pentium4 branch hints. */
5854 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
5855 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
5857 FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
5858 i.prefixes--;
5861 else
5863 int code16;
5865 code16 = 0;
5866 if (flag_code == CODE_16BIT)
5867 code16 = CODE16;
5869 if (i.prefix[DATA_PREFIX] != 0)
5871 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
5872 i.prefixes -= 1;
5873 code16 ^= CODE16;
5876 size = 4;
5877 if (code16)
5878 size = 2;
5881 if (i.prefix[REX_PREFIX] != 0)
5883 FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
5884 i.prefixes -= 1;
5887 if (i.prefixes != 0 && !intel_syntax)
5888 as_warn (_("skipping prefixes on this instruction"));
5890 p = frag_more (1 + size);
5891 *p++ = i.tm.base_opcode;
5893 fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
5894 i.op[0].disps, 1, reloc (size, 1, 1, i.reloc[0]));
5896 /* All jumps handled here are signed, but don't use a signed limit
5897 check for 32 and 16 bit jumps as we want to allow wrap around at
5898 4G and 64k respectively. */
5899 if (size == 1)
5900 fixP->fx_signed = 1;
5903 static void
5904 output_interseg_jump (void)
5906 char *p;
5907 int size;
5908 int prefix;
5909 int code16;
5911 code16 = 0;
5912 if (flag_code == CODE_16BIT)
5913 code16 = CODE16;
5915 prefix = 0;
5916 if (i.prefix[DATA_PREFIX] != 0)
5918 prefix = 1;
5919 i.prefixes -= 1;
5920 code16 ^= CODE16;
5922 if (i.prefix[REX_PREFIX] != 0)
5924 prefix++;
5925 i.prefixes -= 1;
5928 size = 4;
5929 if (code16)
5930 size = 2;
5932 if (i.prefixes != 0 && !intel_syntax)
5933 as_warn (_("skipping prefixes on this instruction"));
5935 /* 1 opcode byte, 2 segment bytes, plus the offset. */
5936 p = frag_more (prefix + 1 + 2 + size);
5938 if (i.prefix[DATA_PREFIX] != 0)
5939 *p++ = DATA_PREFIX_OPCODE;
5941 if (i.prefix[REX_PREFIX] != 0)
5942 *p++ = i.prefix[REX_PREFIX];
5944 *p++ = i.tm.base_opcode;
5945 if (i.op[1].imms->X_op == O_constant)
5947 offsetT n = i.op[1].imms->X_add_number;
5949 if (size == 2
5950 && !fits_in_unsigned_word (n)
5951 && !fits_in_signed_word (n))
5953 as_bad (_("16-bit jump out of range"));
5954 return;
5956 md_number_to_chars (p, n, size);
5958 else
5959 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
5960 i.op[1].imms, 0, reloc (size, 0, 0, i.reloc[1]));
5961 if (i.op[0].imms->X_op != O_constant)
5962 as_bad (_("can't handle non absolute segment in `%s'"),
5963 i.tm.name);
5964 md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
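/* For example, `ljmp $0x10,$0x2000' assembled as 32-bit code comes out
   of this routine as ea 00 20 00 00 10 00: the 0xea opcode, the
   four byte offset, then the two byte segment selector.  */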
5967 static void
5968 output_insn (void)
5970 fragS *insn_start_frag;
5971 offsetT insn_start_off;
5973 /* Tie dwarf2 debug info to the address at the start of the insn.
5974 We can't do this after the insn has been output as the current
5975 frag may have been closed off. eg. by frag_var. */
5976 dwarf2_emit_insn (0);
5978 insn_start_frag = frag_now;
5979 insn_start_off = frag_now_fix ();
5981 /* Output jumps. */
5982 if (i.tm.opcode_modifier.jump)
5983 output_branch ();
5984 else if (i.tm.opcode_modifier.jumpbyte
5985 || i.tm.opcode_modifier.jumpdword)
5986 output_jump ();
5987 else if (i.tm.opcode_modifier.jumpintersegment)
5988 output_interseg_jump ();
5989 else
5991 /* Output normal instructions here. */
5992 char *p;
5993 unsigned char *q;
5994 unsigned int j;
5995 unsigned int prefix;
5997 /* Since the VEX prefix contains the implicit prefix, we don't
5998 need the explicit prefix. */
5999 if (!i.tm.opcode_modifier.vex)
6001 switch (i.tm.opcode_length)
6003 case 3:
6004 if (i.tm.base_opcode & 0xff000000)
6006 prefix = (i.tm.base_opcode >> 24) & 0xff;
6007 goto check_prefix;
6009 break;
6010 case 2:
6011 if ((i.tm.base_opcode & 0xff0000) != 0)
6013 prefix = (i.tm.base_opcode >> 16) & 0xff;
6014 if (i.tm.cpu_flags.bitfield.cpupadlock)
6016 check_prefix:
6017 if (prefix != REPE_PREFIX_OPCODE
6018 || (i.prefix[REP_PREFIX]
6019 != REPE_PREFIX_OPCODE))
6020 add_prefix (prefix);
6022 else
6023 add_prefix (prefix);
6025 break;
6026 case 1:
6027 break;
6028 default:
6029 abort ();
6032 /* The prefix bytes. */
6033 for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
6034 if (*q)
6035 FRAG_APPEND_1_CHAR (*q);
6038 if (i.tm.opcode_modifier.vex)
6040 for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
6041 if (*q)
6042 switch (j)
6044 case REX_PREFIX:
6045 /* REX byte is encoded in VEX prefix. */
6046 break;
6047 case SEG_PREFIX:
6048 case ADDR_PREFIX:
6049 FRAG_APPEND_1_CHAR (*q);
6050 break;
6051 default:
6052 /* There should be no other prefixes for instructions
6053 with VEX prefix. */
6054 abort ();
6057 /* Now the VEX prefix. */
6058 p = frag_more (i.vex.length);
6059 for (j = 0; j < i.vex.length; j++)
6060 p[j] = i.vex.bytes[j];
6063 /* Now the opcode; be careful about word order here! */
6064 if (i.tm.opcode_length == 1)
6066 FRAG_APPEND_1_CHAR (i.tm.base_opcode);
6068 else
6070 switch (i.tm.opcode_length)
6072 case 3:
6073 p = frag_more (3);
6074 *p++ = (i.tm.base_opcode >> 16) & 0xff;
6075 break;
6076 case 2:
6077 p = frag_more (2);
6078 break;
6079 default:
6080 abort ();
6081 break;
6084 /* Put out high byte first: can't use md_number_to_chars! */
6085 *p++ = (i.tm.base_opcode >> 8) & 0xff;
6086 *p = i.tm.base_opcode & 0xff;
6089 /* Now the modrm byte and sib byte (if present). */
6090 if (i.tm.opcode_modifier.modrm)
6092 FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
6093 | i.rm.reg << 3
6094 | i.rm.mode << 6));
6095 /* If i.rm.regmem == ESP (4)
6096 && i.rm.mode != (Register mode)
6097 && not 16 bit
6098 ==> need the SIB byte as well. */
6099 if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
6100 && i.rm.mode != 3
6101 && !(i.base_reg && i.base_reg->reg_type.bitfield.reg16))
6102 FRAG_APPEND_1_CHAR ((i.sib.base << 0
6103 | i.sib.index << 3
6104 | i.sib.scale << 6));
6107 if (i.disp_operands)
6108 output_disp (insn_start_frag, insn_start_off);
6110 if (i.imm_operands)
6111 output_imm (insn_start_frag, insn_start_off);
6114 #ifdef DEBUG386
6115 if (flag_debug)
6117 pi ("" /*line*/, &i);
6119 #endif /* DEBUG386 */
6122 /* Return the size of the displacement operand N. */
6124 static int
6125 disp_size (unsigned int n)
6127 int size = 4;
6128 if (i.types[n].bitfield.disp64)
6129 size = 8;
6130 else if (i.types[n].bitfield.disp8)
6131 size = 1;
6132 else if (i.types[n].bitfield.disp16)
6133 size = 2;
6134 return size;
6137 /* Return the size of the immediate operand N. */
6139 static int
6140 imm_size (unsigned int n)
6142 int size = 4;
6143 if (i.types[n].bitfield.imm64)
6144 size = 8;
6145 else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s)
6146 size = 1;
6147 else if (i.types[n].bitfield.imm16)
6148 size = 2;
6149 return size;
6152 static void
6153 output_disp (fragS *insn_start_frag, offsetT insn_start_off)
6155 char *p;
6156 unsigned int n;
6158 for (n = 0; n < i.operands; n++)
6160 if (operand_type_check (i.types[n], disp))
6162 if (i.op[n].disps->X_op == O_constant)
6164 int size = disp_size (n);
6165 offsetT val;
6167 val = offset_in_range (i.op[n].disps->X_add_number,
6168 size);
6169 p = frag_more (size);
6170 md_number_to_chars (p, val, size);
6172 else
6174 enum bfd_reloc_code_real reloc_type;
6175 int size = disp_size (n);
6176 int sign = i.types[n].bitfield.disp32s;
6177 int pcrel = (i.flags[n] & Operand_PCrel) != 0;
6179 /* We can't have 8 bit displacement here. */
6180 gas_assert (!i.types[n].bitfield.disp8);
6182 /* The PC relative address is computed relative
6183 to the instruction boundary, so in case immediate
6184 fields follow, we need to adjust the value. */
6185 if (pcrel && i.imm_operands)
6187 unsigned int n1;
6188 int sz = 0;
6190 for (n1 = 0; n1 < i.operands; n1++)
6191 if (operand_type_check (i.types[n1], imm))
6193 /* Only one immediate is allowed for PC
6194 relative address. */
6195 gas_assert (sz == 0);
6196 sz = imm_size (n1);
6197 i.op[n].disps->X_add_number -= sz;
6199 /* We should find the immediate. */
6200 gas_assert (sz != 0);
6203 p = frag_more (size);
6204 reloc_type = reloc (size, pcrel, sign, i.reloc[n]);
6205 if (GOT_symbol
6206 && GOT_symbol == i.op[n].disps->X_add_symbol
6207 && (((reloc_type == BFD_RELOC_32
6208 || reloc_type == BFD_RELOC_X86_64_32S
6209 || (reloc_type == BFD_RELOC_64
6210 && object_64bit))
6211 && (i.op[n].disps->X_op == O_symbol
6212 || (i.op[n].disps->X_op == O_add
6213 && ((symbol_get_value_expression
6214 (i.op[n].disps->X_op_symbol)->X_op)
6215 == O_subtract))))
6216 || reloc_type == BFD_RELOC_32_PCREL))
6218 offsetT add;
6220 if (insn_start_frag == frag_now)
6221 add = (p - frag_now->fr_literal) - insn_start_off;
6222 else
6224 fragS *fr;
6226 add = insn_start_frag->fr_fix - insn_start_off;
6227 for (fr = insn_start_frag->fr_next;
6228 fr && fr != frag_now; fr = fr->fr_next)
6229 add += fr->fr_fix;
6230 add += p - frag_now->fr_literal;
6233 if (!object_64bit)
6235 reloc_type = BFD_RELOC_386_GOTPC;
6236 i.op[n].imms->X_add_number += add;
6238 else if (reloc_type == BFD_RELOC_64)
6239 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6240 else
6241 /* Don't do the adjustment for x86-64, as there
6242 the pcrel addressing is relative to the _next_
6243 insn, and that is taken care of in other code. */
6244 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6246 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6247 i.op[n].disps, pcrel, reloc_type);
6253 static void
6254 output_imm (fragS *insn_start_frag, offsetT insn_start_off)
6256 char *p;
6257 unsigned int n;
6259 for (n = 0; n < i.operands; n++)
6261 if (operand_type_check (i.types[n], imm))
6263 if (i.op[n].imms->X_op == O_constant)
6265 int size = imm_size (n);
6266 offsetT val;
6268 val = offset_in_range (i.op[n].imms->X_add_number,
6269 size);
6270 p = frag_more (size);
6271 md_number_to_chars (p, val, size);
6273 else
6275 /* Not absolute_section.
6276 Need a 32-bit fixup (don't support 8bit
6277 non-absolute imms). Try to support other
6278 sizes ... */
6279 enum bfd_reloc_code_real reloc_type;
6280 int size = imm_size (n);
6281 int sign;
6283 if (i.types[n].bitfield.imm32s
6284 && (i.suffix == QWORD_MNEM_SUFFIX
6285 || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
6286 sign = 1;
6287 else
6288 sign = 0;
6290 p = frag_more (size);
6291 reloc_type = reloc (size, 0, sign, i.reloc[n]);
6293 /* This is tough to explain. We end up with this one if we
6294 * have operands that look like
6295 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
6296 * obtain the absolute address of the GOT, and it is strongly
6297 * preferable from a performance point of view to avoid using
6298 * a runtime relocation for this. The actual sequence of
6299 * instructions often looks something like:
6301 * call .L66
6302 * .L66:
6303 * popl %ebx
6304 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
6306 * The call and pop essentially return the absolute address
6307 * of the label .L66 and store it in %ebx. The linker itself
6308 * will ultimately change the first operand of the addl so
6309 * that %ebx points to the GOT, but to keep things simple, the
6310 * .o file must have this operand set so that it generates not
6311 * the absolute address of .L66, but the absolute address of
6312 * itself. This allows the linker itself to simply treat a GOTPC
6313 * relocation as asking for a pcrel offset to the GOT to be
6314 * added in, and the addend of the relocation is stored in the
6315 * operand field for the instruction itself.
6317 * Our job here is to fix the operand so that it would add
6318 * the correct offset so that %ebx would point to itself. The
6319 * thing that is tricky is that .-.L66 will point to the
6320 * beginning of the instruction, so we need to further modify
6321 * the operand so that it will point to itself. There are
6322 * other cases where you have something like:
6324 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
6326 * and here no correction would be required. Internally in
6327 * the assembler we treat operands of this form as not being
6328 * pcrel since the '.' is explicitly mentioned, and I wonder
6329 * whether it would simplify matters to do it this way. Who
6330 * knows. In earlier versions of the PIC patches, the
6331 * pcrel_adjust field was used to store the correction, but
6332 * since the expression is not pcrel, I felt it would be
6333 * confusing to do it this way. */
6335 if ((reloc_type == BFD_RELOC_32
6336 || reloc_type == BFD_RELOC_X86_64_32S
6337 || reloc_type == BFD_RELOC_64)
6338 && GOT_symbol
6339 && GOT_symbol == i.op[n].imms->X_add_symbol
6340 && (i.op[n].imms->X_op == O_symbol
6341 || (i.op[n].imms->X_op == O_add
6342 && ((symbol_get_value_expression
6343 (i.op[n].imms->X_op_symbol)->X_op)
6344 == O_subtract))))
6346 offsetT add;
6348 if (insn_start_frag == frag_now)
6349 add = (p - frag_now->fr_literal) - insn_start_off;
6350 else
6352 fragS *fr;
6354 add = insn_start_frag->fr_fix - insn_start_off;
6355 for (fr = insn_start_frag->fr_next;
6356 fr && fr != frag_now; fr = fr->fr_next)
6357 add += fr->fr_fix;
6358 add += p - frag_now->fr_literal;
6361 if (!object_64bit)
6362 reloc_type = BFD_RELOC_386_GOTPC;
6363 else if (size == 4)
6364 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6365 else if (size == 8)
6366 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6367 i.op[n].imms->X_add_number += add;
6369 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6370 i.op[n].imms, 0, reloc_type);
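/* A rough worked example of the correction above (offsets are
   hypothetical): for the

	call	.L66
   .L66:	popl	%ebx
	addl	$_GLOBAL_OFFSET_TABLE_+[.-.L66], %ebx

   sequence, suppose the addl begins at insn_start_off == 10 in this
   frag and its 4-byte immediate sits at offset 12, giving add == 2.
   A GOTPC reloc resolves to GOT + addend - place, and the place is
   the immediate field, i.e. two bytes past the point '.' referred to.
   Adding 2 to the addend cancels that difference, so the linked-in
   immediate becomes GOT - .L66 and the addl leaves %ebx pointing at
   the GOT, as described in the comment above.  */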
6376 /* x86_cons_fix_new is called via the expression parsing code when a
6377 reloc is needed. We use this hook to get the correct .got reloc. */
6378 static enum bfd_reloc_code_real got_reloc = NO_RELOC;
6379 static int cons_sign = -1;
6381 void
6382 x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
6383 expressionS *exp)
6385 enum bfd_reloc_code_real r = reloc (len, 0, cons_sign, got_reloc);
6387 got_reloc = NO_RELOC;
6389 #ifdef TE_PE
6390 if (exp->X_op == O_secrel)
6392 exp->X_op = O_symbol;
6393 r = BFD_RELOC_32_SECREL;
6395 #endif
6397 fix_new_exp (frag, off, len, exp, 0, r);
6400 #if (!defined (OBJ_ELF) && !defined (OBJ_MAYBE_ELF)) || defined (LEX_AT)
6401 # define lex_got(reloc, adjust, types) NULL
6402 #else
6403 /* Parse operands of the form
6404 <symbol>@GOTOFF+<nnn>
6405 and similar .plt or .got references.
6407 If we find one, set up the correct relocation in RELOC and copy the
6408 input string, minus the `@GOTOFF' into a malloc'd buffer for
6409 parsing by the calling routine. Return this buffer, and if ADJUST
6410 is non-null set it to the length of the string we removed from the
6411 input line. Otherwise return NULL. */
6412 static char *
6413 lex_got (enum bfd_reloc_code_real *rel,
6414 int *adjust,
6415 i386_operand_type *types)
6417 /* Some of the relocations depend on the size of the field that is to
6418 be relocated. But in our callers i386_immediate and i386_displacement
6419 we don't yet know the operand size (this will be set by insn
6420 matching). Hence we record the word32 relocation here,
6421 and adjust the reloc according to the real size in reloc(). */
6422 static const struct {
6423 const char *str;
6424 int len;
6425 const enum bfd_reloc_code_real rel[2];
6426 const i386_operand_type types64;
6427 } gotrel[] = {
6428 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real,
6429 BFD_RELOC_X86_64_PLTOFF64 },
6430 OPERAND_TYPE_IMM64 },
6431 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32,
6432 BFD_RELOC_X86_64_PLT32 },
6433 OPERAND_TYPE_IMM32_32S_DISP32 },
6434 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real,
6435 BFD_RELOC_X86_64_GOTPLT64 },
6436 OPERAND_TYPE_IMM64_DISP64 },
6437 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF,
6438 BFD_RELOC_X86_64_GOTOFF64 },
6439 OPERAND_TYPE_IMM64_DISP64 },
6440 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
6441 BFD_RELOC_X86_64_GOTPCREL },
6442 OPERAND_TYPE_IMM32_32S_DISP32 },
6443 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD,
6444 BFD_RELOC_X86_64_TLSGD },
6445 OPERAND_TYPE_IMM32_32S_DISP32 },
6446 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM,
6447 _dummy_first_bfd_reloc_code_real },
6448 OPERAND_TYPE_NONE },
6449 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real,
6450 BFD_RELOC_X86_64_TLSLD },
6451 OPERAND_TYPE_IMM32_32S_DISP32 },
6452 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
6453 BFD_RELOC_X86_64_GOTTPOFF },
6454 OPERAND_TYPE_IMM32_32S_DISP32 },
6455 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32,
6456 BFD_RELOC_X86_64_TPOFF32 },
6457 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6458 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE,
6459 _dummy_first_bfd_reloc_code_real },
6460 OPERAND_TYPE_NONE },
6461 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32,
6462 BFD_RELOC_X86_64_DTPOFF32 },
6463 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6464 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
6465 _dummy_first_bfd_reloc_code_real },
6466 OPERAND_TYPE_NONE },
6467 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
6468 _dummy_first_bfd_reloc_code_real },
6469 OPERAND_TYPE_NONE },
6470 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32,
6471 BFD_RELOC_X86_64_GOT32 },
6472 OPERAND_TYPE_IMM32_32S_64_DISP32 },
6473 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC,
6474 BFD_RELOC_X86_64_GOTPC32_TLSDESC },
6475 OPERAND_TYPE_IMM32_32S_DISP32 },
6476 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL,
6477 BFD_RELOC_X86_64_TLSDESC_CALL },
6478 OPERAND_TYPE_IMM32_32S_DISP32 },
6480 char *cp;
6481 unsigned int j;
6483 if (!IS_ELF)
6484 return NULL;
6486 for (cp = input_line_pointer; *cp != '@'; cp++)
6487 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
6488 return NULL;
6490 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
6492 int len = gotrel[j].len;
6493 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
6495 if (gotrel[j].rel[object_64bit] != 0)
6497 int first, second;
6498 char *tmpbuf, *past_reloc;
6500 *rel = gotrel[j].rel[object_64bit];
6501 if (adjust)
6502 *adjust = len;
6504 if (types)
6506 if (flag_code != CODE_64BIT)
6508 types->bitfield.imm32 = 1;
6509 types->bitfield.disp32 = 1;
6511 else
6512 *types = gotrel[j].types64;
6515 if (GOT_symbol == NULL)
6516 GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);
6518 /* The length of the first part of our input line. */
6519 first = cp - input_line_pointer;
6521 /* The second part goes from after the reloc token until
6522 (and including) an end_of_line char or comma. */
6523 past_reloc = cp + 1 + len;
6524 cp = past_reloc;
6525 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
6526 ++cp;
6527 second = cp + 1 - past_reloc;
6529 /* Allocate and copy string. The trailing NUL shouldn't
6530 be necessary, but be safe. */
6531 tmpbuf = (char *) xmalloc (first + second + 2);
6532 memcpy (tmpbuf, input_line_pointer, first);
6533 if (second != 0 && *past_reloc != ' ')
6534 /* Replace the relocation token with ' ', so that
6535 errors like foo@GOTOFF1 will be detected. */
6536 tmpbuf[first++] = ' ';
6537 memcpy (tmpbuf + first, past_reloc, second);
6538 tmpbuf[first + second] = '\0';
6539 return tmpbuf;
6542 as_bad (_("@%s reloc is not supported with %d-bit output format"),
6543 gotrel[j].str, 1 << (5 + object_64bit));
6544 return NULL;
6548 /* Might be a symbol version string. Don't as_bad here. */
6549 return NULL;
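/* An illustrative run of lex_got on a hypothetical operand: with
   input_line_pointer at

	foo@GOTOFF+4(%ebx)

   the scan stops at '@', "GOTOFF" matches the table, *rel becomes
   BFD_RELOC_386_GOTOFF (BFD_RELOC_X86_64_GOTOFF64 for 64-bit output),
   *adjust is set to 6 (the length of "GOTOFF"), and the returned
   buffer reads roughly "foo +4(%ebx)" - the '@GOTOFF' token collapses
   to a single space so that malformed input such as foo@GOTOFF1 still
   fails to parse.  */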
6552 void
6553 x86_cons (expressionS *exp, int size)
6555 intel_syntax = -intel_syntax;
6557 exp->X_md = 0;
6558 if (size == 4 || (object_64bit && size == 8))
6560 /* Handle @GOTOFF and the like in an expression. */
6561 char *save;
6562 char *gotfree_input_line;
6563 int adjust;
6565 save = input_line_pointer;
6566 gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
6567 if (gotfree_input_line)
6568 input_line_pointer = gotfree_input_line;
6570 expression (exp);
6572 if (gotfree_input_line)
6574 /* expression () has merrily parsed up to the end of line,
6575 or a comma - in the wrong buffer. Transfer how far
6576 input_line_pointer has moved to the right buffer. */
6577 input_line_pointer = (save
6578 + (input_line_pointer - gotfree_input_line)
6579 + adjust);
6580 free (gotfree_input_line);
6581 if (exp->X_op == O_constant
6582 || exp->X_op == O_absent
6583 || exp->X_op == O_illegal
6584 || exp->X_op == O_register
6585 || exp->X_op == O_big)
6587 char c = *input_line_pointer;
6588 *input_line_pointer = 0;
6589 as_bad (_("missing or invalid expression `%s'"), save);
6590 *input_line_pointer = c;
6594 else
6595 expression (exp);
6597 intel_syntax = -intel_syntax;
6599 if (intel_syntax)
6600 i386_intel_simplify (exp);
6602 #endif
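/* A brief, hypothetical usage sketch tying the two hooks together:
   assuming tc-i386.h routes .long/.quad expressions through x86_cons
   (via TC_PARSE_CONS_EXPRESSION) and their fixups through
   x86_cons_fix_new, a directive such as

	.long	_start@GOTOFF

   has its "@GOTOFF" stripped by lex_got inside x86_cons, the chosen
   reloc is parked in got_reloc, and x86_cons_fix_new then emits the
   fixup with BFD_RELOC_386_GOTOFF instead of a plain BFD_RELOC_32.  */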
6604 static void
6605 signed_cons (int size)
6607 if (flag_code == CODE_64BIT)
6608 cons_sign = 1;
6609 cons (size);
6610 cons_sign = -1;
6613 #ifdef TE_PE
6614 static void
6615 pe_directive_secrel (dummy)
6616 int dummy ATTRIBUTE_UNUSED;
6618 expressionS exp;
6622 expression (&exp);
6623 if (exp.X_op == O_symbol)
6624 exp.X_op = O_secrel;
6626 emit_expr (&exp, 4);
6628 while (*input_line_pointer++ == ',');
6630 input_line_pointer--;
6631 demand_empty_rest_of_line ();
6633 #endif
6635 static int
6636 i386_immediate (char *imm_start)
6638 char *save_input_line_pointer;
6639 char *gotfree_input_line;
6640 segT exp_seg = 0;
6641 expressionS *exp;
6642 i386_operand_type types;
6644 operand_type_set (&types, ~0);
6646 if (i.imm_operands == MAX_IMMEDIATE_OPERANDS)
6648 as_bad (_("at most %d immediate operands are allowed"),
6649 MAX_IMMEDIATE_OPERANDS);
6650 return 0;
6653 exp = &im_expressions[i.imm_operands++];
6654 i.op[this_operand].imms = exp;
6656 if (is_space_char (*imm_start))
6657 ++imm_start;
6659 save_input_line_pointer = input_line_pointer;
6660 input_line_pointer = imm_start;
6662 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
6663 if (gotfree_input_line)
6664 input_line_pointer = gotfree_input_line;
6666 exp_seg = expression (exp);
6668 SKIP_WHITESPACE ();
6669 if (*input_line_pointer)
6670 as_bad (_("junk `%s' after expression"), input_line_pointer);
6672 input_line_pointer = save_input_line_pointer;
6673 if (gotfree_input_line)
6675 free (gotfree_input_line);
6677 if (exp->X_op == O_constant || exp->X_op == O_register)
6678 exp->X_op = O_illegal;
6681 return i386_finalize_immediate (exp_seg, exp, types, imm_start);
6684 static int
6685 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
6686 i386_operand_type types, const char *imm_start)
6688 if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
6690 if (imm_start)
6691 as_bad (_("missing or invalid immediate expression `%s'"),
6692 imm_start);
6693 return 0;
6695 else if (exp->X_op == O_constant)
6697 /* Size it properly later. */
6698 i.types[this_operand].bitfield.imm64 = 1;
6699 /* If not 64bit, sign extend val. */
6700 if (flag_code != CODE_64BIT
6701 && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
6702 exp->X_add_number
6703 = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
6705 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
6706 else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
6707 && exp_seg != absolute_section
6708 && exp_seg != text_section
6709 && exp_seg != data_section
6710 && exp_seg != bss_section
6711 && exp_seg != undefined_section
6712 && !bfd_is_com_section (exp_seg))
6714 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
6715 return 0;
6717 #endif
6718 else if (!intel_syntax && exp->X_op == O_register)
6720 if (imm_start)
6721 as_bad (_("illegal immediate register operand %s"), imm_start);
6722 return 0;
6724 else
6726 /* This is an address. The size of the address will be
6727 determined later, depending on destination register,
6728 suffix, or the default for the section. */
6729 i.types[this_operand].bitfield.imm8 = 1;
6730 i.types[this_operand].bitfield.imm16 = 1;
6731 i.types[this_operand].bitfield.imm32 = 1;
6732 i.types[this_operand].bitfield.imm32s = 1;
6733 i.types[this_operand].bitfield.imm64 = 1;
6734 i.types[this_operand] = operand_type_and (i.types[this_operand],
6735 types);
6738 return 1;
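/* A small worked example of the sign extension above (hypothetical
   source line): assembling "movl $0xffffffff, %eax" outside 64-bit
   mode leaves X_add_number == 0xffffffff, which passes the mask test;
   the xor/subtract pair then rewrites it as -1, so subsequent range
   checks see the same value a 32-bit machine would.  In 64-bit mode
   the constant is kept as-is and sized later via the Imm64 bit set
   above.  */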
6741 static char *
6742 i386_scale (char *scale)
6744 offsetT val;
6745 char *save = input_line_pointer;
6747 input_line_pointer = scale;
6748 val = get_absolute_expression ();
6750 switch (val)
6752 case 1:
6753 i.log2_scale_factor = 0;
6754 break;
6755 case 2:
6756 i.log2_scale_factor = 1;
6757 break;
6758 case 4:
6759 i.log2_scale_factor = 2;
6760 break;
6761 case 8:
6762 i.log2_scale_factor = 3;
6763 break;
6764 default:
6766 char sep = *input_line_pointer;
6768 *input_line_pointer = '\0';
6769 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
6770 scale);
6771 *input_line_pointer = sep;
6772 input_line_pointer = save;
6773 return NULL;
6776 if (i.log2_scale_factor != 0 && i.index_reg == 0)
6778 as_warn (_("scale factor of %d without an index register"),
6779 1 << i.log2_scale_factor);
6780 i.log2_scale_factor = 0;
6782 scale = input_line_pointer;
6783 input_line_pointer = save;
6784 return scale;
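/* Illustration with a hypothetical operand: for "(%ebx,%esi,4)" the
   string "4" handed to i386_scale yields log2_scale_factor == 2,
   which later ends up in the two high bits of the SIB byte.  A scale
   given without an index register, e.g. "(%ebx,8)", only draws the
   warning above and is quietly reset to a scale of 1.  */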
6787 static int
6788 i386_displacement (char *disp_start, char *disp_end)
6790 expressionS *exp;
6791 segT exp_seg = 0;
6792 char *save_input_line_pointer;
6793 char *gotfree_input_line;
6794 int override;
6795 i386_operand_type bigdisp, types = anydisp;
6796 int ret;
6798 if (i.disp_operands == MAX_MEMORY_OPERANDS)
6800 as_bad (_("at most %d displacement operands are allowed"),
6801 MAX_MEMORY_OPERANDS);
6802 return 0;
6805 operand_type_set (&bigdisp, 0);
6806 if ((i.types[this_operand].bitfield.jumpabsolute)
6807 || (!current_templates->start->opcode_modifier.jump
6808 && !current_templates->start->opcode_modifier.jumpdword))
6810 bigdisp.bitfield.disp32 = 1;
6811 override = (i.prefix[ADDR_PREFIX] != 0);
6812 if (flag_code == CODE_64BIT)
6814 if (!override)
6816 bigdisp.bitfield.disp32s = 1;
6817 bigdisp.bitfield.disp64 = 1;
6820 else if ((flag_code == CODE_16BIT) ^ override)
6822 bigdisp.bitfield.disp32 = 0;
6823 bigdisp.bitfield.disp16 = 1;
6826 else
6828 /* For PC-relative branches, the width of the displacement
6829 is dependent upon data size, not address size. */
6830 override = (i.prefix[DATA_PREFIX] != 0);
6831 if (flag_code == CODE_64BIT)
6833 if (override || i.suffix == WORD_MNEM_SUFFIX)
6834 bigdisp.bitfield.disp16 = 1;
6835 else
6837 bigdisp.bitfield.disp32 = 1;
6838 bigdisp.bitfield.disp32s = 1;
6841 else
6843 if (!override)
6844 override = (i.suffix == (flag_code != CODE_16BIT
6845 ? WORD_MNEM_SUFFIX
6846 : LONG_MNEM_SUFFIX));
6847 bigdisp.bitfield.disp32 = 1;
6848 if ((flag_code == CODE_16BIT) ^ override)
6850 bigdisp.bitfield.disp32 = 0;
6851 bigdisp.bitfield.disp16 = 1;
6855 i.types[this_operand] = operand_type_or (i.types[this_operand],
6856 bigdisp);
6858 exp = &disp_expressions[i.disp_operands];
6859 i.op[this_operand].disps = exp;
6860 i.disp_operands++;
6861 save_input_line_pointer = input_line_pointer;
6862 input_line_pointer = disp_start;
6863 END_STRING_AND_SAVE (disp_end);
6865 #ifndef GCC_ASM_O_HACK
6866 #define GCC_ASM_O_HACK 0
6867 #endif
6868 #if GCC_ASM_O_HACK
6869 END_STRING_AND_SAVE (disp_end + 1);
6870 if (i.types[this_operand].bitfield.baseIndex
6871 && displacement_string_end[-1] == '+')
6873 /* This hack is to avoid a warning when using the "o"
6874 constraint within gcc asm statements.
6875 For instance:
6877 #define _set_tssldt_desc(n,addr,limit,type) \
6878 __asm__ __volatile__ ( \
6879 "movw %w2,%0\n\t" \
6880 "movw %w1,2+%0\n\t" \
6881 "rorl $16,%1\n\t" \
6882 "movb %b1,4+%0\n\t" \
6883 "movb %4,5+%0\n\t" \
6884 "movb $0,6+%0\n\t" \
6885 "movb %h1,7+%0\n\t" \
6886 "rorl $16,%1" \
6887 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
6889 This works great except that the output assembler ends
6890 up looking a bit weird if it turns out that there is
6891 no offset. You end up producing code that looks like:
6893 #APP
6894 movw $235,(%eax)
6895 movw %dx,2+(%eax)
6896 rorl $16,%edx
6897 movb %dl,4+(%eax)
6898 movb $137,5+(%eax)
6899 movb $0,6+(%eax)
6900 movb %dh,7+(%eax)
6901 rorl $16,%edx
6902 #NO_APP
6904 So here we provide the missing zero. */
6906 *displacement_string_end = '0';
6908 #endif
6909 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
6910 if (gotfree_input_line)
6911 input_line_pointer = gotfree_input_line;
6913 exp_seg = expression (exp);
6915 SKIP_WHITESPACE ();
6916 if (*input_line_pointer)
6917 as_bad (_("junk `%s' after expression"), input_line_pointer);
6918 #if GCC_ASM_O_HACK
6919 RESTORE_END_STRING (disp_end + 1);
6920 #endif
6921 input_line_pointer = save_input_line_pointer;
6922 if (gotfree_input_line)
6924 free (gotfree_input_line);
6926 if (exp->X_op == O_constant || exp->X_op == O_register)
6927 exp->X_op = O_illegal;
6930 ret = i386_finalize_displacement (exp_seg, exp, types, disp_start);
6932 RESTORE_END_STRING (disp_end);
6934 return ret;
6937 static int
6938 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
6939 i386_operand_type types, const char *disp_start)
6941 i386_operand_type bigdisp;
6942 int ret = 1;
6944 /* We do this to make sure that the section symbol is in
6945 the symbol table. We will ultimately change the relocation
6946 to be relative to the beginning of the section. */
6947 if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF
6948 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL
6949 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
6951 if (exp->X_op != O_symbol)
6952 goto inv_disp;
6954 if (S_IS_LOCAL (exp->X_add_symbol)
6955 && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section
6956 && S_GET_SEGMENT (exp->X_add_symbol) != expr_section)
6957 section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
6958 exp->X_op = O_subtract;
6959 exp->X_op_symbol = GOT_symbol;
6960 if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL)
6961 i.reloc[this_operand] = BFD_RELOC_32_PCREL;
6962 else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
6963 i.reloc[this_operand] = BFD_RELOC_64;
6964 else
6965 i.reloc[this_operand] = BFD_RELOC_32;
6968 else if (exp->X_op == O_absent
6969 || exp->X_op == O_illegal
6970 || exp->X_op == O_big)
6972 inv_disp:
6973 as_bad (_("missing or invalid displacement expression `%s'"),
6974 disp_start);
6975 ret = 0;
6978 else if (flag_code == CODE_64BIT
6979 && !i.prefix[ADDR_PREFIX]
6980 && exp->X_op == O_constant)
6982 /* Since the displacement is sign extended to 64bit, don't allow
6983 disp32 and turn off disp32s if they are out of range. */
6984 i.types[this_operand].bitfield.disp32 = 0;
6985 if (!fits_in_signed_long (exp->X_add_number))
6987 i.types[this_operand].bitfield.disp32s = 0;
6988 if (i.types[this_operand].bitfield.baseindex)
6990 as_bad (_("0x%lx out of range of signed 32bit displacement"),
6991 (long) exp->X_add_number);
6992 ret = 0;
6997 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
6998 else if (exp->X_op != O_constant
6999 && OUTPUT_FLAVOR == bfd_target_aout_flavour
7000 && exp_seg != absolute_section
7001 && exp_seg != text_section
7002 && exp_seg != data_section
7003 && exp_seg != bss_section
7004 && exp_seg != undefined_section
7005 && !bfd_is_com_section (exp_seg))
7007 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
7008 ret = 0;
7010 #endif
7012 /* Check if this is a displacement only operand. */
7013 bigdisp = i.types[this_operand];
7014 bigdisp.bitfield.disp8 = 0;
7015 bigdisp.bitfield.disp16 = 0;
7016 bigdisp.bitfield.disp32 = 0;
7017 bigdisp.bitfield.disp32s = 0;
7018 bigdisp.bitfield.disp64 = 0;
7019 if (operand_type_all_zero (&bigdisp))
7020 i.types[this_operand] = operand_type_and (i.types[this_operand],
7021 types);
7023 return ret;
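/* A worked example of the 64-bit range check above (hypothetical
   operands): "movq 0x100000000(%rax), %rbx" has a constant
   displacement that does not fit in a signed 32-bit field, so Disp32S
   is cleared and, because a base register is present (BaseIndex set),
   the "out of range" error fires.  A bare 64-bit constant address
   with no base or index, as used by movabs, keeps its Disp64 bit from
   i386_displacement and remains acceptable.  */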
7026 /* Make sure the memory operand we've been dealt is valid.
7027 Return 1 on success, 0 on a failure. */
7029 static int
7030 i386_index_check (const char *operand_string)
7032 int ok;
7033 const char *kind = "base/index";
7034 #if INFER_ADDR_PREFIX
7035 int fudged = 0;
7037 tryprefix:
7038 #endif
7039 ok = 1;
7040 if (current_templates->start->opcode_modifier.isstring
7041 && !current_templates->start->opcode_modifier.immext
7042 && (current_templates->end[-1].opcode_modifier.isstring
7043 || i.mem_operands))
7045 /* Memory operands of string insns are special in that they only allow
7046 a single register (rDI, rSI, or rBX) as their memory address. */
7047 unsigned int expected;
7049 kind = "string address";
7051 if (current_templates->start->opcode_modifier.w)
7053 i386_operand_type type = current_templates->end[-1].operand_types[0];
7055 if (!type.bitfield.baseindex
7056 || ((!i.mem_operands != !intel_syntax)
7057 && current_templates->end[-1].operand_types[1]
7058 .bitfield.baseindex))
7059 type = current_templates->end[-1].operand_types[1];
7060 expected = type.bitfield.esseg ? 7 /* rDI */ : 6 /* rSI */;
7062 else
7063 expected = 3 /* rBX */;
7065 if (!i.base_reg || i.index_reg
7066 || operand_type_check (i.types[this_operand], disp))
7067 ok = -1;
7068 else if (!(flag_code == CODE_64BIT
7069 ? i.prefix[ADDR_PREFIX]
7070 ? i.base_reg->reg_type.bitfield.reg32
7071 : i.base_reg->reg_type.bitfield.reg64
7072 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
7073 ? i.base_reg->reg_type.bitfield.reg32
7074 : i.base_reg->reg_type.bitfield.reg16))
7075 ok = 0;
7076 else if (i.base_reg->reg_num != expected)
7077 ok = -1;
7079 if (ok < 0)
7081 unsigned int j;
7083 for (j = 0; j < i386_regtab_size; ++j)
7084 if ((flag_code == CODE_64BIT
7085 ? i.prefix[ADDR_PREFIX]
7086 ? i386_regtab[j].reg_type.bitfield.reg32
7087 : i386_regtab[j].reg_type.bitfield.reg64
7088 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
7089 ? i386_regtab[j].reg_type.bitfield.reg32
7090 : i386_regtab[j].reg_type.bitfield.reg16)
7091 && i386_regtab[j].reg_num == expected)
7092 break;
7093 gas_assert (j < i386_regtab_size);
7094 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
7095 operand_string,
7096 intel_syntax ? '[' : '(',
7097 register_prefix,
7098 i386_regtab[j].reg_name,
7099 intel_syntax ? ']' : ')');
7100 ok = 1;
7103 else if (flag_code == CODE_64BIT)
7105 if ((i.base_reg
7106 && ((i.prefix[ADDR_PREFIX] == 0
7107 && !i.base_reg->reg_type.bitfield.reg64)
7108 || (i.prefix[ADDR_PREFIX]
7109 && !i.base_reg->reg_type.bitfield.reg32))
7110 && (i.index_reg
7111 || i.base_reg->reg_num !=
7112 (i.prefix[ADDR_PREFIX] == 0 ? RegRip : RegEip)))
7113 || (i.index_reg
7114 && (!i.index_reg->reg_type.bitfield.baseindex
7115 || (i.prefix[ADDR_PREFIX] == 0
7116 && i.index_reg->reg_num != RegRiz
7117 && !i.index_reg->reg_type.bitfield.reg64
7119 || (i.prefix[ADDR_PREFIX]
7120 && i.index_reg->reg_num != RegEiz
7121 && !i.index_reg->reg_type.bitfield.reg32))))
7122 ok = 0;
7124 else
7126 if ((flag_code == CODE_16BIT) ^ (i.prefix[ADDR_PREFIX] != 0))
7128 /* 16bit checks. */
7129 if ((i.base_reg
7130 && (!i.base_reg->reg_type.bitfield.reg16
7131 || !i.base_reg->reg_type.bitfield.baseindex))
7132 || (i.index_reg
7133 && (!i.index_reg->reg_type.bitfield.reg16
7134 || !i.index_reg->reg_type.bitfield.baseindex
7135 || !(i.base_reg
7136 && i.base_reg->reg_num < 6
7137 && i.index_reg->reg_num >= 6
7138 && i.log2_scale_factor == 0))))
7139 ok = 0;
7141 else
7143 /* 32bit checks. */
7144 if ((i.base_reg
7145 && !i.base_reg->reg_type.bitfield.reg32)
7146 || (i.index_reg
7147 && ((!i.index_reg->reg_type.bitfield.reg32
7148 && i.index_reg->reg_num != RegEiz)
7149 || !i.index_reg->reg_type.bitfield.baseindex)))
7150 ok = 0;
7153 if (!ok)
7155 #if INFER_ADDR_PREFIX
7156 if (!i.mem_operands && !i.prefix[ADDR_PREFIX])
7158 i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
7159 i.prefixes += 1;
7160 /* Change the size of any displacement too. At most one of
7161 Disp16 or Disp32 is set.
7162 FIXME. There doesn't seem to be any real need for separate
7163 Disp16 and Disp32 flags. The same goes for Imm16 and Imm32.
7164 Removing them would probably clean up the code quite a lot. */
7165 if (flag_code != CODE_64BIT
7166 && (i.types[this_operand].bitfield.disp16
7167 || i.types[this_operand].bitfield.disp32))
7168 i.types[this_operand]
7169 = operand_type_xor (i.types[this_operand], disp16_32);
7170 fudged = 1;
7171 goto tryprefix;
7173 if (fudged)
7174 as_bad (_("`%s' is not a valid %s expression"),
7175 operand_string,
7176 kind);
7177 else
7178 #endif
7179 as_bad (_("`%s' is not a valid %s-bit %s expression"),
7180 operand_string,
7181 flag_code_names[i.prefix[ADDR_PREFIX]
7182 ? flag_code == CODE_32BIT
7183 ? CODE_16BIT
7184 : CODE_32BIT
7185 : flag_code],
7186 kind);
7188 return ok;
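/* How the INFER_ADDR_PREFIX retry above plays out on a hypothetical
   line: in ".code32", "mov (%bp,%si), %ax" fails the 32-bit
   base/index test, so an address-size prefix is assumed, any Disp32
   on the operand is flipped to Disp16, and the check is re-run under
   16-bit rules, which %bp/%si satisfy.  Only if the fudged retry also
   fails is the "not a valid ... expression" diagnostic issued.  */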
7191 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
7192 on error. */
7194 static int
7195 i386_att_operand (char *operand_string)
7197 const reg_entry *r;
7198 char *end_op;
7199 char *op_string = operand_string;
7201 if (is_space_char (*op_string))
7202 ++op_string;
7204 /* We check for an absolute prefix (differentiating,
7205 for example, 'jmp pc_relative_label' from 'jmp *absolute_label'). */
7206 if (*op_string == ABSOLUTE_PREFIX)
7208 ++op_string;
7209 if (is_space_char (*op_string))
7210 ++op_string;
7211 i.types[this_operand].bitfield.jumpabsolute = 1;
7214 /* Check if operand is a register. */
7215 if ((r = parse_register (op_string, &end_op)) != NULL)
7217 i386_operand_type temp;
7219 /* Check for a segment override by searching for ':' after a
7220 segment register. */
7221 op_string = end_op;
7222 if (is_space_char (*op_string))
7223 ++op_string;
7224 if (*op_string == ':'
7225 && (r->reg_type.bitfield.sreg2
7226 || r->reg_type.bitfield.sreg3))
7228 switch (r->reg_num)
7230 case 0:
7231 i.seg[i.mem_operands] = &es;
7232 break;
7233 case 1:
7234 i.seg[i.mem_operands] = &cs;
7235 break;
7236 case 2:
7237 i.seg[i.mem_operands] = &ss;
7238 break;
7239 case 3:
7240 i.seg[i.mem_operands] = &ds;
7241 break;
7242 case 4:
7243 i.seg[i.mem_operands] = &fs;
7244 break;
7245 case 5:
7246 i.seg[i.mem_operands] = &gs;
7247 break;
7250 /* Skip the ':' and whitespace. */
7251 ++op_string;
7252 if (is_space_char (*op_string))
7253 ++op_string;
7255 if (!is_digit_char (*op_string)
7256 && !is_identifier_char (*op_string)
7257 && *op_string != '('
7258 && *op_string != ABSOLUTE_PREFIX)
7260 as_bad (_("bad memory operand `%s'"), op_string);
7261 return 0;
7263 /* Handle case of %es:*foo. */
7264 if (*op_string == ABSOLUTE_PREFIX)
7266 ++op_string;
7267 if (is_space_char (*op_string))
7268 ++op_string;
7269 i.types[this_operand].bitfield.jumpabsolute = 1;
7271 goto do_memory_reference;
7273 if (*op_string)
7275 as_bad (_("junk `%s' after register"), op_string);
7276 return 0;
7278 temp = r->reg_type;
7279 temp.bitfield.baseindex = 0;
7280 i.types[this_operand] = operand_type_or (i.types[this_operand],
7281 temp);
7282 i.types[this_operand].bitfield.unspecified = 0;
7283 i.op[this_operand].regs = r;
7284 i.reg_operands++;
7286 else if (*op_string == REGISTER_PREFIX)
7288 as_bad (_("bad register name `%s'"), op_string);
7289 return 0;
7291 else if (*op_string == IMMEDIATE_PREFIX)
7293 ++op_string;
7294 if (i.types[this_operand].bitfield.jumpabsolute)
7296 as_bad (_("immediate operand illegal with absolute jump"));
7297 return 0;
7299 if (!i386_immediate (op_string))
7300 return 0;
7302 else if (is_digit_char (*op_string)
7303 || is_identifier_char (*op_string)
7304 || *op_string == '(')
7306 /* This is a memory reference of some sort. */
7307 char *base_string;
7309 /* Start and end of displacement string expression (if found). */
7310 char *displacement_string_start;
7311 char *displacement_string_end;
7313 do_memory_reference:
7314 if ((i.mem_operands == 1
7315 && !current_templates->start->opcode_modifier.isstring)
7316 || i.mem_operands == 2)
7318 as_bad (_("too many memory references for `%s'"),
7319 current_templates->start->name);
7320 return 0;
7323 /* Check for base index form. We detect the base index form by
7324 looking for an ')' at the end of the operand, searching
7325 for the '(' matching it, and finding a REGISTER_PREFIX or ','
7326 after the '('. */
7327 base_string = op_string + strlen (op_string);
7329 --base_string;
7330 if (is_space_char (*base_string))
7331 --base_string;
7333 /* If we only have a displacement, set up for it to be parsed later. */
7334 displacement_string_start = op_string;
7335 displacement_string_end = base_string + 1;
7337 if (*base_string == ')')
7339 char *temp_string;
7340 unsigned int parens_balanced = 1;
7341 /* We've already checked that the numbers of left & right ()'s are
7342 equal, so this loop will not be infinite. */
7345 base_string--;
7346 if (*base_string == ')')
7347 parens_balanced++;
7348 if (*base_string == '(')
7349 parens_balanced--;
7351 while (parens_balanced);
7353 temp_string = base_string;
7355 /* Skip past '(' and whitespace. */
7356 ++base_string;
7357 if (is_space_char (*base_string))
7358 ++base_string;
7360 if (*base_string == ','
7361 || ((i.base_reg = parse_register (base_string, &end_op))
7362 != NULL))
7364 displacement_string_end = temp_string;
7366 i.types[this_operand].bitfield.baseindex = 1;
7368 if (i.base_reg)
7370 base_string = end_op;
7371 if (is_space_char (*base_string))
7372 ++base_string;
7375 /* There may be an index reg or scale factor here. */
7376 if (*base_string == ',')
7378 ++base_string;
7379 if (is_space_char (*base_string))
7380 ++base_string;
7382 if ((i.index_reg = parse_register (base_string, &end_op))
7383 != NULL)
7385 base_string = end_op;
7386 if (is_space_char (*base_string))
7387 ++base_string;
7388 if (*base_string == ',')
7390 ++base_string;
7391 if (is_space_char (*base_string))
7392 ++base_string;
7394 else if (*base_string != ')')
7396 as_bad (_("expecting `,' or `)' "
7397 "after index register in `%s'"),
7398 operand_string);
7399 return 0;
7402 else if (*base_string == REGISTER_PREFIX)
7404 as_bad (_("bad register name `%s'"), base_string);
7405 return 0;
7408 /* Check for scale factor. */
7409 if (*base_string != ')')
7411 char *end_scale = i386_scale (base_string);
7413 if (!end_scale)
7414 return 0;
7416 base_string = end_scale;
7417 if (is_space_char (*base_string))
7418 ++base_string;
7419 if (*base_string != ')')
7421 as_bad (_("expecting `)' "
7422 "after scale factor in `%s'"),
7423 operand_string);
7424 return 0;
7427 else if (!i.index_reg)
7429 as_bad (_("expecting index register or scale factor "
7430 "after `,'; got '%c'"),
7431 *base_string);
7432 return 0;
7435 else if (*base_string != ')')
7437 as_bad (_("expecting `,' or `)' "
7438 "after base register in `%s'"),
7439 operand_string);
7440 return 0;
7443 else if (*base_string == REGISTER_PREFIX)
7445 as_bad (_("bad register name `%s'"), base_string);
7446 return 0;
7450 /* If there's an expression beginning the operand, parse it,
7451 assuming displacement_string_start and
7452 displacement_string_end are meaningful. */
7453 if (displacement_string_start != displacement_string_end)
7455 if (!i386_displacement (displacement_string_start,
7456 displacement_string_end))
7457 return 0;
7460 /* Special case for (%dx) while doing input/output op. */
7461 if (i.base_reg
7462 && operand_type_equal (&i.base_reg->reg_type,
7463 &reg16_inoutportreg)
7464 && i.index_reg == 0
7465 && i.log2_scale_factor == 0
7466 && i.seg[i.mem_operands] == 0
7467 && !operand_type_check (i.types[this_operand], disp))
7469 i.types[this_operand] = inoutportreg;
7470 return 1;
7473 if (i386_index_check (operand_string) == 0)
7474 return 0;
7475 i.types[this_operand].bitfield.mem = 1;
7476 i.mem_operands++;
7478 else
7480 /* It's not a memory operand; argh! */
7481 as_bad (_("invalid char %s beginning operand %d `%s'"),
7482 output_invalid (*op_string),
7483 this_operand + 1,
7484 op_string);
7485 return 0;
7487 return 1; /* Normal return. */
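/* A walk through the routine above for one hypothetical AT&T operand,
   "-4(%ebp,%ebx,2)": the trailing ')' triggers the base/index path,
   the matching '(' is located, "%ebp" becomes i.base_reg, "%ebx"
   i.index_reg, and "2" gives log2_scale_factor == 1 via i386_scale;
   what is left between displacement_string_start and
   displacement_string_end is "-4", which goes to i386_displacement
   before i386_index_check accepts the 32-bit base/index pair.  */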
7490 /* md_estimate_size_before_relax()
7492 Called just before relax() for rs_machine_dependent frags. The x86
7493 assembler uses these frags to handle variable size jump
7494 instructions.
7496 Any symbol that is now undefined will not become defined.
7497 Return the correct fr_subtype in the frag.
7498 Return the initial "guess for variable size of frag" to caller.
7499 The guess is actually the growth beyond the fixed part. Whatever
7500 we do to grow the fixed or variable part contributes to our
7501 returned value. */
7504 md_estimate_size_before_relax (fragP, segment)
7505 fragS *fragP;
7506 segT segment;
7508 /* We've already got fragP->fr_subtype right; all we have to do is
7509 check for un-relaxable symbols. On an ELF system, we can't relax
7510 an externally visible symbol, because it may be overridden by a
7511 shared library. */
7512 if (S_GET_SEGMENT (fragP->fr_symbol) != segment
7513 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7514 || (IS_ELF
7515 && (S_IS_EXTERNAL (fragP->fr_symbol)
7516 || S_IS_WEAK (fragP->fr_symbol)
7517 || ((symbol_get_bfdsym (fragP->fr_symbol)->flags
7518 & BSF_GNU_INDIRECT_FUNCTION))))
7519 #endif
7520 #if defined (OBJ_COFF) && defined (TE_PE)
7521 || (OUTPUT_FLAVOR == bfd_target_coff_flavour
7522 && S_IS_WEAK (fragP->fr_symbol))
7523 #endif
7526 /* Symbol is undefined in this segment, or we need to keep a
7527 reloc so that weak symbols can be overridden. */
7528 int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
7529 enum bfd_reloc_code_real reloc_type;
7530 unsigned char *opcode;
7531 int old_fr_fix;
7533 if (fragP->fr_var != NO_RELOC)
7534 reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
7535 else if (size == 2)
7536 reloc_type = BFD_RELOC_16_PCREL;
7537 else
7538 reloc_type = BFD_RELOC_32_PCREL;
7540 old_fr_fix = fragP->fr_fix;
7541 opcode = (unsigned char *) fragP->fr_opcode;
7543 switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
7545 case UNCOND_JUMP:
7546 /* Make jmp (0xeb) a (d)word displacement jump. */
7547 opcode[0] = 0xe9;
7548 fragP->fr_fix += size;
7549 fix_new (fragP, old_fr_fix, size,
7550 fragP->fr_symbol,
7551 fragP->fr_offset, 1,
7552 reloc_type);
7553 break;
7555 case COND_JUMP86:
7556 if (size == 2
7557 && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
7559 /* Negate the condition, and branch past an
7560 unconditional jump. */
7561 opcode[0] ^= 1;
7562 opcode[1] = 3;
7563 /* Insert an unconditional jump. */
7564 opcode[2] = 0xe9;
7565 /* We added two extra opcode bytes, and have a two byte
7566 offset. */
7567 fragP->fr_fix += 2 + 2;
7568 fix_new (fragP, old_fr_fix + 2, 2,
7569 fragP->fr_symbol,
7570 fragP->fr_offset, 1,
7571 reloc_type);
7572 break;
7574 /* Fall through. */
7576 case COND_JUMP:
7577 if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
7579 fixS *fixP;
7581 fragP->fr_fix += 1;
7582 fixP = fix_new (fragP, old_fr_fix, 1,
7583 fragP->fr_symbol,
7584 fragP->fr_offset, 1,
7585 BFD_RELOC_8_PCREL);
7586 fixP->fx_signed = 1;
7587 break;
7590 /* This changes the byte-displacement jump 0x7N
7591 to the (d)word-displacement jump 0x0f,0x8N. */
7592 opcode[1] = opcode[0] + 0x10;
7593 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7594 /* We've added an opcode byte. */
7595 fragP->fr_fix += 1 + size;
7596 fix_new (fragP, old_fr_fix + 1, size,
7597 fragP->fr_symbol,
7598 fragP->fr_offset, 1,
7599 reloc_type);
7600 break;
7602 default:
7603 BAD_CASE (fragP->fr_subtype);
7604 break;
7606 frag_wane (fragP);
7607 return fragP->fr_fix - old_fr_fix;
7610 /* Guess size depending on current relax state. Initially the relax
7611 state will correspond to a short jump and we return 1, because
7612 the variable part of the frag (the branch offset) is one byte
7613 long. However, we can relax a section more than once and in that
7614 case we must either set fr_subtype back to the unrelaxed state,
7615 or return the value for the appropriate branch. */
7616 return md_relax_table[fragP->fr_subtype].rlx_length;
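/* A hypothetical example of the growth reported above: "jmp extsym"
   is first emitted as the two-byte short form (0xEB disp8).  If
   extsym lives in another section, or is external/weak under ELF, the
   opcode is rewritten here to 0xE9 with a 4-byte pc-relative fixup,
   so the return value is 4 beyond the old fr_fix; a conditional
   "je extsym" grows by 5 instead, 0x7N disp8 becoming
   0x0F 0x8N disp32.  */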
7619 /* Called after relax() is finished.
7621 In: Address of frag.
7622 fr_type == rs_machine_dependent.
7623 fr_subtype is what the address relaxed to.
7625 Out: Any fixSs and constants are set up.
7626 Caller will turn frag into a ".space 0". */
7628 void
7629 md_convert_frag (abfd, sec, fragP)
7630 bfd *abfd ATTRIBUTE_UNUSED;
7631 segT sec ATTRIBUTE_UNUSED;
7632 fragS *fragP;
7634 unsigned char *opcode;
7635 unsigned char *where_to_put_displacement = NULL;
7636 offsetT target_address;
7637 offsetT opcode_address;
7638 unsigned int extension = 0;
7639 offsetT displacement_from_opcode_start;
7641 opcode = (unsigned char *) fragP->fr_opcode;
7643 /* Address we want to reach in file space. */
7644 target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset;
7646 /* Address opcode resides at in file space. */
7647 opcode_address = fragP->fr_address + fragP->fr_fix;
7649 /* Displacement from opcode start to fill into instruction. */
7650 displacement_from_opcode_start = target_address - opcode_address;
7652 if ((fragP->fr_subtype & BIG) == 0)
7654 /* Don't have to change opcode. */
7655 extension = 1; /* 1 opcode + 1 displacement */
7656 where_to_put_displacement = &opcode[1];
7658 else
7660 if (no_cond_jump_promotion
7661 && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
7662 as_warn_where (fragP->fr_file, fragP->fr_line,
7663 _("long jump required"));
7665 switch (fragP->fr_subtype)
7667 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
7668 extension = 4; /* 1 opcode + 4 displacement */
7669 opcode[0] = 0xe9;
7670 where_to_put_displacement = &opcode[1];
7671 break;
7673 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
7674 extension = 2; /* 1 opcode + 2 displacement */
7675 opcode[0] = 0xe9;
7676 where_to_put_displacement = &opcode[1];
7677 break;
7679 case ENCODE_RELAX_STATE (COND_JUMP, BIG):
7680 case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
7681 extension = 5; /* 2 opcode + 4 displacement */
7682 opcode[1] = opcode[0] + 0x10;
7683 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7684 where_to_put_displacement = &opcode[2];
7685 break;
7687 case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
7688 extension = 3; /* 2 opcode + 2 displacement */
7689 opcode[1] = opcode[0] + 0x10;
7690 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7691 where_to_put_displacement = &opcode[2];
7692 break;
7694 case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
7695 extension = 4;
7696 opcode[0] ^= 1;
7697 opcode[1] = 3;
7698 opcode[2] = 0xe9;
7699 where_to_put_displacement = &opcode[3];
7700 break;
7702 default:
7703 BAD_CASE (fragP->fr_subtype);
7704 break;
7708 /* If size is less than four we are sure that the operand fits,
7709 but if it's 4, then it could be that the displacement is larger
7710 than +/- 2GB. */
7711 if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
7712 && object_64bit
7713 && ((addressT) (displacement_from_opcode_start - extension
7714 + ((addressT) 1 << 31))
7715 > (((addressT) 2 << 31) - 1)))
7717 as_bad_where (fragP->fr_file, fragP->fr_line,
7718 _("jump target out of range"));
7719 /* Make us emit 0. */
7720 displacement_from_opcode_start = extension;
7722 /* Now put displacement after opcode. */
7723 md_number_to_chars ((char *) where_to_put_displacement,
7724 (valueT) (displacement_from_opcode_start - extension),
7725 DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
7726 fragP->fr_fix += extension;
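/* A quick arithmetic check on the value stored above (addresses are
   hypothetical): displacement_from_opcode_start - extension equals
   the target address minus the address of the byte following the
   relaxed jump, which is the pc-relative quantity the CPU adds to the
   updated instruction pointer.  For a BIG unconditional jmp ending at
   0x104 with its target at 0x200, the 4-byte field thus receives
   0xFC.  */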
7729 /* Apply a fixup (fixS) to segment data, once it has been determined
7730 by our caller that we have all the info we need to fix it up.
7732 On the 386, immediates, displacements, and data pointers are all in
7733 the same (little-endian) format, so we don't need to care about which
7734 we are handling. */
7736 void
7737 md_apply_fix (fixP, valP, seg)
7738 /* The fix we're to put in. */
7739 fixS *fixP;
7740 /* Pointer to the value of the bits. */
7741 valueT *valP;
7742 /* Segment fix is from. */
7743 segT seg ATTRIBUTE_UNUSED;
7745 char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
7746 valueT value = *valP;
7748 #if !defined (TE_Mach)
7749 if (fixP->fx_pcrel)
7751 switch (fixP->fx_r_type)
7753 default:
7754 break;
7756 case BFD_RELOC_64:
7757 fixP->fx_r_type = BFD_RELOC_64_PCREL;
7758 break;
7759 case BFD_RELOC_32:
7760 case BFD_RELOC_X86_64_32S:
7761 fixP->fx_r_type = BFD_RELOC_32_PCREL;
7762 break;
7763 case BFD_RELOC_16:
7764 fixP->fx_r_type = BFD_RELOC_16_PCREL;
7765 break;
7766 case BFD_RELOC_8:
7767 fixP->fx_r_type = BFD_RELOC_8_PCREL;
7768 break;
7772 if (fixP->fx_addsy != NULL
7773 && (fixP->fx_r_type == BFD_RELOC_32_PCREL
7774 || fixP->fx_r_type == BFD_RELOC_64_PCREL
7775 || fixP->fx_r_type == BFD_RELOC_16_PCREL
7776 || fixP->fx_r_type == BFD_RELOC_8_PCREL)
7777 && !use_rela_relocations)
7779 /* This is a hack. There should be a better way to handle this.
7780 This covers for the fact that bfd_install_relocation will
7781 subtract the current location (for partial_inplace, PC relative
7782 relocations); see more below. */
7783 #ifndef OBJ_AOUT
7784 if (IS_ELF
7785 #ifdef TE_PE
7786 || OUTPUT_FLAVOR == bfd_target_coff_flavour
7787 #endif
7789 value += fixP->fx_where + fixP->fx_frag->fr_address;
7790 #endif
7791 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7792 if (IS_ELF)
7794 segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);
7796 if ((sym_seg == seg
7797 || (symbol_section_p (fixP->fx_addsy)
7798 && sym_seg != absolute_section))
7799 && !generic_force_reloc (fixP))
7801 /* Yes, we add the values in twice. This is because
7802 bfd_install_relocation subtracts them out again. I think
7803 bfd_install_relocation is broken, but I don't dare change
7804 it. FIXME. */
7805 value += fixP->fx_where + fixP->fx_frag->fr_address;
7808 #endif
7809 #if defined (OBJ_COFF) && defined (TE_PE)
7810 /* For some reason, the PE format does not store a
7811 section address offset for a PC relative symbol. */
7812 if (S_GET_SEGMENT (fixP->fx_addsy) != seg
7813 || S_IS_WEAK (fixP->fx_addsy))
7814 value += md_pcrel_from (fixP);
7815 #endif
7817 #if defined (OBJ_COFF) && defined (TE_PE)
7818 if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
7820 value -= S_GET_VALUE (fixP->fx_addsy);
7822 #endif
7824 /* Fix a few things - the dynamic linker expects certain values here,
7825 and we must not disappoint it. */
7826 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7827 if (IS_ELF && fixP->fx_addsy)
7828 switch (fixP->fx_r_type)
7830 case BFD_RELOC_386_PLT32:
7831 case BFD_RELOC_X86_64_PLT32:
7832 /* Make the jump instruction point to the address of the operand. At
7833 runtime we merely add the offset to the actual PLT entry. */
7834 value = -4;
7835 break;
7837 case BFD_RELOC_386_TLS_GD:
7838 case BFD_RELOC_386_TLS_LDM:
7839 case BFD_RELOC_386_TLS_IE_32:
7840 case BFD_RELOC_386_TLS_IE:
7841 case BFD_RELOC_386_TLS_GOTIE:
7842 case BFD_RELOC_386_TLS_GOTDESC:
7843 case BFD_RELOC_X86_64_TLSGD:
7844 case BFD_RELOC_X86_64_TLSLD:
7845 case BFD_RELOC_X86_64_GOTTPOFF:
7846 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
7847 value = 0; /* Fully resolved at runtime. No addend. */
7848 /* Fallthrough */
7849 case BFD_RELOC_386_TLS_LE:
7850 case BFD_RELOC_386_TLS_LDO_32:
7851 case BFD_RELOC_386_TLS_LE_32:
7852 case BFD_RELOC_X86_64_DTPOFF32:
7853 case BFD_RELOC_X86_64_DTPOFF64:
7854 case BFD_RELOC_X86_64_TPOFF32:
7855 case BFD_RELOC_X86_64_TPOFF64:
7856 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7857 break;
7859 case BFD_RELOC_386_TLS_DESC_CALL:
7860 case BFD_RELOC_X86_64_TLSDESC_CALL:
7861 value = 0; /* Fully resolved at runtime. No addend. */
7862 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7863 fixP->fx_done = 0;
7864 return;
7866 case BFD_RELOC_386_GOT32:
7867 case BFD_RELOC_X86_64_GOT32:
7868 value = 0; /* Fully resolved at runtime. No addend. */
7869 break;
7871 case BFD_RELOC_VTABLE_INHERIT:
7872 case BFD_RELOC_VTABLE_ENTRY:
7873 fixP->fx_done = 0;
7874 return;
7876 default:
7877 break;
7879 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
7880 *valP = value;
7881 #endif /* !defined (TE_Mach) */
7883 /* Are we finished with this relocation now? */
7884 if (fixP->fx_addsy == NULL)
7885 fixP->fx_done = 1;
7886 #if defined (OBJ_COFF) && defined (TE_PE)
7887 else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
7889 fixP->fx_done = 0;
7890 /* Remember value for tc_gen_reloc. */
7891 fixP->fx_addnumber = value;
7892 /* Clear out the frag for now. */
7893 value = 0;
7895 #endif
7896 else if (use_rela_relocations)
7898 fixP->fx_no_overflow = 1;
7899 /* Remember value for tc_gen_reloc. */
7900 fixP->fx_addnumber = value;
7901 value = 0;
7904 md_number_to_chars (p, value, fixP->fx_size);
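/* A short rationale for the PLT32 value of -4 above, reasoning only:
   for "call func@PLT" the fixup covers the last four bytes of the
   five-byte call, so with value = -4 the link-time result is
   PLT_entry - 4 - place, i.e. the PLT entry minus the call's return
   address, which is exactly the rel32 the instruction needs.  */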
7907 char *
7908 md_atof (int type, char *litP, int *sizeP)
7910 /* This outputs the LITTLENUMs in REVERSE order;
7911 in accord with the little-endian 386. */
7912 return ieee_md_atof (type, litP, sizeP, FALSE);
7915 static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];
7917 static char *
7918 output_invalid (int c)
7920 if (ISPRINT (c))
7921 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
7922 "'%c'", c);
7923 else
7924 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
7925 "(0x%x)", (unsigned char) c);
7926 return output_invalid_buf;
7929 /* REG_STRING starts *before* REGISTER_PREFIX. */
7931 static const reg_entry *
7932 parse_real_register (char *reg_string, char **end_op)
7934 char *s = reg_string;
7935 char *p;
7936 char reg_name_given[MAX_REG_NAME_SIZE + 1];
7937 const reg_entry *r;
7939 /* Skip possible REGISTER_PREFIX and possible whitespace. */
7940 if (*s == REGISTER_PREFIX)
7941 ++s;
7943 if (is_space_char (*s))
7944 ++s;
7946 p = reg_name_given;
7947 while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
7949 if (p >= reg_name_given + MAX_REG_NAME_SIZE)
7950 return (const reg_entry *) NULL;
7951 s++;
7954 /* For naked regs, make sure that we are not dealing with an identifier.
7955 This prevents confusing an identifier like `eax_var' with register
7956 `eax'. */
7957 if (allow_naked_reg && identifier_chars[(unsigned char) *s])
7958 return (const reg_entry *) NULL;
7960 *end_op = s;
7962 r = (const reg_entry *) hash_find (reg_hash, reg_name_given);
7964 /* Handle floating point regs, allowing spaces in the (i) part. */
7965 if (r == i386_regtab /* %st is first entry of table */)
7967 if (is_space_char (*s))
7968 ++s;
7969 if (*s == '(')
7971 ++s;
7972 if (is_space_char (*s))
7973 ++s;
7974 if (*s >= '0' && *s <= '7')
7976 int fpr = *s - '0';
7977 ++s;
7978 if (is_space_char (*s))
7979 ++s;
7980 if (*s == ')')
7982 *end_op = s + 1;
7983 r = (const reg_entry *) hash_find (reg_hash, "st(0)");
7984 know (r);
7985 return r + fpr;
7988 /* We have "%st(" then garbage. */
7989 return (const reg_entry *) NULL;
7993 if (r == NULL || allow_pseudo_reg)
7994 return r;
7996 if (operand_type_all_zero (&r->reg_type))
7997 return (const reg_entry *) NULL;
7999 if ((r->reg_type.bitfield.reg32
8000 || r->reg_type.bitfield.sreg3
8001 || r->reg_type.bitfield.control
8002 || r->reg_type.bitfield.debug
8003 || r->reg_type.bitfield.test)
8004 && !cpu_arch_flags.bitfield.cpui386)
8005 return (const reg_entry *) NULL;
8007 if (r->reg_type.bitfield.floatreg
8008 && !cpu_arch_flags.bitfield.cpu8087
8009 && !cpu_arch_flags.bitfield.cpu287
8010 && !cpu_arch_flags.bitfield.cpu387)
8011 return (const reg_entry *) NULL;
8013 if (r->reg_type.bitfield.regmmx && !cpu_arch_flags.bitfield.cpummx)
8014 return (const reg_entry *) NULL;
8016 if (r->reg_type.bitfield.regxmm && !cpu_arch_flags.bitfield.cpusse)
8017 return (const reg_entry *) NULL;
8019 if (r->reg_type.bitfield.regymm && !cpu_arch_flags.bitfield.cpuavx)
8020 return (const reg_entry *) NULL;
8022 /* Don't allow a fake index register unless allow_index_reg is non-zero. */
8023 if (!allow_index_reg
8024 && (r->reg_num == RegEiz || r->reg_num == RegRiz))
8025 return (const reg_entry *) NULL;
8027 if (((r->reg_flags & (RegRex64 | RegRex))
8028 || r->reg_type.bitfield.reg64)
8029 && (!cpu_arch_flags.bitfield.cpulm
8030 || !operand_type_equal (&r->reg_type, &control))
8031 && flag_code != CODE_64BIT)
8032 return (const reg_entry *) NULL;
8034 if (r->reg_type.bitfield.sreg3 && r->reg_num == RegFlat && !intel_syntax)
8035 return (const reg_entry *) NULL;
8037 return r;
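/* Examples of the %st() handling above (hypothetical input): both
   "%st(3)" and "%st ( 3 )" resolve to the reg_entry three slots past
   "st(0)" in i386_regtab, a bare "%st" stays as the table's first
   entry, and "%st(" followed by anything other than a digit 0-7 and a
   closing ')' makes the routine return NULL.  */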
8040 /* REG_STRING starts *before* REGISTER_PREFIX. */
8042 static const reg_entry *
8043 parse_register (char *reg_string, char **end_op)
8045 const reg_entry *r;
8047 if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
8048 r = parse_real_register (reg_string, end_op);
8049 else
8050 r = NULL;
8051 if (!r)
8053 char *save = input_line_pointer;
8054 char c;
8055 symbolS *symbolP;
8057 input_line_pointer = reg_string;
8058 c = get_symbol_end ();
8059 symbolP = symbol_find (reg_string);
8060 if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
8062 const expressionS *e = symbol_get_value_expression (symbolP);
8064 know (e->X_op == O_register);
8065 know (e->X_add_number >= 0
8066 && (valueT) e->X_add_number < i386_regtab_size);
8067 r = i386_regtab + e->X_add_number;
8068 *end_op = input_line_pointer;
8070 *input_line_pointer = c;
8071 input_line_pointer = save;
8073 return r;
8077 i386_parse_name (char *name, expressionS *e, char *nextcharP)
8079 const reg_entry *r;
8080 char *end = input_line_pointer;
8082 *end = *nextcharP;
8083 r = parse_register (name, &input_line_pointer);
8084 if (r && end <= input_line_pointer)
8086 *nextcharP = *input_line_pointer;
8087 *input_line_pointer = 0;
8088 e->X_op = O_register;
8089 e->X_add_number = r - i386_regtab;
8090 return 1;
8092 input_line_pointer = end;
8093 *end = 0;
8094 return intel_syntax ? i386_intel_parse_name (name, e) : 0;
8097 void
8098 md_operand (expressionS *e)
8100 char *end;
8101 const reg_entry *r;
8103 switch (*input_line_pointer)
8105 case REGISTER_PREFIX:
8106 r = parse_real_register (input_line_pointer, &end);
8107 if (r)
8109 e->X_op = O_register;
8110 e->X_add_number = r - i386_regtab;
8111 input_line_pointer = end;
8113 break;
8115 case '[':
8116 gas_assert (intel_syntax);
8117 end = input_line_pointer++;
8118 expression (e);
8119 if (*input_line_pointer == ']')
8121 ++input_line_pointer;
8122 e->X_op_symbol = make_expr_symbol (e);
8123 e->X_add_symbol = NULL;
8124 e->X_add_number = 0;
8125 e->X_op = O_index;
8127 else
8129 e->X_op = O_absent;
8130 input_line_pointer = end;
8132 break;
8137 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8138 const char *md_shortopts = "kVQ:sqn";
8139 #else
8140 const char *md_shortopts = "qn";
8141 #endif
8143 #define OPTION_32 (OPTION_MD_BASE + 0)
8144 #define OPTION_64 (OPTION_MD_BASE + 1)
8145 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
8146 #define OPTION_MARCH (OPTION_MD_BASE + 3)
8147 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
8148 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
8149 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
8150 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
8151 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
8152 #define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
8153 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
8154 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
8155 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 12)
8156 #define OPTION_X32 (OPTION_MD_BASE + 13)
8158 struct option md_longopts[] =
8160 {"32", no_argument, NULL, OPTION_32},
8161 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8162 || defined (TE_PE) || defined (TE_PEP))
8163 {"64", no_argument, NULL, OPTION_64},
8164 #endif
8165 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8166 {"x32", no_argument, NULL, OPTION_X32},
8167 #endif
8168 {"divide", no_argument, NULL, OPTION_DIVIDE},
8169 {"march", required_argument, NULL, OPTION_MARCH},
8170 {"mtune", required_argument, NULL, OPTION_MTUNE},
8171 {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
8172 {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
8173 {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
8174 {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
8175 {"mold-gcc", no_argument, NULL, OPTION_MOLD_GCC},
8176 {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
8177 {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
8178 {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
8179 {NULL, no_argument, NULL, 0}
8181 size_t md_longopts_size = sizeof (md_longopts);
8184 md_parse_option (int c, char *arg)
8186 unsigned int j;
8187 char *arch, *next;
8189 switch (c)
8191 case 'n':
8192 optimize_align_code = 0;
8193 break;
8195 case 'q':
8196 quiet_warnings = 1;
8197 break;
8199 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8200 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
8201 should be emitted or not. FIXME: Not implemented. */
8202 case 'Q':
8203 break;
8205 /* -V: SVR4 argument to print version ID. */
8206 case 'V':
8207 print_version_id ();
8208 break;
8210 /* -k: Ignore for FreeBSD compatibility. */
8211 case 'k':
8212 break;
8214 case 's':
8215 /* -s: On i386 Solaris, this tells the native assembler to use
8216 .stab instead of .stab.excl. We always use .stab anyhow. */
8217 break;
8218 #endif
8219 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8220 || defined (TE_PE) || defined (TE_PEP))
8221 case OPTION_64:
8223 const char **list, **l;
8225 list = bfd_target_list ();
8226 for (l = list; *l != NULL; l++)
8227 if (CONST_STRNEQ (*l, "elf64-x86-64")
8228 || strcmp (*l, "coff-x86-64") == 0
8229 || strcmp (*l, "pe-x86-64") == 0
8230 || strcmp (*l, "pei-x86-64") == 0)
8232 default_arch = "x86_64";
8233 break;
8235 if (*l == NULL)
8236 as_fatal (_("No compiled in support for x86_64"));
8237 free (list);
8239 break;
8240 #endif
8242 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8243 case OPTION_X32:
8244 if (IS_ELF)
8246 const char **list, **l;
8248 list = bfd_target_list ();
8249 for (l = list; *l != NULL; l++)
8250 if (CONST_STRNEQ (*l, "elf32-x86-64"))
8252 default_arch = "x86_64:32";
8253 break;
8255 if (*l == NULL)
8256 as_fatal (_("No compiled in support for 32bit x86_64"));
8257 free (list);
8259 else
8260 as_fatal (_("32bit x86_64 is only supported for ELF"));
8261 break;
8262 #endif
8264 case OPTION_32:
8265 default_arch = "i386";
8266 break;
8268 case OPTION_DIVIDE:
8269 #ifdef SVR4_COMMENT_CHARS
8271 char *n, *t;
8272 const char *s;
8274 n = (char *) xmalloc (strlen (i386_comment_chars) + 1);
8275 t = n;
8276 for (s = i386_comment_chars; *s != '\0'; s++)
8277 if (*s != '/')
8278 *t++ = *s;
8279 *t = '\0';
8280 i386_comment_chars = n;
8282 #endif
8283 break;
8285 case OPTION_MARCH:
8286 arch = xstrdup (arg);
8289 if (*arch == '.')
8290 as_fatal (_("Invalid -march= option: `%s'"), arg);
8291 next = strchr (arch, '+');
8292 if (next)
8293 *next++ = '\0';
8294 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8296 if (strcmp (arch, cpu_arch [j].name) == 0)
8298 /* Processor. */
8299 if (! cpu_arch[j].flags.bitfield.cpui386)
8300 continue;
8302 cpu_arch_name = cpu_arch[j].name;
8303 cpu_sub_arch_name = NULL;
8304 cpu_arch_flags = cpu_arch[j].flags;
8305 cpu_arch_isa = cpu_arch[j].type;
8306 cpu_arch_isa_flags = cpu_arch[j].flags;
8307 if (!cpu_arch_tune_set)
8309 cpu_arch_tune = cpu_arch_isa;
8310 cpu_arch_tune_flags = cpu_arch_isa_flags;
8312 break;
8314 else if (*cpu_arch [j].name == '.'
8315 && strcmp (arch, cpu_arch [j].name + 1) == 0)
8317 /* ISA extension. */
8318 i386_cpu_flags flags;
8320 if (!cpu_arch[j].negated)
8321 flags = cpu_flags_or (cpu_arch_flags,
8322 cpu_arch[j].flags);
8323 else
8324 flags = cpu_flags_and_not (cpu_arch_flags,
8325 cpu_arch[j].flags);
8326 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
8328 if (cpu_sub_arch_name)
8330 char *name = cpu_sub_arch_name;
8331 cpu_sub_arch_name = concat (name,
8332 cpu_arch[j].name,
8333 (const char *) NULL);
8334 free (name);
8336 else
8337 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
8338 cpu_arch_flags = flags;
8340 break;
8344 if (j >= ARRAY_SIZE (cpu_arch))
8345 as_fatal (_("Invalid -march= option: `%s'"), arg);
8347 arch = next;
8349 while (next != NULL );
8350 break;
8352 case OPTION_MTUNE:
8353 if (*arg == '.')
8354 as_fatal (_("Invalid -mtune= option: `%s'"), arg);
8355 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8357 if (strcmp (arg, cpu_arch [j].name) == 0)
8359 cpu_arch_tune_set = 1;
8360 cpu_arch_tune = cpu_arch [j].type;
8361 cpu_arch_tune_flags = cpu_arch[j].flags;
8362 break;
8365 if (j >= ARRAY_SIZE (cpu_arch))
8366 as_fatal (_("Invalid -mtune= option: `%s'"), arg);
8367 break;
8369 case OPTION_MMNEMONIC:
8370 if (strcasecmp (arg, "att") == 0)
8371 intel_mnemonic = 0;
8372 else if (strcasecmp (arg, "intel") == 0)
8373 intel_mnemonic = 1;
8374 else
8375 as_fatal (_("Invalid -mmnemonic= option: `%s'"), arg);
8376 break;
8378 case OPTION_MSYNTAX:
8379 if (strcasecmp (arg, "att") == 0)
8380 intel_syntax = 0;
8381 else if (strcasecmp (arg, "intel") == 0)
8382 intel_syntax = 1;
8383 else
8384 as_fatal (_("Invalid -msyntax= option: `%s'"), arg);
8385 break;
8387 case OPTION_MINDEX_REG:
8388 allow_index_reg = 1;
8389 break;
8391 case OPTION_MNAKED_REG:
8392 allow_naked_reg = 1;
8393 break;
8395 case OPTION_MOLD_GCC:
8396 old_gcc = 1;
8397 break;
8399 case OPTION_MSSE2AVX:
8400 sse2avx = 1;
8401 break;
8403 case OPTION_MSSE_CHECK:
8404 if (strcasecmp (arg, "error") == 0)
8405 sse_check = sse_check_error;
8406 else if (strcasecmp (arg, "warning") == 0)
8407 sse_check = sse_check_warning;
8408 else if (strcasecmp (arg, "none") == 0)
8409 sse_check = sse_check_none;
8410 else
8411 as_fatal (_("Invalid -msse-check= option: `%s'"), arg);
8412 break;
8414 case OPTION_MAVXSCALAR:
8415 if (strcasecmp (arg, "128") == 0)
8416 avxscalar = vex128;
8417 else if (strcasecmp (arg, "256") == 0)
8418 avxscalar = vex256;
8419 else
8420 as_fatal (_("Invalid -mavxscalar= option: `%s'"), arg);
8421 break;
8423 default:
8424 return 0;
8426 return 1;
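/* A hypothetical usage note on the three code models selected above,
   as they combine with i386_target_format below:

	as --32  foo.s		-> elf32-i386
	as --64  foo.s		-> elf64-x86-64  (X86_64_ABI)
	as --x32 foo.s		-> elf32-x86-64  (X86_64_X32_ABI)

   --x32 keeps the 64-bit instruction set but 32-bit addresses, hence
   the separate X86_64_X32_ABI setting rather than plain X86_64_ABI.  */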
8429 #define MESSAGE_TEMPLATE \
8432 static void
8433 show_arch (FILE *stream, int ext, int check)
8435 static char message[] = MESSAGE_TEMPLATE;
8436 char *start = message + 27;
8437 char *p;
8438 int size = sizeof (MESSAGE_TEMPLATE);
8439 int left;
8440 const char *name;
8441 int len;
8442 unsigned int j;
8444 p = start;
8445 left = size - (start - message);
8446 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8448 /* Should it be skipped? */
8449 if (cpu_arch [j].skip)
8450 continue;
8452 name = cpu_arch [j].name;
8453 len = cpu_arch [j].len;
8454 if (*name == '.')
8456 /* It is an extension. Skip if we aren't asked to show it. */
8457 if (ext)
8459 name++;
8460 len--;
8462 else
8463 continue;
8465 else if (ext)
 8467 /* It is a processor. Skip if we only show extensions. */
8468 continue;
8470 else if (check && ! cpu_arch[j].flags.bitfield.cpui386)
8472 /* It is an impossible processor - skip. */
8473 continue;
8476 /* Reserve 2 spaces for ", " or ",\0" */
8477 left -= len + 2;
8479 /* Check if there is any room. */
8480 if (left >= 0)
8482 if (p != start)
8484 *p++ = ',';
8485 *p++ = ' ';
8487 p = mempcpy (p, name, len);
8489 else
8491 /* Output the current message now and start a new one. */
8492 *p++ = ',';
8493 *p = '\0';
8494 fprintf (stream, "%s\n", message);
8495 p = start;
8496 left = size - (start - message) - len - 2;
8498 gas_assert (left >= 0);
8500 p = mempcpy (p, name, len);
8504 *p = '\0';
8505 fprintf (stream, "%s\n", message);
8508 void
8509 md_show_usage (FILE *stream)
8511 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8512 fprintf (stream, _("\
8513 -Q ignored\n\
8514 -V print assembler version number\n\
8515 -k ignored\n"));
8516 #endif
8517 fprintf (stream, _("\
8518 -n Do not optimize code alignment\n\
8519 -q quieten some warnings\n"));
8520 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8521 fprintf (stream, _("\
8522 -s ignored\n"));
8523 #endif
8524 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8525 || defined (TE_PE) || defined (TE_PEP))
8526 fprintf (stream, _("\
8527 --32/--64/--x32 generate 32bit/64bit/x32 code\n"));
8528 #endif
8529 #ifdef SVR4_COMMENT_CHARS
8530 fprintf (stream, _("\
8531 --divide do not treat `/' as a comment character\n"));
8532 #else
8533 fprintf (stream, _("\
8534 --divide ignored\n"));
8535 #endif
8536 fprintf (stream, _("\
8537 -march=CPU[,+EXTENSION...]\n\
8538 generate code for CPU and EXTENSION, CPU is one of:\n"));
8539 show_arch (stream, 0, 1);
8540 fprintf (stream, _("\
8541 EXTENSION is combination of:\n"));
8542 show_arch (stream, 1, 0);
8543 fprintf (stream, _("\
8544 -mtune=CPU optimize for CPU, CPU is one of:\n"));
8545 show_arch (stream, 0, 0);
8546 fprintf (stream, _("\
8547 -msse2avx encode SSE instructions with VEX prefix\n"));
8548 fprintf (stream, _("\
8549 -msse-check=[none|error|warning]\n\
8550 check SSE instructions\n"));
8551 fprintf (stream, _("\
8552 -mavxscalar=[128|256] encode scalar AVX instructions with specific vector\n\
8553 length\n"));
8554 fprintf (stream, _("\
8555 -mmnemonic=[att|intel] use AT&T/Intel mnemonic\n"));
8556 fprintf (stream, _("\
8557 -msyntax=[att|intel] use AT&T/Intel syntax\n"));
8558 fprintf (stream, _("\
8559 -mindex-reg support pseudo index registers\n"));
8560 fprintf (stream, _("\
8561 -mnaked-reg don't require `%%' prefix for registers\n"));
8562 fprintf (stream, _("\
8563 -mold-gcc support old (<= 2.8.1) versions of gcc\n"));
8566 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
8567 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8568 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8570 /* Pick the target format to use. */
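/* The ABI is derived from the default architecture string: a bare
   "x86_64" selects the 64-bit (LP64) ABI, a longer string beginning with
   "x86_64" (presumably what --x32 sets up) selects the ILP32 x32 ABI,
   and "i386" selects 32-bit code.  */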
8572 const char *
8573 i386_target_format (void)
8575 if (!strncmp (default_arch, "x86_64", 6))
8577 update_code_flag (CODE_64BIT, 1);
8578 if (default_arch[6] == '\0')
8579 x86_elf_abi = X86_64_ABI;
8580 else
8581 x86_elf_abi = X86_64_X32_ABI;
8583 else if (!strcmp (default_arch, "i386"))
8584 update_code_flag (CODE_32BIT, 1);
8585 else
8586 as_fatal (_("Unknown architecture"));
8588 if (cpu_flags_all_zero (&cpu_arch_isa_flags))
8589 cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
8590 if (cpu_flags_all_zero (&cpu_arch_tune_flags))
8591 cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;
8593 switch (OUTPUT_FLAVOR)
8595 #if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
8596 case bfd_target_aout_flavour:
8597 return AOUT_TARGET_FORMAT;
8598 #endif
8599 #if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
8600 # if defined (TE_PE) || defined (TE_PEP)
8601 case bfd_target_coff_flavour:
8602 return flag_code == CODE_64BIT ? "pe-x86-64" : "pe-i386";
8603 # elif defined (TE_GO32)
8604 case bfd_target_coff_flavour:
8605 return "coff-go32";
8606 # else
8607 case bfd_target_coff_flavour:
8608 return "coff-i386";
8609 # endif
8610 #endif
8611 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
8612 case bfd_target_elf_flavour:
8614 const char *format;
8616 switch (x86_elf_abi)
8618 default:
8619 format = ELF_TARGET_FORMAT;
8620 break;
8621 case X86_64_ABI:
8622 use_rela_relocations = 1;
8623 object_64bit = 1;
8624 format = ELF_TARGET_FORMAT64;
8625 break;
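/* The x32 ABI produces 32-bit ELF objects for the 64-bit instruction
   set: rela relocations are used as in 64-bit mode, but relocation
   types that exist only in 64-bit form cannot be represented, so
   disallow_64bit_reloc makes tc_gen_reloc reject them.  */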
8626 case X86_64_X32_ABI:
8627 use_rela_relocations = 1;
8628 object_64bit = 1;
8629 disallow_64bit_reloc = 1;
8630 format = ELF_TARGET_FORMAT32;
8631 break;
8633 if (cpu_arch_isa == PROCESSOR_L1OM)
8635 if (x86_elf_abi != X86_64_ABI)
8636 as_fatal (_("Intel L1OM is 64bit only"));
8637 return ELF_TARGET_L1OM_FORMAT;
8639 else
8640 return format;
8642 #endif
8643 #if defined (OBJ_MACH_O)
8644 case bfd_target_mach_o_flavour:
8645 return flag_code == CODE_64BIT ? "mach-o-x86-64" : "mach-o-i386";
8646 #endif
8647 default:
8648 abort ();
8649 return NULL;
8653 #endif /* OBJ_MAYBE_ more than one */
8655 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))
8656 void
8657 i386_elf_emit_arch_note (void)
8659 if (IS_ELF && cpu_arch_name != NULL)
8661 char *p;
8662 asection *seg = now_seg;
8663 subsegT subseg = now_subseg;
8664 Elf_Internal_Note i_note;
8665 Elf_External_Note e_note;
8666 asection *note_secp;
8667 int len;
8669 /* Create the .note section. */
8670 note_secp = subseg_new (".note", 0);
8671 bfd_set_section_flags (stdoutput,
8672 note_secp,
8673 SEC_HAS_CONTENTS | SEC_READONLY);
8675 /* Process the arch string. */
8676 len = strlen (cpu_arch_name);
8678 i_note.namesz = len + 1;
8679 i_note.descsz = 0;
8680 i_note.type = NT_ARCH;
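/* Emit the ELF note by hand: the namesz/descsz/type header words
   followed by the architecture name string, then pad to a 4-byte
   boundary.  */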
8681 p = frag_more (sizeof (e_note.namesz));
8682 md_number_to_chars (p, (valueT) i_note.namesz, sizeof (e_note.namesz));
8683 p = frag_more (sizeof (e_note.descsz));
8684 md_number_to_chars (p, (valueT) i_note.descsz, sizeof (e_note.descsz));
8685 p = frag_more (sizeof (e_note.type));
8686 md_number_to_chars (p, (valueT) i_note.type, sizeof (e_note.type));
8687 p = frag_more (len + 1);
8688 strcpy (p, cpu_arch_name);
8690 frag_align (2, 0, 0);
8692 subseg_set (seg, subseg);
8695 #endif
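/* Lazily create the GOT symbol the first time the assembler sees a
   reference to the global offset table name (GLOBAL_OFFSET_TABLE_NAME);
   the leading character tests are just a cheap filter before the full
   strcmp.  */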
 8697 symbolS *
 8698 md_undefined_symbol (char *name)
8701 if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
8702 && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
8703 && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
8704 && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
8706 if (!GOT_symbol)
8708 if (symbol_find (name))
8709 as_bad (_("GOT already in symbol table"));
8710 GOT_symbol = symbol_new (name, undefined_section,
8711 (valueT) 0, &zero_address_frag);
8713 return GOT_symbol;
8715 return 0;
8718 /* Round up a section size to the appropriate boundary. */
 8720 valueT
 8721 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
8725 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
8726 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
8728 /* For a.out, force the section size to be aligned. If we don't do
8729 this, BFD will align it for us, but it will not write out the
8730 final bytes of the section. This may be a bug in BFD, but it is
8731 easier to fix it here since that is how the other a.out targets
8732 work. */
8733 int align;
8735 align = bfd_get_section_alignment (stdoutput, segment);
8736 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
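/* For example, with align == 2 (a 4 byte boundary) a size of 5 becomes
   (5 + 3) & ~3 == 8.  */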
8738 #endif
8740 return size;
8743 /* On the i386, PC-relative offsets are relative to the start of the
8744 next instruction. That is, the address of the offset, plus its
8745 size, since the offset is always the last part of the insn. */
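/* For example, a call whose 4 byte displacement starts 1 byte into a
   frag at address 0x100 yields fr_address + fx_where + fx_size
   == 0x100 + 1 + 4 == 0x105, the address of the following
   instruction.  */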
8747 long
8748 md_pcrel_from (fixS *fixP)
8750 return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
8753 #ifndef I386COFF
8755 static void
8756 s_bss (int ignore ATTRIBUTE_UNUSED)
8758 int temp;
8760 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8761 if (IS_ELF)
8762 obj_elf_section_change_hook ();
8763 #endif
8764 temp = get_absolute_expression ();
8765 subseg_set (bss_section, (subsegT) temp);
8766 demand_empty_rest_of_line ();
8769 #endif
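/* Convert fixups that subtract _GLOBAL_OFFSET_TABLE_ into the
   corresponding GOT-relative relocations: GOTPCREL for a 32-bit
   PC-relative fixup (64-bit objects only), otherwise GOTOFF or
   GOTOFF64.  */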
8771 void
8772 i386_validate_fix (fixS *fixp)
8774 if (fixp->fx_subsy && fixp->fx_subsy == GOT_symbol)
8776 if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
8778 if (!object_64bit)
8779 abort ();
8780 fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
8782 else
8784 if (!object_64bit)
8785 fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
8786 else
8787 fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
8789 fixp->fx_subsy = 0;
 8793 arelent *
 8794 tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
8798 arelent *rel;
8799 bfd_reloc_code_real_type code;
8801 switch (fixp->fx_r_type)
8803 case BFD_RELOC_X86_64_PLT32:
8804 case BFD_RELOC_X86_64_GOT32:
8805 case BFD_RELOC_X86_64_GOTPCREL:
8806 case BFD_RELOC_386_PLT32:
8807 case BFD_RELOC_386_GOT32:
8808 case BFD_RELOC_386_GOTOFF:
8809 case BFD_RELOC_386_GOTPC:
8810 case BFD_RELOC_386_TLS_GD:
8811 case BFD_RELOC_386_TLS_LDM:
8812 case BFD_RELOC_386_TLS_LDO_32:
8813 case BFD_RELOC_386_TLS_IE_32:
8814 case BFD_RELOC_386_TLS_IE:
8815 case BFD_RELOC_386_TLS_GOTIE:
8816 case BFD_RELOC_386_TLS_LE_32:
8817 case BFD_RELOC_386_TLS_LE:
8818 case BFD_RELOC_386_TLS_GOTDESC:
8819 case BFD_RELOC_386_TLS_DESC_CALL:
8820 case BFD_RELOC_X86_64_TLSGD:
8821 case BFD_RELOC_X86_64_TLSLD:
8822 case BFD_RELOC_X86_64_DTPOFF32:
8823 case BFD_RELOC_X86_64_DTPOFF64:
8824 case BFD_RELOC_X86_64_GOTTPOFF:
8825 case BFD_RELOC_X86_64_TPOFF32:
8826 case BFD_RELOC_X86_64_TPOFF64:
8827 case BFD_RELOC_X86_64_GOTOFF64:
8828 case BFD_RELOC_X86_64_GOTPC32:
8829 case BFD_RELOC_X86_64_GOT64:
8830 case BFD_RELOC_X86_64_GOTPCREL64:
8831 case BFD_RELOC_X86_64_GOTPC64:
8832 case BFD_RELOC_X86_64_GOTPLT64:
8833 case BFD_RELOC_X86_64_PLTOFF64:
8834 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
8835 case BFD_RELOC_X86_64_TLSDESC_CALL:
8836 case BFD_RELOC_RVA:
8837 case BFD_RELOC_VTABLE_ENTRY:
8838 case BFD_RELOC_VTABLE_INHERIT:
8839 #ifdef TE_PE
8840 case BFD_RELOC_32_SECREL:
8841 #endif
8842 code = fixp->fx_r_type;
8843 break;
8844 case BFD_RELOC_X86_64_32S:
8845 if (!fixp->fx_pcrel)
8847 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
8848 code = fixp->fx_r_type;
8849 break;
8851 default:
8852 if (fixp->fx_pcrel)
8854 switch (fixp->fx_size)
8856 default:
8857 as_bad_where (fixp->fx_file, fixp->fx_line,
8858 _("can not do %d byte pc-relative relocation"),
8859 fixp->fx_size);
8860 code = BFD_RELOC_32_PCREL;
8861 break;
8862 case 1: code = BFD_RELOC_8_PCREL; break;
8863 case 2: code = BFD_RELOC_16_PCREL; break;
8864 case 4: code = BFD_RELOC_32_PCREL; break;
8865 #ifdef BFD64
8866 case 8: code = BFD_RELOC_64_PCREL; break;
8867 #endif
8870 else
8872 switch (fixp->fx_size)
8874 default:
8875 as_bad_where (fixp->fx_file, fixp->fx_line,
8876 _("can not do %d byte relocation"),
8877 fixp->fx_size);
8878 code = BFD_RELOC_32;
8879 break;
8880 case 1: code = BFD_RELOC_8; break;
8881 case 2: code = BFD_RELOC_16; break;
8882 case 4: code = BFD_RELOC_32; break;
8883 #ifdef BFD64
8884 case 8: code = BFD_RELOC_64; break;
8885 #endif
8888 break;
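/* References that resolve to _GLOBAL_OFFSET_TABLE_ itself are rewritten
   as GOT PC-relative relocations (GOTPC / GOTPC32 / GOTPC64 below).  */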
8891 if ((code == BFD_RELOC_32
8892 || code == BFD_RELOC_32_PCREL
8893 || code == BFD_RELOC_X86_64_32S)
8894 && GOT_symbol
8895 && fixp->fx_addsy == GOT_symbol)
8897 if (!object_64bit)
8898 code = BFD_RELOC_386_GOTPC;
8899 else
8900 code = BFD_RELOC_X86_64_GOTPC32;
8902 if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
8903 && GOT_symbol
8904 && fixp->fx_addsy == GOT_symbol)
8906 code = BFD_RELOC_X86_64_GOTPC64;
8909 rel = (arelent *) xmalloc (sizeof (arelent));
8910 rel->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
8911 *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
8913 rel->address = fixp->fx_frag->fr_address + fixp->fx_where;
8915 if (!use_rela_relocations)
8917 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
8918 vtable entry to be used in the relocation's section offset. */
8919 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
8920 rel->address = fixp->fx_offset;
8921 #if defined (OBJ_COFF) && defined (TE_PE)
8922 else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
8923 rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
8924 else
8925 #endif
8926 rel->addend = 0;
 8928 /* Use rela relocations in 64bit mode. */
8929 else
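/* In x32 mode the 64-bit-only relocation types listed below have no
   representation in the 32-bit object format, so emitting one is
   reported as an error.  */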
8931 if (disallow_64bit_reloc)
8932 switch (code)
8934 case BFD_RELOC_64:
8935 case BFD_RELOC_X86_64_DTPOFF64:
8936 case BFD_RELOC_X86_64_TPOFF64:
8937 case BFD_RELOC_64_PCREL:
8938 case BFD_RELOC_X86_64_GOTOFF64:
8939 case BFD_RELOC_X86_64_GOT64:
8940 case BFD_RELOC_X86_64_GOTPCREL64:
8941 case BFD_RELOC_X86_64_GOTPC64:
8942 case BFD_RELOC_X86_64_GOTPLT64:
8943 case BFD_RELOC_X86_64_PLTOFF64:
8944 as_bad_where (fixp->fx_file, fixp->fx_line,
8945 _("cannot represent relocation type %s in x32 mode"),
8946 bfd_get_reloc_code_name (code));
8947 break;
8948 default:
8949 break;
8952 if (!fixp->fx_pcrel)
8953 rel->addend = fixp->fx_offset;
8954 else
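/* For the PC-relative GOT/PLT/TLS relocations below, the addend is
   biased by -fx_size so that the value the linker computes relative to
   the address of the relocated field matches the displacement the CPU
   takes relative to the end of the instruction (cf. md_pcrel_from).  */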
8955 switch (code)
8957 case BFD_RELOC_X86_64_PLT32:
8958 case BFD_RELOC_X86_64_GOT32:
8959 case BFD_RELOC_X86_64_GOTPCREL:
8960 case BFD_RELOC_X86_64_TLSGD:
8961 case BFD_RELOC_X86_64_TLSLD:
8962 case BFD_RELOC_X86_64_GOTTPOFF:
8963 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
8964 case BFD_RELOC_X86_64_TLSDESC_CALL:
8965 rel->addend = fixp->fx_offset - fixp->fx_size;
8966 break;
8967 default:
8968 rel->addend = (section->vma
8969 - fixp->fx_size
8970 + fixp->fx_addnumber
8971 + md_pcrel_from (fixp));
8972 break;
8976 rel->howto = bfd_reloc_type_lookup (stdoutput, code);
8977 if (rel->howto == NULL)
8979 as_bad_where (fixp->fx_file, fixp->fx_line,
8980 _("cannot represent relocation type %s"),
8981 bfd_get_reloc_code_name (code));
8982 /* Set howto to a garbage value so that we can keep going. */
8983 rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
8984 gas_assert (rel->howto != NULL);
8987 return rel;
8990 #include "tc-i386-intel.c"
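/* In the DWARF helpers below, flag_code >> 1 folds CODE_16BIT and
   CODE_32BIT together as index 0 and maps CODE_64BIT to index 1,
   selecting between the 32-bit and 64-bit DWARF register numberings
   (this assumes the usual CODE_32BIT, CODE_16BIT, CODE_64BIT enum
   order).  */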
8992 void
8993 tc_x86_parse_to_dw2regnum (expressionS *exp)
8995 int saved_naked_reg;
8996 char saved_register_dot;
8998 saved_naked_reg = allow_naked_reg;
8999 allow_naked_reg = 1;
9000 saved_register_dot = register_chars['.'];
9001 register_chars['.'] = '.';
9002 allow_pseudo_reg = 1;
9003 expression_and_evaluate (exp);
9004 allow_pseudo_reg = 0;
9005 register_chars['.'] = saved_register_dot;
9006 allow_naked_reg = saved_naked_reg;
9008 if (exp->X_op == O_register && exp->X_add_number >= 0)
9010 if ((addressT) exp->X_add_number < i386_regtab_size)
9012 exp->X_op = O_constant;
9013 exp->X_add_number = i386_regtab[exp->X_add_number]
9014 .dw2_regnum[flag_code >> 1];
9016 else
9017 exp->X_op = O_illegal;
9021 void
9022 tc_x86_frame_initial_instructions (void)
9024 static unsigned int sp_regno[2];
9026 if (!sp_regno[flag_code >> 1])
9028 char *saved_input = input_line_pointer;
9029 char sp[][4] = {"esp", "rsp"};
9030 expressionS exp;
9032 input_line_pointer = sp[flag_code >> 1];
9033 tc_x86_parse_to_dw2regnum (&exp);
9034 gas_assert (exp.X_op == O_constant);
9035 sp_regno[flag_code >> 1] = exp.X_add_number;
9036 input_line_pointer = saved_input;
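/* Emit the initial CFI for a function: define the CFA as the stack
   pointer plus one word and record the return address at the word just
   below it; x86_cie_data_alignment is presumably the negative word
   size, hence the sign flip on the first argument.  */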
9039 cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
9040 cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
 int
 9044 i386_elf_section_type (const char *str, size_t len)
9046 if (flag_code == CODE_64BIT
9047 && len == sizeof ("unwind") - 1
9048 && strncmp (str, "unwind", 6) == 0)
9049 return SHT_X86_64_UNWIND;
9051 return -1;
9054 #ifdef TE_SOLARIS
9055 void
9056 i386_solaris_fix_up_eh_frame (segT sec)
9058 if (flag_code == CODE_64BIT)
9059 elf_section_type (sec) = SHT_X86_64_UNWIND;
9061 #endif
9063 #ifdef TE_PE
9064 void
9065 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
9067 expressionS exp;
9069 exp.X_op = O_secrel;
9070 exp.X_add_symbol = symbol;
9071 exp.X_add_number = 0;
9072 emit_expr (&exp, size);
9074 #endif
9076 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9077 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
9079 bfd_vma
9080 x86_64_section_letter (int letter, char **ptr_msg)
9082 if (flag_code == CODE_64BIT)
9084 if (letter == 'l')
9085 return SHF_X86_64_LARGE;
9087 *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
9089 else
9090 *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
9091 return -1;
9094 bfd_vma
9095 x86_64_section_word (char *str, size_t len)
9097 if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
9098 return SHF_X86_64_LARGE;
9100 return -1;
9103 static void
9104 handle_large_common (int small ATTRIBUTE_UNUSED)
9106 if (flag_code != CODE_64BIT)
9108 s_comm_internal (0, elf_common_parse);
9109 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
9111 else
9113 static segT lbss_section;
9114 asection *saved_com_section_ptr = elf_com_section_ptr;
9115 asection *saved_bss_section = bss_section;
9117 if (lbss_section == NULL)
9119 flagword applicable;
9120 segT seg = now_seg;
9121 subsegT subseg = now_subseg;
9123 /* The .lbss section is for local .largecomm symbols. */
9124 lbss_section = subseg_new (".lbss", 0);
9125 applicable = bfd_applicable_section_flags (stdoutput);
9126 bfd_set_section_flags (stdoutput, lbss_section,
9127 applicable & SEC_ALLOC);
9128 seg_info (lbss_section)->bss = 1;
9130 subseg_set (seg, subseg);
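/* Temporarily redirect the common and bss sections so that
   s_comm_internal places the symbol in the large common section (or in
   .lbss for local symbols), then restore the originals.  */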
9133 elf_com_section_ptr = &_bfd_elf_large_com_section;
9134 bss_section = lbss_section;
9136 s_comm_internal (0, elf_common_parse);
9138 elf_com_section_ptr = saved_com_section_ptr;
9139 bss_section = saved_bss_section;
9142 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */