1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
4 2012
5 Free Software Foundation, Inc.
7 This file is part of GAS, the GNU Assembler.
9 GAS is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3, or (at your option)
12 any later version.
14 GAS is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with GAS; see the file COPYING. If not, write to the Free
21 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
22 02110-1301, USA. */
24 /* Intel 80386 machine specific gas.
25 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
26 x86_64 support by Jan Hubicka (jh@suse.cz)
27 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
28 Bugs & suggestions are completely welcome. This is free software.
29 Please help us make it better. */
31 #include "as.h"
32 #include "safe-ctype.h"
33 #include "subsegs.h"
34 #include "dwarf2dbg.h"
35 #include "dw2gencfi.h"
36 #include "elf/x86-64.h"
37 #include "opcodes/i386-init.h"
39 #ifndef REGISTER_WARNINGS
40 #define REGISTER_WARNINGS 1
41 #endif
43 #ifndef INFER_ADDR_PREFIX
44 #define INFER_ADDR_PREFIX 1
45 #endif
47 #ifndef DEFAULT_ARCH
48 #define DEFAULT_ARCH "i386"
49 #endif
51 #ifndef INLINE
52 #if __GNUC__ >= 2
53 #define INLINE __inline__
54 #else
55 #define INLINE
56 #endif
57 #endif
59 /* Prefixes will be emitted in the order defined below.
60    WAIT_PREFIX must be the first prefix since FWAIT really is an
61 instruction, and so must come before any prefixes.
62 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
63 REP_PREFIX/HLE_PREFIX, LOCK_PREFIX. */
64 #define WAIT_PREFIX 0
65 #define SEG_PREFIX 1
66 #define ADDR_PREFIX 2
67 #define DATA_PREFIX 3
68 #define REP_PREFIX 4
69 #define HLE_PREFIX REP_PREFIX
70 #define LOCK_PREFIX 5
71 #define REX_PREFIX 6 /* must come last. */
72 #define MAX_PREFIXES 7 /* max prefixes per opcode */
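/* Editorial illustration, not part of the original source: for a
   hypothetical `lock addw $1, (%ebx)' assembled in 32-bit mode, the 0x66
   operand-size override would occupy the DATA_PREFIX slot and 0xf0 the
   LOCK_PREFIX slot, so the slot numbering above is what fixes the byte
   order in which the prefixes are finally emitted.  */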
74 /* we define the syntax here (modulo base,index,scale syntax) */
75 #define REGISTER_PREFIX '%'
76 #define IMMEDIATE_PREFIX '$'
77 #define ABSOLUTE_PREFIX '*'
79 /* these are the instruction mnemonic suffixes in AT&T syntax or
80 memory operand size in Intel syntax. */
81 #define WORD_MNEM_SUFFIX 'w'
82 #define BYTE_MNEM_SUFFIX 'b'
83 #define SHORT_MNEM_SUFFIX 's'
84 #define LONG_MNEM_SUFFIX 'l'
85 #define QWORD_MNEM_SUFFIX 'q'
86 #define XMMWORD_MNEM_SUFFIX 'x'
87 #define YMMWORD_MNEM_SUFFIX 'y'
88 /* Intel Syntax.  Use a non-ASCII letter since it never appears
89 in instructions. */
90 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
92 #define END_OF_INSN '\0'
95 'templates' is for grouping together 'template' structures for opcodes
96 of the same name. This is only used for storing the insns in the grand
97 ole hash table of insns.
98 The templates themselves start at START and range up to (but not including)
99 END.
101 typedef struct
103 const insn_template *start;
104 const insn_template *end;
106 templates;
108 /* 386 operand encoding bytes: see 386 book for details of this. */
109 typedef struct
111 unsigned int regmem; /* codes register or memory operand */
112 unsigned int reg; /* codes register operand (or extended opcode) */
113 unsigned int mode; /* how to interpret regmem & reg */
115 modrm_byte;
117 /* x86-64 extension prefix. */
118 typedef int rex_byte;
120 /* 386 opcode byte to code indirect addressing. */
121 typedef struct
123 unsigned base;
124 unsigned index;
125 unsigned scale;
127 sib_byte;
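/* Editorial example, not in the original source: for a memory operand
   such as 4(%eax,%ebx,2), build_modrm_byte would roughly produce
   rm.mode = 1 (8-bit displacement), rm.regmem = 4 (a SIB byte follows),
   sib.base = 0 (%eax), sib.index = 3 (%ebx) and sib.scale = 1, i.e. the
   log2 of the scale factor 2.  */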
129 /* x86 arch names, types and features */
130 typedef struct
132 const char *name; /* arch name */
133 unsigned int len; /* arch string length */
134 enum processor_type type; /* arch type */
135 i386_cpu_flags flags; /* cpu feature flags */
136 unsigned int skip; /* show_arch should skip this. */
137 unsigned int negated; /* turn off indicated flags. */
139 arch_entry;
141 static void update_code_flag (int, int);
142 static void set_code_flag (int);
143 static void set_16bit_gcc_code_flag (int);
144 static void set_intel_syntax (int);
145 static void set_intel_mnemonic (int);
146 static void set_allow_index_reg (int);
147 static void set_sse_check (int);
148 static void set_cpu_arch (int);
149 #ifdef TE_PE
150 static void pe_directive_secrel (int);
151 #endif
152 static void signed_cons (int);
153 static char *output_invalid (int c);
154 static int i386_finalize_immediate (segT, expressionS *, i386_operand_type,
155 const char *);
156 static int i386_finalize_displacement (segT, expressionS *, i386_operand_type,
157 const char *);
158 static int i386_att_operand (char *);
159 static int i386_intel_operand (char *, int);
160 static int i386_intel_simplify (expressionS *);
161 static int i386_intel_parse_name (const char *, expressionS *);
162 static const reg_entry *parse_register (char *, char **);
163 static char *parse_insn (char *, char *);
164 static char *parse_operands (char *, const char *);
165 static void swap_operands (void);
166 static void swap_2_operands (int, int);
167 static void optimize_imm (void);
168 static void optimize_disp (void);
169 static const insn_template *match_template (void);
170 static int check_string (void);
171 static int process_suffix (void);
172 static int check_byte_reg (void);
173 static int check_long_reg (void);
174 static int check_qword_reg (void);
175 static int check_word_reg (void);
176 static int finalize_imm (void);
177 static int process_operands (void);
178 static const seg_entry *build_modrm_byte (void);
179 static void output_insn (void);
180 static void output_imm (fragS *, offsetT);
181 static void output_disp (fragS *, offsetT);
182 #ifndef I386COFF
183 static void s_bss (int);
184 #endif
185 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
186 static void handle_large_common (int small ATTRIBUTE_UNUSED);
187 #endif
189 static const char *default_arch = DEFAULT_ARCH;
191 /* VEX prefix. */
192 typedef struct
194 /* VEX prefix is either 2 byte or 3 byte. */
195 unsigned char bytes[3];
196 unsigned int length;
197 /* Destination or source register specifier. */
198 const reg_entry *register_specifier;
199 } vex_prefix;
201 /* 'md_assemble ()' gathers together information and puts it into a
202 i386_insn. */
204 union i386_op
206 expressionS *disps;
207 expressionS *imms;
208 const reg_entry *regs;
211 enum i386_error
213 operand_size_mismatch,
214 operand_type_mismatch,
215 register_type_mismatch,
216 number_of_operands_mismatch,
217 invalid_instruction_suffix,
218 bad_imm4,
219 old_gcc_only,
220 unsupported_with_intel_mnemonic,
221 unsupported_syntax,
222 unsupported,
223 invalid_vsib_address,
224 unsupported_vector_index_register
227 struct _i386_insn
229 /* TM holds the template for the insn we're currently assembling. */
230 insn_template tm;
232 /* SUFFIX holds the instruction size suffix for byte, word, dword
233 or qword, if given. */
234 char suffix;
236 /* OPERANDS gives the number of given operands. */
237 unsigned int operands;
239 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
240 of given register, displacement, memory operands and immediate
241 operands. */
242 unsigned int reg_operands, disp_operands, mem_operands, imm_operands;
244 /* TYPES [i] is the type (see above #defines) which tells us how to
245 use OP[i] for the corresponding operand. */
246 i386_operand_type types[MAX_OPERANDS];
248 /* Displacement expression, immediate expression, or register for each
249 operand. */
250 union i386_op op[MAX_OPERANDS];
252 /* Flags for operands. */
253 unsigned int flags[MAX_OPERANDS];
254 #define Operand_PCrel 1
256 /* Relocation type for operand */
257 enum bfd_reloc_code_real reloc[MAX_OPERANDS];
259 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
260 the base index byte below. */
261 const reg_entry *base_reg;
262 const reg_entry *index_reg;
263 unsigned int log2_scale_factor;
265 /* SEG gives the seg_entries of this insn. They are zero unless
266 explicit segment overrides are given. */
267 const seg_entry *seg[2];
269 /* PREFIX holds all the given prefix opcodes (usually null).
270 PREFIXES is the number of prefix opcodes. */
271 unsigned int prefixes;
272 unsigned char prefix[MAX_PREFIXES];
274 /* RM and SIB are the modrm byte and the sib byte where the
275 addressing modes of this insn are encoded. */
276 modrm_byte rm;
277 rex_byte rex;
278 sib_byte sib;
279 vex_prefix vex;
281 /* Swap operand in encoding. */
282 unsigned int swap_operand;
284 /* Prefer 8bit or 32bit displacement in encoding. */
285 enum
287 disp_encoding_default = 0,
288 disp_encoding_8bit,
289 disp_encoding_32bit
290 } disp_encoding;
292 /* Have HLE prefix. */
293 unsigned int have_hle;
295 /* Error message. */
296 enum i386_error error;
299 typedef struct _i386_insn i386_insn;
301 /* List of chars besides those in app.c:symbol_chars that can start an
302 operand. Used to prevent the scrubber eating vital white-space. */
303 const char extra_symbol_chars[] = "*%-(["
304 #ifdef LEX_AT
306 #endif
307 #ifdef LEX_QM
309 #endif
312 #if (defined (TE_I386AIX) \
313 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
314 && !defined (TE_GNU) \
315 && !defined (TE_LINUX) \
316 && !defined (TE_NACL) \
317 && !defined (TE_NETWARE) \
318 && !defined (TE_FreeBSD) \
319 && !defined (TE_DragonFly) \
320 && !defined (TE_NetBSD)))
321 /* This array holds the chars that always start a comment. If the
322 pre-processor is disabled, these aren't very useful. The option
323 --divide will remove '/' from this list. */
324 const char *i386_comment_chars = "#/";
325 #define SVR4_COMMENT_CHARS 1
326 #define PREFIX_SEPARATOR '\\'
328 #else
329 const char *i386_comment_chars = "#";
330 #define PREFIX_SEPARATOR '/'
331 #endif
333 /* This array holds the chars that only start a comment at the beginning of
334 a line. If the line seems to have the form '# 123 filename',
335 .line and .file directives will appear in the pre-processed output.
336 Note that input_file.c hand checks for '#' at the beginning of the
337 first line of the input file. This is because the compiler outputs
338 #NO_APP at the beginning of its output.
339 Also note that comments started like this one will always work if
340 '/' isn't otherwise defined. */
341 const char line_comment_chars[] = "#/";
343 const char line_separator_chars[] = ";";
345 /* Chars that can be used to separate mant from exp in floating point
346 nums. */
347 const char EXP_CHARS[] = "eE";
349 /* Chars that mean this number is a floating point constant
350 As in 0f12.456
351 or 0d1.2345e12. */
352 const char FLT_CHARS[] = "fFdDxX";
354 /* Tables for lexical analysis. */
355 static char mnemonic_chars[256];
356 static char register_chars[256];
357 static char operand_chars[256];
358 static char identifier_chars[256];
359 static char digit_chars[256];
361 /* Lexical macros. */
362 #define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
363 #define is_operand_char(x) (operand_chars[(unsigned char) x])
364 #define is_register_char(x) (register_chars[(unsigned char) x])
365 #define is_space_char(x) ((x) == ' ')
366 #define is_identifier_char(x) (identifier_chars[(unsigned char) x])
367 #define is_digit_char(x) (digit_chars[(unsigned char) x])
369 /* All non-digit non-letter characters that may occur in an operand. */
370 static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
372 /* md_assemble() always leaves the strings it's passed unaltered. To
373 effect this we maintain a stack of saved characters that we've smashed
374 with '\0's (indicating end of strings for various sub-fields of the
375 assembler instruction). */
376 static char save_stack[32];
377 static char *save_stack_p;
378 #define END_STRING_AND_SAVE(s) \
379 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
380 #define RESTORE_END_STRING(s) \
381 do { *(s) = *--save_stack_p; } while (0)
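/* Editorial usage sketch (the variable name is hypothetical, not from the
   original code): a sub-field is typically parsed as
     END_STRING_AND_SAVE (token_end);
     ...parse the NUL-terminated piece in place...
     RESTORE_END_STRING (token_end);
   which leaves the caller's string exactly as it was found, matching the
   comment above.  */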
383 /* The instruction we're assembling. */
384 static i386_insn i;
386 /* Possible templates for current insn. */
387 static const templates *current_templates;
389 /* Per instruction expressionS buffers: max displacements & immediates. */
390 static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
391 static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];
393 /* Current operand we are working on. */
394 static int this_operand = -1;
396 /* We support four different modes. FLAG_CODE variable is used to distinguish
397 these. */
399 enum flag_code {
400 CODE_32BIT,
401 CODE_16BIT,
402 CODE_64BIT };
404 static enum flag_code flag_code;
405 static unsigned int object_64bit;
406 static unsigned int disallow_64bit_reloc;
407 static int use_rela_relocations = 0;
409 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
410 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
411 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
413 /* The ELF ABI to use. */
414 enum x86_elf_abi
416 I386_ABI,
417 X86_64_ABI,
418 X86_64_X32_ABI
421 static enum x86_elf_abi x86_elf_abi = I386_ABI;
422 #endif
424 /* The names used to print error messages. */
425 static const char *flag_code_names[] =
427 "32",
428 "16",
429 "64"
432 /* 1 for intel syntax,
433 0 if att syntax. */
434 static int intel_syntax = 0;
436 /* 1 for intel mnemonic,
437 0 if att mnemonic. */
438 static int intel_mnemonic = !SYSV386_COMPAT;
440 /* 1 if support old (<= 2.8.1) versions of gcc. */
441 static int old_gcc = OLDGCC_COMPAT;
443 /* 1 if pseudo registers are permitted. */
444 static int allow_pseudo_reg = 0;
446 /* 1 if register prefix % not required. */
447 static int allow_naked_reg = 0;
449 /* 1 if pseudo index register, eiz/riz, is allowed. */
450 static int allow_index_reg = 0;
452 static enum
454 sse_check_none = 0,
455 sse_check_warning,
456 sse_check_error
458 sse_check;
460 /* Register prefix used for error message. */
461 static const char *register_prefix = "%";
463 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
464 leave, push, and pop instructions so that gcc has the same stack
465 frame as in 32 bit mode. */
466 static char stackop_size = '\0';
468 /* Non-zero to optimize code alignment. */
469 int optimize_align_code = 1;
471 /* Non-zero to quieten some warnings. */
472 static int quiet_warnings = 0;
474 /* CPU name. */
475 static const char *cpu_arch_name = NULL;
476 static char *cpu_sub_arch_name = NULL;
478 /* CPU feature flags. */
479 static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS;
481 /* If we have selected a cpu we are generating instructions for. */
482 static int cpu_arch_tune_set = 0;
484 /* Cpu we are generating instructions for. */
485 enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN;
487 /* CPU feature flags of cpu we are generating instructions for. */
488 static i386_cpu_flags cpu_arch_tune_flags;
490 /* CPU instruction set architecture used. */
491 enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN;
493 /* CPU feature flags of instruction set architecture used. */
494 i386_cpu_flags cpu_arch_isa_flags;
496 /* If set, conditional jumps are not automatically promoted to handle
497 larger than a byte offset. */
498 static unsigned int no_cond_jump_promotion = 0;
500 /* Encode SSE instructions with VEX prefix. */
501 static unsigned int sse2avx;
503 /* Encode scalar AVX instructions with specific vector length. */
504 static enum
506 vex128 = 0,
507 vex256
508 } avxscalar;
510 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
511 static symbolS *GOT_symbol;
513 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
514 unsigned int x86_dwarf2_return_column;
516 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
517 int x86_cie_data_alignment;
519 /* Interface to relax_segment.
520 There are 3 major relax states for 386 jump insns because the
521 different types of jumps add different sizes to frags when we're
522 figuring out what sort of jump to choose to reach a given label. */
524 /* Types. */
525 #define UNCOND_JUMP 0
526 #define COND_JUMP 1
527 #define COND_JUMP86 2
529 /* Sizes. */
530 #define CODE16 1
531 #define SMALL 0
532 #define SMALL16 (SMALL | CODE16)
533 #define BIG 2
534 #define BIG16 (BIG | CODE16)
536 #ifndef INLINE
537 #ifdef __GNUC__
538 #define INLINE __inline__
539 #else
540 #define INLINE
541 #endif
542 #endif
544 #define ENCODE_RELAX_STATE(type, size) \
545 ((relax_substateT) (((type) << 2) | (size)))
546 #define TYPE_FROM_RELAX_STATE(s) \
547 ((s) >> 2)
548 #define DISP_SIZE_FROM_RELAX_STATE(s) \
549 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
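/* Editorial worked example, not part of the original source:
   ENCODE_RELAX_STATE (COND_JUMP, BIG) is (1 << 2) | 2 == 6;
   TYPE_FROM_RELAX_STATE (6) recovers COND_JUMP (1), and
   DISP_SIZE_FROM_RELAX_STATE (6) yields 4, the 32-bit displacement size.
   For a BIG16 state (low bits == 3) the displacement size is 2 instead.  */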
551 /* This table is used by relax_frag to promote short jumps to long
552 ones where necessary. SMALL (short) jumps may be promoted to BIG
553 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
554 don't allow a short jump in a 32 bit code segment to be promoted to
555 a 16 bit offset jump because it's slower (requires data size
556 prefix), and doesn't work, unless the destination is in the bottom
557 64k of the code segment (The top 16 bits of eip are zeroed). */
559 const relax_typeS md_relax_table[] =
561 /* The fields are:
562 1) most positive reach of this state,
563 2) most negative reach of this state,
564 3) how many bytes this mode will have in the variable part of the frag
565 4) which index into the table to try if we can't fit into this one. */
567 /* UNCOND_JUMP states. */
568 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)},
569 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)},
570 /* dword jmp adds 4 bytes to frag:
571 0 extra opcode bytes, 4 displacement bytes. */
572 {0, 0, 4, 0},
573 /* word jmp adds 2 bytes to frag:
574 0 extra opcode bytes, 2 displacement bytes. */
575 {0, 0, 2, 0},
577 /* COND_JUMP states. */
578 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)},
579 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)},
580 /* dword conditionals add 5 bytes to frag:
581 1 extra opcode byte, 4 displacement bytes. */
582 {0, 0, 5, 0},
583 /* word conditionals add 3 bytes to frag:
584 1 extra opcode byte, 2 displacement bytes. */
585 {0, 0, 3, 0},
587 /* COND_JUMP86 states. */
588 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)},
589 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)},
590 /* dword conditionals add 5 bytes to frag:
591 1 extra opcode byte, 4 displacement bytes. */
592 {0, 0, 5, 0},
593 /* word conditionals add 4 bytes to frag:
594 1 displacement byte and a 3 byte long branch insn. */
595 {0, 0, 4, 0}
598 static const arch_entry cpu_arch[] =
600 /* Do not replace the first two entries - i386_target_format()
601 relies on them being there in this order. */
602 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32,
603 CPU_GENERIC32_FLAGS, 0, 0 },
604 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64,
605 CPU_GENERIC64_FLAGS, 0, 0 },
606 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN,
607 CPU_NONE_FLAGS, 0, 0 },
608 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN,
609 CPU_I186_FLAGS, 0, 0 },
610 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN,
611 CPU_I286_FLAGS, 0, 0 },
612 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386,
613 CPU_I386_FLAGS, 0, 0 },
614 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486,
615 CPU_I486_FLAGS, 0, 0 },
616 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM,
617 CPU_I586_FLAGS, 0, 0 },
618 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO,
619 CPU_I686_FLAGS, 0, 0 },
620 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM,
621 CPU_I586_FLAGS, 0, 0 },
622 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO,
623 CPU_PENTIUMPRO_FLAGS, 0, 0 },
624 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO,
625 CPU_P2_FLAGS, 0, 0 },
626 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO,
627 CPU_P3_FLAGS, 0, 0 },
628 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4,
629 CPU_P4_FLAGS, 0, 0 },
630 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA,
631 CPU_CORE_FLAGS, 0, 0 },
632 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA,
633 CPU_NOCONA_FLAGS, 0, 0 },
634 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE,
635 CPU_CORE_FLAGS, 1, 0 },
636 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE,
637 CPU_CORE_FLAGS, 0, 0 },
638 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2,
639 CPU_CORE2_FLAGS, 1, 0 },
640 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2,
641 CPU_CORE2_FLAGS, 0, 0 },
642 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7,
643 CPU_COREI7_FLAGS, 0, 0 },
644 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
645 CPU_L1OM_FLAGS, 0, 0 },
646 { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM,
647 CPU_K1OM_FLAGS, 0, 0 },
648 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
649 CPU_K6_FLAGS, 0, 0 },
650 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
651 CPU_K6_2_FLAGS, 0, 0 },
652 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON,
653 CPU_ATHLON_FLAGS, 0, 0 },
654 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8,
655 CPU_K8_FLAGS, 1, 0 },
656 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8,
657 CPU_K8_FLAGS, 0, 0 },
658 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8,
659 CPU_K8_FLAGS, 0, 0 },
660 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
661 CPU_AMDFAM10_FLAGS, 0, 0 },
662 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD,
663 CPU_BDVER1_FLAGS, 0, 0 },
664 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD,
665 CPU_BDVER2_FLAGS, 0, 0 },
666 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
667 CPU_8087_FLAGS, 0, 0 },
668 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
669 CPU_287_FLAGS, 0, 0 },
670 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN,
671 CPU_387_FLAGS, 0, 0 },
672 { STRING_COMMA_LEN (".no87"), PROCESSOR_UNKNOWN,
673 CPU_ANY87_FLAGS, 0, 1 },
674 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN,
675 CPU_MMX_FLAGS, 0, 0 },
676 { STRING_COMMA_LEN (".nommx"), PROCESSOR_UNKNOWN,
677 CPU_3DNOWA_FLAGS, 0, 1 },
678 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN,
679 CPU_SSE_FLAGS, 0, 0 },
680 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN,
681 CPU_SSE2_FLAGS, 0, 0 },
682 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN,
683 CPU_SSE3_FLAGS, 0, 0 },
684 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN,
685 CPU_SSSE3_FLAGS, 0, 0 },
686 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN,
687 CPU_SSE4_1_FLAGS, 0, 0 },
688 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN,
689 CPU_SSE4_2_FLAGS, 0, 0 },
690 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN,
691 CPU_SSE4_2_FLAGS, 0, 0 },
692 { STRING_COMMA_LEN (".nosse"), PROCESSOR_UNKNOWN,
693 CPU_ANY_SSE_FLAGS, 0, 1 },
694 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
695 CPU_AVX_FLAGS, 0, 0 },
696 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN,
697 CPU_AVX2_FLAGS, 0, 0 },
698 { STRING_COMMA_LEN (".noavx"), PROCESSOR_UNKNOWN,
699 CPU_ANY_AVX_FLAGS, 0, 1 },
700 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
701 CPU_VMX_FLAGS, 0, 0 },
702 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN,
703 CPU_VMFUNC_FLAGS, 0, 0 },
704 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN,
705 CPU_SMX_FLAGS, 0, 0 },
706 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN,
707 CPU_XSAVE_FLAGS, 0, 0 },
708 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN,
709 CPU_XSAVEOPT_FLAGS, 0, 0 },
710 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN,
711 CPU_AES_FLAGS, 0, 0 },
712 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN,
713 CPU_PCLMUL_FLAGS, 0, 0 },
714 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN,
715 CPU_PCLMUL_FLAGS, 1, 0 },
716 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN,
717 CPU_FSGSBASE_FLAGS, 0, 0 },
718 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN,
719 CPU_RDRND_FLAGS, 0, 0 },
720 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN,
721 CPU_F16C_FLAGS, 0, 0 },
722 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN,
723 CPU_BMI2_FLAGS, 0, 0 },
724 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
725 CPU_FMA_FLAGS, 0, 0 },
726 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
727 CPU_FMA4_FLAGS, 0, 0 },
728 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN,
729 CPU_XOP_FLAGS, 0, 0 },
730 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN,
731 CPU_LWP_FLAGS, 0, 0 },
732 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN,
733 CPU_MOVBE_FLAGS, 0, 0 },
734 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
735 CPU_EPT_FLAGS, 0, 0 },
736 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN,
737 CPU_LZCNT_FLAGS, 0, 0 },
738 { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN,
739 CPU_HLE_FLAGS, 0, 0 },
740 { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN,
741 CPU_RTM_FLAGS, 0, 0 },
742 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN,
743 CPU_INVPCID_FLAGS, 0, 0 },
744 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
745 CPU_CLFLUSH_FLAGS, 0, 0 },
746 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN,
747 CPU_NOP_FLAGS, 0, 0 },
748 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN,
749 CPU_SYSCALL_FLAGS, 0, 0 },
750 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN,
751 CPU_RDTSCP_FLAGS, 0, 0 },
752 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN,
753 CPU_3DNOW_FLAGS, 0, 0 },
754 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN,
755 CPU_3DNOWA_FLAGS, 0, 0 },
756 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN,
757 CPU_PADLOCK_FLAGS, 0, 0 },
758 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN,
759 CPU_SVME_FLAGS, 1, 0 },
760 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN,
761 CPU_SVME_FLAGS, 0, 0 },
762 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
763 CPU_SSE4A_FLAGS, 0, 0 },
764 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
765 CPU_ABM_FLAGS, 0, 0 },
766 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN,
767 CPU_BMI_FLAGS, 0, 0 },
768 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN,
769 CPU_TBM_FLAGS, 0, 0 },
772 #ifdef I386COFF
773 /* Like s_lcomm_internal in gas/read.c but the alignment string
774 is allowed to be optional. */
776 static symbolS *
777 pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size)
779 addressT align = 0;
781 SKIP_WHITESPACE ();
783 if (needs_align
784 && *input_line_pointer == ',')
786 align = parse_align (needs_align - 1);
788 if (align == (addressT) -1)
789 return NULL;
791 else
793 if (size >= 8)
794 align = 3;
795 else if (size >= 4)
796 align = 2;
797 else if (size >= 2)
798 align = 1;
799 else
800 align = 0;
803 bss_alloc (symbolP, size, align);
804 return symbolP;
807 static void
808 pe_lcomm (int needs_align)
810 s_comm_internal (needs_align * 2, pe_lcomm_internal);
812 #endif
814 const pseudo_typeS md_pseudo_table[] =
816 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
817 {"align", s_align_bytes, 0},
818 #else
819 {"align", s_align_ptwo, 0},
820 #endif
821 {"arch", set_cpu_arch, 0},
822 #ifndef I386COFF
823 {"bss", s_bss, 0},
824 #else
825 {"lcomm", pe_lcomm, 1},
826 #endif
827 {"ffloat", float_cons, 'f'},
828 {"dfloat", float_cons, 'd'},
829 {"tfloat", float_cons, 'x'},
830 {"value", cons, 2},
831 {"slong", signed_cons, 4},
832 {"noopt", s_ignore, 0},
833 {"optim", s_ignore, 0},
834 {"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
835 {"code16", set_code_flag, CODE_16BIT},
836 {"code32", set_code_flag, CODE_32BIT},
837 {"code64", set_code_flag, CODE_64BIT},
838 {"intel_syntax", set_intel_syntax, 1},
839 {"att_syntax", set_intel_syntax, 0},
840 {"intel_mnemonic", set_intel_mnemonic, 1},
841 {"att_mnemonic", set_intel_mnemonic, 0},
842 {"allow_index_reg", set_allow_index_reg, 1},
843 {"disallow_index_reg", set_allow_index_reg, 0},
844 {"sse_check", set_sse_check, 0},
845 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
846 {"largecomm", handle_large_common, 0},
847 #else
848 {"file", (void (*) (int)) dwarf2_directive_file, 0},
849 {"loc", dwarf2_directive_loc, 0},
850 {"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0},
851 #endif
852 #ifdef TE_PE
853 {"secrel32", pe_directive_secrel, 0},
854 #endif
855 {0, 0, 0}
858 /* For interface with expression (). */
859 extern char *input_line_pointer;
861 /* Hash table for instruction mnemonic lookup. */
862 static struct hash_control *op_hash;
864 /* Hash table for register lookup. */
865 static struct hash_control *reg_hash;
867 void
868 i386_align_code (fragS *fragP, int count)
870 /* Various efficient no-op patterns for aligning code labels.
871 Note: Don't try to assemble the instructions in the comments.
872 0L and 0w are not legal. */
873 static const char f32_1[] =
874 {0x90}; /* nop */
875 static const char f32_2[] =
876 {0x66,0x90}; /* xchg %ax,%ax */
877 static const char f32_3[] =
878 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
879 static const char f32_4[] =
880 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
881 static const char f32_5[] =
882 {0x90, /* nop */
883 0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
884 static const char f32_6[] =
885 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
886 static const char f32_7[] =
887 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
888 static const char f32_8[] =
889 {0x90, /* nop */
890 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
891 static const char f32_9[] =
892 {0x89,0xf6, /* movl %esi,%esi */
893 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
894 static const char f32_10[] =
895 {0x8d,0x76,0x00, /* leal 0(%esi),%esi */
896 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
897 static const char f32_11[] =
898 {0x8d,0x74,0x26,0x00, /* leal 0(%esi,1),%esi */
899 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
900 static const char f32_12[] =
901 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
902 0x8d,0xbf,0x00,0x00,0x00,0x00}; /* leal 0L(%edi),%edi */
903 static const char f32_13[] =
904 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
905 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
906 static const char f32_14[] =
907 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00, /* leal 0L(%esi,1),%esi */
908 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
909 static const char f16_3[] =
910 {0x8d,0x74,0x00}; /* lea 0(%esi),%esi */
911 static const char f16_4[] =
912 {0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
913 static const char f16_5[] =
914 {0x90, /* nop */
915 0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
916 static const char f16_6[] =
917 {0x89,0xf6, /* mov %si,%si */
918 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
919 static const char f16_7[] =
920 {0x8d,0x74,0x00, /* lea 0(%si),%si */
921 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
922 static const char f16_8[] =
923 {0x8d,0xb4,0x00,0x00, /* lea 0w(%si),%si */
924 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
925 static const char jump_31[] =
926 {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90, /* jmp .+31; lotsa nops */
927 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
928 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
929 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
930 static const char *const f32_patt[] = {
931 f32_1, f32_2, f32_3, f32_4, f32_5, f32_6, f32_7, f32_8,
932 f32_9, f32_10, f32_11, f32_12, f32_13, f32_14
934 static const char *const f16_patt[] = {
935 f32_1, f32_2, f16_3, f16_4, f16_5, f16_6, f16_7, f16_8
937 /* nopl (%[re]ax) */
938 static const char alt_3[] =
939 {0x0f,0x1f,0x00};
940 /* nopl 0(%[re]ax) */
941 static const char alt_4[] =
942 {0x0f,0x1f,0x40,0x00};
943 /* nopl 0(%[re]ax,%[re]ax,1) */
944 static const char alt_5[] =
945 {0x0f,0x1f,0x44,0x00,0x00};
946 /* nopw 0(%[re]ax,%[re]ax,1) */
947 static const char alt_6[] =
948 {0x66,0x0f,0x1f,0x44,0x00,0x00};
949 /* nopl 0L(%[re]ax) */
950 static const char alt_7[] =
951 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
952 /* nopl 0L(%[re]ax,%[re]ax,1) */
953 static const char alt_8[] =
954 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
955 /* nopw 0L(%[re]ax,%[re]ax,1) */
956 static const char alt_9[] =
957 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
958 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
959 static const char alt_10[] =
960 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
961 /* data16
962 nopw %cs:0L(%[re]ax,%[re]ax,1) */
963 static const char alt_long_11[] =
964 {0x66,
965 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
966 /* data16
967 data16
968 nopw %cs:0L(%[re]ax,%[re]ax,1) */
969 static const char alt_long_12[] =
970 {0x66,
971 0x66,
972 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
973 /* data16
974 data16
975 data16
976 nopw %cs:0L(%[re]ax,%[re]ax,1) */
977 static const char alt_long_13[] =
978 {0x66,
979 0x66,
980 0x66,
981 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
982 /* data16
983 data16
984 data16
985 data16
986 nopw %cs:0L(%[re]ax,%[re]ax,1) */
987 static const char alt_long_14[] =
988 {0x66,
989 0x66,
990 0x66,
991 0x66,
992 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
993 /* data16
994 data16
995 data16
996 data16
997 data16
998 nopw %cs:0L(%[re]ax,%[re]ax,1) */
999 static const char alt_long_15[] =
1000 {0x66,
1001 0x66,
1002 0x66,
1003 0x66,
1004 0x66,
1005 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1006 /* nopl 0(%[re]ax,%[re]ax,1)
1007 nopw 0(%[re]ax,%[re]ax,1) */
1008 static const char alt_short_11[] =
1009 {0x0f,0x1f,0x44,0x00,0x00,
1010 0x66,0x0f,0x1f,0x44,0x00,0x00};
1011 /* nopw 0(%[re]ax,%[re]ax,1)
1012 nopw 0(%[re]ax,%[re]ax,1) */
1013 static const char alt_short_12[] =
1014 {0x66,0x0f,0x1f,0x44,0x00,0x00,
1015 0x66,0x0f,0x1f,0x44,0x00,0x00};
1016 /* nopw 0(%[re]ax,%[re]ax,1)
1017 nopl 0L(%[re]ax) */
1018 static const char alt_short_13[] =
1019 {0x66,0x0f,0x1f,0x44,0x00,0x00,
1020 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1021 /* nopl 0L(%[re]ax)
1022 nopl 0L(%[re]ax) */
1023 static const char alt_short_14[] =
1024 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1025 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1026 /* nopl 0L(%[re]ax)
1027 nopl 0L(%[re]ax,%[re]ax,1) */
1028 static const char alt_short_15[] =
1029 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1030 0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1031 static const char *const alt_short_patt[] = {
1032 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1033 alt_9, alt_10, alt_short_11, alt_short_12, alt_short_13,
1034 alt_short_14, alt_short_15
1036 static const char *const alt_long_patt[] = {
1037 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1038 alt_9, alt_10, alt_long_11, alt_long_12, alt_long_13,
1039 alt_long_14, alt_long_15
1042 /* Only align for a positive count no larger than we can handle. */
1043 if (count <= 0 || count > MAX_MEM_FOR_RS_ALIGN_CODE)
1044 return;
1046 /* We need to decide which NOP sequence to use for 32bit and
1047 64bit. When -mtune= is used:
1049 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1050 PROCESSOR_GENERIC32, f32_patt will be used.
1051 2. For PROCESSOR_PENTIUMPRO, PROCESSOR_PENTIUM4, PROCESSOR_NOCONA,
1052 PROCESSOR_CORE, PROCESSOR_CORE2, PROCESSOR_COREI7, and
1053 PROCESSOR_GENERIC64, alt_long_patt will be used.
1054 3. For PROCESSOR_ATHLON, PROCESSOR_K6, PROCESSOR_K8 and
1055 PROCESSOR_AMDFAM10, and PROCESSOR_BD, alt_short_patt
1056 will be used.
1058 When -mtune= isn't used, alt_long_patt will be used if
1059 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1060 be used.
1062 When -march= or .arch is used, we can't use anything beyond
1063 cpu_arch_isa_flags. */
1065 if (flag_code == CODE_16BIT)
1067 if (count > 8)
1069 memcpy (fragP->fr_literal + fragP->fr_fix,
1070 jump_31, count);
1071 /* Adjust jump offset. */
1072 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1074 else
1075 memcpy (fragP->fr_literal + fragP->fr_fix,
1076 f16_patt[count - 1], count);
1078 else
1080 const char *const *patt = NULL;
1082 if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN)
1084 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1085 switch (cpu_arch_tune)
1087 case PROCESSOR_UNKNOWN:
1088 /* We use cpu_arch_isa_flags to check if we SHOULD
1089 optimize with nops. */
1090 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1091 patt = alt_long_patt;
1092 else
1093 patt = f32_patt;
1094 break;
1095 case PROCESSOR_PENTIUM4:
1096 case PROCESSOR_NOCONA:
1097 case PROCESSOR_CORE:
1098 case PROCESSOR_CORE2:
1099 case PROCESSOR_COREI7:
1100 case PROCESSOR_L1OM:
1101 case PROCESSOR_K1OM:
1102 case PROCESSOR_GENERIC64:
1103 patt = alt_long_patt;
1104 break;
1105 case PROCESSOR_K6:
1106 case PROCESSOR_ATHLON:
1107 case PROCESSOR_K8:
1108 case PROCESSOR_AMDFAM10:
1109 case PROCESSOR_BD:
1110 patt = alt_short_patt;
1111 break;
1112 case PROCESSOR_I386:
1113 case PROCESSOR_I486:
1114 case PROCESSOR_PENTIUM:
1115 case PROCESSOR_PENTIUMPRO:
1116 case PROCESSOR_GENERIC32:
1117 patt = f32_patt;
1118 break;
1121 else
1123 switch (fragP->tc_frag_data.tune)
1125 case PROCESSOR_UNKNOWN:
1126 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1127 PROCESSOR_UNKNOWN. */
1128 abort ();
1129 break;
1131 case PROCESSOR_I386:
1132 case PROCESSOR_I486:
1133 case PROCESSOR_PENTIUM:
1134 case PROCESSOR_K6:
1135 case PROCESSOR_ATHLON:
1136 case PROCESSOR_K8:
1137 case PROCESSOR_AMDFAM10:
1138 case PROCESSOR_BD:
1139 case PROCESSOR_GENERIC32:
1140 /* We use cpu_arch_isa_flags to check if we CAN optimize
1141 with nops. */
1142 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1143 patt = alt_short_patt;
1144 else
1145 patt = f32_patt;
1146 break;
1147 case PROCESSOR_PENTIUMPRO:
1148 case PROCESSOR_PENTIUM4:
1149 case PROCESSOR_NOCONA:
1150 case PROCESSOR_CORE:
1151 case PROCESSOR_CORE2:
1152 case PROCESSOR_COREI7:
1153 case PROCESSOR_L1OM:
1154 case PROCESSOR_K1OM:
1155 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1156 patt = alt_long_patt;
1157 else
1158 patt = f32_patt;
1159 break;
1160 case PROCESSOR_GENERIC64:
1161 patt = alt_long_patt;
1162 break;
1166 if (patt == f32_patt)
1168 /* If the padding is less than the limit, we use the normal
1169 nop patterns. Otherwise, we use a jump instruction and adjust
1170 its offset. */
1171 int limit;
1173 /* For 64bit, the limit is 3 bytes. */
1174 if (flag_code == CODE_64BIT
1175 && fragP->tc_frag_data.isa_flags.bitfield.cpulm)
1176 limit = 3;
1177 else
1178 limit = 15;
1179 if (count < limit)
1180 memcpy (fragP->fr_literal + fragP->fr_fix,
1181 patt[count - 1], count);
1182 else
1184 memcpy (fragP->fr_literal + fragP->fr_fix,
1185 jump_31, count);
1186 /* Adjust jump offset. */
1187 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1190 else
1192 /* The maximum length of an instruction is 15 bytes. If the
1193 padding is greater than 15 bytes and we don't use a jump,
1194 we have to break it into smaller pieces. */
1195 int padding = count;
1196 while (padding > 15)
1198 padding -= 15;
1199 memcpy (fragP->fr_literal + fragP->fr_fix + padding,
1200 patt [14], 15);
1203 if (padding)
1204 memcpy (fragP->fr_literal + fragP->fr_fix,
1205 patt [padding - 1], padding);
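/* Editorial trace of the loop above for an assumed count of 40 (not from
   the original source): the 15-byte pattern patt[14] is copied at offsets
   25 and 10, then the trailing memcpy places the 10-byte pattern patt[9]
   at offset 0, giving exactly 40 bytes of padding.  */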
1208 fragP->fr_var = count;
1211 static INLINE int
1212 operand_type_all_zero (const union i386_operand_type *x)
1214 switch (ARRAY_SIZE(x->array))
1216 case 3:
1217 if (x->array[2])
1218 return 0;
1219 case 2:
1220 if (x->array[1])
1221 return 0;
1222 case 1:
1223 return !x->array[0];
1224 default:
1225 abort ();
1229 static INLINE void
1230 operand_type_set (union i386_operand_type *x, unsigned int v)
1232 switch (ARRAY_SIZE(x->array))
1234 case 3:
1235 x->array[2] = v;
1236 case 2:
1237 x->array[1] = v;
1238 case 1:
1239 x->array[0] = v;
1240 break;
1241 default:
1242 abort ();
1246 static INLINE int
1247 operand_type_equal (const union i386_operand_type *x,
1248 const union i386_operand_type *y)
1250 switch (ARRAY_SIZE(x->array))
1252 case 3:
1253 if (x->array[2] != y->array[2])
1254 return 0;
1255 case 2:
1256 if (x->array[1] != y->array[1])
1257 return 0;
1258 case 1:
1259 return x->array[0] == y->array[0];
1260 break;
1261 default:
1262 abort ();
1266 static INLINE int
1267 cpu_flags_all_zero (const union i386_cpu_flags *x)
1269 switch (ARRAY_SIZE(x->array))
1271 case 3:
1272 if (x->array[2])
1273 return 0;
1274 case 2:
1275 if (x->array[1])
1276 return 0;
1277 case 1:
1278 return !x->array[0];
1279 default:
1280 abort ();
1284 static INLINE void
1285 cpu_flags_set (union i386_cpu_flags *x, unsigned int v)
1287 switch (ARRAY_SIZE(x->array))
1289 case 3:
1290 x->array[2] = v;
1291 case 2:
1292 x->array[1] = v;
1293 case 1:
1294 x->array[0] = v;
1295 break;
1296 default:
1297 abort ();
1301 static INLINE int
1302 cpu_flags_equal (const union i386_cpu_flags *x,
1303 const union i386_cpu_flags *y)
1305 switch (ARRAY_SIZE(x->array))
1307 case 3:
1308 if (x->array[2] != y->array[2])
1309 return 0;
1310 case 2:
1311 if (x->array[1] != y->array[1])
1312 return 0;
1313 case 1:
1314 return x->array[0] == y->array[0];
1315 break;
1316 default:
1317 abort ();
1321 static INLINE int
1322 cpu_flags_check_cpu64 (i386_cpu_flags f)
1324 return !((flag_code == CODE_64BIT && f.bitfield.cpuno64)
1325 || (flag_code != CODE_64BIT && f.bitfield.cpu64));
1328 static INLINE i386_cpu_flags
1329 cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y)
1331 switch (ARRAY_SIZE (x.array))
1333 case 3:
1334 x.array [2] &= y.array [2];
1335 case 2:
1336 x.array [1] &= y.array [1];
1337 case 1:
1338 x.array [0] &= y.array [0];
1339 break;
1340 default:
1341 abort ();
1343 return x;
1346 static INLINE i386_cpu_flags
1347 cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y)
1349 switch (ARRAY_SIZE (x.array))
1351 case 3:
1352 x.array [2] |= y.array [2];
1353 case 2:
1354 x.array [1] |= y.array [1];
1355 case 1:
1356 x.array [0] |= y.array [0];
1357 break;
1358 default:
1359 abort ();
1361 return x;
1364 static INLINE i386_cpu_flags
1365 cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
1367 switch (ARRAY_SIZE (x.array))
1369 case 3:
1370 x.array [2] &= ~y.array [2];
1371 case 2:
1372 x.array [1] &= ~y.array [1];
1373 case 1:
1374 x.array [0] &= ~y.array [0];
1375 break;
1376 default:
1377 abort ();
1379 return x;
1382 #define CPU_FLAGS_ARCH_MATCH 0x1
1383 #define CPU_FLAGS_64BIT_MATCH 0x2
1384 #define CPU_FLAGS_AES_MATCH 0x4
1385 #define CPU_FLAGS_PCLMUL_MATCH 0x8
1386 #define CPU_FLAGS_AVX_MATCH 0x10
1388 #define CPU_FLAGS_32BIT_MATCH \
1389 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
1390 | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
1391 #define CPU_FLAGS_PERFECT_MATCH \
1392 (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
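/* Editorial note, not in the original source: with the bit values above,
   CPU_FLAGS_32BIT_MATCH works out to 0x1 | 0x4 | 0x8 | 0x10 == 0x1d and
   CPU_FLAGS_PERFECT_MATCH to 0x1f, i.e. every individual match bit set.  */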
1394 /* Return CPU flags match bits. */
1396 static int
1397 cpu_flags_match (const insn_template *t)
1399 i386_cpu_flags x = t->cpu_flags;
1400 int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;
1402 x.bitfield.cpu64 = 0;
1403 x.bitfield.cpuno64 = 0;
1405 if (cpu_flags_all_zero (&x))
1407 /* This instruction is available on all archs. */
1408 match |= CPU_FLAGS_32BIT_MATCH;
1410 else
1412 /* This instruction is available only on some archs. */
1413 i386_cpu_flags cpu = cpu_arch_flags;
1415 cpu.bitfield.cpu64 = 0;
1416 cpu.bitfield.cpuno64 = 0;
1417 cpu = cpu_flags_and (x, cpu);
1418 if (!cpu_flags_all_zero (&cpu))
1420 if (x.bitfield.cpuavx)
1422 /* We only need to check AES/PCLMUL/SSE2AVX with AVX. */
1423 if (cpu.bitfield.cpuavx)
1425 /* Check SSE2AVX. */
1426 if (!t->opcode_modifier.sse2avx || sse2avx)
1428 match |= (CPU_FLAGS_ARCH_MATCH
1429 | CPU_FLAGS_AVX_MATCH);
1430 /* Check AES. */
1431 if (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
1432 match |= CPU_FLAGS_AES_MATCH;
1433 /* Check PCLMUL. */
1434 if (!x.bitfield.cpupclmul
1435 || cpu.bitfield.cpupclmul)
1436 match |= CPU_FLAGS_PCLMUL_MATCH;
1439 else
1440 match |= CPU_FLAGS_ARCH_MATCH;
1442 else
1443 match |= CPU_FLAGS_32BIT_MATCH;
1446 return match;
1449 static INLINE i386_operand_type
1450 operand_type_and (i386_operand_type x, i386_operand_type y)
1452 switch (ARRAY_SIZE (x.array))
1454 case 3:
1455 x.array [2] &= y.array [2];
1456 case 2:
1457 x.array [1] &= y.array [1];
1458 case 1:
1459 x.array [0] &= y.array [0];
1460 break;
1461 default:
1462 abort ();
1464 return x;
1467 static INLINE i386_operand_type
1468 operand_type_or (i386_operand_type x, i386_operand_type y)
1470 switch (ARRAY_SIZE (x.array))
1472 case 3:
1473 x.array [2] |= y.array [2];
1474 case 2:
1475 x.array [1] |= y.array [1];
1476 case 1:
1477 x.array [0] |= y.array [0];
1478 break;
1479 default:
1480 abort ();
1482 return x;
1485 static INLINE i386_operand_type
1486 operand_type_xor (i386_operand_type x, i386_operand_type y)
1488 switch (ARRAY_SIZE (x.array))
1490 case 3:
1491 x.array [2] ^= y.array [2];
1492 case 2:
1493 x.array [1] ^= y.array [1];
1494 case 1:
1495 x.array [0] ^= y.array [0];
1496 break;
1497 default:
1498 abort ();
1500 return x;
1503 static const i386_operand_type acc32 = OPERAND_TYPE_ACC32;
1504 static const i386_operand_type acc64 = OPERAND_TYPE_ACC64;
1505 static const i386_operand_type control = OPERAND_TYPE_CONTROL;
1506 static const i386_operand_type inoutportreg
1507 = OPERAND_TYPE_INOUTPORTREG;
1508 static const i386_operand_type reg16_inoutportreg
1509 = OPERAND_TYPE_REG16_INOUTPORTREG;
1510 static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
1511 static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
1512 static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
1513 static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
1514 static const i386_operand_type anydisp
1515 = OPERAND_TYPE_ANYDISP;
1516 static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
1517 static const i386_operand_type regymm = OPERAND_TYPE_REGYMM;
1518 static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
1519 static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
1520 static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
1521 static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
1522 static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
1523 static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
1524 static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
1525 static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
1526 static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
1527 static const i386_operand_type vec_imm4 = OPERAND_TYPE_VEC_IMM4;
1529 enum operand_type
1531 reg,
1532 imm,
1533 disp,
1534 anymem
1537 static INLINE int
1538 operand_type_check (i386_operand_type t, enum operand_type c)
1540 switch (c)
1542 case reg:
1543 return (t.bitfield.reg8
1544 || t.bitfield.reg16
1545 || t.bitfield.reg32
1546 || t.bitfield.reg64);
1548 case imm:
1549 return (t.bitfield.imm8
1550 || t.bitfield.imm8s
1551 || t.bitfield.imm16
1552 || t.bitfield.imm32
1553 || t.bitfield.imm32s
1554 || t.bitfield.imm64);
1556 case disp:
1557 return (t.bitfield.disp8
1558 || t.bitfield.disp16
1559 || t.bitfield.disp32
1560 || t.bitfield.disp32s
1561 || t.bitfield.disp64);
1563 case anymem:
1564 return (t.bitfield.disp8
1565 || t.bitfield.disp16
1566 || t.bitfield.disp32
1567 || t.bitfield.disp32s
1568 || t.bitfield.disp64
1569 || t.bitfield.baseindex);
1571 default:
1572 abort ();
1575 return 0;
1578 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
1579 operand J for instruction template T. */
1581 static INLINE int
1582 match_reg_size (const insn_template *t, unsigned int j)
1584 return !((i.types[j].bitfield.byte
1585 && !t->operand_types[j].bitfield.byte)
1586 || (i.types[j].bitfield.word
1587 && !t->operand_types[j].bitfield.word)
1588 || (i.types[j].bitfield.dword
1589 && !t->operand_types[j].bitfield.dword)
1590 || (i.types[j].bitfield.qword
1591 && !t->operand_types[j].bitfield.qword));
1594 /* Return 1 if there is no conflict in any size on operand J for
1595 instruction template T. */
1597 static INLINE int
1598 match_mem_size (const insn_template *t, unsigned int j)
1600 return (match_reg_size (t, j)
1601 && !((i.types[j].bitfield.unspecified
1602 && !t->operand_types[j].bitfield.unspecified)
1603 || (i.types[j].bitfield.fword
1604 && !t->operand_types[j].bitfield.fword)
1605 || (i.types[j].bitfield.tbyte
1606 && !t->operand_types[j].bitfield.tbyte)
1607 || (i.types[j].bitfield.xmmword
1608 && !t->operand_types[j].bitfield.xmmword)
1609 || (i.types[j].bitfield.ymmword
1610 && !t->operand_types[j].bitfield.ymmword)));
1613 /* Return 1 if there is no size conflict on any operands for
1614 instruction template T. */
1616 static INLINE int
1617 operand_size_match (const insn_template *t)
1619 unsigned int j;
1620 int match = 1;
1622 /* Don't check jump instructions. */
1623 if (t->opcode_modifier.jump
1624 || t->opcode_modifier.jumpbyte
1625 || t->opcode_modifier.jumpdword
1626 || t->opcode_modifier.jumpintersegment)
1627 return match;
1629 /* Check memory and accumulator operand size. */
1630 for (j = 0; j < i.operands; j++)
1632 if (t->operand_types[j].bitfield.anysize)
1633 continue;
1635 if (t->operand_types[j].bitfield.acc && !match_reg_size (t, j))
1637 match = 0;
1638 break;
1641 if (i.types[j].bitfield.mem && !match_mem_size (t, j))
1643 match = 0;
1644 break;
1648 if (match)
1649 return match;
1650 else if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
1652 mismatch:
1653 i.error = operand_size_mismatch;
1654 return 0;
1657 /* Check reverse. */
1658 gas_assert (i.operands == 2);
1660 match = 1;
1661 for (j = 0; j < 2; j++)
1663 if (t->operand_types[j].bitfield.acc
1664 && !match_reg_size (t, j ? 0 : 1))
1665 goto mismatch;
1667 if (i.types[j].bitfield.mem
1668 && !match_mem_size (t, j ? 0 : 1))
1669 goto mismatch;
1672 return match;
1675 static INLINE int
1676 operand_type_match (i386_operand_type overlap,
1677 i386_operand_type given)
1679 i386_operand_type temp = overlap;
1681 temp.bitfield.jumpabsolute = 0;
1682 temp.bitfield.unspecified = 0;
1683 temp.bitfield.byte = 0;
1684 temp.bitfield.word = 0;
1685 temp.bitfield.dword = 0;
1686 temp.bitfield.fword = 0;
1687 temp.bitfield.qword = 0;
1688 temp.bitfield.tbyte = 0;
1689 temp.bitfield.xmmword = 0;
1690 temp.bitfield.ymmword = 0;
1691 if (operand_type_all_zero (&temp))
1692 goto mismatch;
1694 if (given.bitfield.baseindex == overlap.bitfield.baseindex
1695 && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute)
1696 return 1;
1698 mismatch:
1699 i.error = operand_type_mismatch;
1700 return 0;
1703 /* If given types g0 and g1 are registers they must be of the same type
1704 unless the expected operand type register overlap is null.
1705 Note that Acc in a template matches every size of reg. */
1707 static INLINE int
1708 operand_type_register_match (i386_operand_type m0,
1709 i386_operand_type g0,
1710 i386_operand_type t0,
1711 i386_operand_type m1,
1712 i386_operand_type g1,
1713 i386_operand_type t1)
1715 if (!operand_type_check (g0, reg))
1716 return 1;
1718 if (!operand_type_check (g1, reg))
1719 return 1;
1721 if (g0.bitfield.reg8 == g1.bitfield.reg8
1722 && g0.bitfield.reg16 == g1.bitfield.reg16
1723 && g0.bitfield.reg32 == g1.bitfield.reg32
1724 && g0.bitfield.reg64 == g1.bitfield.reg64)
1725 return 1;
1727 if (m0.bitfield.acc)
1729 t0.bitfield.reg8 = 1;
1730 t0.bitfield.reg16 = 1;
1731 t0.bitfield.reg32 = 1;
1732 t0.bitfield.reg64 = 1;
1735 if (m1.bitfield.acc)
1737 t1.bitfield.reg8 = 1;
1738 t1.bitfield.reg16 = 1;
1739 t1.bitfield.reg32 = 1;
1740 t1.bitfield.reg64 = 1;
1743 if (!(t0.bitfield.reg8 & t1.bitfield.reg8)
1744 && !(t0.bitfield.reg16 & t1.bitfield.reg16)
1745 && !(t0.bitfield.reg32 & t1.bitfield.reg32)
1746 && !(t0.bitfield.reg64 & t1.bitfield.reg64))
1747 return 1;
1749 i.error = register_type_mismatch;
1751 return 0;
1754 static INLINE unsigned int
1755 mode_from_disp_size (i386_operand_type t)
1757 if (t.bitfield.disp8)
1758 return 1;
1759 else if (t.bitfield.disp16
1760 || t.bitfield.disp32
1761 || t.bitfield.disp32s)
1762 return 2;
1763 else
1764 return 0;
1767 static INLINE int
1768 fits_in_signed_byte (offsetT num)
1770 return (num >= -128) && (num <= 127);
1773 static INLINE int
1774 fits_in_unsigned_byte (offsetT num)
1776 return (num & 0xff) == num;
1779 static INLINE int
1780 fits_in_unsigned_word (offsetT num)
1782 return (num & 0xffff) == num;
1785 static INLINE int
1786 fits_in_signed_word (offsetT num)
1788 return (-32768 <= num) && (num <= 32767);
1791 static INLINE int
1792 fits_in_signed_long (offsetT num ATTRIBUTE_UNUSED)
1794 #ifndef BFD64
1795 return 1;
1796 #else
1797 return (!(((offsetT) -1 << 31) & num)
1798 || (((offsetT) -1 << 31) & num) == ((offsetT) -1 << 31));
1799 #endif
1800 } /* fits_in_signed_long() */
1802 static INLINE int
1803 fits_in_unsigned_long (offsetT num ATTRIBUTE_UNUSED)
1805 #ifndef BFD64
1806 return 1;
1807 #else
1808 return (num & (((offsetT) 2 << 31) - 1)) == num;
1809 #endif
1810 } /* fits_in_unsigned_long() */
1812 static INLINE int
1813 fits_in_imm4 (offsetT num)
1815 return (num & 0xf) == num;
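/* Editorial worked examples, not part of the original source:
   fits_in_signed_byte (-128) and fits_in_unsigned_byte (255) are both
   non-zero, fits_in_signed_word (0x8000) is 0, and fits_in_imm4 accepts
   exactly the values 0 through 15.  */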
1818 static i386_operand_type
1819 smallest_imm_type (offsetT num)
1821 i386_operand_type t;
1823 operand_type_set (&t, 0);
1824 t.bitfield.imm64 = 1;
1826 if (cpu_arch_tune != PROCESSOR_I486 && num == 1)
1828 /* This code is disabled on the 486 because all the Imm1 forms
1829 in the opcode table are slower on the i486. They're the
1830 versions with the implicitly specified single-position
1831 displacement, which has another syntax if you really want to
1832 use that form. */
1833 t.bitfield.imm1 = 1;
1834 t.bitfield.imm8 = 1;
1835 t.bitfield.imm8s = 1;
1836 t.bitfield.imm16 = 1;
1837 t.bitfield.imm32 = 1;
1838 t.bitfield.imm32s = 1;
1840 else if (fits_in_signed_byte (num))
1842 t.bitfield.imm8 = 1;
1843 t.bitfield.imm8s = 1;
1844 t.bitfield.imm16 = 1;
1845 t.bitfield.imm32 = 1;
1846 t.bitfield.imm32s = 1;
1848 else if (fits_in_unsigned_byte (num))
1850 t.bitfield.imm8 = 1;
1851 t.bitfield.imm16 = 1;
1852 t.bitfield.imm32 = 1;
1853 t.bitfield.imm32s = 1;
1855 else if (fits_in_signed_word (num) || fits_in_unsigned_word (num))
1857 t.bitfield.imm16 = 1;
1858 t.bitfield.imm32 = 1;
1859 t.bitfield.imm32s = 1;
1861 else if (fits_in_signed_long (num))
1863 t.bitfield.imm32 = 1;
1864 t.bitfield.imm32s = 1;
1866 else if (fits_in_unsigned_long (num))
1867 t.bitfield.imm32 = 1;
1869 return t;
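/* Editorial example, not in the original source: for num == 200 the
   returned type has imm64 (set up front), imm8, imm16, imm32 and imm32s,
   but not imm8s, since 200 does not fit in a signed byte.  */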
1872 static offsetT
1873 offset_in_range (offsetT val, int size)
1875 addressT mask;
1877 switch (size)
1879 case 1: mask = ((addressT) 1 << 8) - 1; break;
1880 case 2: mask = ((addressT) 1 << 16) - 1; break;
1881 case 4: mask = ((addressT) 2 << 31) - 1; break;
1882 #ifdef BFD64
1883 case 8: mask = ((addressT) 2 << 63) - 1; break;
1884 #endif
1885 default: abort ();
1888 #ifdef BFD64
1889 /* If BFD64, sign extend val for 32bit address mode. */
1890 if (flag_code != CODE_64BIT
1891 || i.prefix[ADDR_PREFIX])
1892 if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
1893 val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
1894 #endif
1896 if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
1898 char buf1[40], buf2[40];
1900 sprint_value (buf1, val);
1901 sprint_value (buf2, val & mask);
1902 as_warn (_("%s shortened to %s"), buf1, buf2);
1904 return val & mask;
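/* Editorial examples, not part of the original source: offset_in_range
   (0x1234, 1) masks with 0xff, warns that the value was shortened (via
   sprint_value formatting) and returns 0x34, while offset_in_range (-1, 2)
   returns 0xffff silently because the discarded high bits are all ones.  */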
1907 enum PREFIX_GROUP
1909 PREFIX_EXIST = 0,
1910 PREFIX_LOCK,
1911 PREFIX_REP,
1912 PREFIX_OTHER
1915 /* Returns
1916 a. PREFIX_EXIST if attempting to add a prefix where one from the
1917 same class already exists.
1918 b. PREFIX_LOCK if lock prefix is added.
1919 c. PREFIX_REP if rep/repne prefix is added.
1920 d. PREFIX_OTHER if other prefix is added.
1923 static enum PREFIX_GROUP
1924 add_prefix (unsigned int prefix)
1926 enum PREFIX_GROUP ret = PREFIX_OTHER;
1927 unsigned int q;
1929 if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
1930 && flag_code == CODE_64BIT)
1932 if ((i.prefix[REX_PREFIX] & prefix & REX_W)
1933 || ((i.prefix[REX_PREFIX] & (REX_R | REX_X | REX_B))
1934 && (prefix & (REX_R | REX_X | REX_B))))
1935 ret = PREFIX_EXIST;
1936 q = REX_PREFIX;
1938 else
1940 switch (prefix)
1942 default:
1943 abort ();
1945 case CS_PREFIX_OPCODE:
1946 case DS_PREFIX_OPCODE:
1947 case ES_PREFIX_OPCODE:
1948 case FS_PREFIX_OPCODE:
1949 case GS_PREFIX_OPCODE:
1950 case SS_PREFIX_OPCODE:
1951 q = SEG_PREFIX;
1952 break;
1954 case REPNE_PREFIX_OPCODE:
1955 case REPE_PREFIX_OPCODE:
1956 q = REP_PREFIX;
1957 ret = PREFIX_REP;
1958 break;
1960 case LOCK_PREFIX_OPCODE:
1961 q = LOCK_PREFIX;
1962 ret = PREFIX_LOCK;
1963 break;
1965 case FWAIT_OPCODE:
1966 q = WAIT_PREFIX;
1967 break;
1969 case ADDR_PREFIX_OPCODE:
1970 q = ADDR_PREFIX;
1971 break;
1973 case DATA_PREFIX_OPCODE:
1974 q = DATA_PREFIX;
1975 break;
1977 if (i.prefix[q] != 0)
1978 ret = PREFIX_EXIST;
1981 if (ret)
1983 if (!i.prefix[q])
1984 ++i.prefixes;
1985 i.prefix[q] |= prefix;
1987 else
1988 as_bad (_("same type of prefix used twice"));
1990 return ret;
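/* Editorial example, not in the original source: if a LOCK prefix has
   already been recorded, a second add_prefix (LOCK_PREFIX_OPCODE) finds
   i.prefix[LOCK_PREFIX] non-zero, so ret is downgraded to PREFIX_EXIST
   (which is 0), the "same type of prefix used twice" diagnostic is issued
   and PREFIX_EXIST is returned.  */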
1993 static void
1994 update_code_flag (int value, int check)
1996 PRINTF_LIKE ((*as_error));
1998 flag_code = (enum flag_code) value;
1999 if (flag_code == CODE_64BIT)
2001 cpu_arch_flags.bitfield.cpu64 = 1;
2002 cpu_arch_flags.bitfield.cpuno64 = 0;
2004 else
2006 cpu_arch_flags.bitfield.cpu64 = 0;
2007 cpu_arch_flags.bitfield.cpuno64 = 1;
2009 if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm)
2011 if (check)
2012 as_error = as_fatal;
2013 else
2014 as_error = as_bad;
2015 (*as_error) (_("64bit mode not supported on `%s'."),
2016 cpu_arch_name ? cpu_arch_name : default_arch);
2018 if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
2020 if (check)
2021 as_error = as_fatal;
2022 else
2023 as_error = as_bad;
2024 (*as_error) (_("32bit mode not supported on `%s'."),
2025 cpu_arch_name ? cpu_arch_name : default_arch);
2027 stackop_size = '\0';
2030 static void
2031 set_code_flag (int value)
2033 update_code_flag (value, 0);
2036 static void
2037 set_16bit_gcc_code_flag (int new_code_flag)
2039 flag_code = (enum flag_code) new_code_flag;
2040 if (flag_code != CODE_16BIT)
2041 abort ();
2042 cpu_arch_flags.bitfield.cpu64 = 0;
2043 cpu_arch_flags.bitfield.cpuno64 = 1;
2044 stackop_size = LONG_MNEM_SUFFIX;
2047 static void
2048 set_intel_syntax (int syntax_flag)
2050 /* Find out if register prefixing is specified. */
2051 int ask_naked_reg = 0;
2053 SKIP_WHITESPACE ();
2054 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2056 char *string = input_line_pointer;
2057 int e = get_symbol_end ();
2059 if (strcmp (string, "prefix") == 0)
2060 ask_naked_reg = 1;
2061 else if (strcmp (string, "noprefix") == 0)
2062 ask_naked_reg = -1;
2063 else
2064 as_bad (_("bad argument to syntax directive."));
2065 *input_line_pointer = e;
2067 demand_empty_rest_of_line ();
2069 intel_syntax = syntax_flag;
2071 if (ask_naked_reg == 0)
2072 allow_naked_reg = (intel_syntax
2073 && (bfd_get_symbol_leading_char (stdoutput) != '\0'));
2074 else
2075 allow_naked_reg = (ask_naked_reg < 0);
2077 expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);
2079 identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
2080 identifier_chars['$'] = intel_syntax ? '$' : 0;
2081 register_prefix = allow_naked_reg ? "" : "%";
2084 static void
2085 set_intel_mnemonic (int mnemonic_flag)
2087 intel_mnemonic = mnemonic_flag;
2090 static void
2091 set_allow_index_reg (int flag)
2093 allow_index_reg = flag;
2096 static void
2097 set_sse_check (int dummy ATTRIBUTE_UNUSED)
2099 SKIP_WHITESPACE ();
2101 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2103 char *string = input_line_pointer;
2104 int e = get_symbol_end ();
2106 if (strcmp (string, "none") == 0)
2107 sse_check = sse_check_none;
2108 else if (strcmp (string, "warning") == 0)
2109 sse_check = sse_check_warning;
2110 else if (strcmp (string, "error") == 0)
2111 sse_check = sse_check_error;
2112 else
2113 as_bad (_("bad argument to sse_check directive."));
2114 *input_line_pointer = e;
2116 else
2117 as_bad (_("missing argument for sse_check directive"));
2119 demand_empty_rest_of_line ();
2122 static void
2123 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
2124 i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
2126 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2127 static const char *arch;
2129 /* Intel L1OM and K1OM are only supported on ELF. */
2130 if (!IS_ELF)
2131 return;
2133 if (!arch)
2135 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2136 use default_arch. */
2137 arch = cpu_arch_name;
2138 if (!arch)
2139 arch = default_arch;
2142 /* If we are targeting Intel L1OM, we must enable it. */
2143 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
2144 || new_flag.bitfield.cpul1om)
2145 return;
2147 /* If we are targeting Intel K1OM, we must enable it. */
2148 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_K1OM
2149 || new_flag.bitfield.cpuk1om)
2150 return;
2152 as_bad (_("`%s' is not supported on `%s'"), name, arch);
2153 #endif
2156 static void
2157 set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
2159 SKIP_WHITESPACE ();
2161 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2163 char *string = input_line_pointer;
2164 int e = get_symbol_end ();
2165 unsigned int j;
2166 i386_cpu_flags flags;
2168 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
2170 if (strcmp (string, cpu_arch[j].name) == 0)
2172 check_cpu_arch_compatible (string, cpu_arch[j].flags);
2174 if (*string != '.')
2176 cpu_arch_name = cpu_arch[j].name;
2177 cpu_sub_arch_name = NULL;
2178 cpu_arch_flags = cpu_arch[j].flags;
2179 if (flag_code == CODE_64BIT)
2181 cpu_arch_flags.bitfield.cpu64 = 1;
2182 cpu_arch_flags.bitfield.cpuno64 = 0;
2184 else
2186 cpu_arch_flags.bitfield.cpu64 = 0;
2187 cpu_arch_flags.bitfield.cpuno64 = 1;
2189 cpu_arch_isa = cpu_arch[j].type;
2190 cpu_arch_isa_flags = cpu_arch[j].flags;
2191 if (!cpu_arch_tune_set)
2193 cpu_arch_tune = cpu_arch_isa;
2194 cpu_arch_tune_flags = cpu_arch_isa_flags;
2196 break;
2199 if (!cpu_arch[j].negated)
2200 flags = cpu_flags_or (cpu_arch_flags,
2201 cpu_arch[j].flags);
2202 else
2203 flags = cpu_flags_and_not (cpu_arch_flags,
2204 cpu_arch[j].flags);
2205 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
2207 if (cpu_sub_arch_name)
2209 char *name = cpu_sub_arch_name;
2210 cpu_sub_arch_name = concat (name,
2211 cpu_arch[j].name,
2212 (const char *) NULL);
2213 free (name);
2215 else
2216 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
2217 cpu_arch_flags = flags;
2218 cpu_arch_isa_flags = flags;
2220 *input_line_pointer = e;
2221 demand_empty_rest_of_line ();
2222 return;
2225 if (j >= ARRAY_SIZE (cpu_arch))
2226 as_bad (_("no such architecture: `%s'"), string);
2228 *input_line_pointer = e;
2230 else
2231 as_bad (_("missing cpu architecture"));
2233 no_cond_jump_promotion = 0;
2234 if (*input_line_pointer == ','
2235 && !is_end_of_line[(unsigned char) input_line_pointer[1]])
2237 char *string = ++input_line_pointer;
2238 int e = get_symbol_end ();
2240 if (strcmp (string, "nojumps") == 0)
2241 no_cond_jump_promotion = 1;
2242 else if (strcmp (string, "jumps") == 0)
2244 else
2245 as_bad (_("no such architecture modifier: `%s'"), string);
2247 *input_line_pointer = e;
2250 demand_empty_rest_of_line ();
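/* So ".arch i686,nojumps" switches to the i686 flag set and suppresses
   promotion of out-of-range conditional jumps, while a name starting with
   '.' ORs in (or, when negated, masks off) a single extension's flag bits.  */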
2253 enum bfd_architecture
2254 i386_arch (void)
2256 if (cpu_arch_isa == PROCESSOR_L1OM)
2258 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2259 || flag_code != CODE_64BIT)
2260 as_fatal (_("Intel L1OM is 64bit ELF only"));
2261 return bfd_arch_l1om;
2263 else if (cpu_arch_isa == PROCESSOR_K1OM)
2265 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2266 || flag_code != CODE_64BIT)
2267 as_fatal (_("Intel K1OM is 64bit ELF only"));
2268 return bfd_arch_k1om;
2270 else
2271 return bfd_arch_i386;
2274 unsigned long
2275 i386_mach (void)
2277 if (!strncmp (default_arch, "x86_64", 6))
2279 if (cpu_arch_isa == PROCESSOR_L1OM)
2281 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2282 || default_arch[6] != '\0')
2283 as_fatal (_("Intel L1OM is 64bit ELF only"));
2284 return bfd_mach_l1om;
2286 else if (cpu_arch_isa == PROCESSOR_K1OM)
2288 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2289 || default_arch[6] != '\0')
2290 as_fatal (_("Intel K1OM is 64bit ELF only"));
2291 return bfd_mach_k1om;
2293 else if (default_arch[6] == '\0')
2294 return bfd_mach_x86_64;
2295 else
2296 return bfd_mach_x64_32;
2298 else if (!strcmp (default_arch, "i386"))
2299 return bfd_mach_i386_i386;
2300 else
2301 as_fatal (_("unknown architecture"));
2304 void
2305 md_begin (void)
2307 const char *hash_err;
2309 /* Initialize op_hash hash table. */
2310 op_hash = hash_new ();
2313 const insn_template *optab;
2314 templates *core_optab;
2316 /* Setup for loop. */
2317 optab = i386_optab;
2318 core_optab = (templates *) xmalloc (sizeof (templates));
2319 core_optab->start = optab;
2321 while (1)
2323 ++optab;
2324 if (optab->name == NULL
2325 || strcmp (optab->name, (optab - 1)->name) != 0)
2327 /* different name --> ship out current template list;
2328 add to hash table; & begin anew. */
2329 core_optab->end = optab;
2330 hash_err = hash_insert (op_hash,
2331 (optab - 1)->name,
2332 (void *) core_optab);
2333 if (hash_err)
2335 as_fatal (_("internal Error: Can't hash %s: %s"),
2336 (optab - 1)->name,
2337 hash_err);
2339 if (optab->name == NULL)
2340 break;
2341 core_optab = (templates *) xmalloc (sizeof (templates));
2342 core_optab->start = optab;
2347 /* Initialize reg_hash hash table. */
2348 reg_hash = hash_new ();
2350 const reg_entry *regtab;
2351 unsigned int regtab_size = i386_regtab_size;
2353 for (regtab = i386_regtab; regtab_size--; regtab++)
2355 hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
2356 if (hash_err)
2357 as_fatal (_("internal Error: Can't hash %s: %s"),
2358 regtab->reg_name,
2359 hash_err);
2363 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
2365 int c;
2366 char *p;
2368 for (c = 0; c < 256; c++)
2370 if (ISDIGIT (c))
2372 digit_chars[c] = c;
2373 mnemonic_chars[c] = c;
2374 register_chars[c] = c;
2375 operand_chars[c] = c;
2377 else if (ISLOWER (c))
2379 mnemonic_chars[c] = c;
2380 register_chars[c] = c;
2381 operand_chars[c] = c;
2383 else if (ISUPPER (c))
2385 mnemonic_chars[c] = TOLOWER (c);
2386 register_chars[c] = mnemonic_chars[c];
2387 operand_chars[c] = c;
2390 if (ISALPHA (c) || ISDIGIT (c))
2391 identifier_chars[c] = c;
2392 else if (c >= 128)
2394 identifier_chars[c] = c;
2395 operand_chars[c] = c;
2399 #ifdef LEX_AT
2400 identifier_chars['@'] = '@';
2401 #endif
2402 #ifdef LEX_QM
2403 identifier_chars['?'] = '?';
2404 operand_chars['?'] = '?';
2405 #endif
2406 digit_chars['-'] = '-';
2407 mnemonic_chars['_'] = '_';
2408 mnemonic_chars['-'] = '-';
2409 mnemonic_chars['.'] = '.';
2410 identifier_chars['_'] = '_';
2411 identifier_chars['.'] = '.';
2413 for (p = operand_special_chars; *p != '\0'; p++)
2414 operand_chars[(unsigned char) *p] = *p;
2417 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2418 if (IS_ELF)
2420 record_alignment (text_section, 2);
2421 record_alignment (data_section, 2);
2422 record_alignment (bss_section, 2);
2424 #endif
2426 if (flag_code == CODE_64BIT)
2428 #if defined (OBJ_COFF) && defined (TE_PE)
2429 x86_dwarf2_return_column = (OUTPUT_FLAVOR == bfd_target_coff_flavour
2430 ? 32 : 16);
2431 #else
2432 x86_dwarf2_return_column = 16;
2433 #endif
2434 x86_cie_data_alignment = -8;
2436 else
2438 x86_dwarf2_return_column = 8;
2439 x86_cie_data_alignment = -4;
2443 void
2444 i386_print_statistics (FILE *file)
2446 hash_print_statistics (file, "i386 opcode", op_hash);
2447 hash_print_statistics (file, "i386 register", reg_hash);
2450 #ifdef DEBUG386
2452 /* Debugging routines for md_assemble. */
2453 static void pte (insn_template *);
2454 static void pt (i386_operand_type);
2455 static void pe (expressionS *);
2456 static void ps (symbolS *);
2458 static void
2459 pi (char *line, i386_insn *x)
2461 unsigned int j;
2463 fprintf (stdout, "%s: template ", line);
2464 pte (&x->tm);
2465 fprintf (stdout, " address: base %s index %s scale %x\n",
2466 x->base_reg ? x->base_reg->reg_name : "none",
2467 x->index_reg ? x->index_reg->reg_name : "none",
2468 x->log2_scale_factor);
2469 fprintf (stdout, " modrm: mode %x reg %x reg/mem %x\n",
2470 x->rm.mode, x->rm.reg, x->rm.regmem);
2471 fprintf (stdout, " sib: base %x index %x scale %x\n",
2472 x->sib.base, x->sib.index, x->sib.scale);
2473 fprintf (stdout, " rex: 64bit %x extX %x extY %x extZ %x\n",
2474 (x->rex & REX_W) != 0,
2475 (x->rex & REX_R) != 0,
2476 (x->rex & REX_X) != 0,
2477 (x->rex & REX_B) != 0);
2478 for (j = 0; j < x->operands; j++)
2480 fprintf (stdout, " #%d: ", j + 1);
2481 pt (x->types[j]);
2482 fprintf (stdout, "\n");
2483 if (x->types[j].bitfield.reg8
2484 || x->types[j].bitfield.reg16
2485 || x->types[j].bitfield.reg32
2486 || x->types[j].bitfield.reg64
2487 || x->types[j].bitfield.regmmx
2488 || x->types[j].bitfield.regxmm
2489 || x->types[j].bitfield.regymm
2490 || x->types[j].bitfield.sreg2
2491 || x->types[j].bitfield.sreg3
2492 || x->types[j].bitfield.control
2493 || x->types[j].bitfield.debug
2494 || x->types[j].bitfield.test)
2495 fprintf (stdout, "%s\n", x->op[j].regs->reg_name);
2496 if (operand_type_check (x->types[j], imm))
2497 pe (x->op[j].imms);
2498 if (operand_type_check (x->types[j], disp))
2499 pe (x->op[j].disps);
2503 static void
2504 pte (insn_template *t)
2506 unsigned int j;
2507 fprintf (stdout, " %d operands ", t->operands);
2508 fprintf (stdout, "opcode %x ", t->base_opcode);
2509 if (t->extension_opcode != None)
2510 fprintf (stdout, "ext %x ", t->extension_opcode);
2511 if (t->opcode_modifier.d)
2512 fprintf (stdout, "D");
2513 if (t->opcode_modifier.w)
2514 fprintf (stdout, "W");
2515 fprintf (stdout, "\n");
2516 for (j = 0; j < t->operands; j++)
2518 fprintf (stdout, " #%d type ", j + 1);
2519 pt (t->operand_types[j]);
2520 fprintf (stdout, "\n");
2524 static void
2525 pe (expressionS *e)
2527 fprintf (stdout, " operation %d\n", e->X_op);
2528 fprintf (stdout, " add_number %ld (%lx)\n",
2529 (long) e->X_add_number, (long) e->X_add_number);
2530 if (e->X_add_symbol)
2532 fprintf (stdout, " add_symbol ");
2533 ps (e->X_add_symbol);
2534 fprintf (stdout, "\n");
2536 if (e->X_op_symbol)
2538 fprintf (stdout, " op_symbol ");
2539 ps (e->X_op_symbol);
2540 fprintf (stdout, "\n");
2544 static void
2545 ps (symbolS *s)
2547 fprintf (stdout, "%s type %s%s",
2548 S_GET_NAME (s),
2549 S_IS_EXTERNAL (s) ? "EXTERNAL " : "",
2550 segment_name (S_GET_SEGMENT (s)));
2553 static struct type_name
2555 i386_operand_type mask;
2556 const char *name;
2558 const type_names[] =
2560 { OPERAND_TYPE_REG8, "r8" },
2561 { OPERAND_TYPE_REG16, "r16" },
2562 { OPERAND_TYPE_REG32, "r32" },
2563 { OPERAND_TYPE_REG64, "r64" },
2564 { OPERAND_TYPE_IMM8, "i8" },
2565 { OPERAND_TYPE_IMM8, "i8s" },
2566 { OPERAND_TYPE_IMM16, "i16" },
2567 { OPERAND_TYPE_IMM32, "i32" },
2568 { OPERAND_TYPE_IMM32S, "i32s" },
2569 { OPERAND_TYPE_IMM64, "i64" },
2570 { OPERAND_TYPE_IMM1, "i1" },
2571 { OPERAND_TYPE_BASEINDEX, "BaseIndex" },
2572 { OPERAND_TYPE_DISP8, "d8" },
2573 { OPERAND_TYPE_DISP16, "d16" },
2574 { OPERAND_TYPE_DISP32, "d32" },
2575 { OPERAND_TYPE_DISP32S, "d32s" },
2576 { OPERAND_TYPE_DISP64, "d64" },
2577 { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
2578 { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
2579 { OPERAND_TYPE_CONTROL, "control reg" },
2580 { OPERAND_TYPE_TEST, "test reg" },
2581 { OPERAND_TYPE_DEBUG, "debug reg" },
2582 { OPERAND_TYPE_FLOATREG, "FReg" },
2583 { OPERAND_TYPE_FLOATACC, "FAcc" },
2584 { OPERAND_TYPE_SREG2, "SReg2" },
2585 { OPERAND_TYPE_SREG3, "SReg3" },
2586 { OPERAND_TYPE_ACC, "Acc" },
2587 { OPERAND_TYPE_JUMPABSOLUTE, "Jump Absolute" },
2588 { OPERAND_TYPE_REGMMX, "rMMX" },
2589 { OPERAND_TYPE_REGXMM, "rXMM" },
2590 { OPERAND_TYPE_REGYMM, "rYMM" },
2591 { OPERAND_TYPE_ESSEG, "es" },
2594 static void
2595 pt (i386_operand_type t)
2597 unsigned int j;
2598 i386_operand_type a;
2600 for (j = 0; j < ARRAY_SIZE (type_names); j++)
2602 a = operand_type_and (t, type_names[j].mask);
2603 if (!operand_type_all_zero (&a))
2604 fprintf (stdout, "%s, ", type_names[j].name);
2606 fflush (stdout);
2609 #endif /* DEBUG386 */
2611 static bfd_reloc_code_real_type
2612 reloc (unsigned int size,
2613 int pcrel,
2614 int sign,
2615 bfd_reloc_code_real_type other)
2617 if (other != NO_RELOC)
2619 reloc_howto_type *rel;
2621 if (size == 8)
2622 switch (other)
2624 case BFD_RELOC_X86_64_GOT32:
2625 return BFD_RELOC_X86_64_GOT64;
2626 break;
2627 case BFD_RELOC_X86_64_PLTOFF64:
2628 return BFD_RELOC_X86_64_PLTOFF64;
2629 break;
2630 case BFD_RELOC_X86_64_GOTPC32:
2631 other = BFD_RELOC_X86_64_GOTPC64;
2632 break;
2633 case BFD_RELOC_X86_64_GOTPCREL:
2634 other = BFD_RELOC_X86_64_GOTPCREL64;
2635 break;
2636 case BFD_RELOC_X86_64_TPOFF32:
2637 other = BFD_RELOC_X86_64_TPOFF64;
2638 break;
2639 case BFD_RELOC_X86_64_DTPOFF32:
2640 other = BFD_RELOC_X86_64_DTPOFF64;
2641 break;
2642 default:
2643 break;
2646 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
2647 if (size == 4 && (flag_code != CODE_64BIT || disallow_64bit_reloc))
2648 sign = -1;
2650 rel = bfd_reloc_type_lookup (stdoutput, other);
2651 if (!rel)
2652 as_bad (_("unknown relocation (%u)"), other);
2653 else if (size != bfd_get_reloc_size (rel))
2654 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
2655 bfd_get_reloc_size (rel),
2656 size);
2657 else if (pcrel && !rel->pc_relative)
2658 as_bad (_("non-pc-relative relocation for pc-relative field"));
2659 else if ((rel->complain_on_overflow == complain_overflow_signed
2660 && !sign)
2661 || (rel->complain_on_overflow == complain_overflow_unsigned
2662 && sign > 0))
2663 as_bad (_("relocated field and relocation type differ in signedness"));
2664 else
2665 return other;
2666 return NO_RELOC;
2669 if (pcrel)
2671 if (!sign)
2672 as_bad (_("there are no unsigned pc-relative relocations"));
2673 switch (size)
2675 case 1: return BFD_RELOC_8_PCREL;
2676 case 2: return BFD_RELOC_16_PCREL;
2677 case 4: return BFD_RELOC_32_PCREL;
2678 case 8: return BFD_RELOC_64_PCREL;
2680 as_bad (_("cannot do %u byte pc-relative relocation"), size);
2682 else
2684 if (sign > 0)
2685 switch (size)
2687 case 4: return BFD_RELOC_X86_64_32S;
2689 else
2690 switch (size)
2692 case 1: return BFD_RELOC_8;
2693 case 2: return BFD_RELOC_16;
2694 case 4: return BFD_RELOC_32;
2695 case 8: return BFD_RELOC_64;
2697 as_bad (_("cannot do %s %u byte relocation"),
2698 sign > 0 ? "signed" : "unsigned", size);
2701 return NO_RELOC;
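/* For example, reloc (4, 1, 1, NO_RELOC) yields BFD_RELOC_32_PCREL and
   reloc (4, 0, 1, NO_RELOC) yields BFD_RELOC_X86_64_32S, while an explicit
   "other" relocation is checked for size, pc-relativeness and signedness
   before being passed through.  */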
2704 /* Here we decide which fixups can be adjusted to make them relative to
2705 the beginning of the section instead of the symbol. Basically we need
2706 to make sure that the dynamic relocations are done correctly, so in
2707 some cases we force the original symbol to be used. */
2710 tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED)
2712 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2713 if (!IS_ELF)
2714 return 1;
2716 /* Don't adjust pc-relative references to merge sections in 64-bit
2717 mode. */
2718 if (use_rela_relocations
2719 && (S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_MERGE) != 0
2720 && fixP->fx_pcrel)
2721 return 0;
2723 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
2724 and changed later by validate_fix. */
2725 if (GOT_symbol && fixP->fx_subsy == GOT_symbol
2726 && fixP->fx_r_type == BFD_RELOC_32_PCREL)
2727 return 0;
2729 /* adjust_reloc_syms doesn't know about the GOT. */
2730 if (fixP->fx_r_type == BFD_RELOC_386_GOTOFF
2731 || fixP->fx_r_type == BFD_RELOC_386_PLT32
2732 || fixP->fx_r_type == BFD_RELOC_386_GOT32
2733 || fixP->fx_r_type == BFD_RELOC_386_TLS_GD
2734 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM
2735 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32
2736 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE_32
2737 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE
2738 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTIE
2739 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE_32
2740 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE
2741 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
2742 || fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
2743 || fixP->fx_r_type == BFD_RELOC_X86_64_PLT32
2744 || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
2745 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
2746 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD
2747 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD
2748 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32
2749 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF64
2750 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTTPOFF
2751 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
2752 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
2753 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
2754 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
2755 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
2756 || fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
2757 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
2758 return 0;
2759 #endif
2760 return 1;
2763 static int
2764 intel_float_operand (const char *mnemonic)
2766 /* Note that the value returned is meaningful only for opcodes with (memory)
2767 operands, hence the code here is free to improperly handle opcodes that
2768 have no operands (for better performance and smaller code). */
2770 if (mnemonic[0] != 'f')
2771 return 0; /* non-math */
2773 switch (mnemonic[1])
2775 /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
2776 the fs segment override prefix are not currently handled, because no
2777 call path can reach this code with an operand-less opcode.  */
2778 case 'i':
2779 return 2 /* integer op */;
2780 case 'l':
2781 if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
2782 return 3; /* fldcw/fldenv */
2783 break;
2784 case 'n':
2785 if (mnemonic[2] != 'o' /* fnop */)
2786 return 3; /* non-waiting control op */
2787 break;
2788 case 'r':
2789 if (mnemonic[2] == 's')
2790 return 3; /* frstor/frstpm */
2791 break;
2792 case 's':
2793 if (mnemonic[2] == 'a')
2794 return 3; /* fsave */
2795 if (mnemonic[2] == 't')
2797 switch (mnemonic[3])
2799 case 'c': /* fstcw */
2800 case 'd': /* fstdw */
2801 case 'e': /* fstenv */
2802 case 's': /* fsts[gw] */
2803 return 3;
2806 break;
2807 case 'x':
2808 if (mnemonic[2] == 'r' || mnemonic[2] == 's')
2809 return 0; /* fxsave/fxrstor are not really math ops */
2810 break;
2813 return 1;
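/* To summarize the cases above: 0 means not a float mnemonic (or
   fxsave/fxrstor), 1 an ordinary float operation, 2 a float operation on an
   integer operand (fi...), and 3 a float control/state operation such as
   fldcw or fnsave.  */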
2816 /* Build the VEX prefix. */
2818 static void
2819 build_vex_prefix (const insn_template *t)
2821 unsigned int register_specifier;
2822 unsigned int implied_prefix;
2823 unsigned int vector_length;
2825 /* Check register specifier. */
2826 if (i.vex.register_specifier)
2828 register_specifier = i.vex.register_specifier->reg_num;
2829 if ((i.vex.register_specifier->reg_flags & RegRex))
2830 register_specifier += 8;
2831 register_specifier = ~register_specifier & 0xf;
2833 else
2834 register_specifier = 0xf;
2836 /* Use the 2-byte VEX prefix by swapping the destination and source
2837 operands. */
2838 if (!i.swap_operand
2839 && i.operands == i.reg_operands
2840 && i.tm.opcode_modifier.vexopcode == VEX0F
2841 && i.tm.opcode_modifier.s
2842 && i.rex == REX_B)
2844 unsigned int xchg = i.operands - 1;
2845 union i386_op temp_op;
2846 i386_operand_type temp_type;
2848 temp_type = i.types[xchg];
2849 i.types[xchg] = i.types[0];
2850 i.types[0] = temp_type;
2851 temp_op = i.op[xchg];
2852 i.op[xchg] = i.op[0];
2853 i.op[0] = temp_op;
2855 gas_assert (i.rm.mode == 3);
2857 i.rex = REX_R;
2858 xchg = i.rm.regmem;
2859 i.rm.regmem = i.rm.reg;
2860 i.rm.reg = xchg;
2862 /* Use the next insn. */
2863 i.tm = t[1];
2866 if (i.tm.opcode_modifier.vex == VEXScalar)
2867 vector_length = avxscalar;
2868 else
2869 vector_length = i.tm.opcode_modifier.vex == VEX256 ? 1 : 0;
2871 switch ((i.tm.base_opcode >> 8) & 0xff)
2873 case 0:
2874 implied_prefix = 0;
2875 break;
2876 case DATA_PREFIX_OPCODE:
2877 implied_prefix = 1;
2878 break;
2879 case REPE_PREFIX_OPCODE:
2880 implied_prefix = 2;
2881 break;
2882 case REPNE_PREFIX_OPCODE:
2883 implied_prefix = 3;
2884 break;
2885 default:
2886 abort ();
2889 /* Use 2-byte VEX prefix if possible. */
2890 if (i.tm.opcode_modifier.vexopcode == VEX0F
2891 && i.tm.opcode_modifier.vexw != VEXW1
2892 && (i.rex & (REX_W | REX_X | REX_B)) == 0)
2894 /* 2-byte VEX prefix. */
2895 unsigned int r;
2897 i.vex.length = 2;
2898 i.vex.bytes[0] = 0xc5;
2900 /* Check the REX.R bit. */
2901 r = (i.rex & REX_R) ? 0 : 1;
2902 i.vex.bytes[1] = (r << 7
2903 | register_specifier << 3
2904 | vector_length << 2
2905 | implied_prefix);
2907 else
2909 /* 3-byte VEX prefix. */
2910 unsigned int m, w;
2912 i.vex.length = 3;
2914 switch (i.tm.opcode_modifier.vexopcode)
2916 case VEX0F:
2917 m = 0x1;
2918 i.vex.bytes[0] = 0xc4;
2919 break;
2920 case VEX0F38:
2921 m = 0x2;
2922 i.vex.bytes[0] = 0xc4;
2923 break;
2924 case VEX0F3A:
2925 m = 0x3;
2926 i.vex.bytes[0] = 0xc4;
2927 break;
2928 case XOP08:
2929 m = 0x8;
2930 i.vex.bytes[0] = 0x8f;
2931 break;
2932 case XOP09:
2933 m = 0x9;
2934 i.vex.bytes[0] = 0x8f;
2935 break;
2936 case XOP0A:
2937 m = 0xa;
2938 i.vex.bytes[0] = 0x8f;
2939 break;
2940 default:
2941 abort ();
2944 /* The high 3 bits of the second VEX byte are the one's complement
2945 of the RXB bits from REX. */
2946 i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
2948 /* Check the REX.W bit. */
2949 w = (i.rex & REX_W) ? 1 : 0;
2950 if (i.tm.opcode_modifier.vexw)
2952 if (w)
2953 abort ();
2955 if (i.tm.opcode_modifier.vexw == VEXW1)
2956 w = 1;
2959 i.vex.bytes[2] = (w << 7
2960 | register_specifier << 3
2961 | vector_length << 2
2962 | implied_prefix);
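/* For reference, the bytes assembled above follow the VEX layout: the 2-byte
   form is 0xc5 followed by R.vvvv.L.pp; the 3-byte form is 0xc4 (0x8f for
   XOP) followed by R.X.B.mmmmm and W.vvvv.L.pp, where R/X/B are the inverted
   REX bits, vvvv the inverted register specifier, L the vector length and pp
   the implied SIMD prefix (none/66/f3/f2).  */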
2966 static void
2967 process_immext (void)
2969 expressionS *exp;
2971 if (i.tm.cpu_flags.bitfield.cpusse3 && i.operands > 0)
2973 /* SSE3 Instructions have the fixed operands with an opcode
2974 suffix which is coded in the same place as an 8-bit immediate
2975 field would be. Here we check those operands and remove them
2976 afterwards. */
2977 unsigned int x;
2979 for (x = 0; x < i.operands; x++)
2980 if (i.op[x].regs->reg_num != x)
2981 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
2982 register_prefix, i.op[x].regs->reg_name, x + 1,
2983 i.tm.name);
2985 i.operands = 0;
2988 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
2989 which is coded in the same place as an 8-bit immediate field
2990 would be. Here we fake an 8-bit immediate operand from the
2991 opcode suffix stored in tm.extension_opcode.
2993 AVX instructions also use this encoding for some of their
2994 3-operand instructions. */
2996 gas_assert (i.imm_operands == 0
2997 && (i.operands <= 2
2998 || (i.tm.opcode_modifier.vex
2999 && i.operands <= 4)));
3001 exp = &im_expressions[i.imm_operands++];
3002 i.op[i.operands].imms = exp;
3003 i.types[i.operands] = imm8;
3004 i.operands++;
3005 exp->X_op = O_constant;
3006 exp->X_add_number = i.tm.extension_opcode;
3007 i.tm.extension_opcode = None;
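/* E.g. the 3DNow! insns are all encoded as 0f 0f /r ib, with ib selecting
   the actual operation; that byte lives in tm.extension_opcode and is
   re-injected above as a fake Imm8 operand.  */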
3011 static int
3012 check_hle (void)
3014 switch (i.tm.opcode_modifier.hleprefixok)
3016 default:
3017 abort ();
3018 case HLEPrefixNone:
3019 if (i.prefix[HLE_PREFIX] == XACQUIRE_PREFIX_OPCODE)
3020 as_bad (_("invalid instruction `%s' after `xacquire'"),
3021 i.tm.name);
3022 else
3023 as_bad (_("invalid instruction `%s' after `xrelease'"),
3024 i.tm.name);
3025 return 0;
3026 case HLEPrefixLock:
3027 if (i.prefix[LOCK_PREFIX])
3028 return 1;
3029 if (i.prefix[HLE_PREFIX] == XACQUIRE_PREFIX_OPCODE)
3030 as_bad (_("missing `lock' with `xacquire'"));
3031 else
3032 as_bad (_("missing `lock' with `xrelease'"));
3033 return 0;
3034 case HLEPrefixAny:
3035 return 1;
3036 case HLEPrefixRelease:
3037 if (i.prefix[HLE_PREFIX] != XRELEASE_PREFIX_OPCODE)
3039 as_bad (_("instruction `%s' after `xacquire' not allowed"),
3040 i.tm.name);
3041 return 0;
3043 if (i.mem_operands == 0
3044 || !operand_type_check (i.types[i.operands - 1], anymem))
3046 as_bad (_("memory destination needed for instruction `%s'"
3047 " after `xrelease'"), i.tm.name);
3048 return 0;
3050 return 1;
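/* Thus for an HLEPrefixLock insn, `xacquire' without an accompanying `lock'
   gets the "missing `lock' with `xacquire'" diagnostic, and an
   HLEPrefixRelease insn additionally requires a memory destination.  */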
3054 /* This is the guts of the machine-dependent assembler. LINE points to a
3055 machine dependent instruction. This function is supposed to emit
3056 the frags/bytes it assembles to. */
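/* Roughly: parse the mnemonic and operands, shrink immediates and
   displacements, pick a matching template, check prefixes and suffixes,
   set up ModRM/SIB/REX/VEX as needed, then hand everything to
   output_insn.  */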
3058 void
3059 md_assemble (char *line)
3061 unsigned int j;
3062 char mnemonic[MAX_MNEM_SIZE];
3063 const insn_template *t;
3065 /* Initialize globals. */
3066 memset (&i, '\0', sizeof (i));
3067 for (j = 0; j < MAX_OPERANDS; j++)
3068 i.reloc[j] = NO_RELOC;
3069 memset (disp_expressions, '\0', sizeof (disp_expressions));
3070 memset (im_expressions, '\0', sizeof (im_expressions));
3071 save_stack_p = save_stack;
3073 /* First parse an instruction mnemonic & call i386_operand for the operands.
3074 We assume that the scrubber has arranged it so that line[0] is the valid
3075 start of a (possibly prefixed) mnemonic. */
3077 line = parse_insn (line, mnemonic);
3078 if (line == NULL)
3079 return;
3081 line = parse_operands (line, mnemonic);
3082 this_operand = -1;
3083 if (line == NULL)
3084 return;
3086 /* Now we've parsed the mnemonic into a set of templates, and have the
3087 operands at hand. */
3089 /* All intel opcodes have reversed operands except for "bound" and
3090 "enter". We also don't reverse intersegment "jmp" and "call"
3091 instructions with 2 immediate operands so that the immediate segment
3092 precedes the offset, as it does when in AT&T mode. */
3093 if (intel_syntax
3094 && i.operands > 1
3095 && (strcmp (mnemonic, "bound") != 0)
3096 && (strcmp (mnemonic, "invlpga") != 0)
3097 && !(operand_type_check (i.types[0], imm)
3098 && operand_type_check (i.types[1], imm)))
3099 swap_operands ();
3101 /* The order of the immediates should be reversed
3102 for the two-immediate extrq and insertq instructions. */
3103 if (i.imm_operands == 2
3104 && (strcmp (mnemonic, "extrq") == 0
3105 || strcmp (mnemonic, "insertq") == 0))
3106 swap_2_operands (0, 1);
3108 if (i.imm_operands)
3109 optimize_imm ();
3111 /* Don't optimize displacement for movabs since it only takes 64bit
3112 displacement. */
3113 if (i.disp_operands
3114 && i.disp_encoding != disp_encoding_32bit
3115 && (flag_code != CODE_64BIT
3116 || strcmp (mnemonic, "movabs") != 0))
3117 optimize_disp ();
3119 /* Next, we find a template that matches the given insn,
3120 making sure the overlap of the given operands types is consistent
3121 with the template operand types. */
3123 if (!(t = match_template ()))
3124 return;
3126 if (sse_check != sse_check_none
3127 && !i.tm.opcode_modifier.noavx
3128 && (i.tm.cpu_flags.bitfield.cpusse
3129 || i.tm.cpu_flags.bitfield.cpusse2
3130 || i.tm.cpu_flags.bitfield.cpusse3
3131 || i.tm.cpu_flags.bitfield.cpussse3
3132 || i.tm.cpu_flags.bitfield.cpusse4_1
3133 || i.tm.cpu_flags.bitfield.cpusse4_2))
3135 (sse_check == sse_check_warning
3136 ? as_warn
3137 : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
3140 /* Zap movzx and movsx suffix. The suffix has been set from
3141 "word ptr" or "byte ptr" on the source operand in Intel syntax
3142 or extracted from mnemonic in AT&T syntax. But we'll use
3143 the destination register to choose the suffix for encoding. */
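/* Masking with ~9 groups base opcodes 0x0fb6/0x0fb7 (movzx) with
   0x0fbe/0x0fbf (movsx).  */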
3144 if ((i.tm.base_opcode & ~9) == 0x0fb6)
3146 /* In Intel syntax, there must be a suffix. In AT&T syntax, if
3147 there is no suffix, the default will be byte extension. */
3148 if (i.reg_operands != 2
3149 && !i.suffix
3150 && intel_syntax)
3151 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
3153 i.suffix = 0;
3156 if (i.tm.opcode_modifier.fwait)
3157 if (!add_prefix (FWAIT_OPCODE))
3158 return;
3160 /* Check for lock without a lockable instruction. Destination operand
3161 must be memory unless it is xchg (0x86). */
3162 if (i.prefix[LOCK_PREFIX]
3163 && (!i.tm.opcode_modifier.islockable
3164 || i.mem_operands == 0
3165 || (i.tm.base_opcode != 0x86
3166 && !operand_type_check (i.types[i.operands - 1], anymem))))
3168 as_bad (_("expecting lockable instruction after `lock'"));
3169 return;
3172 /* Check if HLE prefix is OK. */
3173 if (i.have_hle && !check_hle ())
3174 return;
3176 /* Check string instruction segment overrides. */
3177 if (i.tm.opcode_modifier.isstring && i.mem_operands != 0)
3179 if (!check_string ())
3180 return;
3181 i.disp_operands = 0;
3184 if (!process_suffix ())
3185 return;
3187 /* Update operand types. */
3188 for (j = 0; j < i.operands; j++)
3189 i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);
3191 /* Make still unresolved immediate matches conform to size of immediate
3192 given in i.suffix. */
3193 if (!finalize_imm ())
3194 return;
3196 if (i.types[0].bitfield.imm1)
3197 i.imm_operands = 0; /* kludge for shift insns. */
3199 /* We only need to check those implicit registers for instructions
3200 with 3 operands or less. */
3201 if (i.operands <= 3)
3202 for (j = 0; j < i.operands; j++)
3203 if (i.types[j].bitfield.inoutportreg
3204 || i.types[j].bitfield.shiftcount
3205 || i.types[j].bitfield.acc
3206 || i.types[j].bitfield.floatacc)
3207 i.reg_operands--;
3209 /* ImmExt should be processed after SSE2AVX. */
3210 if (!i.tm.opcode_modifier.sse2avx
3211 && i.tm.opcode_modifier.immext)
3212 process_immext ();
3214 /* For insns with operands there are more diddles to do to the opcode. */
3215 if (i.operands)
3217 if (!process_operands ())
3218 return;
3220 else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
3222 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
3223 as_warn (_("translating to `%sp'"), i.tm.name);
3226 if (i.tm.opcode_modifier.vex)
3227 build_vex_prefix (t);
3229 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
3230 instructions may define INT_OPCODE as well, so avoid this corner
3231 case for those instructions that use MODRM. */
3232 if (i.tm.base_opcode == INT_OPCODE
3233 && !i.tm.opcode_modifier.modrm
3234 && i.op[0].imms->X_add_number == 3)
3236 i.tm.base_opcode = INT3_OPCODE;
3237 i.imm_operands = 0;
3240 if ((i.tm.opcode_modifier.jump
3241 || i.tm.opcode_modifier.jumpbyte
3242 || i.tm.opcode_modifier.jumpdword)
3243 && i.op[0].disps->X_op == O_constant)
3245 /* Convert "jmp constant" (and "call constant") to a jump (call) to
3246 the absolute address given by the constant. Since ix86 jumps and
3247 calls are pc relative, we need to generate a reloc. */
3248 i.op[0].disps->X_add_symbol = &abs_symbol;
3249 i.op[0].disps->X_op = O_symbol;
3252 if (i.tm.opcode_modifier.rex64)
3253 i.rex |= REX_W;
3255 /* For 8 bit registers we need an empty rex prefix. Also if the
3256 instruction already has a prefix, we need to convert old
3257 registers to new ones. */
3259 if ((i.types[0].bitfield.reg8
3260 && (i.op[0].regs->reg_flags & RegRex64) != 0)
3261 || (i.types[1].bitfield.reg8
3262 && (i.op[1].regs->reg_flags & RegRex64) != 0)
3263 || ((i.types[0].bitfield.reg8
3264 || i.types[1].bitfield.reg8)
3265 && i.rex != 0))
3267 int x;
3269 i.rex |= REX_OPCODE;
3270 for (x = 0; x < 2; x++)
3272 /* Look for 8 bit operand that uses old registers. */
3273 if (i.types[x].bitfield.reg8
3274 && (i.op[x].regs->reg_flags & RegRex64) == 0)
3276 /* In case it is "hi" register, give up. */
3277 if (i.op[x].regs->reg_num > 3)
3278 as_bad (_("can't encode register '%s%s' in an "
3279 "instruction requiring REX prefix."),
3280 register_prefix, i.op[x].regs->reg_name);
3282 /* Otherwise it is equivalent to the extended register.
3283 Since the encoding doesn't change this is merely
3284 cosmetic cleanup for debug output. */
3286 i.op[x].regs = i.op[x].regs + 8;
3291 if (i.rex != 0)
3292 add_prefix (REX_OPCODE | i.rex);
3294 /* We are ready to output the insn. */
3295 output_insn ();
3298 static char *
3299 parse_insn (char *line, char *mnemonic)
3301 char *l = line;
3302 char *token_start = l;
3303 char *mnem_p;
3304 int supported;
3305 const insn_template *t;
3306 char *dot_p = NULL;
3308 /* Non-zero if we found a prefix only acceptable with string insns. */
3309 const char *expecting_string_instruction = NULL;
3311 while (1)
3313 mnem_p = mnemonic;
3314 while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
3316 if (*mnem_p == '.')
3317 dot_p = mnem_p;
3318 mnem_p++;
3319 if (mnem_p >= mnemonic + MAX_MNEM_SIZE)
3321 as_bad (_("no such instruction: `%s'"), token_start);
3322 return NULL;
3324 l++;
3326 if (!is_space_char (*l)
3327 && *l != END_OF_INSN
3328 && (intel_syntax
3329 || (*l != PREFIX_SEPARATOR
3330 && *l != ',')))
3332 as_bad (_("invalid character %s in mnemonic"),
3333 output_invalid (*l));
3334 return NULL;
3336 if (token_start == l)
3338 if (!intel_syntax && *l == PREFIX_SEPARATOR)
3339 as_bad (_("expecting prefix; got nothing"));
3340 else
3341 as_bad (_("expecting mnemonic; got nothing"));
3342 return NULL;
3345 /* Look up instruction (or prefix) via hash table. */
3346 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3348 if (*l != END_OF_INSN
3349 && (!is_space_char (*l) || l[1] != END_OF_INSN)
3350 && current_templates
3351 && current_templates->start->opcode_modifier.isprefix)
3353 if (!cpu_flags_check_cpu64 (current_templates->start->cpu_flags))
3355 as_bad ((flag_code != CODE_64BIT
3356 ? _("`%s' is only supported in 64-bit mode")
3357 : _("`%s' is not supported in 64-bit mode")),
3358 current_templates->start->name);
3359 return NULL;
3361 /* If we are in 16-bit mode, do not allow addr16 or data16.
3362 Similarly, in 32-bit mode, do not allow addr32 or data32. */
3363 if ((current_templates->start->opcode_modifier.size16
3364 || current_templates->start->opcode_modifier.size32)
3365 && flag_code != CODE_64BIT
3366 && (current_templates->start->opcode_modifier.size32
3367 ^ (flag_code == CODE_16BIT)))
3369 as_bad (_("redundant %s prefix"),
3370 current_templates->start->name);
3371 return NULL;
3373 /* Add prefix, checking for repeated prefixes. */
3374 switch (add_prefix (current_templates->start->base_opcode))
3376 case PREFIX_EXIST:
3377 return NULL;
3378 case PREFIX_REP:
3379 if (current_templates->start->cpu_flags.bitfield.cpuhle)
3380 i.have_hle = 1;
3381 else
3382 expecting_string_instruction = current_templates->start->name;
3383 break;
3384 default:
3385 break;
3387 /* Skip past PREFIX_SEPARATOR and reset token_start. */
3388 token_start = ++l;
3390 else
3391 break;
3394 if (!current_templates)
3396 /* Check if we should swap operands or force an 8-bit or 32-bit
3397 displacement in the encoding. */
3398 if (mnem_p - 2 == dot_p && dot_p[1] == 's')
3399 i.swap_operand = 1;
3400 else if (mnem_p - 3 == dot_p
3401 && dot_p[1] == 'd'
3402 && dot_p[2] == '8')
3403 i.disp_encoding = disp_encoding_8bit;
3404 else if (mnem_p - 4 == dot_p
3405 && dot_p[1] == 'd'
3406 && dot_p[2] == '3'
3407 && dot_p[3] == '2')
3408 i.disp_encoding = disp_encoding_32bit;
3409 else
3410 goto check_suffix;
3411 mnem_p = dot_p;
3412 *dot_p = '\0';
3413 current_templates = (const templates *) hash_find (op_hash, mnemonic);
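/* E.g. a trailing ".s" (as in "movapd.s %xmm1, %xmm2") asks for the
   swapped-operand encoding, while ".d8" / ".d32" force an 8-bit or 32-bit
   displacement encoding.  */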
3416 if (!current_templates)
3418 check_suffix:
3419 /* See if we can get a match by trimming off a suffix. */
3420 switch (mnem_p[-1])
3422 case WORD_MNEM_SUFFIX:
3423 if (intel_syntax && (intel_float_operand (mnemonic) & 2))
3424 i.suffix = SHORT_MNEM_SUFFIX;
3425 else
3426 case BYTE_MNEM_SUFFIX:
3427 case QWORD_MNEM_SUFFIX:
3428 i.suffix = mnem_p[-1];
3429 mnem_p[-1] = '\0';
3430 current_templates = (const templates *) hash_find (op_hash,
3431 mnemonic);
3432 break;
3433 case SHORT_MNEM_SUFFIX:
3434 case LONG_MNEM_SUFFIX:
3435 if (!intel_syntax)
3437 i.suffix = mnem_p[-1];
3438 mnem_p[-1] = '\0';
3439 current_templates = (const templates *) hash_find (op_hash,
3440 mnemonic);
3442 break;
3444 /* Intel Syntax. */
3445 case 'd':
3446 if (intel_syntax)
3448 if (intel_float_operand (mnemonic) == 1)
3449 i.suffix = SHORT_MNEM_SUFFIX;
3450 else
3451 i.suffix = LONG_MNEM_SUFFIX;
3452 mnem_p[-1] = '\0';
3453 current_templates = (const templates *) hash_find (op_hash,
3454 mnemonic);
3456 break;
3458 if (!current_templates)
3460 as_bad (_("no such instruction: `%s'"), token_start);
3461 return NULL;
3465 if (current_templates->start->opcode_modifier.jump
3466 || current_templates->start->opcode_modifier.jumpbyte)
3468 /* Check for a branch hint. We allow ",pt" and ",pn" for
3469 predict taken and predict not taken respectively.
3470 I'm not sure that branch hints actually do anything on loop
3471 and jcxz insns (JumpByte) for current Pentium4 chips. They
3472 may work in the future and it doesn't hurt to accept them
3473 now. */
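/* ",pt" is encoded as the DS segment prefix byte (0x3e) and ",pn" as the
   CS prefix byte (0x2e), the documented branch hint encodings.  */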
3474 if (l[0] == ',' && l[1] == 'p')
3476 if (l[2] == 't')
3478 if (!add_prefix (DS_PREFIX_OPCODE))
3479 return NULL;
3480 l += 3;
3482 else if (l[2] == 'n')
3484 if (!add_prefix (CS_PREFIX_OPCODE))
3485 return NULL;
3486 l += 3;
3490 /* Any other comma loses. */
3491 if (*l == ',')
3493 as_bad (_("invalid character %s in mnemonic"),
3494 output_invalid (*l));
3495 return NULL;
3498 /* Check if instruction is supported on specified architecture. */
3499 supported = 0;
3500 for (t = current_templates->start; t < current_templates->end; ++t)
3502 supported |= cpu_flags_match (t);
3503 if (supported == CPU_FLAGS_PERFECT_MATCH)
3504 goto skip;
3507 if (!(supported & CPU_FLAGS_64BIT_MATCH))
3509 as_bad (flag_code == CODE_64BIT
3510 ? _("`%s' is not supported in 64-bit mode")
3511 : _("`%s' is only supported in 64-bit mode"),
3512 current_templates->start->name);
3513 return NULL;
3515 if (supported != CPU_FLAGS_PERFECT_MATCH)
3517 as_bad (_("`%s' is not supported on `%s%s'"),
3518 current_templates->start->name,
3519 cpu_arch_name ? cpu_arch_name : default_arch,
3520 cpu_sub_arch_name ? cpu_sub_arch_name : "");
3521 return NULL;
3524 skip:
3525 if (!cpu_arch_flags.bitfield.cpui386
3526 && (flag_code != CODE_16BIT))
3528 as_warn (_("use .code16 to ensure correct addressing mode"));
3531 /* Check for rep/repne without a string instruction. */
3532 if (expecting_string_instruction)
3534 static templates override;
3536 for (t = current_templates->start; t < current_templates->end; ++t)
3537 if (t->opcode_modifier.isstring)
3538 break;
3539 if (t >= current_templates->end)
3541 as_bad (_("expecting string instruction after `%s'"),
3542 expecting_string_instruction);
3543 return NULL;
3545 for (override.start = t; t < current_templates->end; ++t)
3546 if (!t->opcode_modifier.isstring)
3547 break;
3548 override.end = t;
3549 current_templates = &override;
3552 return l;
3555 static char *
3556 parse_operands (char *l, const char *mnemonic)
3558 char *token_start;
3560 /* 1 if operand is pending after ','. */
3561 unsigned int expecting_operand = 0;
3563 /* Non-zero if operand parens not balanced. */
3564 unsigned int paren_not_balanced;
3566 while (*l != END_OF_INSN)
3568 /* Skip optional white space before operand. */
3569 if (is_space_char (*l))
3570 ++l;
3571 if (!is_operand_char (*l) && *l != END_OF_INSN)
3573 as_bad (_("invalid character %s before operand %d"),
3574 output_invalid (*l),
3575 i.operands + 1);
3576 return NULL;
3578 token_start = l; /* after white space */
3579 paren_not_balanced = 0;
3580 while (paren_not_balanced || *l != ',')
3582 if (*l == END_OF_INSN)
3584 if (paren_not_balanced)
3586 if (!intel_syntax)
3587 as_bad (_("unbalanced parenthesis in operand %d."),
3588 i.operands + 1);
3589 else
3590 as_bad (_("unbalanced brackets in operand %d."),
3591 i.operands + 1);
3592 return NULL;
3594 else
3595 break; /* we are done */
3597 else if (!is_operand_char (*l) && !is_space_char (*l))
3599 as_bad (_("invalid character %s in operand %d"),
3600 output_invalid (*l),
3601 i.operands + 1);
3602 return NULL;
3604 if (!intel_syntax)
3606 if (*l == '(')
3607 ++paren_not_balanced;
3608 if (*l == ')')
3609 --paren_not_balanced;
3611 else
3613 if (*l == '[')
3614 ++paren_not_balanced;
3615 if (*l == ']')
3616 --paren_not_balanced;
3618 l++;
3620 if (l != token_start)
3621 { /* Yes, we've read in another operand. */
3622 unsigned int operand_ok;
3623 this_operand = i.operands++;
3624 i.types[this_operand].bitfield.unspecified = 1;
3625 if (i.operands > MAX_OPERANDS)
3627 as_bad (_("spurious operands; (%d operands/instruction max)"),
3628 MAX_OPERANDS);
3629 return NULL;
3631 /* Now parse operand adding info to 'i' as we go along. */
3632 END_STRING_AND_SAVE (l);
3634 if (intel_syntax)
3635 operand_ok =
3636 i386_intel_operand (token_start,
3637 intel_float_operand (mnemonic));
3638 else
3639 operand_ok = i386_att_operand (token_start);
3641 RESTORE_END_STRING (l);
3642 if (!operand_ok)
3643 return NULL;
3645 else
3647 if (expecting_operand)
3649 expecting_operand_after_comma:
3650 as_bad (_("expecting operand after ','; got nothing"));
3651 return NULL;
3653 if (*l == ',')
3655 as_bad (_("expecting operand before ','; got nothing"));
3656 return NULL;
3660 /* Now *l must be either ',' or END_OF_INSN. */
3661 if (*l == ',')
3663 if (*++l == END_OF_INSN)
3665 /* Just skip it, if it's \n complain. */
3666 goto expecting_operand_after_comma;
3668 expecting_operand = 1;
3671 return l;
3674 static void
3675 swap_2_operands (int xchg1, int xchg2)
3677 union i386_op temp_op;
3678 i386_operand_type temp_type;
3679 enum bfd_reloc_code_real temp_reloc;
3681 temp_type = i.types[xchg2];
3682 i.types[xchg2] = i.types[xchg1];
3683 i.types[xchg1] = temp_type;
3684 temp_op = i.op[xchg2];
3685 i.op[xchg2] = i.op[xchg1];
3686 i.op[xchg1] = temp_op;
3687 temp_reloc = i.reloc[xchg2];
3688 i.reloc[xchg2] = i.reloc[xchg1];
3689 i.reloc[xchg1] = temp_reloc;
3692 static void
3693 swap_operands (void)
3695 switch (i.operands)
3697 case 5:
3698 case 4:
3699 swap_2_operands (1, i.operands - 2);
3700 case 3:
3701 case 2:
3702 swap_2_operands (0, i.operands - 1);
3703 break;
3704 default:
3705 abort ();
3708 if (i.mem_operands == 2)
3710 const seg_entry *temp_seg;
3711 temp_seg = i.seg[0];
3712 i.seg[0] = i.seg[1];
3713 i.seg[1] = temp_seg;
3717 /* Try to ensure constant immediates are represented in the smallest
3718 opcode possible. */
3719 static void
3720 optimize_imm (void)
3722 char guess_suffix = 0;
3723 int op;
3725 if (i.suffix)
3726 guess_suffix = i.suffix;
3727 else if (i.reg_operands)
3729 /* Figure out a suffix from the last register operand specified.
3730 We can't do this properly yet, i.e. excluding InOutPortReg,
3731 but the following works for instructions with immediates.
3732 In any case, we can't set i.suffix yet. */
3733 for (op = i.operands; --op >= 0;)
3734 if (i.types[op].bitfield.reg8)
3736 guess_suffix = BYTE_MNEM_SUFFIX;
3737 break;
3739 else if (i.types[op].bitfield.reg16)
3741 guess_suffix = WORD_MNEM_SUFFIX;
3742 break;
3744 else if (i.types[op].bitfield.reg32)
3746 guess_suffix = LONG_MNEM_SUFFIX;
3747 break;
3749 else if (i.types[op].bitfield.reg64)
3751 guess_suffix = QWORD_MNEM_SUFFIX;
3752 break;
3755 else if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
3756 guess_suffix = WORD_MNEM_SUFFIX;
3758 for (op = i.operands; --op >= 0;)
3759 if (operand_type_check (i.types[op], imm))
3761 switch (i.op[op].imms->X_op)
3763 case O_constant:
3764 /* If a suffix is given, this operand may be shortened. */
3765 switch (guess_suffix)
3767 case LONG_MNEM_SUFFIX:
3768 i.types[op].bitfield.imm32 = 1;
3769 i.types[op].bitfield.imm64 = 1;
3770 break;
3771 case WORD_MNEM_SUFFIX:
3772 i.types[op].bitfield.imm16 = 1;
3773 i.types[op].bitfield.imm32 = 1;
3774 i.types[op].bitfield.imm32s = 1;
3775 i.types[op].bitfield.imm64 = 1;
3776 break;
3777 case BYTE_MNEM_SUFFIX:
3778 i.types[op].bitfield.imm8 = 1;
3779 i.types[op].bitfield.imm8s = 1;
3780 i.types[op].bitfield.imm16 = 1;
3781 i.types[op].bitfield.imm32 = 1;
3782 i.types[op].bitfield.imm32s = 1;
3783 i.types[op].bitfield.imm64 = 1;
3784 break;
3787 /* If this operand is at most 16 bits, convert it
3788 to a signed 16 bit number before trying to see
3789 whether it will fit in an even smaller size.
3790 This allows a 16-bit operand such as $0xffe0 to
3791 be recognised as within Imm8S range. */
3792 if ((i.types[op].bitfield.imm16)
3793 && (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
3795 i.op[op].imms->X_add_number =
3796 (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
3798 if ((i.types[op].bitfield.imm32)
3799 && ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1))
3800 == 0))
3802 i.op[op].imms->X_add_number = ((i.op[op].imms->X_add_number
3803 ^ ((offsetT) 1 << 31))
3804 - ((offsetT) 1 << 31));
3806 i.types[op]
3807 = operand_type_or (i.types[op],
3808 smallest_imm_type (i.op[op].imms->X_add_number));
3810 /* We must avoid matching of Imm32 templates when 64bit
3811 only immediate is available. */
3812 if (guess_suffix == QWORD_MNEM_SUFFIX)
3813 i.types[op].bitfield.imm32 = 0;
3814 break;
3816 case O_absent:
3817 case O_register:
3818 abort ();
3820 /* Symbols and expressions. */
3821 default:
3822 /* Convert symbolic operand to proper sizes for matching, but don't
3823 prevent matching a set of insns that only supports sizes other
3824 than those matching the insn suffix. */
3826 i386_operand_type mask, allowed;
3827 const insn_template *t;
3829 operand_type_set (&mask, 0);
3830 operand_type_set (&allowed, 0);
3832 for (t = current_templates->start;
3833 t < current_templates->end;
3834 ++t)
3835 allowed = operand_type_or (allowed,
3836 t->operand_types[op]);
3837 switch (guess_suffix)
3839 case QWORD_MNEM_SUFFIX:
3840 mask.bitfield.imm64 = 1;
3841 mask.bitfield.imm32s = 1;
3842 break;
3843 case LONG_MNEM_SUFFIX:
3844 mask.bitfield.imm32 = 1;
3845 break;
3846 case WORD_MNEM_SUFFIX:
3847 mask.bitfield.imm16 = 1;
3848 break;
3849 case BYTE_MNEM_SUFFIX:
3850 mask.bitfield.imm8 = 1;
3851 break;
3852 default:
3853 break;
3855 allowed = operand_type_and (mask, allowed);
3856 if (!operand_type_all_zero (&allowed))
3857 i.types[op] = operand_type_and (i.types[op], mask);
3859 break;
3864 /* Try to use the smallest displacement type too. */
3865 static void
3866 optimize_disp (void)
3868 int op;
3870 for (op = i.operands; --op >= 0;)
3871 if (operand_type_check (i.types[op], disp))
3873 if (i.op[op].disps->X_op == O_constant)
3875 offsetT op_disp = i.op[op].disps->X_add_number;
3877 if (i.types[op].bitfield.disp16
3878 && (op_disp & ~(offsetT) 0xffff) == 0)
3880 /* If this operand is at most 16 bits, convert
3881 to a signed 16 bit number and don't use 64bit
3882 displacement. */
3883 op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000);
3884 i.types[op].bitfield.disp64 = 0;
3886 if (i.types[op].bitfield.disp32
3887 && (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
3889 /* If this operand is at most 32 bits, convert
3890 to a signed 32 bit number and don't use 64bit
3891 displacement. */
3892 op_disp &= (((offsetT) 2 << 31) - 1);
3893 op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
3894 i.types[op].bitfield.disp64 = 0;
3896 if (!op_disp && i.types[op].bitfield.baseindex)
3898 i.types[op].bitfield.disp8 = 0;
3899 i.types[op].bitfield.disp16 = 0;
3900 i.types[op].bitfield.disp32 = 0;
3901 i.types[op].bitfield.disp32s = 0;
3902 i.types[op].bitfield.disp64 = 0;
3903 i.op[op].disps = 0;
3904 i.disp_operands--;
3906 else if (flag_code == CODE_64BIT)
3908 if (fits_in_signed_long (op_disp))
3910 i.types[op].bitfield.disp64 = 0;
3911 i.types[op].bitfield.disp32s = 1;
3913 if (i.prefix[ADDR_PREFIX]
3914 && fits_in_unsigned_long (op_disp))
3915 i.types[op].bitfield.disp32 = 1;
3917 if ((i.types[op].bitfield.disp32
3918 || i.types[op].bitfield.disp32s
3919 || i.types[op].bitfield.disp16)
3920 && fits_in_signed_byte (op_disp))
3921 i.types[op].bitfield.disp8 = 1;
3923 else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
3924 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL)
3926 fix_new_exp (frag_now, frag_more (0) - frag_now->fr_literal, 0,
3927 i.op[op].disps, 0, i.reloc[op]);
3928 i.types[op].bitfield.disp8 = 0;
3929 i.types[op].bitfield.disp16 = 0;
3930 i.types[op].bitfield.disp32 = 0;
3931 i.types[op].bitfield.disp32s = 0;
3932 i.types[op].bitfield.disp64 = 0;
3934 else
3935 /* We only support 64bit displacement on constants. */
3936 i.types[op].bitfield.disp64 = 0;
3940 /* Check if operands are valid for the instruction. */
3942 static int
3943 check_VecOperands (const insn_template *t)
3945 /* Without VSIB byte, we can't have a vector register for index. */
3946 if (!t->opcode_modifier.vecsib
3947 && i.index_reg
3948 && (i.index_reg->reg_type.bitfield.regxmm
3949 || i.index_reg->reg_type.bitfield.regymm))
3951 i.error = unsupported_vector_index_register;
3952 return 1;
3955 /* For VSIB byte, we need a vector register for index and no PC
3956 relative addressing is allowed. */
3957 if (t->opcode_modifier.vecsib
3958 && (!i.index_reg
3959 || !((t->opcode_modifier.vecsib == VecSIB128
3960 && i.index_reg->reg_type.bitfield.regxmm)
3961 || (t->opcode_modifier.vecsib == VecSIB256
3962 && i.index_reg->reg_type.bitfield.regymm))
3963 || (i.base_reg && i.base_reg->reg_num == RegRip)))
3965 i.error = invalid_vsib_address;
3966 return 1;
3969 return 0;
3972 /* Check if operands are valid for the instruction. Update VEX
3973 operand types. */
3975 static int
3976 VEX_check_operands (const insn_template *t)
3978 if (!t->opcode_modifier.vex)
3979 return 0;
3981 /* Only check VEX_Imm4, which must be the first operand. */
3982 if (t->operand_types[0].bitfield.vec_imm4)
3984 if (i.op[0].imms->X_op != O_constant
3985 || !fits_in_imm4 (i.op[0].imms->X_add_number))
3987 i.error = bad_imm4;
3988 return 1;
3991 /* Turn off Imm8 so that update_imm won't complain. */
3992 i.types[0] = vec_imm4;
3995 return 0;
3998 static const insn_template *
3999 match_template (void)
4001 /* Points to template once we've found it. */
4002 const insn_template *t;
4003 i386_operand_type overlap0, overlap1, overlap2, overlap3;
4004 i386_operand_type overlap4;
4005 unsigned int found_reverse_match;
4006 i386_opcode_modifier suffix_check;
4007 i386_operand_type operand_types [MAX_OPERANDS];
4008 int addr_prefix_disp;
4009 unsigned int j;
4010 unsigned int found_cpu_match;
4011 unsigned int check_register;
4013 #if MAX_OPERANDS != 5
4014 # error "MAX_OPERANDS must be 5."
4015 #endif
4017 found_reverse_match = 0;
4018 addr_prefix_disp = -1;
4020 memset (&suffix_check, 0, sizeof (suffix_check));
4021 if (i.suffix == BYTE_MNEM_SUFFIX)
4022 suffix_check.no_bsuf = 1;
4023 else if (i.suffix == WORD_MNEM_SUFFIX)
4024 suffix_check.no_wsuf = 1;
4025 else if (i.suffix == SHORT_MNEM_SUFFIX)
4026 suffix_check.no_ssuf = 1;
4027 else if (i.suffix == LONG_MNEM_SUFFIX)
4028 suffix_check.no_lsuf = 1;
4029 else if (i.suffix == QWORD_MNEM_SUFFIX)
4030 suffix_check.no_qsuf = 1;
4031 else if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
4032 suffix_check.no_ldsuf = 1;
4034 /* Must have right number of operands. */
4035 i.error = number_of_operands_mismatch;
4037 for (t = current_templates->start; t < current_templates->end; t++)
4039 addr_prefix_disp = -1;
4041 if (i.operands != t->operands)
4042 continue;
4044 /* Check processor support. */
4045 i.error = unsupported;
4046 found_cpu_match = (cpu_flags_match (t)
4047 == CPU_FLAGS_PERFECT_MATCH);
4048 if (!found_cpu_match)
4049 continue;
4051 /* Check old gcc support. */
4052 i.error = old_gcc_only;
4053 if (!old_gcc && t->opcode_modifier.oldgcc)
4054 continue;
4056 /* Check AT&T mnemonic. */
4057 i.error = unsupported_with_intel_mnemonic;
4058 if (intel_mnemonic && t->opcode_modifier.attmnemonic)
4059 continue;
4061 /* Check AT&T/Intel syntax. */
4062 i.error = unsupported_syntax;
4063 if ((intel_syntax && t->opcode_modifier.attsyntax)
4064 || (!intel_syntax && t->opcode_modifier.intelsyntax))
4065 continue;
4067 /* Check the suffix, except for some instructions in intel mode. */
4068 i.error = invalid_instruction_suffix;
4069 if ((!intel_syntax || !t->opcode_modifier.ignoresize)
4070 && ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
4071 || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
4072 || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
4073 || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
4074 || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
4075 || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf)))
4076 continue;
4078 if (!operand_size_match (t))
4079 continue;
4081 for (j = 0; j < MAX_OPERANDS; j++)
4082 operand_types[j] = t->operand_types[j];
4084 /* In general, don't allow 64-bit operands in 32-bit mode. */
4085 if (i.suffix == QWORD_MNEM_SUFFIX
4086 && flag_code != CODE_64BIT
4087 && (intel_syntax
4088 ? (!t->opcode_modifier.ignoresize
4089 && !intel_float_operand (t->name))
4090 : intel_float_operand (t->name) != 2)
4091 && ((!operand_types[0].bitfield.regmmx
4092 && !operand_types[0].bitfield.regxmm
4093 && !operand_types[0].bitfield.regymm)
4094 || (!operand_types[t->operands > 1].bitfield.regmmx
4095 && !!operand_types[t->operands > 1].bitfield.regxmm
4096 && !!operand_types[t->operands > 1].bitfield.regymm))
4097 && (t->base_opcode != 0x0fc7
4098 || t->extension_opcode != 1 /* cmpxchg8b */))
4099 continue;
4101 /* In general, don't allow 32-bit operands on pre-386. */
4102 else if (i.suffix == LONG_MNEM_SUFFIX
4103 && !cpu_arch_flags.bitfield.cpui386
4104 && (intel_syntax
4105 ? (!t->opcode_modifier.ignoresize
4106 && !intel_float_operand (t->name))
4107 : intel_float_operand (t->name) != 2)
4108 && ((!operand_types[0].bitfield.regmmx
4109 && !operand_types[0].bitfield.regxmm)
4110 || (!operand_types[t->operands > 1].bitfield.regmmx
4111 && !!operand_types[t->operands > 1].bitfield.regxmm)))
4112 continue;
4114 /* Do not verify operands when there are none. */
4115 else
4117 if (!t->operands)
4118 /* We've found a match; break out of loop. */
4119 break;
4122 /* An address size prefix turns a Disp64/Disp32/Disp16 operand
4123 into a Disp32/Disp16/Disp32 operand, respectively. */
4124 if (i.prefix[ADDR_PREFIX] != 0)
4126 /* There should be only one Disp operand. */
4127 switch (flag_code)
4129 case CODE_16BIT:
4130 for (j = 0; j < MAX_OPERANDS; j++)
4132 if (operand_types[j].bitfield.disp16)
4134 addr_prefix_disp = j;
4135 operand_types[j].bitfield.disp32 = 1;
4136 operand_types[j].bitfield.disp16 = 0;
4137 break;
4140 break;
4141 case CODE_32BIT:
4142 for (j = 0; j < MAX_OPERANDS; j++)
4144 if (operand_types[j].bitfield.disp32)
4146 addr_prefix_disp = j;
4147 operand_types[j].bitfield.disp32 = 0;
4148 operand_types[j].bitfield.disp16 = 1;
4149 break;
4152 break;
4153 case CODE_64BIT:
4154 for (j = 0; j < MAX_OPERANDS; j++)
4156 if (operand_types[j].bitfield.disp64)
4158 addr_prefix_disp = j;
4159 operand_types[j].bitfield.disp64 = 0;
4160 operand_types[j].bitfield.disp32 = 1;
4161 break;
4164 break;
4168 /* We check register size if needed. */
4169 check_register = t->opcode_modifier.checkregsize;
4170 overlap0 = operand_type_and (i.types[0], operand_types[0]);
4171 switch (t->operands)
4173 case 1:
4174 if (!operand_type_match (overlap0, i.types[0]))
4175 continue;
4176 break;
4177 case 2:
4178 /* xchg %eax, %eax is a special case. It is an alias for nop
4179 only in 32bit mode and we can use opcode 0x90. In 64bit
4180 mode, we can't use 0x90 for xchg %eax, %eax since it should
4181 zero-extend %eax to %rax. */
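/* (A 64-bit "xchg %eax,%eax" therefore falls through to the ModRM form
   of xchg, opcode 0x87, which does zero-extend.)  */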
4182 if (flag_code == CODE_64BIT
4183 && t->base_opcode == 0x90
4184 && operand_type_equal (&i.types [0], &acc32)
4185 && operand_type_equal (&i.types [1], &acc32))
4186 continue;
4187 if (i.swap_operand)
4189 /* If we swap operand in encoding, we either match
4190 the next one or reverse direction of operands. */
4191 if (t->opcode_modifier.s)
4192 continue;
4193 else if (t->opcode_modifier.d)
4194 goto check_reverse;
4197 case 3:
4198 /* If we swap operand in encoding, we match the next one. */
4199 if (i.swap_operand && t->opcode_modifier.s)
4200 continue;
4201 case 4:
4202 case 5:
4203 overlap1 = operand_type_and (i.types[1], operand_types[1]);
4204 if (!operand_type_match (overlap0, i.types[0])
4205 || !operand_type_match (overlap1, i.types[1])
4206 || (check_register
4207 && !operand_type_register_match (overlap0, i.types[0],
4208 operand_types[0],
4209 overlap1, i.types[1],
4210 operand_types[1])))
4212 /* Check if other direction is valid ... */
4213 if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
4214 continue;
4216 check_reverse:
4217 /* Try reversing direction of operands. */
4218 overlap0 = operand_type_and (i.types[0], operand_types[1]);
4219 overlap1 = operand_type_and (i.types[1], operand_types[0]);
4220 if (!operand_type_match (overlap0, i.types[0])
4221 || !operand_type_match (overlap1, i.types[1])
4222 || (check_register
4223 && !operand_type_register_match (overlap0,
4224 i.types[0],
4225 operand_types[1],
4226 overlap1,
4227 i.types[1],
4228 operand_types[0])))
4230 /* Does not match either direction. */
4231 continue;
4233 /* found_reverse_match holds which of D or FloatDR
4234 we've found. */
4235 if (t->opcode_modifier.d)
4236 found_reverse_match = Opcode_D;
4237 else if (t->opcode_modifier.floatd)
4238 found_reverse_match = Opcode_FloatD;
4239 else
4240 found_reverse_match = 0;
4241 if (t->opcode_modifier.floatr)
4242 found_reverse_match |= Opcode_FloatR;
4244 else
4246 /* Found a forward 2 operand match here. */
4247 switch (t->operands)
4249 case 5:
4250 overlap4 = operand_type_and (i.types[4],
4251 operand_types[4]);
4252 case 4:
4253 overlap3 = operand_type_and (i.types[3],
4254 operand_types[3]);
4255 case 3:
4256 overlap2 = operand_type_and (i.types[2],
4257 operand_types[2]);
4258 break;
4261 switch (t->operands)
4263 case 5:
4264 if (!operand_type_match (overlap4, i.types[4])
4265 || !operand_type_register_match (overlap3,
4266 i.types[3],
4267 operand_types[3],
4268 overlap4,
4269 i.types[4],
4270 operand_types[4]))
4271 continue;
4272 case 4:
4273 if (!operand_type_match (overlap3, i.types[3])
4274 || (check_register
4275 && !operand_type_register_match (overlap2,
4276 i.types[2],
4277 operand_types[2],
4278 overlap3,
4279 i.types[3],
4280 operand_types[3])))
4281 continue;
4282 case 3:
4283 /* Here we make use of the fact that there are no
4284 reverse match 3 operand instructions, and all 3
4285 operand instructions only need to be checked for
4286 register consistency between operands 2 and 3. */
4287 if (!operand_type_match (overlap2, i.types[2])
4288 || (check_register
4289 && !operand_type_register_match (overlap1,
4290 i.types[1],
4291 operand_types[1],
4292 overlap2,
4293 i.types[2],
4294 operand_types[2])))
4295 continue;
4296 break;
4299 /* Found either forward/reverse 2, 3 or 4 operand match here:
4300 slip through to break. */
4302 if (!found_cpu_match)
4304 found_reverse_match = 0;
4305 continue;
4308 /* Check if vector operands are valid. */
4309 if (check_VecOperands (t))
4310 continue;
4312 /* Check if VEX operands are valid. */
4313 if (VEX_check_operands (t))
4314 continue;
4316 /* We've found a match; break out of loop. */
4317 break;
4320 if (t == current_templates->end)
4322 /* We found no match. */
4323 const char *err_msg;
4324 switch (i.error)
4326 default:
4327 abort ();
4328 case operand_size_mismatch:
4329 err_msg = _("operand size mismatch");
4330 break;
4331 case operand_type_mismatch:
4332 err_msg = _("operand type mismatch");
4333 break;
4334 case register_type_mismatch:
4335 err_msg = _("register type mismatch");
4336 break;
4337 case number_of_operands_mismatch:
4338 err_msg = _("number of operands mismatch");
4339 break;
4340 case invalid_instruction_suffix:
4341 err_msg = _("invalid instruction suffix");
4342 break;
4343 case bad_imm4:
4344 err_msg = _("Imm4 isn't the first operand");
4345 break;
4346 case old_gcc_only:
4347 err_msg = _("only supported with old gcc");
4348 break;
4349 case unsupported_with_intel_mnemonic:
4350 err_msg = _("unsupported with Intel mnemonic");
4351 break;
4352 case unsupported_syntax:
4353 err_msg = _("unsupported syntax");
4354 break;
4355 case unsupported:
4356 err_msg = _("unsupported");
4357 break;
4358 case invalid_vsib_address:
4359 err_msg = _("invalid VSIB address");
4360 break;
4361 case unsupported_vector_index_register:
4362 err_msg = _("unsupported vector index register");
4363 break;
4365 as_bad (_("%s for `%s'"), err_msg,
4366 current_templates->start->name);
4367 return NULL;
4370 if (!quiet_warnings)
4372 if (!intel_syntax
4373 && (i.types[0].bitfield.jumpabsolute
4374 != operand_types[0].bitfield.jumpabsolute))
4376 as_warn (_("indirect %s without `*'"), t->name);
4379 if (t->opcode_modifier.isprefix
4380 && t->opcode_modifier.ignoresize)
4382 /* Warn them that a data or address size prefix doesn't
4383 affect assembly of the next line of code. */
4384 as_warn (_("stand-alone `%s' prefix"), t->name);
4388 /* Copy the template we found. */
4389 i.tm = *t;
4391 if (addr_prefix_disp != -1)
4392 i.tm.operand_types[addr_prefix_disp]
4393 = operand_types[addr_prefix_disp];
4395 if (found_reverse_match)
4397 /* If we found a reverse match we must alter the opcode
4398 direction bit. found_reverse_match holds bits to change
4399 (different for int & float insns). */
4401 i.tm.base_opcode ^= found_reverse_match;
4403 i.tm.operand_types[0] = operand_types[1];
4404 i.tm.operand_types[1] = operand_types[0];
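/* E.g. when only the reversed form of an integer template matches the
   AT&T operand order, XORing in Opcode_D flips the opcode's direction
   bit (0x89 "mov reg->r/m" <-> 0x8b "mov r/m->reg"); Opcode_FloatD does
   the same for the x87 register-register forms.  */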
4407 return t;
4410 static int
4411 check_string (void)
4413 int mem_op = operand_type_check (i.types[0], anymem) ? 0 : 1;
4414 if (i.tm.operand_types[mem_op].bitfield.esseg)
4416 if (i.seg[0] != NULL && i.seg[0] != &es)
4418 as_bad (_("`%s' operand %d must use `%ses' segment"),
4419 i.tm.name,
4420 mem_op + 1,
4421 register_prefix);
4422 return 0;
4424 /* There's only ever one segment override allowed per instruction.
4425 This instruction possibly has a legal segment override on the
4426 second operand, so copy the segment to where non-string
4427 instructions store it, allowing common code. */
4428 i.seg[0] = i.seg[1];
4430 else if (i.tm.operand_types[mem_op + 1].bitfield.esseg)
4432 if (i.seg[1] != NULL && i.seg[1] != &es)
4434 as_bad (_("`%s' operand %d must use `%ses' segment"),
4435 i.tm.name,
4436 mem_op + 2,
4437 register_prefix);
4438 return 0;
4441 return 1;
4444 static int
4445 process_suffix (void)
4447 /* If matched instruction specifies an explicit instruction mnemonic
4448 suffix, use it. */
4449 if (i.tm.opcode_modifier.size16)
4450 i.suffix = WORD_MNEM_SUFFIX;
4451 else if (i.tm.opcode_modifier.size32)
4452 i.suffix = LONG_MNEM_SUFFIX;
4453 else if (i.tm.opcode_modifier.size64)
4454 i.suffix = QWORD_MNEM_SUFFIX;
4455 else if (i.reg_operands)
4457 /* If there's no instruction mnemonic suffix we try to invent one
4458 based on register operands. */
4459 if (!i.suffix)
4461 /* We take i.suffix from the last register operand specified.
4462 Destination register type is more significant than source
4463 register type. crc32 in SSE4.2 prefers source register
4464 type. */
4465 if (i.tm.base_opcode == 0xf20f38f1)
4467 if (i.types[0].bitfield.reg16)
4468 i.suffix = WORD_MNEM_SUFFIX;
4469 else if (i.types[0].bitfield.reg32)
4470 i.suffix = LONG_MNEM_SUFFIX;
4471 else if (i.types[0].bitfield.reg64)
4472 i.suffix = QWORD_MNEM_SUFFIX;
4474 else if (i.tm.base_opcode == 0xf20f38f0)
4476 if (i.types[0].bitfield.reg8)
4477 i.suffix = BYTE_MNEM_SUFFIX;
4480 if (!i.suffix)
4482 int op;
4484 if (i.tm.base_opcode == 0xf20f38f1
4485 || i.tm.base_opcode == 0xf20f38f0)
4487 /* We have to know the operand size for crc32. */
4488 as_bad (_("ambiguous memory operand size for `%s'"),
4489 i.tm.name);
4490 return 0;
4493 for (op = i.operands; --op >= 0;)
4494 if (!i.tm.operand_types[op].bitfield.inoutportreg)
4496 if (i.types[op].bitfield.reg8)
4498 i.suffix = BYTE_MNEM_SUFFIX;
4499 break;
4501 else if (i.types[op].bitfield.reg16)
4503 i.suffix = WORD_MNEM_SUFFIX;
4504 break;
4506 else if (i.types[op].bitfield.reg32)
4508 i.suffix = LONG_MNEM_SUFFIX;
4509 break;
4511 else if (i.types[op].bitfield.reg64)
4513 i.suffix = QWORD_MNEM_SUFFIX;
4514 break;
4519 else if (i.suffix == BYTE_MNEM_SUFFIX)
4521 if (intel_syntax
4522 && i.tm.opcode_modifier.ignoresize
4523 && i.tm.opcode_modifier.no_bsuf)
4524 i.suffix = 0;
4525 else if (!check_byte_reg ())
4526 return 0;
4528 else if (i.suffix == LONG_MNEM_SUFFIX)
4530 if (intel_syntax
4531 && i.tm.opcode_modifier.ignoresize
4532 && i.tm.opcode_modifier.no_lsuf)
4533 i.suffix = 0;
4534 else if (!check_long_reg ())
4535 return 0;
4537 else if (i.suffix == QWORD_MNEM_SUFFIX)
4539 if (intel_syntax
4540 && i.tm.opcode_modifier.ignoresize
4541 && i.tm.opcode_modifier.no_qsuf)
4542 i.suffix = 0;
4543 else if (!check_qword_reg ())
4544 return 0;
4546 else if (i.suffix == WORD_MNEM_SUFFIX)
4548 if (intel_syntax
4549 && i.tm.opcode_modifier.ignoresize
4550 && i.tm.opcode_modifier.no_wsuf)
4551 i.suffix = 0;
4552 else if (!check_word_reg ())
4553 return 0;
4555 else if (i.suffix == XMMWORD_MNEM_SUFFIX
4556 || i.suffix == YMMWORD_MNEM_SUFFIX)
4558 /* Skip if the instruction has x/y suffix. match_template
4559 should check if it is a valid suffix. */
4561 else if (intel_syntax && i.tm.opcode_modifier.ignoresize)
4562 /* Do nothing if the instruction is going to ignore the prefix. */
4564 else
4565 abort ();
4567 else if (i.tm.opcode_modifier.defaultsize
4568 && !i.suffix
4569 /* exclude fldenv/frstor/fsave/fstenv */
4570 && i.tm.opcode_modifier.no_ssuf)
4572 i.suffix = stackop_size;
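/* stackop_size is only non-zero under .code16gcc, where it forces e.g.
   a bare "push" or "call" to use 32-bit stack operations.  */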
4574 else if (intel_syntax
4575 && !i.suffix
4576 && (i.tm.operand_types[0].bitfield.jumpabsolute
4577 || i.tm.opcode_modifier.jumpbyte
4578 || i.tm.opcode_modifier.jumpintersegment
4579 || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
4580 && i.tm.extension_opcode <= 3)))
4582 switch (flag_code)
4584 case CODE_64BIT:
4585 if (!i.tm.opcode_modifier.no_qsuf)
4587 i.suffix = QWORD_MNEM_SUFFIX;
4588 break;
4590 case CODE_32BIT:
4591 if (!i.tm.opcode_modifier.no_lsuf)
4592 i.suffix = LONG_MNEM_SUFFIX;
4593 break;
4594 case CODE_16BIT:
4595 if (!i.tm.opcode_modifier.no_wsuf)
4596 i.suffix = WORD_MNEM_SUFFIX;
4597 break;
4601 if (!i.suffix)
4603 if (!intel_syntax)
4605 if (i.tm.opcode_modifier.w)
4607 as_bad (_("no instruction mnemonic suffix given and "
4608 "no register operands; can't size instruction"));
4609 return 0;
4612 else
4614 unsigned int suffixes;
4616 suffixes = !i.tm.opcode_modifier.no_bsuf;
4617 if (!i.tm.opcode_modifier.no_wsuf)
4618 suffixes |= 1 << 1;
4619 if (!i.tm.opcode_modifier.no_lsuf)
4620 suffixes |= 1 << 2;
4621 if (!i.tm.opcode_modifier.no_ldsuf)
4622 suffixes |= 1 << 3;
4623 if (!i.tm.opcode_modifier.no_ssuf)
4624 suffixes |= 1 << 4;
4625 if (!i.tm.opcode_modifier.no_qsuf)
4626 suffixes |= 1 << 5;
4628 /* There is more than one possible suffix match. */
4629 if (i.tm.opcode_modifier.w
4630 || ((suffixes & (suffixes - 1))
4631 && !i.tm.opcode_modifier.defaultsize
4632 && !i.tm.opcode_modifier.ignoresize))
4634 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
4635 return 0;
4640 /* Change the opcode based on the operand size given by i.suffix;
4641 We don't need to change things for byte insns. */
4643 if (i.suffix
4644 && i.suffix != BYTE_MNEM_SUFFIX
4645 && i.suffix != XMMWORD_MNEM_SUFFIX
4646 && i.suffix != YMMWORD_MNEM_SUFFIX)
4648 /* It's not a byte, select word/dword operation. */
4649 if (i.tm.opcode_modifier.w)
4651 if (i.tm.opcode_modifier.shortform)
4652 i.tm.base_opcode |= 8;
4653 else
4654 i.tm.base_opcode |= 1;
4657 /* Now select between word & dword operations via the operand
4658 size prefix, except for instructions that will ignore this
4659 prefix anyway. */
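/* E.g. "addw %ax,%bx" in 32-bit code needs the 0x66 operand size prefix
   here, while "jcxz" in 32-bit code needs 0x67 instead, since its
   "operand" size is really an address size (the JumpByte case below).  */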
4660 if (i.tm.opcode_modifier.addrprefixop0)
4662 /* The address size override prefix changes the size of the
4663 first operand. */
4664 if ((flag_code == CODE_32BIT
4665 && i.op->regs[0].reg_type.bitfield.reg16)
4666 || (flag_code != CODE_32BIT
4667 && i.op->regs[0].reg_type.bitfield.reg32))
4668 if (!add_prefix (ADDR_PREFIX_OPCODE))
4669 return 0;
4671 else if (i.suffix != QWORD_MNEM_SUFFIX
4672 && i.suffix != LONG_DOUBLE_MNEM_SUFFIX
4673 && !i.tm.opcode_modifier.ignoresize
4674 && !i.tm.opcode_modifier.floatmf
4675 && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
4676 || (flag_code == CODE_64BIT
4677 && i.tm.opcode_modifier.jumpbyte)))
4679 unsigned int prefix = DATA_PREFIX_OPCODE;
4681 if (i.tm.opcode_modifier.jumpbyte) /* jcxz, loop */
4682 prefix = ADDR_PREFIX_OPCODE;
4684 if (!add_prefix (prefix))
4685 return 0;
4688 /* Request a 64-bit operand size (REX.W) where needed. */
4689 if (i.suffix == QWORD_MNEM_SUFFIX
4690 && flag_code == CODE_64BIT
4691 && !i.tm.opcode_modifier.norex64)
4693 /* Special case for xchg %rax,%rax. It is NOP and doesn't
4694 need rex64. cmpxchg8b is also a special case. */
4695 if (! (i.operands == 2
4696 && i.tm.base_opcode == 0x90
4697 && i.tm.extension_opcode == None
4698 && operand_type_equal (&i.types [0], &acc64)
4699 && operand_type_equal (&i.types [1], &acc64))
4700 && ! (i.operands == 1
4701 && i.tm.base_opcode == 0xfc7
4702 && i.tm.extension_opcode == 1
4703 && !operand_type_check (i.types [0], reg)
4704 && operand_type_check (i.types [0], anymem)))
4705 i.rex |= REX_W;
4708 /* Size floating point instruction. */
4709 if (i.suffix == LONG_MNEM_SUFFIX)
4710 if (i.tm.opcode_modifier.floatmf)
4711 i.tm.base_opcode ^= 4;
4714 return 1;
4717 static int
4718 check_byte_reg (void)
4720 int op;
4722 for (op = i.operands; --op >= 0;)
4724 /* If this is an eight bit register, it's OK. If it's the 16 or
4725 32 bit version of an eight bit register, we will just use the
4726 low portion, and that's OK too. */
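/* E.g. "movb $1,%ax" outside 64-bit mode is accepted as if "%al" had
   been written; the REGISTER_WARNINGS block below tells the user.  */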
4727 if (i.types[op].bitfield.reg8)
4728 continue;
4730 /* crc32 doesn't generate this warning. */
4731 if (i.tm.base_opcode == 0xf20f38f0)
4732 continue;
4734 if ((i.types[op].bitfield.reg16
4735 || i.types[op].bitfield.reg32
4736 || i.types[op].bitfield.reg64)
4737 && i.op[op].regs->reg_num < 4)
4739 /* Prohibit these changes in the 64bit mode, since the
4740 lowering is more complicated. */
4741 if (flag_code == CODE_64BIT
4742 && !i.tm.operand_types[op].bitfield.inoutportreg)
4744 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4745 register_prefix, i.op[op].regs->reg_name,
4746 i.suffix);
4747 return 0;
4749 #if REGISTER_WARNINGS
4750 if (!quiet_warnings
4751 && !i.tm.operand_types[op].bitfield.inoutportreg)
4752 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4753 register_prefix,
4754 (i.op[op].regs + (i.types[op].bitfield.reg16
4755 ? REGNAM_AL - REGNAM_AX
4756 : REGNAM_AL - REGNAM_EAX))->reg_name,
4757 register_prefix,
4758 i.op[op].regs->reg_name,
4759 i.suffix);
4760 #endif
4761 continue;
4763 /* Any other register is bad. */
4764 if (i.types[op].bitfield.reg16
4765 || i.types[op].bitfield.reg32
4766 || i.types[op].bitfield.reg64
4767 || i.types[op].bitfield.regmmx
4768 || i.types[op].bitfield.regxmm
4769 || i.types[op].bitfield.regymm
4770 || i.types[op].bitfield.sreg2
4771 || i.types[op].bitfield.sreg3
4772 || i.types[op].bitfield.control
4773 || i.types[op].bitfield.debug
4774 || i.types[op].bitfield.test
4775 || i.types[op].bitfield.floatreg
4776 || i.types[op].bitfield.floatacc)
4778 as_bad (_("`%s%s' not allowed with `%s%c'"),
4779 register_prefix,
4780 i.op[op].regs->reg_name,
4781 i.tm.name,
4782 i.suffix);
4783 return 0;
4786 return 1;
4789 static int
4790 check_long_reg (void)
4792 int op;
4794 for (op = i.operands; --op >= 0;)
4795 /* Reject eight bit registers, except where the template requires
4796 them. (eg. movzb) */
4797 if (i.types[op].bitfield.reg8
4798 && (i.tm.operand_types[op].bitfield.reg16
4799 || i.tm.operand_types[op].bitfield.reg32
4800 || i.tm.operand_types[op].bitfield.acc))
4802 as_bad (_("`%s%s' not allowed with `%s%c'"),
4803 register_prefix,
4804 i.op[op].regs->reg_name,
4805 i.tm.name,
4806 i.suffix);
4807 return 0;
4809 /* Warn if the e prefix on a general reg is missing. */
4810 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4811 && i.types[op].bitfield.reg16
4812 && (i.tm.operand_types[op].bitfield.reg32
4813 || i.tm.operand_types[op].bitfield.acc))
4815 /* Prohibit these changes in the 64bit mode, since the
4816 lowering is more complicated. */
4817 if (flag_code == CODE_64BIT)
4819 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4820 register_prefix, i.op[op].regs->reg_name,
4821 i.suffix);
4822 return 0;
4824 #if REGISTER_WARNINGS
4825 else
4826 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4827 register_prefix,
4828 (i.op[op].regs + REGNAM_EAX - REGNAM_AX)->reg_name,
4829 register_prefix,
4830 i.op[op].regs->reg_name,
4831 i.suffix);
4832 #endif
4834 /* A 64-bit general reg was given where a 32-bit reg is expected. */
4835 else if (i.types[op].bitfield.reg64
4836 && (i.tm.operand_types[op].bitfield.reg32
4837 || i.tm.operand_types[op].bitfield.acc))
4839 if (intel_syntax
4840 && i.tm.opcode_modifier.toqword
4841 && !i.types[0].bitfield.regxmm)
4843 /* Convert to QWORD. We want REX byte. */
4844 i.suffix = QWORD_MNEM_SUFFIX;
4846 else
4848 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4849 register_prefix, i.op[op].regs->reg_name,
4850 i.suffix);
4851 return 0;
4854 return 1;
4857 static int
4858 check_qword_reg (void)
4860 int op;
4862 for (op = i.operands; --op >= 0; )
4863 /* Reject eight bit registers, except where the template requires
4864 them. (eg. movzb) */
4865 if (i.types[op].bitfield.reg8
4866 && (i.tm.operand_types[op].bitfield.reg16
4867 || i.tm.operand_types[op].bitfield.reg32
4868 || i.tm.operand_types[op].bitfield.acc))
4870 as_bad (_("`%s%s' not allowed with `%s%c'"),
4871 register_prefix,
4872 i.op[op].regs->reg_name,
4873 i.tm.name,
4874 i.suffix);
4875 return 0;
4877 /* A 16/32-bit general reg was given where the q suffix expects a 64-bit reg. */
4878 else if ((i.types[op].bitfield.reg16
4879 || i.types[op].bitfield.reg32)
4880 && (i.tm.operand_types[op].bitfield.reg32
4881 || i.tm.operand_types[op].bitfield.acc))
4883 /* Prohibit these changes in the 64bit mode, since the
4884 lowering is more complicated. */
4885 if (intel_syntax
4886 && i.tm.opcode_modifier.todword
4887 && !i.types[0].bitfield.regxmm)
4889 /* Convert to DWORD. We don't want REX byte. */
4890 i.suffix = LONG_MNEM_SUFFIX;
4892 else
4894 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4895 register_prefix, i.op[op].regs->reg_name,
4896 i.suffix);
4897 return 0;
4900 return 1;
4903 static int
4904 check_word_reg (void)
4906 int op;
4907 for (op = i.operands; --op >= 0;)
4908 /* Reject eight bit registers, except where the template requires
4909 them. (eg. movzb) */
4910 if (i.types[op].bitfield.reg8
4911 && (i.tm.operand_types[op].bitfield.reg16
4912 || i.tm.operand_types[op].bitfield.reg32
4913 || i.tm.operand_types[op].bitfield.acc))
4915 as_bad (_("`%s%s' not allowed with `%s%c'"),
4916 register_prefix,
4917 i.op[op].regs->reg_name,
4918 i.tm.name,
4919 i.suffix);
4920 return 0;
4922 /* Warn if the e prefix on a general reg is present. */
4923 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4924 && i.types[op].bitfield.reg32
4925 && (i.tm.operand_types[op].bitfield.reg16
4926 || i.tm.operand_types[op].bitfield.acc))
4928 /* Prohibit these changes in the 64bit mode, since the
4929 lowering is more complicated. */
4930 if (flag_code == CODE_64BIT)
4932 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4933 register_prefix, i.op[op].regs->reg_name,
4934 i.suffix);
4935 return 0;
4937 else
4938 #if REGISTER_WARNINGS
4939 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4940 register_prefix,
4941 (i.op[op].regs + REGNAM_AX - REGNAM_EAX)->reg_name,
4942 register_prefix,
4943 i.op[op].regs->reg_name,
4944 i.suffix);
4945 #endif
4947 return 1;
4950 static int
4951 update_imm (unsigned int j)
4953 i386_operand_type overlap = i.types[j];
4954 if ((overlap.bitfield.imm8
4955 || overlap.bitfield.imm8s
4956 || overlap.bitfield.imm16
4957 || overlap.bitfield.imm32
4958 || overlap.bitfield.imm32s
4959 || overlap.bitfield.imm64)
4960 && !operand_type_equal (&overlap, &imm8)
4961 && !operand_type_equal (&overlap, &imm8s)
4962 && !operand_type_equal (&overlap, &imm16)
4963 && !operand_type_equal (&overlap, &imm32)
4964 && !operand_type_equal (&overlap, &imm32s)
4965 && !operand_type_equal (&overlap, &imm64))
4967 if (i.suffix)
4969 i386_operand_type temp;
4971 operand_type_set (&temp, 0);
4972 if (i.suffix == BYTE_MNEM_SUFFIX)
4974 temp.bitfield.imm8 = overlap.bitfield.imm8;
4975 temp.bitfield.imm8s = overlap.bitfield.imm8s;
4977 else if (i.suffix == WORD_MNEM_SUFFIX)
4978 temp.bitfield.imm16 = overlap.bitfield.imm16;
4979 else if (i.suffix == QWORD_MNEM_SUFFIX)
4981 temp.bitfield.imm64 = overlap.bitfield.imm64;
4982 temp.bitfield.imm32s = overlap.bitfield.imm32s;
4984 else
4985 temp.bitfield.imm32 = overlap.bitfield.imm32;
4986 overlap = temp;
4988 else if (operand_type_equal (&overlap, &imm16_32_32s)
4989 || operand_type_equal (&overlap, &imm16_32)
4990 || operand_type_equal (&overlap, &imm16_32s))
4992 if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
4993 overlap = imm16;
4994 else
4995 overlap = imm32s;
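/* I.e. when both Imm16 and Imm32/Imm32S are still possible and no
   suffix settled it, the operand size in effect decides: a 0x66 data
   prefix outside 16-bit code (or none inside 16-bit code) selects
   Imm16, otherwise Imm32S.  */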
4997 if (!operand_type_equal (&overlap, &imm8)
4998 && !operand_type_equal (&overlap, &imm8s)
4999 && !operand_type_equal (&overlap, &imm16)
5000 && !operand_type_equal (&overlap, &imm32)
5001 && !operand_type_equal (&overlap, &imm32s)
5002 && !operand_type_equal (&overlap, &imm64))
5004 as_bad (_("no instruction mnemonic suffix given; "
5005 "can't determine immediate size"));
5006 return 0;
5009 i.types[j] = overlap;
5011 return 1;
5014 static int
5015 finalize_imm (void)
5017 unsigned int j, n;
5019 /* Update the first 2 immediate operands. */
5020 n = i.operands > 2 ? 2 : i.operands;
5021 if (n)
5023 for (j = 0; j < n; j++)
5024 if (update_imm (j) == 0)
5025 return 0;
5027 /* The 3rd operand can't be immediate operand. */
5028 gas_assert (operand_type_check (i.types[2], imm) == 0);
5031 return 1;
5034 static int
5035 bad_implicit_operand (int xmm)
5037 const char *ireg = xmm ? "xmm0" : "ymm0";
5039 if (intel_syntax)
5040 as_bad (_("the last operand of `%s' must be `%s%s'"),
5041 i.tm.name, register_prefix, ireg);
5042 else
5043 as_bad (_("the first operand of `%s' must be `%s%s'"),
5044 i.tm.name, register_prefix, ireg);
5045 return 0;
5048 static int
5049 process_operands (void)
5051 /* Default segment register this instruction will use for memory
5052 accesses. 0 means unknown. This is only for optimizing out
5053 unnecessary segment overrides. */
5054 const seg_entry *default_seg = 0;
5056 if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
5058 unsigned int dupl = i.operands;
5059 unsigned int dest = dupl - 1;
5060 unsigned int j;
5062 /* The destination must be an xmm register. */
5063 gas_assert (i.reg_operands
5064 && MAX_OPERANDS > dupl
5065 && operand_type_equal (&i.types[dest], &regxmm));
5067 if (i.tm.opcode_modifier.firstxmm0)
5069 /* The first operand is implicit and must be xmm0. */
5070 gas_assert (operand_type_equal (&i.types[0], &regxmm));
5071 if (i.op[0].regs->reg_num != 0)
5072 return bad_implicit_operand (1);
5074 if (i.tm.opcode_modifier.vexsources == VEX3SOURCES)
5076 /* Keep xmm0 for instructions with VEX prefix and 3
5077 sources. */
5078 goto duplicate;
5080 else
5082 /* We remove the first xmm0 and keep the number of
5083 operands unchanged, which in fact duplicates the
5084 destination. */
5085 for (j = 1; j < i.operands; j++)
5087 i.op[j - 1] = i.op[j];
5088 i.types[j - 1] = i.types[j];
5089 i.tm.operand_types[j - 1] = i.tm.operand_types[j];
5093 else if (i.tm.opcode_modifier.implicit1stxmm0)
5095 gas_assert ((MAX_OPERANDS - 1) > dupl
5096 && (i.tm.opcode_modifier.vexsources
5097 == VEX3SOURCES));
5099 /* Add the implicit xmm0 for instructions with VEX prefix
5100 and 3 sources. */
5101 for (j = i.operands; j > 0; j--)
5103 i.op[j] = i.op[j - 1];
5104 i.types[j] = i.types[j - 1];
5105 i.tm.operand_types[j] = i.tm.operand_types[j - 1];
5107 i.op[0].regs
5108 = (const reg_entry *) hash_find (reg_hash, "xmm0");
5109 i.types[0] = regxmm;
5110 i.tm.operand_types[0] = regxmm;
5112 i.operands += 2;
5113 i.reg_operands += 2;
5114 i.tm.operands += 2;
5116 dupl++;
5117 dest++;
5118 i.op[dupl] = i.op[dest];
5119 i.types[dupl] = i.types[dest];
5120 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
5122 else
5124 duplicate:
5125 i.operands++;
5126 i.reg_operands++;
5127 i.tm.operands++;
5129 i.op[dupl] = i.op[dest];
5130 i.types[dupl] = i.types[dest];
5131 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
5134 if (i.tm.opcode_modifier.immext)
5135 process_immext ();
5137 else if (i.tm.opcode_modifier.firstxmm0)
5139 unsigned int j;
5141 /* The first operand is implicit and must be xmm0/ymm0. */
5142 gas_assert (i.reg_operands
5143 && (operand_type_equal (&i.types[0], &regxmm)
5144 || operand_type_equal (&i.types[0], &regymm)));
5145 if (i.op[0].regs->reg_num != 0)
5146 return bad_implicit_operand (i.types[0].bitfield.regxmm);
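/* E.g. SSE4.1 "blendvps %xmm0,%xmm2,%xmm1": the explicit %xmm0 is only
   allowed for readability; it is implicit in the encoding, so it is
   dropped from the operand list here.  */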
5148 for (j = 1; j < i.operands; j++)
5150 i.op[j - 1] = i.op[j];
5151 i.types[j - 1] = i.types[j];
5153 /* We need to adjust fields in i.tm since they are used by
5154 build_modrm_byte. */
5155 i.tm.operand_types [j - 1] = i.tm.operand_types [j];
5158 i.operands--;
5159 i.reg_operands--;
5160 i.tm.operands--;
5162 else if (i.tm.opcode_modifier.regkludge)
5164 /* The imul $imm, %reg instruction is converted into
5165 imul $imm, %reg, %reg, and the clr %reg instruction
5166 is converted into xor %reg, %reg. */
5168 unsigned int first_reg_op;
5170 if (operand_type_check (i.types[0], reg))
5171 first_reg_op = 0;
5172 else
5173 first_reg_op = 1;
5174 /* Pretend we saw the extra register operand. */
5175 gas_assert (i.reg_operands == 1
5176 && i.op[first_reg_op + 1].regs == 0);
5177 i.op[first_reg_op + 1].regs = i.op[first_reg_op].regs;
5178 i.types[first_reg_op + 1] = i.types[first_reg_op];
5179 i.operands++;
5180 i.reg_operands++;
5183 if (i.tm.opcode_modifier.shortform)
5185 if (i.types[0].bitfield.sreg2
5186 || i.types[0].bitfield.sreg3)
5188 if (i.tm.base_opcode == POP_SEG_SHORT
5189 && i.op[0].regs->reg_num == 1)
5191 as_bad (_("you can't `pop %scs'"), register_prefix);
5192 return 0;
5194 i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
5195 if ((i.op[0].regs->reg_flags & RegRex) != 0)
5196 i.rex |= REX_B;
5198 else
5200 /* The register or float register operand is in operand
5201 0 or 1. */
5202 unsigned int op;
5204 if (i.types[0].bitfield.floatreg
5205 || operand_type_check (i.types[0], reg))
5206 op = 0;
5207 else
5208 op = 1;
5209 /* Register goes in low 3 bits of opcode. */
5210 i.tm.base_opcode |= i.op[op].regs->reg_num;
5211 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5212 i.rex |= REX_B;
5213 if (!quiet_warnings && i.tm.opcode_modifier.ugh)
5215 /* Warn about some common errors, but press on regardless.
5216 The first case can be generated by gcc (<= 2.8.1). */
5217 if (i.operands == 2)
5219 /* Reversed arguments on faddp, fsubp, etc. */
5220 as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
5221 register_prefix, i.op[!intel_syntax].regs->reg_name,
5222 register_prefix, i.op[intel_syntax].regs->reg_name);
5224 else
5226 /* Extraneous `l' suffix on fp insn. */
5227 as_warn (_("translating to `%s %s%s'"), i.tm.name,
5228 register_prefix, i.op[0].regs->reg_name);
5233 else if (i.tm.opcode_modifier.modrm)
5235 /* The opcode is completed (modulo i.tm.extension_opcode which
5236 must be put into the modrm byte). Now, we make the modrm and
5237 index base bytes based on all the info we've collected. */
5239 default_seg = build_modrm_byte ();
5241 else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
5243 default_seg = &ds;
5245 else if (i.tm.opcode_modifier.isstring)
5247 /* For the string instructions that allow a segment override
5248 on one of their operands, the default segment is ds. */
5249 default_seg = &ds;
5252 if (i.tm.base_opcode == 0x8d /* lea */
5253 && i.seg[0]
5254 && !quiet_warnings)
5255 as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);
5257 /* If a segment was explicitly specified, and the specified segment
5258 is not the default, use an opcode prefix to select it. If we
5259 never figured out what the default segment is, then default_seg
5260 will be zero at this point, and the specified segment prefix will
5261 always be used. */
5262 if ((i.seg[0]) && (i.seg[0] != default_seg))
5264 if (!add_prefix (i.seg[0]->seg_prefix))
5265 return 0;
5267 return 1;
5270 static const seg_entry *
5271 build_modrm_byte (void)
5273 const seg_entry *default_seg = 0;
5274 unsigned int source, dest;
5275 int vex_3_sources;
5277 /* The first operand of instructions with VEX prefix and 3 sources
5278 must be VEX_Imm4. */
5279 vex_3_sources = i.tm.opcode_modifier.vexsources == VEX3SOURCES;
5280 if (vex_3_sources)
5282 unsigned int nds, reg_slot;
5283 expressionS *exp;
5285 if (i.tm.opcode_modifier.veximmext
5286 && i.tm.opcode_modifier.immext)
5288 dest = i.operands - 2;
5289 gas_assert (dest == 3);
5291 else
5292 dest = i.operands - 1;
5293 nds = dest - 1;
5295 /* There are 2 kinds of instructions:
5296 1. 5 operands: 4 register operands or 3 register operands
5297 plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
5298 VexW0 or VexW1. The destination must be either XMM or YMM
5299 register.
5300 2. 4 operands: 4 register operands or 3 register operands
5301 plus 1 memory operand, VexXDS, and VexImmExt */
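/* (These are the FMA4/XOP style forms, e.g. "vfmaddps", where the extra
   register is carried in bits 7:4 of a trailing imm8; the code below
   either synthesizes that imm8 or patches an existing one.)  */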
5302 gas_assert ((i.reg_operands == 4
5303 || (i.reg_operands == 3 && i.mem_operands == 1))
5304 && i.tm.opcode_modifier.vexvvvv == VEXXDS
5305 && (i.tm.opcode_modifier.veximmext
5306 || (i.imm_operands == 1
5307 && i.types[0].bitfield.vec_imm4
5308 && (i.tm.opcode_modifier.vexw == VEXW0
5309 || i.tm.opcode_modifier.vexw == VEXW1)
5310 && (operand_type_equal (&i.tm.operand_types[dest], &regxmm)
5311 || operand_type_equal (&i.tm.operand_types[dest], &regymm)))));
5313 if (i.imm_operands == 0)
5315 /* When there is no immediate operand, generate an 8bit
5316 immediate operand to encode the first operand. */
5317 exp = &im_expressions[i.imm_operands++];
5318 i.op[i.operands].imms = exp;
5319 i.types[i.operands] = imm8;
5320 i.operands++;
5321 /* If VexW1 is set, the first operand is the source and
5322 the second operand is encoded in the immediate operand. */
5323 if (i.tm.opcode_modifier.vexw == VEXW1)
5325 source = 0;
5326 reg_slot = 1;
5328 else
5330 source = 1;
5331 reg_slot = 0;
5334 /* FMA swaps REG and NDS. */
5335 if (i.tm.cpu_flags.bitfield.cpufma)
5337 unsigned int tmp;
5338 tmp = reg_slot;
5339 reg_slot = nds;
5340 nds = tmp;
5343 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5344 &regxmm)
5345 || operand_type_equal (&i.tm.operand_types[reg_slot],
5346 &regymm));
5347 exp->X_op = O_constant;
5348 exp->X_add_number
5349 = ((i.op[reg_slot].regs->reg_num
5350 + ((i.op[reg_slot].regs->reg_flags & RegRex) ? 8 : 0))
5351 << 4);
5353 else
5355 unsigned int imm_slot;
5357 if (i.tm.opcode_modifier.vexw == VEXW0)
5359 /* If VexW0 is set, the third operand is the source and
5360 the second operand is encoded in the immediate
5361 operand. */
5362 source = 2;
5363 reg_slot = 1;
5365 else
5367 /* VexW1 is set, the second operand is the source and
5368 the third operand is encoded in the immediate
5369 operand. */
5370 source = 1;
5371 reg_slot = 2;
5374 if (i.tm.opcode_modifier.immext)
5376 /* When ImmExt is set, the immediate byte is the last
5377 operand. */
5378 imm_slot = i.operands - 1;
5379 source--;
5380 reg_slot--;
5382 else
5384 imm_slot = 0;
5386 /* Turn on Imm8 so that output_imm will generate it. */
5387 i.types[imm_slot].bitfield.imm8 = 1;
5390 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5391 &regxmm)
5392 || operand_type_equal (&i.tm.operand_types[reg_slot],
5393 &regymm));
5394 i.op[imm_slot].imms->X_add_number
5395 |= ((i.op[reg_slot].regs->reg_num
5396 + ((i.op[reg_slot].regs->reg_flags & RegRex) ? 8 : 0))
5397 << 4);
5400 gas_assert (operand_type_equal (&i.tm.operand_types[nds], &regxmm)
5401 || operand_type_equal (&i.tm.operand_types[nds],
5402 &regymm));
5403 i.vex.register_specifier = i.op[nds].regs;
5405 else
5406 source = dest = 0;
5408 /* i.reg_operands MUST be the number of real register operands;
5409 implicit registers do not count. If there are 3 register
5410 operands, it must be an instruction with VexNDS. For an
5411 instruction with VexNDD, the destination register is encoded
5412 in VEX prefix. If there are 4 register operands, it must be
5413 an instruction with VEX prefix and 3 sources. */
5414 if (i.mem_operands == 0
5415 && ((i.reg_operands == 2
5416 && i.tm.opcode_modifier.vexvvvv <= VEXXDS)
5417 || (i.reg_operands == 3
5418 && i.tm.opcode_modifier.vexvvvv == VEXXDS)
5419 || (i.reg_operands == 4 && vex_3_sources)))
5421 switch (i.operands)
5423 case 2:
5424 source = 0;
5425 break;
5426 case 3:
5427 /* When there are 3 operands, one of them may be immediate,
5428 which may be the first or the last operand. Otherwise,
5429 the first operand must be shift count register (cl) or it
5430 is an instruction with VexNDS. */
5431 gas_assert (i.imm_operands == 1
5432 || (i.imm_operands == 0
5433 && (i.tm.opcode_modifier.vexvvvv == VEXXDS
5434 || i.types[0].bitfield.shiftcount)));
5435 if (operand_type_check (i.types[0], imm)
5436 || i.types[0].bitfield.shiftcount)
5437 source = 1;
5438 else
5439 source = 0;
5440 break;
5441 case 4:
5442 /* When there are 4 operands, the first two must be 8bit
5443 immediate operands. The source operand will be the 3rd
5444 one.
5446 For instructions with VexNDS, if the first operand
5447 is an imm8, the source operand is the 2nd one. If the last
5448 operand is imm8, the source operand is the first one. */
5449 gas_assert ((i.imm_operands == 2
5450 && i.types[0].bitfield.imm8
5451 && i.types[1].bitfield.imm8)
5452 || (i.tm.opcode_modifier.vexvvvv == VEXXDS
5453 && i.imm_operands == 1
5454 && (i.types[0].bitfield.imm8
5455 || i.types[i.operands - 1].bitfield.imm8)));
5456 if (i.imm_operands == 2)
5457 source = 2;
5458 else
5460 if (i.types[0].bitfield.imm8)
5461 source = 1;
5462 else
5463 source = 0;
5465 break;
5466 case 5:
5467 break;
5468 default:
5469 abort ();
5472 if (!vex_3_sources)
5474 dest = source + 1;
5476 if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5478 /* For instructions with VexNDS, the register-only
5479 source operand must be 32/64bit integer, XMM or
5480 YMM register. It is encoded in VEX prefix. We
5481 need to clear RegMem bit before calling
5482 operand_type_equal. */
5484 i386_operand_type op;
5485 unsigned int vvvv;
5487 /* Check register-only source operand when two source
5488 operands are swapped. */
5489 if (!i.tm.operand_types[source].bitfield.baseindex
5490 && i.tm.operand_types[dest].bitfield.baseindex)
5492 vvvv = source;
5493 source = dest;
5495 else
5496 vvvv = dest;
5498 op = i.tm.operand_types[vvvv];
5499 op.bitfield.regmem = 0;
5500 if ((dest + 1) >= i.operands
5501 || (op.bitfield.reg32 != 1
5502 && op.bitfield.reg64 != 1
5503 && !operand_type_equal (&op, &regxmm)
5504 && !operand_type_equal (&op, &regymm)))
5505 abort ();
5506 i.vex.register_specifier = i.op[vvvv].regs;
5507 dest++;
5511 i.rm.mode = 3;
5512 /* One of the register operands will be encoded in the i.rm.reg
5513 field, the other in the combined i.rm.mode and i.rm.regmem
5514 fields. If no form of this instruction supports a memory
5515 destination operand, then we assume the source operand may
5516 sometimes be a memory operand and so we need to store the
5517 destination in the i.rm.reg field. */
5518 if (!i.tm.operand_types[dest].bitfield.regmem
5519 && operand_type_check (i.tm.operand_types[dest], anymem) == 0)
5521 i.rm.reg = i.op[dest].regs->reg_num;
5522 i.rm.regmem = i.op[source].regs->reg_num;
5523 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5524 i.rex |= REX_R;
5525 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5526 i.rex |= REX_B;
5528 else
5530 i.rm.reg = i.op[source].regs->reg_num;
5531 i.rm.regmem = i.op[dest].regs->reg_num;
5532 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5533 i.rex |= REX_B;
5534 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5535 i.rex |= REX_R;
5537 if (flag_code != CODE_64BIT && (i.rex & (REX_R | REX_B)))
5539 if (!i.types[0].bitfield.control
5540 && !i.types[1].bitfield.control)
5541 abort ();
5542 i.rex &= ~(REX_R | REX_B);
5543 add_prefix (LOCK_PREFIX_OPCODE);
5546 else
5547 { /* If it's not 2 reg operands... */
5548 unsigned int mem;
5550 if (i.mem_operands)
5552 unsigned int fake_zero_displacement = 0;
5553 unsigned int op;
5555 for (op = 0; op < i.operands; op++)
5556 if (operand_type_check (i.types[op], anymem))
5557 break;
5558 gas_assert (op < i.operands);
5560 if (i.tm.opcode_modifier.vecsib)
5562 if (i.index_reg->reg_num == RegEiz
5563 || i.index_reg->reg_num == RegRiz)
5564 abort ();
5566 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5567 if (!i.base_reg)
5569 i.sib.base = NO_BASE_REGISTER;
5570 i.sib.scale = i.log2_scale_factor;
5571 i.types[op].bitfield.disp8 = 0;
5572 i.types[op].bitfield.disp16 = 0;
5573 i.types[op].bitfield.disp64 = 0;
5574 if (flag_code != CODE_64BIT)
5576 /* Must be 32 bit */
5577 i.types[op].bitfield.disp32 = 1;
5578 i.types[op].bitfield.disp32s = 0;
5580 else
5582 i.types[op].bitfield.disp32 = 0;
5583 i.types[op].bitfield.disp32s = 1;
5586 i.sib.index = i.index_reg->reg_num;
5587 if ((i.index_reg->reg_flags & RegRex) != 0)
5588 i.rex |= REX_X;
5591 default_seg = &ds;
5593 if (i.base_reg == 0)
5595 i.rm.mode = 0;
5596 if (!i.disp_operands)
5598 fake_zero_displacement = 1;
5599 /* Instructions with VSIB byte need 32bit displacement
5600 if there is no base register. */
5601 if (i.tm.opcode_modifier.vecsib)
5602 i.types[op].bitfield.disp32 = 1;
5604 if (i.index_reg == 0)
5606 gas_assert (!i.tm.opcode_modifier.vecsib);
5607 /* Operand is just <disp> */
5608 if (flag_code == CODE_64BIT)
5610 /* In 64bit mode RIP relative addressing replaces the old
5611 32bit absolute addressing, so a plain absolute address
5612 has to be encoded via one of the redundant SIB forms
5613 (no base, no index).  */
5614 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5615 i.sib.base = NO_BASE_REGISTER;
5616 i.sib.index = NO_INDEX_REGISTER;
5617 i.types[op] = ((i.prefix[ADDR_PREFIX] == 0)
5618 ? disp32s : disp32);
5620 else if ((flag_code == CODE_16BIT)
5621 ^ (i.prefix[ADDR_PREFIX] != 0))
5623 i.rm.regmem = NO_BASE_REGISTER_16;
5624 i.types[op] = disp16;
5626 else
5628 i.rm.regmem = NO_BASE_REGISTER;
5629 i.types[op] = disp32;
5632 else if (!i.tm.opcode_modifier.vecsib)
5634 /* !i.base_reg && i.index_reg */
5635 if (i.index_reg->reg_num == RegEiz
5636 || i.index_reg->reg_num == RegRiz)
5637 i.sib.index = NO_INDEX_REGISTER;
5638 else
5639 i.sib.index = i.index_reg->reg_num;
5640 i.sib.base = NO_BASE_REGISTER;
5641 i.sib.scale = i.log2_scale_factor;
5642 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5643 i.types[op].bitfield.disp8 = 0;
5644 i.types[op].bitfield.disp16 = 0;
5645 i.types[op].bitfield.disp64 = 0;
5646 if (flag_code != CODE_64BIT)
5648 /* Must be 32 bit */
5649 i.types[op].bitfield.disp32 = 1;
5650 i.types[op].bitfield.disp32s = 0;
5652 else
5654 i.types[op].bitfield.disp32 = 0;
5655 i.types[op].bitfield.disp32s = 1;
5657 if ((i.index_reg->reg_flags & RegRex) != 0)
5658 i.rex |= REX_X;
5661 /* RIP addressing for 64bit mode. */
5662 else if (i.base_reg->reg_num == RegRip ||
5663 i.base_reg->reg_num == RegEip)
5665 gas_assert (!i.tm.opcode_modifier.vecsib);
5666 i.rm.regmem = NO_BASE_REGISTER;
5667 i.types[op].bitfield.disp8 = 0;
5668 i.types[op].bitfield.disp16 = 0;
5669 i.types[op].bitfield.disp32 = 0;
5670 i.types[op].bitfield.disp32s = 1;
5671 i.types[op].bitfield.disp64 = 0;
5672 i.flags[op] |= Operand_PCrel;
5673 if (! i.disp_operands)
5674 fake_zero_displacement = 1;
5676 else if (i.base_reg->reg_type.bitfield.reg16)
5678 gas_assert (!i.tm.opcode_modifier.vecsib);
5679 switch (i.base_reg->reg_num)
5681 case 3: /* (%bx) */
5682 if (i.index_reg == 0)
5683 i.rm.regmem = 7;
5684 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
5685 i.rm.regmem = i.index_reg->reg_num - 6;
5686 break;
5687 case 5: /* (%bp) */
5688 default_seg = &ss;
5689 if (i.index_reg == 0)
5691 i.rm.regmem = 6;
5692 if (operand_type_check (i.types[op], disp) == 0)
5694 /* fake (%bp) into 0(%bp) */
5695 i.types[op].bitfield.disp8 = 1;
5696 fake_zero_displacement = 1;
5699 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
5700 i.rm.regmem = i.index_reg->reg_num - 6 + 2;
5701 break;
5702 default: /* (%si) -> 4 or (%di) -> 5 */
5703 i.rm.regmem = i.base_reg->reg_num - 6 + 4;
5705 i.rm.mode = mode_from_disp_size (i.types[op]);
5707 else /* i.base_reg and 32/64 bit mode */
5709 if (flag_code == CODE_64BIT
5710 && operand_type_check (i.types[op], disp))
5712 i386_operand_type temp;
5713 operand_type_set (&temp, 0);
5714 temp.bitfield.disp8 = i.types[op].bitfield.disp8;
5715 i.types[op] = temp;
5716 if (i.prefix[ADDR_PREFIX] == 0)
5717 i.types[op].bitfield.disp32s = 1;
5718 else
5719 i.types[op].bitfield.disp32 = 1;
5722 if (!i.tm.opcode_modifier.vecsib)
5723 i.rm.regmem = i.base_reg->reg_num;
5724 if ((i.base_reg->reg_flags & RegRex) != 0)
5725 i.rex |= REX_B;
5726 i.sib.base = i.base_reg->reg_num;
5727 /* x86-64 ignores REX prefix bit here to avoid decoder
5728 complications. */
5729 if ((i.base_reg->reg_num & 7) == EBP_REG_NUM)
5731 default_seg = &ss;
5732 if (i.disp_operands == 0)
5734 fake_zero_displacement = 1;
5735 i.types[op].bitfield.disp8 = 1;
5738 else if (i.base_reg->reg_num == ESP_REG_NUM)
5740 default_seg = &ss;
5742 i.sib.scale = i.log2_scale_factor;
5743 if (i.index_reg == 0)
5745 gas_assert (!i.tm.opcode_modifier.vecsib);
5746 /* <disp>(%esp) becomes two byte modrm with no index
5747 register. We've already stored the code for esp
5748 in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
5749 Any base register besides %esp will not use the
5750 extra modrm byte. */
5751 i.sib.index = NO_INDEX_REGISTER;
5753 else if (!i.tm.opcode_modifier.vecsib)
5755 if (i.index_reg->reg_num == RegEiz
5756 || i.index_reg->reg_num == RegRiz)
5757 i.sib.index = NO_INDEX_REGISTER;
5758 else
5759 i.sib.index = i.index_reg->reg_num;
5760 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5761 if ((i.index_reg->reg_flags & RegRex) != 0)
5762 i.rex |= REX_X;
5765 if (i.disp_operands
5766 && (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
5767 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
5768 i.rm.mode = 0;
5769 else
5771 if (!fake_zero_displacement
5772 && !i.disp_operands
5773 && i.disp_encoding)
5775 fake_zero_displacement = 1;
5776 if (i.disp_encoding == disp_encoding_8bit)
5777 i.types[op].bitfield.disp8 = 1;
5778 else
5779 i.types[op].bitfield.disp32 = 1;
5781 i.rm.mode = mode_from_disp_size (i.types[op]);
5785 if (fake_zero_displacement)
5787 /* Fakes a zero displacement assuming that i.types[op]
5788 holds the correct displacement size. */
5789 expressionS *exp;
5791 gas_assert (i.op[op].disps == 0);
5792 exp = &disp_expressions[i.disp_operands++];
5793 i.op[op].disps = exp;
5794 exp->X_op = O_constant;
5795 exp->X_add_number = 0;
5796 exp->X_add_symbol = (symbolS *) 0;
5797 exp->X_op_symbol = (symbolS *) 0;
5800 mem = op;
5802 else
5803 mem = ~0;
5805 if (i.tm.opcode_modifier.vexsources == XOP2SOURCES)
5807 if (operand_type_check (i.types[0], imm))
5808 i.vex.register_specifier = NULL;
5809 else
5811 /* VEX.vvvv encodes one of the sources when the first
5812 operand is not an immediate. */
5813 if (i.tm.opcode_modifier.vexw == VEXW0)
5814 i.vex.register_specifier = i.op[0].regs;
5815 else
5816 i.vex.register_specifier = i.op[1].regs;
5819 /* Destination is a XMM register encoded in the ModRM.reg
5820 and VEX.R bit. */
5821 i.rm.reg = i.op[2].regs->reg_num;
5822 if ((i.op[2].regs->reg_flags & RegRex) != 0)
5823 i.rex |= REX_R;
5825 /* ModRM.rm and VEX.B encodes the other source. */
5826 if (!i.mem_operands)
5828 i.rm.mode = 3;
5830 if (i.tm.opcode_modifier.vexw == VEXW0)
5831 i.rm.regmem = i.op[1].regs->reg_num;
5832 else
5833 i.rm.regmem = i.op[0].regs->reg_num;
5835 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5836 i.rex |= REX_B;
5839 else if (i.tm.opcode_modifier.vexvvvv == VEXLWP)
5841 i.vex.register_specifier = i.op[2].regs;
5842 if (!i.mem_operands)
5844 i.rm.mode = 3;
5845 i.rm.regmem = i.op[1].regs->reg_num;
5846 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5847 i.rex |= REX_B;
5850 /* Fill in i.rm.reg or i.rm.regmem field with register operand
5851 (if any) based on i.tm.extension_opcode. Again, we must be
5852 careful to make sure that segment/control/debug/test/MMX
5853 registers are coded into the i.rm.reg field. */
5854 else if (i.reg_operands)
5856 unsigned int op;
5857 unsigned int vex_reg = ~0;
5859 for (op = 0; op < i.operands; op++)
5860 if (i.types[op].bitfield.reg8
5861 || i.types[op].bitfield.reg16
5862 || i.types[op].bitfield.reg32
5863 || i.types[op].bitfield.reg64
5864 || i.types[op].bitfield.regmmx
5865 || i.types[op].bitfield.regxmm
5866 || i.types[op].bitfield.regymm
5867 || i.types[op].bitfield.sreg2
5868 || i.types[op].bitfield.sreg3
5869 || i.types[op].bitfield.control
5870 || i.types[op].bitfield.debug
5871 || i.types[op].bitfield.test)
5872 break;
5874 if (vex_3_sources)
5875 op = dest;
5876 else if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5878 /* For instructions with VexNDS, the register-only
5879 source operand is encoded in VEX prefix. */
5880 gas_assert (mem != (unsigned int) ~0);
5882 if (op > mem)
5884 vex_reg = op++;
5885 gas_assert (op < i.operands);
5887 else
5889 /* Check register-only source operand when two source
5890 operands are swapped. */
5891 if (!i.tm.operand_types[op].bitfield.baseindex
5892 && i.tm.operand_types[op + 1].bitfield.baseindex)
5894 vex_reg = op;
5895 op += 2;
5896 gas_assert (mem == (vex_reg + 1)
5897 && op < i.operands);
5899 else
5901 vex_reg = op + 1;
5902 gas_assert (vex_reg < i.operands);
5906 else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
5908 /* For instructions with VexNDD, the register destination
5909 is encoded in VEX prefix. */
5910 if (i.mem_operands == 0)
5912 /* There is no memory operand. */
5913 gas_assert ((op + 2) == i.operands);
5914 vex_reg = op + 1;
5916 else
5918 /* There are only 2 operands. */
5919 gas_assert (op < 2 && i.operands == 2);
5920 vex_reg = 1;
5923 else
5924 gas_assert (op < i.operands);
5926 if (vex_reg != (unsigned int) ~0)
5928 i386_operand_type *type = &i.tm.operand_types[vex_reg];
5930 if (type->bitfield.reg32 != 1
5931 && type->bitfield.reg64 != 1
5932 && !operand_type_equal (type, &regxmm)
5933 && !operand_type_equal (type, &regymm))
5934 abort ();
5936 i.vex.register_specifier = i.op[vex_reg].regs;
5939 /* Don't set OP operand twice. */
5940 if (vex_reg != op)
5942 /* If there is an extension opcode to put here, the
5943 register number must be put into the regmem field. */
5944 if (i.tm.extension_opcode != None)
5946 i.rm.regmem = i.op[op].regs->reg_num;
5947 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5948 i.rex |= REX_B;
5950 else
5952 i.rm.reg = i.op[op].regs->reg_num;
5953 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5954 i.rex |= REX_R;
5958 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
5959 must set it to 3 to indicate this is a register operand
5960 in the regmem field. */
5961 if (!i.mem_operands)
5962 i.rm.mode = 3;
5965 /* Fill in i.rm.reg field with extension opcode (if any). */
5966 if (i.tm.extension_opcode != None)
5967 i.rm.reg = i.tm.extension_opcode;
5969 return default_seg;
5972 static void
5973 output_branch (void)
5975 char *p;
5976 int size;
5977 int code16;
5978 int prefix;
5979 relax_substateT subtype;
5980 symbolS *sym;
5981 offsetT off;
5983 code16 = flag_code == CODE_16BIT ? CODE16 : 0;
5984 size = i.disp_encoding == disp_encoding_32bit ? BIG : SMALL;
5986 prefix = 0;
5987 if (i.prefix[DATA_PREFIX] != 0)
5989 prefix = 1;
5990 i.prefixes -= 1;
5991 code16 ^= CODE16;
5993 /* Pentium4 branch hints. */
5994 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
5995 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
5997 prefix++;
5998 i.prefixes--;
6000 if (i.prefix[REX_PREFIX] != 0)
6002 prefix++;
6003 i.prefixes--;
6006 if (i.prefixes != 0 && !intel_syntax)
6007 as_warn (_("skipping prefixes on this instruction"));
6009 /* It's always a symbol; End frag & setup for relax.
6010 Make sure there is enough room in this frag for the largest
6011 instruction we may generate in md_convert_frag. This is 2
6012 bytes for the opcode and room for the prefix and largest
6013 displacement. */
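/* E.g. a conditional "je" starts out as the 2-byte short form; if the
   target turns out to be beyond byte range, md_convert_frag rewrites it
   as the 6-byte 0x0f 0x8x rel32 form in the room reserved here.  */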
6014 frag_grow (prefix + 2 + 4);
6015 /* Prefix and 1 opcode byte go in fr_fix. */
6016 p = frag_more (prefix + 1);
6017 if (i.prefix[DATA_PREFIX] != 0)
6018 *p++ = DATA_PREFIX_OPCODE;
6019 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
6020 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
6021 *p++ = i.prefix[SEG_PREFIX];
6022 if (i.prefix[REX_PREFIX] != 0)
6023 *p++ = i.prefix[REX_PREFIX];
6024 *p = i.tm.base_opcode;
6026 if ((unsigned char) *p == JUMP_PC_RELATIVE)
6027 subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, size);
6028 else if (cpu_arch_flags.bitfield.cpui386)
6029 subtype = ENCODE_RELAX_STATE (COND_JUMP, size);
6030 else
6031 subtype = ENCODE_RELAX_STATE (COND_JUMP86, size);
6032 subtype |= code16;
6034 sym = i.op[0].disps->X_add_symbol;
6035 off = i.op[0].disps->X_add_number;
6037 if (i.op[0].disps->X_op != O_constant
6038 && i.op[0].disps->X_op != O_symbol)
6040 /* Handle complex expressions. */
6041 sym = make_expr_symbol (i.op[0].disps);
6042 off = 0;
6045 /* 1 possible extra opcode + 4 byte displacement go in var part.
6046 Pass reloc in fr_var. */
6047 frag_var (rs_machine_dependent, 5, i.reloc[0], subtype, sym, off, p);
6050 static void
6051 output_jump (void)
6053 char *p;
6054 int size;
6055 fixS *fixP;
6057 if (i.tm.opcode_modifier.jumpbyte)
6059 /* This is a loop or jecxz type instruction. */
6060 size = 1;
6061 if (i.prefix[ADDR_PREFIX] != 0)
6063 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
6064 i.prefixes -= 1;
6066 /* Pentium4 branch hints. */
6067 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
6068 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
6070 FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
6071 i.prefixes--;
6074 else
6076 int code16;
6078 code16 = 0;
6079 if (flag_code == CODE_16BIT)
6080 code16 = CODE16;
6082 if (i.prefix[DATA_PREFIX] != 0)
6084 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
6085 i.prefixes -= 1;
6086 code16 ^= CODE16;
6089 size = 4;
6090 if (code16)
6091 size = 2;
6094 if (i.prefix[REX_PREFIX] != 0)
6096 FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
6097 i.prefixes -= 1;
6100 if (i.prefixes != 0 && !intel_syntax)
6101 as_warn (_("skipping prefixes on this instruction"));
6103 p = frag_more (i.tm.opcode_length + size);
6104 switch (i.tm.opcode_length)
6106 case 2:
6107 *p++ = i.tm.base_opcode >> 8;
6108 case 1:
6109 *p++ = i.tm.base_opcode;
6110 break;
6111 default:
6112 abort ();
6115 fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6116 i.op[0].disps, 1, reloc (size, 1, 1, i.reloc[0]));
6118 /* All jumps handled here are signed, but don't use a signed limit
6119 check for 32 and 16 bit jumps as we want to allow wrap around at
6120 4G and 64k respectively. */
6121 if (size == 1)
6122 fixP->fx_signed = 1;
6125 static void
6126 output_interseg_jump (void)
6128 char *p;
6129 int size;
6130 int prefix;
6131 int code16;
6133 code16 = 0;
6134 if (flag_code == CODE_16BIT)
6135 code16 = CODE16;
6137 prefix = 0;
6138 if (i.prefix[DATA_PREFIX] != 0)
6140 prefix = 1;
6141 i.prefixes -= 1;
6142 code16 ^= CODE16;
6144 if (i.prefix[REX_PREFIX] != 0)
6146 prefix++;
6147 i.prefixes -= 1;
6150 size = 4;
6151 if (code16)
6152 size = 2;
6154 if (i.prefixes != 0 && !intel_syntax)
6155 as_warn (_("skipping prefixes on this instruction"));
6157 /* 1 opcode; 2 segment; offset */
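/* E.g. "ljmp $0x08,$0x1000" in 32-bit code emits 0xea, the 4-byte
   offset 0x00001000, then the 2-byte selector 0x0008.  */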
6158 p = frag_more (prefix + 1 + 2 + size);
6160 if (i.prefix[DATA_PREFIX] != 0)
6161 *p++ = DATA_PREFIX_OPCODE;
6163 if (i.prefix[REX_PREFIX] != 0)
6164 *p++ = i.prefix[REX_PREFIX];
6166 *p++ = i.tm.base_opcode;
6167 if (i.op[1].imms->X_op == O_constant)
6169 offsetT n = i.op[1].imms->X_add_number;
6171 if (size == 2
6172 && !fits_in_unsigned_word (n)
6173 && !fits_in_signed_word (n))
6175 as_bad (_("16-bit jump out of range"));
6176 return;
6178 md_number_to_chars (p, n, size);
6180 else
6181 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6182 i.op[1].imms, 0, reloc (size, 0, 0, i.reloc[1]));
6183 if (i.op[0].imms->X_op != O_constant)
6184 as_bad (_("can't handle non absolute segment in `%s'"),
6185 i.tm.name);
6186 md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
6189 static void
6190 output_insn (void)
6192 fragS *insn_start_frag;
6193 offsetT insn_start_off;
6195 /* Tie dwarf2 debug info to the address at the start of the insn.
6196 We can't do this after the insn has been output as the current
6197 frag may have been closed off. eg. by frag_var. */
6198 dwarf2_emit_insn (0);
6200 insn_start_frag = frag_now;
6201 insn_start_off = frag_now_fix ();
6203 /* Output jumps. */
6204 if (i.tm.opcode_modifier.jump)
6205 output_branch ();
6206 else if (i.tm.opcode_modifier.jumpbyte
6207 || i.tm.opcode_modifier.jumpdword)
6208 output_jump ();
6209 else if (i.tm.opcode_modifier.jumpintersegment)
6210 output_interseg_jump ();
6211 else
6213 /* Output normal instructions here. */
6214 char *p;
6215 unsigned char *q;
6216 unsigned int j;
6217 unsigned int prefix;
6219 /* The VEX prefix already encodes the implied legacy prefix (0x66, 0xf2
6220 or 0xf3), so we don't emit it explicitly here. */
6221 if (!i.tm.opcode_modifier.vex)
6223 switch (i.tm.opcode_length)
6225 case 3:
6226 if (i.tm.base_opcode & 0xff000000)
6228 prefix = (i.tm.base_opcode >> 24) & 0xff;
6229 goto check_prefix;
6231 break;
6232 case 2:
6233 if ((i.tm.base_opcode & 0xff0000) != 0)
6235 prefix = (i.tm.base_opcode >> 16) & 0xff;
6236 if (i.tm.cpu_flags.bitfield.cpupadlock)
6238 check_prefix:
6239 if (prefix != REPE_PREFIX_OPCODE
6240 || (i.prefix[REP_PREFIX]
6241 != REPE_PREFIX_OPCODE))
6242 add_prefix (prefix);
6244 else
6245 add_prefix (prefix);
6247 break;
6248 case 1:
6249 break;
6250 default:
6251 abort ();
6254 /* The prefix bytes. */
6255 for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
6256 if (*q)
6257 FRAG_APPEND_1_CHAR (*q);
6260 if (i.tm.opcode_modifier.vex)
6262 for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
6263 if (*q)
6264 switch (j)
6266 case REX_PREFIX:
6267 /* REX byte is encoded in VEX prefix. */
6268 break;
6269 case SEG_PREFIX:
6270 case ADDR_PREFIX:
6271 FRAG_APPEND_1_CHAR (*q);
6272 break;
6273 default:
6274 /* There should be no other prefixes for instructions
6275 with VEX prefix. */
6276 abort ();
6279 /* Now the VEX prefix. */
6280 p = frag_more (i.vex.length);
6281 for (j = 0; j < i.vex.length; j++)
6282 p[j] = i.vex.bytes[j];
6285 /* Now the opcode; be careful about word order here! */
6286 if (i.tm.opcode_length == 1)
6288 FRAG_APPEND_1_CHAR (i.tm.base_opcode);
6290 else
6292 switch (i.tm.opcode_length)
6294 case 3:
6295 p = frag_more (3);
6296 *p++ = (i.tm.base_opcode >> 16) & 0xff;
6297 break;
6298 case 2:
6299 p = frag_more (2);
6300 break;
6301 default:
6302 abort ();
6303 break;
6306 /* Put out high byte first: can't use md_number_to_chars! */
6307 *p++ = (i.tm.base_opcode >> 8) & 0xff;
6308 *p = i.tm.base_opcode & 0xff;
6311 /* Now the modrm byte and sib byte (if present). */
6312 if (i.tm.opcode_modifier.modrm)
6314 FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
6315 | i.rm.reg << 3
6316 | i.rm.mode << 6));
6317 /* If i.rm.regmem == ESP (4)
6318 && i.rm.mode != (Register mode)
6319 && not 16 bit
6320 ==> need second modrm byte. */
6321 if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
6322 && i.rm.mode != 3
6323 && !(i.base_reg && i.base_reg->reg_type.bitfield.reg16))
6324 FRAG_APPEND_1_CHAR ((i.sib.base << 0
6325 | i.sib.index << 3
6326 | i.sib.scale << 6));
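/* E.g. "movl (%eax,%ebx,4),%ecx" ends up here as 0x8b 0x0c 0x98:
   ModRM mod=00 reg=%ecx rm=100 (SIB follows), then SIB with scale=4,
   index=%ebx, base=%eax.  */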
6329 if (i.disp_operands)
6330 output_disp (insn_start_frag, insn_start_off);
6332 if (i.imm_operands)
6333 output_imm (insn_start_frag, insn_start_off);
6336 #ifdef DEBUG386
6337 if (flag_debug)
6339 pi ("" /*line*/, &i);
6341 #endif /* DEBUG386 */
6344 /* Return the size of the displacement operand N. */
6346 static int
6347 disp_size (unsigned int n)
6349 int size = 4;
6350 if (i.types[n].bitfield.disp64)
6351 size = 8;
6352 else if (i.types[n].bitfield.disp8)
6353 size = 1;
6354 else if (i.types[n].bitfield.disp16)
6355 size = 2;
6356 return size;
6359 /* Return the size of the immediate operand N. */
6361 static int
6362 imm_size (unsigned int n)
6364 int size = 4;
6365 if (i.types[n].bitfield.imm64)
6366 size = 8;
6367 else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s)
6368 size = 1;
6369 else if (i.types[n].bitfield.imm16)
6370 size = 2;
6371 return size;
6374 static void
6375 output_disp (fragS *insn_start_frag, offsetT insn_start_off)
6377 char *p;
6378 unsigned int n;
6380 for (n = 0; n < i.operands; n++)
6382 if (operand_type_check (i.types[n], disp))
6384 if (i.op[n].disps->X_op == O_constant)
6386 int size = disp_size (n);
6387 offsetT val;
6389 val = offset_in_range (i.op[n].disps->X_add_number,
6390 size);
6391 p = frag_more (size);
6392 md_number_to_chars (p, val, size);
6394 else
6396 enum bfd_reloc_code_real reloc_type;
6397 int size = disp_size (n);
6398 int sign = i.types[n].bitfield.disp32s;
6399 int pcrel = (i.flags[n] & Operand_PCrel) != 0;
6401 /* We can't have 8 bit displacement here. */
6402 gas_assert (!i.types[n].bitfield.disp8);
6404 /* The PC relative address is computed relative
6405 to the instruction boundary, so if immediate
6406 fields follow, we need to adjust the value. */
6407 if (pcrel && i.imm_operands)
6409 unsigned int n1;
6410 int sz = 0;
6412 for (n1 = 0; n1 < i.operands; n1++)
6413 if (operand_type_check (i.types[n1], imm))
6415 /* Only one immediate is allowed for PC
6416 relative address. */
6417 gas_assert (sz == 0);
6418 sz = imm_size (n1);
6419 i.op[n].disps->X_add_number -= sz;
6421 /* We should find the immediate. */
6422 gas_assert (sz != 0);
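/* Example (illustrative): on x86-64 "cmpl $5, foo(%rip)" encodes as
   83 3D <disp32> 05.  The 4-byte displacement field ends one byte
   before the end of the instruction, while the CPU forms the
   RIP-relative address from the end of the whole instruction, so the
   fixup value is biased here by the size of the trailing imm8.  */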
6425 p = frag_more (size);
6426 reloc_type = reloc (size, pcrel, sign, i.reloc[n]);
6427 if (GOT_symbol
6428 && GOT_symbol == i.op[n].disps->X_add_symbol
6429 && (((reloc_type == BFD_RELOC_32
6430 || reloc_type == BFD_RELOC_X86_64_32S
6431 || (reloc_type == BFD_RELOC_64
6432 && object_64bit))
6433 && (i.op[n].disps->X_op == O_symbol
6434 || (i.op[n].disps->X_op == O_add
6435 && ((symbol_get_value_expression
6436 (i.op[n].disps->X_op_symbol)->X_op)
6437 == O_subtract))))
6438 || reloc_type == BFD_RELOC_32_PCREL))
6440 offsetT add;
6442 if (insn_start_frag == frag_now)
6443 add = (p - frag_now->fr_literal) - insn_start_off;
6444 else
6446 fragS *fr;
6448 add = insn_start_frag->fr_fix - insn_start_off;
6449 for (fr = insn_start_frag->fr_next;
6450 fr && fr != frag_now; fr = fr->fr_next)
6451 add += fr->fr_fix;
6452 add += p - frag_now->fr_literal;
6455 if (!object_64bit)
6457 reloc_type = BFD_RELOC_386_GOTPC;
6458 i.op[n].imms->X_add_number += add;
6460 else if (reloc_type == BFD_RELOC_64)
6461 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6462 else
6463 /* Don't do the adjustment for x86-64, as there
6464 the pcrel addressing is relative to the _next_
6465 insn, and that is taken care of in other code. */
6466 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6468 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6469 i.op[n].disps, pcrel, reloc_type);
6475 static void
6476 output_imm (fragS *insn_start_frag, offsetT insn_start_off)
6478 char *p;
6479 unsigned int n;
6481 for (n = 0; n < i.operands; n++)
6483 if (operand_type_check (i.types[n], imm))
6485 if (i.op[n].imms->X_op == O_constant)
6487 int size = imm_size (n);
6488 offsetT val;
6490 val = offset_in_range (i.op[n].imms->X_add_number,
6491 size);
6492 p = frag_more (size);
6493 md_number_to_chars (p, val, size);
6495 else
6497 /* Not absolute_section.
6498 Need a 32-bit fixup (don't support 8bit
6499 non-absolute imms). Try to support other
6500 sizes ... */
6501 enum bfd_reloc_code_real reloc_type;
6502 int size = imm_size (n);
6503 int sign;
6505 if (i.types[n].bitfield.imm32s
6506 && (i.suffix == QWORD_MNEM_SUFFIX
6507 || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
6508 sign = 1;
6509 else
6510 sign = 0;
6512 p = frag_more (size);
6513 reloc_type = reloc (size, 0, sign, i.reloc[n]);
6515 /* This is tough to explain. We end up with this one if we
6516 * have operands that look like
6517 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
6518 * obtain the absolute address of the GOT, and it is strongly
6519 * preferable from a performance point of view to avoid using
6520 * a runtime relocation for this. The actual sequence of
6521 * instructions often looks something like:
6523 * call .L66
6524 * .L66:
6525 * popl %ebx
6526 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
6528 * The call and pop essentially return the absolute address
6529 * of the label .L66 and store it in %ebx. The linker itself
6530 * will ultimately change the first operand of the addl so
6531 * that %ebx points to the GOT, but to keep things simple, the
6532 * .o file must have this operand set so that it generates not
6533 * the absolute address of .L66, but the absolute address of
6534 * itself. This allows the linker itself to simply treat a GOTPC
6535 * relocation as asking for a pcrel offset to the GOT to be
6536 * added in, and the addend of the relocation is stored in the
6537 * operand field for the instruction itself.
6539 * Our job here is to fix the operand so that it would add
6540 * the correct offset so that %ebx would point to itself. The
6541 * thing that is tricky is that .-.L66 will point to the
6542 * beginning of the instruction, so we need to further modify
6543 * the operand so that it will point to itself. There are
6544 * other cases where you have something like:
6546 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
6548 * and here no correction would be required. Internally in
6549 * the assembler we treat operands of this form as not being
6550 * pcrel since the '.' is explicitly mentioned, and I wonder
6551 * whether it would simplify matters to do it this way. Who
6552 * knows. In earlier versions of the PIC patches, the
6553 * pcrel_adjust field was used to store the correction, but
6554 * since the expression is not pcrel, I felt it would be
6555 * confusing to do it this way. */
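/* A concrete instance of the adjustment below (assuming the usual
   81 C3 encoding of "addl $imm32, %ebx"): the opcode and ModRM bytes
   occupy two bytes, so "add" comes out as 2, shifting the reloc's
   addend from the start of the instruction to its immediate field,
   which is exactly what the comment above requires.  */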
6557 if ((reloc_type == BFD_RELOC_32
6558 || reloc_type == BFD_RELOC_X86_64_32S
6559 || reloc_type == BFD_RELOC_64)
6560 && GOT_symbol
6561 && GOT_symbol == i.op[n].imms->X_add_symbol
6562 && (i.op[n].imms->X_op == O_symbol
6563 || (i.op[n].imms->X_op == O_add
6564 && ((symbol_get_value_expression
6565 (i.op[n].imms->X_op_symbol)->X_op)
6566 == O_subtract))))
6568 offsetT add;
6570 if (insn_start_frag == frag_now)
6571 add = (p - frag_now->fr_literal) - insn_start_off;
6572 else
6574 fragS *fr;
6576 add = insn_start_frag->fr_fix - insn_start_off;
6577 for (fr = insn_start_frag->fr_next;
6578 fr && fr != frag_now; fr = fr->fr_next)
6579 add += fr->fr_fix;
6580 add += p - frag_now->fr_literal;
6583 if (!object_64bit)
6584 reloc_type = BFD_RELOC_386_GOTPC;
6585 else if (size == 4)
6586 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6587 else if (size == 8)
6588 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6589 i.op[n].imms->X_add_number += add;
6591 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6592 i.op[n].imms, 0, reloc_type);
6598 /* x86_cons_fix_new is called via the expression parsing code when a
6599 reloc is needed. We use this hook to get the correct .got reloc. */
6600 static enum bfd_reloc_code_real got_reloc = NO_RELOC;
6601 static int cons_sign = -1;
6603 void
6604 x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
6605 expressionS *exp)
6607 enum bfd_reloc_code_real r = reloc (len, 0, cons_sign, got_reloc);
6609 got_reloc = NO_RELOC;
6611 #ifdef TE_PE
6612 if (exp->X_op == O_secrel)
6614 exp->X_op = O_symbol;
6615 r = BFD_RELOC_32_SECREL;
6617 #endif
6619 fix_new_exp (frag, off, len, exp, 0, r);
6622 #if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
6623 || defined (LEX_AT)
6624 # define lex_got(reloc, adjust, types) NULL
6625 #else
6626 /* Parse operands of the form
6627 <symbol>@GOTOFF+<nnn>
6628 and similar .plt or .got references.
6630 If we find one, set up the correct relocation in RELOC and copy the
6631 input string, minus the `@GOTOFF' into a malloc'd buffer for
6632 parsing by the calling routine. Return this buffer, and if ADJUST
6633 is non-null set it to the length of the string we removed from the
6634 input line. Otherwise return NULL. */
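/* Example (following the code below): for the 32-bit input
   "foo@GOTOFF+4", *rel is set to BFD_RELOC_386_GOTOFF, *adjust to 6
   (the length of "GOTOFF"), and the buffer handed back reads
   "foo +4" - the reloc token is replaced by a space so that junk
   such as "foo@GOTOFF1" is still diagnosed.  */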
6635 static char *
6636 lex_got (enum bfd_reloc_code_real *rel,
6637 int *adjust,
6638 i386_operand_type *types)
6640 /* Some of the relocations depend on the size of the field to
6641 be relocated. But in our callers i386_immediate and i386_displacement
6642 we don't yet know the operand size (this will be set by insn
6643 matching). Hence we record the word32 relocation here,
6644 and adjust the reloc according to the real size in reloc(). */
6645 static const struct {
6646 const char *str;
6647 int len;
6648 const enum bfd_reloc_code_real rel[2];
6649 const i386_operand_type types64;
6650 } gotrel[] = {
6651 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real,
6652 BFD_RELOC_X86_64_PLTOFF64 },
6653 OPERAND_TYPE_IMM64 },
6654 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32,
6655 BFD_RELOC_X86_64_PLT32 },
6656 OPERAND_TYPE_IMM32_32S_DISP32 },
6657 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real,
6658 BFD_RELOC_X86_64_GOTPLT64 },
6659 OPERAND_TYPE_IMM64_DISP64 },
6660 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF,
6661 BFD_RELOC_X86_64_GOTOFF64 },
6662 OPERAND_TYPE_IMM64_DISP64 },
6663 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
6664 BFD_RELOC_X86_64_GOTPCREL },
6665 OPERAND_TYPE_IMM32_32S_DISP32 },
6666 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD,
6667 BFD_RELOC_X86_64_TLSGD },
6668 OPERAND_TYPE_IMM32_32S_DISP32 },
6669 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM,
6670 _dummy_first_bfd_reloc_code_real },
6671 OPERAND_TYPE_NONE },
6672 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real,
6673 BFD_RELOC_X86_64_TLSLD },
6674 OPERAND_TYPE_IMM32_32S_DISP32 },
6675 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
6676 BFD_RELOC_X86_64_GOTTPOFF },
6677 OPERAND_TYPE_IMM32_32S_DISP32 },
6678 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32,
6679 BFD_RELOC_X86_64_TPOFF32 },
6680 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6681 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE,
6682 _dummy_first_bfd_reloc_code_real },
6683 OPERAND_TYPE_NONE },
6684 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32,
6685 BFD_RELOC_X86_64_DTPOFF32 },
6686 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6687 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
6688 _dummy_first_bfd_reloc_code_real },
6689 OPERAND_TYPE_NONE },
6690 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
6691 _dummy_first_bfd_reloc_code_real },
6692 OPERAND_TYPE_NONE },
6693 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32,
6694 BFD_RELOC_X86_64_GOT32 },
6695 OPERAND_TYPE_IMM32_32S_64_DISP32 },
6696 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC,
6697 BFD_RELOC_X86_64_GOTPC32_TLSDESC },
6698 OPERAND_TYPE_IMM32_32S_DISP32 },
6699 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL,
6700 BFD_RELOC_X86_64_TLSDESC_CALL },
6701 OPERAND_TYPE_IMM32_32S_DISP32 },
6703 char *cp;
6704 unsigned int j;
6706 #if defined (OBJ_MAYBE_ELF)
6707 if (!IS_ELF)
6708 return NULL;
6709 #endif
6711 for (cp = input_line_pointer; *cp != '@'; cp++)
6712 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
6713 return NULL;
6715 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
6717 int len = gotrel[j].len;
6718 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
6720 if (gotrel[j].rel[object_64bit] != 0)
6722 int first, second;
6723 char *tmpbuf, *past_reloc;
6725 *rel = gotrel[j].rel[object_64bit];
6726 if (adjust)
6727 *adjust = len;
6729 if (types)
6731 if (flag_code != CODE_64BIT)
6733 types->bitfield.imm32 = 1;
6734 types->bitfield.disp32 = 1;
6736 else
6737 *types = gotrel[j].types64;
6740 if (GOT_symbol == NULL)
6741 GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);
6743 /* The length of the first part of our input line. */
6744 first = cp - input_line_pointer;
6746 /* The second part goes from after the reloc token until
6747 (and including) an end_of_line char or comma. */
6748 past_reloc = cp + 1 + len;
6749 cp = past_reloc;
6750 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
6751 ++cp;
6752 second = cp + 1 - past_reloc;
6754 /* Allocate and copy string. The trailing NUL shouldn't
6755 be necessary, but be safe. */
6756 tmpbuf = (char *) xmalloc (first + second + 2);
6757 memcpy (tmpbuf, input_line_pointer, first);
6758 if (second != 0 && *past_reloc != ' ')
6759 /* Replace the relocation token with ' ', so that
6760 errors like foo@GOTOFF1 will be detected. */
6761 tmpbuf[first++] = ' ';
6762 memcpy (tmpbuf + first, past_reloc, second);
6763 tmpbuf[first + second] = '\0';
6764 return tmpbuf;
6767 as_bad (_("@%s reloc is not supported with %d-bit output format"),
6768 gotrel[j].str, 1 << (5 + object_64bit));
6769 return NULL;
6773 /* Might be a symbol version string. Don't as_bad here. */
6774 return NULL;
6776 #endif
6778 void
6779 x86_cons (expressionS *exp, int size)
6781 intel_syntax = -intel_syntax;
6783 exp->X_md = 0;
6784 if (size == 4 || (object_64bit && size == 8))
6786 /* Handle @GOTOFF and the like in an expression. */
6787 char *save;
6788 char *gotfree_input_line;
6789 int adjust = 0;
6791 save = input_line_pointer;
6792 gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
6793 if (gotfree_input_line)
6794 input_line_pointer = gotfree_input_line;
6796 expression (exp);
6798 if (gotfree_input_line)
6800 /* expression () has merrily parsed up to the end of line,
6801 or a comma - in the wrong buffer. Transfer how far
6802 input_line_pointer has moved to the right buffer. */
6803 input_line_pointer = (save
6804 + (input_line_pointer - gotfree_input_line)
6805 + adjust);
6806 free (gotfree_input_line);
6807 if (exp->X_op == O_constant
6808 || exp->X_op == O_absent
6809 || exp->X_op == O_illegal
6810 || exp->X_op == O_register
6811 || exp->X_op == O_big)
6813 char c = *input_line_pointer;
6814 *input_line_pointer = 0;
6815 as_bad (_("missing or invalid expression `%s'"), save);
6816 *input_line_pointer = c;
6820 else
6821 expression (exp);
6823 intel_syntax = -intel_syntax;
6825 if (intel_syntax)
6826 i386_intel_simplify (exp);
6829 static void
6830 signed_cons (int size)
6832 if (flag_code == CODE_64BIT)
6833 cons_sign = 1;
6834 cons (size);
6835 cons_sign = -1;
6838 #ifdef TE_PE
6839 static void
6840 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
6842 expressionS exp;
6846 expression (&exp);
6847 if (exp.X_op == O_symbol)
6848 exp.X_op = O_secrel;
6850 emit_expr (&exp, 4);
6852 while (*input_line_pointer++ == ',');
6854 input_line_pointer--;
6855 demand_empty_rest_of_line ();
6857 #endif
6859 static int
6860 i386_immediate (char *imm_start)
6862 char *save_input_line_pointer;
6863 char *gotfree_input_line;
6864 segT exp_seg = 0;
6865 expressionS *exp;
6866 i386_operand_type types;
6868 operand_type_set (&types, ~0);
6870 if (i.imm_operands == MAX_IMMEDIATE_OPERANDS)
6872 as_bad (_("at most %d immediate operands are allowed"),
6873 MAX_IMMEDIATE_OPERANDS);
6874 return 0;
6877 exp = &im_expressions[i.imm_operands++];
6878 i.op[this_operand].imms = exp;
6880 if (is_space_char (*imm_start))
6881 ++imm_start;
6883 save_input_line_pointer = input_line_pointer;
6884 input_line_pointer = imm_start;
6886 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
6887 if (gotfree_input_line)
6888 input_line_pointer = gotfree_input_line;
6890 exp_seg = expression (exp);
6892 SKIP_WHITESPACE ();
6893 if (*input_line_pointer)
6894 as_bad (_("junk `%s' after expression"), input_line_pointer);
6896 input_line_pointer = save_input_line_pointer;
6897 if (gotfree_input_line)
6899 free (gotfree_input_line);
6901 if (exp->X_op == O_constant || exp->X_op == O_register)
6902 exp->X_op = O_illegal;
6905 return i386_finalize_immediate (exp_seg, exp, types, imm_start);
6908 static int
6909 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
6910 i386_operand_type types, const char *imm_start)
6912 if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
6914 if (imm_start)
6915 as_bad (_("missing or invalid immediate expression `%s'"),
6916 imm_start);
6917 return 0;
6919 else if (exp->X_op == O_constant)
6921 /* Size it properly later. */
6922 i.types[this_operand].bitfield.imm64 = 1;
6923 /* If not 64bit, sign extend val. */
6924 if (flag_code != CODE_64BIT
6925 && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
6926 exp->X_add_number
6927 = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
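/* The xor/subtract pair above sign-extends the low 32 bits: for
   example 0xfffffffe becomes (addressT) -2, while 0x7fffffff is
   left unchanged.  */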
6929 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
6930 else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
6931 && exp_seg != absolute_section
6932 && exp_seg != text_section
6933 && exp_seg != data_section
6934 && exp_seg != bss_section
6935 && exp_seg != undefined_section
6936 && !bfd_is_com_section (exp_seg))
6938 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
6939 return 0;
6941 #endif
6942 else if (!intel_syntax && exp->X_op == O_register)
6944 if (imm_start)
6945 as_bad (_("illegal immediate register operand %s"), imm_start);
6946 return 0;
6948 else
6950 /* This is an address. The size of the address will be
6951 determined later, depending on destination register,
6952 suffix, or the default for the section. */
6953 i.types[this_operand].bitfield.imm8 = 1;
6954 i.types[this_operand].bitfield.imm16 = 1;
6955 i.types[this_operand].bitfield.imm32 = 1;
6956 i.types[this_operand].bitfield.imm32s = 1;
6957 i.types[this_operand].bitfield.imm64 = 1;
6958 i.types[this_operand] = operand_type_and (i.types[this_operand],
6959 types);
6962 return 1;
6965 static char *
6966 i386_scale (char *scale)
6968 offsetT val;
6969 char *save = input_line_pointer;
6971 input_line_pointer = scale;
6972 val = get_absolute_expression ();
6974 switch (val)
6976 case 1:
6977 i.log2_scale_factor = 0;
6978 break;
6979 case 2:
6980 i.log2_scale_factor = 1;
6981 break;
6982 case 4:
6983 i.log2_scale_factor = 2;
6984 break;
6985 case 8:
6986 i.log2_scale_factor = 3;
6987 break;
6988 default:
6990 char sep = *input_line_pointer;
6992 *input_line_pointer = '\0';
6993 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
6994 scale);
6995 *input_line_pointer = sep;
6996 input_line_pointer = save;
6997 return NULL;
7000 if (i.log2_scale_factor != 0 && i.index_reg == 0)
7002 as_warn (_("scale factor of %d without an index register"),
7003 1 << i.log2_scale_factor);
7004 i.log2_scale_factor = 0;
7006 scale = input_line_pointer;
7007 input_line_pointer = save;
7008 return scale;
7011 static int
7012 i386_displacement (char *disp_start, char *disp_end)
7014 expressionS *exp;
7015 segT exp_seg = 0;
7016 char *save_input_line_pointer;
7017 char *gotfree_input_line;
7018 int override;
7019 i386_operand_type bigdisp, types = anydisp;
7020 int ret;
7022 if (i.disp_operands == MAX_MEMORY_OPERANDS)
7024 as_bad (_("at most %d displacement operands are allowed"),
7025 MAX_MEMORY_OPERANDS);
7026 return 0;
7029 operand_type_set (&bigdisp, 0);
7030 if ((i.types[this_operand].bitfield.jumpabsolute)
7031 || (!current_templates->start->opcode_modifier.jump
7032 && !current_templates->start->opcode_modifier.jumpdword))
7034 bigdisp.bitfield.disp32 = 1;
7035 override = (i.prefix[ADDR_PREFIX] != 0);
7036 if (flag_code == CODE_64BIT)
7038 if (!override)
7040 bigdisp.bitfield.disp32s = 1;
7041 bigdisp.bitfield.disp64 = 1;
7044 else if ((flag_code == CODE_16BIT) ^ override)
7046 bigdisp.bitfield.disp32 = 0;
7047 bigdisp.bitfield.disp16 = 1;
7050 else
7052 /* For PC-relative branches, the width of the displacement
7053 is dependent upon data size, not address size. */
7054 override = (i.prefix[DATA_PREFIX] != 0);
7055 if (flag_code == CODE_64BIT)
7057 if (override || i.suffix == WORD_MNEM_SUFFIX)
7058 bigdisp.bitfield.disp16 = 1;
7059 else
7061 bigdisp.bitfield.disp32 = 1;
7062 bigdisp.bitfield.disp32s = 1;
7065 else
7067 if (!override)
7068 override = (i.suffix == (flag_code != CODE_16BIT
7069 ? WORD_MNEM_SUFFIX
7070 : LONG_MNEM_SUFFIX));
7071 bigdisp.bitfield.disp32 = 1;
7072 if ((flag_code == CODE_16BIT) ^ override)
7074 bigdisp.bitfield.disp32 = 0;
7075 bigdisp.bitfield.disp16 = 1;
7079 i.types[this_operand] = operand_type_or (i.types[this_operand],
7080 bigdisp);
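/* Net effect (illustrative): in 64-bit code a plain memory operand
   admits Disp32S and Disp64, while a 67 address-size prefix leaves
   only Disp32; 16-bit addressing (16-bit code without the prefix, or
   32-bit code with it) yields Disp16.  For PC-relative branches the
   width instead follows the data size, so a 66 prefix or a 'w'
   suffix selects Disp16.  */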
7082 exp = &disp_expressions[i.disp_operands];
7083 i.op[this_operand].disps = exp;
7084 i.disp_operands++;
7085 save_input_line_pointer = input_line_pointer;
7086 input_line_pointer = disp_start;
7087 END_STRING_AND_SAVE (disp_end);
7089 #ifndef GCC_ASM_O_HACK
7090 #define GCC_ASM_O_HACK 0
7091 #endif
7092 #if GCC_ASM_O_HACK
7093 END_STRING_AND_SAVE (disp_end + 1);
7094 if (i.types[this_operand].bitfield.baseIndex
7095 && displacement_string_end[-1] == '+')
7097 /* This hack is to avoid a warning when using the "o"
7098 constraint within gcc asm statements.
7099 For instance:
7101 #define _set_tssldt_desc(n,addr,limit,type) \
7102 __asm__ __volatile__ ( \
7103 "movw %w2,%0\n\t" \
7104 "movw %w1,2+%0\n\t" \
7105 "rorl $16,%1\n\t" \
7106 "movb %b1,4+%0\n\t" \
7107 "movb %4,5+%0\n\t" \
7108 "movb $0,6+%0\n\t" \
7109 "movb %h1,7+%0\n\t" \
7110 "rorl $16,%1" \
7111 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
7113 This works great except that the assembler output ends
7114 up looking a bit weird if it turns out that there is
7115 no offset. You end up producing code that looks like:
7117 #APP
7118 movw $235,(%eax)
7119 movw %dx,2+(%eax)
7120 rorl $16,%edx
7121 movb %dl,4+(%eax)
7122 movb $137,5+(%eax)
7123 movb $0,6+(%eax)
7124 movb %dh,7+(%eax)
7125 rorl $16,%edx
7126 #NO_APP
7128 So here we provide the missing zero. */
7130 *displacement_string_end = '0';
7132 #endif
7133 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
7134 if (gotfree_input_line)
7135 input_line_pointer = gotfree_input_line;
7137 exp_seg = expression (exp);
7139 SKIP_WHITESPACE ();
7140 if (*input_line_pointer)
7141 as_bad (_("junk `%s' after expression"), input_line_pointer);
7142 #if GCC_ASM_O_HACK
7143 RESTORE_END_STRING (disp_end + 1);
7144 #endif
7145 input_line_pointer = save_input_line_pointer;
7146 if (gotfree_input_line)
7148 free (gotfree_input_line);
7150 if (exp->X_op == O_constant || exp->X_op == O_register)
7151 exp->X_op = O_illegal;
7154 ret = i386_finalize_displacement (exp_seg, exp, types, disp_start);
7156 RESTORE_END_STRING (disp_end);
7158 return ret;
7161 static int
7162 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
7163 i386_operand_type types, const char *disp_start)
7165 i386_operand_type bigdisp;
7166 int ret = 1;
7168 /* We do this to make sure that the section symbol is in
7169 the symbol table. We will ultimately change the relocation
7170 to be relative to the beginning of the section. */
7171 if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF
7172 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL
7173 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
7175 if (exp->X_op != O_symbol)
7176 goto inv_disp;
7178 if (S_IS_LOCAL (exp->X_add_symbol)
7179 && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section
7180 && S_GET_SEGMENT (exp->X_add_symbol) != expr_section)
7181 section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
7182 exp->X_op = O_subtract;
7183 exp->X_op_symbol = GOT_symbol;
7184 if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL)
7185 i.reloc[this_operand] = BFD_RELOC_32_PCREL;
7186 else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
7187 i.reloc[this_operand] = BFD_RELOC_64;
7188 else
7189 i.reloc[this_operand] = BFD_RELOC_32;
7192 else if (exp->X_op == O_absent
7193 || exp->X_op == O_illegal
7194 || exp->X_op == O_big)
7196 inv_disp:
7197 as_bad (_("missing or invalid displacement expression `%s'"),
7198 disp_start);
7199 ret = 0;
7202 else if (flag_code == CODE_64BIT
7203 && !i.prefix[ADDR_PREFIX]
7204 && exp->X_op == O_constant)
7206 /* Since the displacement is sign-extended to 64bit, don't allow
7207 disp32, and turn off disp32s if the value is out of range. */
7208 i.types[this_operand].bitfield.disp32 = 0;
7209 if (!fits_in_signed_long (exp->X_add_number))
7211 i.types[this_operand].bitfield.disp32s = 0;
7212 if (i.types[this_operand].bitfield.baseindex)
7214 as_bad (_("0x%lx out of range of signed 32bit displacement"),
7215 (long) exp->X_add_number);
7216 ret = 0;
7221 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
7222 else if (exp->X_op != O_constant
7223 && OUTPUT_FLAVOR == bfd_target_aout_flavour
7224 && exp_seg != absolute_section
7225 && exp_seg != text_section
7226 && exp_seg != data_section
7227 && exp_seg != bss_section
7228 && exp_seg != undefined_section
7229 && !bfd_is_com_section (exp_seg))
7231 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
7232 ret = 0;
7234 #endif
7236 /* Check if this is a displacement only operand. */
7237 bigdisp = i.types[this_operand];
7238 bigdisp.bitfield.disp8 = 0;
7239 bigdisp.bitfield.disp16 = 0;
7240 bigdisp.bitfield.disp32 = 0;
7241 bigdisp.bitfield.disp32s = 0;
7242 bigdisp.bitfield.disp64 = 0;
7243 if (operand_type_all_zero (&bigdisp))
7244 i.types[this_operand] = operand_type_and (i.types[this_operand],
7245 types);
7247 return ret;
7250 /* Make sure the memory operand we've been dealt is valid.
7251 Return 1 on success, 0 on a failure. */
7253 static int
7254 i386_index_check (const char *operand_string)
7256 int ok;
7257 const char *kind = "base/index";
7258 #if INFER_ADDR_PREFIX
7259 int fudged = 0;
7261 tryprefix:
7262 #endif
7263 ok = 1;
7264 if (current_templates->start->opcode_modifier.isstring
7265 && !current_templates->start->opcode_modifier.immext
7266 && (current_templates->end[-1].opcode_modifier.isstring
7267 || i.mem_operands))
7269 /* Memory operands of string insns are special in that they only allow
7270 a single register (rDI, rSI, or rBX) as their memory address. */
7271 unsigned int expected;
7273 kind = "string address";
7275 if (current_templates->start->opcode_modifier.w)
7277 i386_operand_type type = current_templates->end[-1].operand_types[0];
7279 if (!type.bitfield.baseindex
7280 || ((!i.mem_operands != !intel_syntax)
7281 && current_templates->end[-1].operand_types[1]
7282 .bitfield.baseindex))
7283 type = current_templates->end[-1].operand_types[1];
7284 expected = type.bitfield.esseg ? 7 /* rDI */ : 6 /* rSI */;
7286 else
7287 expected = 3 /* rBX */;
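/* For instance: "scas" implicitly addresses %es:(%edi), so rDI is
   expected; "lods" reads (%esi), so rSI; and "xlat", the template
   without the W modifier, uses (%ebx), hence rBX.  */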
7289 if (!i.base_reg || i.index_reg
7290 || operand_type_check (i.types[this_operand], disp))
7291 ok = -1;
7292 else if (!(flag_code == CODE_64BIT
7293 ? i.prefix[ADDR_PREFIX]
7294 ? i.base_reg->reg_type.bitfield.reg32
7295 : i.base_reg->reg_type.bitfield.reg64
7296 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
7297 ? i.base_reg->reg_type.bitfield.reg32
7298 : i.base_reg->reg_type.bitfield.reg16))
7299 ok = 0;
7300 else if (i.base_reg->reg_num != expected)
7301 ok = -1;
7303 if (ok < 0)
7305 unsigned int j;
7307 for (j = 0; j < i386_regtab_size; ++j)
7308 if ((flag_code == CODE_64BIT
7309 ? i.prefix[ADDR_PREFIX]
7310 ? i386_regtab[j].reg_type.bitfield.reg32
7311 : i386_regtab[j].reg_type.bitfield.reg64
7312 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
7313 ? i386_regtab[j].reg_type.bitfield.reg32
7314 : i386_regtab[j].reg_type.bitfield.reg16)
7315 && i386_regtab[j].reg_num == expected)
7316 break;
7317 gas_assert (j < i386_regtab_size);
7318 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
7319 operand_string,
7320 intel_syntax ? '[' : '(',
7321 register_prefix,
7322 i386_regtab[j].reg_name,
7323 intel_syntax ? ']' : ')');
7324 ok = 1;
7327 else if (flag_code == CODE_64BIT)
7329 if ((i.base_reg
7330 && ((i.prefix[ADDR_PREFIX] == 0
7331 && !i.base_reg->reg_type.bitfield.reg64)
7332 || (i.prefix[ADDR_PREFIX]
7333 && !i.base_reg->reg_type.bitfield.reg32))
7334 && (i.index_reg
7335 || i.base_reg->reg_num !=
7336 (i.prefix[ADDR_PREFIX] == 0 ? RegRip : RegEip)))
7337 || (i.index_reg
7338 && !(i.index_reg->reg_type.bitfield.regxmm
7339 || i.index_reg->reg_type.bitfield.regymm)
7340 && (!i.index_reg->reg_type.bitfield.baseindex
7341 || (i.prefix[ADDR_PREFIX] == 0
7342 && i.index_reg->reg_num != RegRiz
7343 && !i.index_reg->reg_type.bitfield.reg64
7345 || (i.prefix[ADDR_PREFIX]
7346 && i.index_reg->reg_num != RegEiz
7347 && !i.index_reg->reg_type.bitfield.reg32))))
7348 ok = 0;
7350 else
7352 if ((flag_code == CODE_16BIT) ^ (i.prefix[ADDR_PREFIX] != 0))
7354 /* 16bit checks. */
7355 if ((i.base_reg
7356 && (!i.base_reg->reg_type.bitfield.reg16
7357 || !i.base_reg->reg_type.bitfield.baseindex))
7358 || (i.index_reg
7359 && (!i.index_reg->reg_type.bitfield.reg16
7360 || !i.index_reg->reg_type.bitfield.baseindex
7361 || !(i.base_reg
7362 && i.base_reg->reg_num < 6
7363 && i.index_reg->reg_num >= 6
7364 && i.log2_scale_factor == 0))))
7365 ok = 0;
7367 else
7369 /* 32bit checks. */
7370 if ((i.base_reg
7371 && !i.base_reg->reg_type.bitfield.reg32)
7372 || (i.index_reg
7373 && !i.index_reg->reg_type.bitfield.regxmm
7374 && !i.index_reg->reg_type.bitfield.regymm
7375 && ((!i.index_reg->reg_type.bitfield.reg32
7376 && i.index_reg->reg_num != RegEiz)
7377 || !i.index_reg->reg_type.bitfield.baseindex)))
7378 ok = 0;
7381 if (!ok)
7383 #if INFER_ADDR_PREFIX
7384 if (!i.mem_operands && !i.prefix[ADDR_PREFIX])
7386 i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
7387 i.prefixes += 1;
7388 /* Change the size of any displacement too. At most one of
7389 Disp16 or Disp32 is set.
7390 FIXME. There doesn't seem to be any real need for separate
7391 Disp16 and Disp32 flags. The same goes for Imm16 and Imm32.
7392 Removing them would probably clean up the code quite a lot. */
7393 if (flag_code != CODE_64BIT
7394 && (i.types[this_operand].bitfield.disp16
7395 || i.types[this_operand].bitfield.disp32))
7396 i.types[this_operand]
7397 = operand_type_xor (i.types[this_operand], disp16_32);
7398 fudged = 1;
7399 goto tryprefix;
7401 if (fudged)
7402 as_bad (_("`%s' is not a valid %s expression"),
7403 operand_string,
7404 kind);
7405 else
7406 #endif
7407 as_bad (_("`%s' is not a valid %s-bit %s expression"),
7408 operand_string,
7409 flag_code_names[i.prefix[ADDR_PREFIX]
7410 ? flag_code == CODE_32BIT
7411 ? CODE_16BIT
7412 : CODE_32BIT
7413 : flag_code],
7414 kind);
7416 return ok;
7419 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
7420 on error. */
7422 static int
7423 i386_att_operand (char *operand_string)
7425 const reg_entry *r;
7426 char *end_op;
7427 char *op_string = operand_string;
7429 if (is_space_char (*op_string))
7430 ++op_string;
7432 /* We check for an absolute prefix (differentiating,
7433 for example, 'jmp pc_relative_label' from 'jmp *absolute_label'). */
7434 if (*op_string == ABSOLUTE_PREFIX)
7436 ++op_string;
7437 if (is_space_char (*op_string))
7438 ++op_string;
7439 i.types[this_operand].bitfield.jumpabsolute = 1;
7442 /* Check if operand is a register. */
7443 if ((r = parse_register (op_string, &end_op)) != NULL)
7445 i386_operand_type temp;
7447 /* Check for a segment override by searching for ':' after a
7448 segment register. */
7449 op_string = end_op;
7450 if (is_space_char (*op_string))
7451 ++op_string;
7452 if (*op_string == ':'
7453 && (r->reg_type.bitfield.sreg2
7454 || r->reg_type.bitfield.sreg3))
7456 switch (r->reg_num)
7458 case 0:
7459 i.seg[i.mem_operands] = &es;
7460 break;
7461 case 1:
7462 i.seg[i.mem_operands] = &cs;
7463 break;
7464 case 2:
7465 i.seg[i.mem_operands] = &ss;
7466 break;
7467 case 3:
7468 i.seg[i.mem_operands] = &ds;
7469 break;
7470 case 4:
7471 i.seg[i.mem_operands] = &fs;
7472 break;
7473 case 5:
7474 i.seg[i.mem_operands] = &gs;
7475 break;
7478 /* Skip the ':' and whitespace. */
7479 ++op_string;
7480 if (is_space_char (*op_string))
7481 ++op_string;
7483 if (!is_digit_char (*op_string)
7484 && !is_identifier_char (*op_string)
7485 && *op_string != '('
7486 && *op_string != ABSOLUTE_PREFIX)
7488 as_bad (_("bad memory operand `%s'"), op_string);
7489 return 0;
7491 /* Handle case of %es:*foo. */
7492 if (*op_string == ABSOLUTE_PREFIX)
7494 ++op_string;
7495 if (is_space_char (*op_string))
7496 ++op_string;
7497 i.types[this_operand].bitfield.jumpabsolute = 1;
7499 goto do_memory_reference;
7501 if (*op_string)
7503 as_bad (_("junk `%s' after register"), op_string);
7504 return 0;
7506 temp = r->reg_type;
7507 temp.bitfield.baseindex = 0;
7508 i.types[this_operand] = operand_type_or (i.types[this_operand],
7509 temp);
7510 i.types[this_operand].bitfield.unspecified = 0;
7511 i.op[this_operand].regs = r;
7512 i.reg_operands++;
7514 else if (*op_string == REGISTER_PREFIX)
7516 as_bad (_("bad register name `%s'"), op_string);
7517 return 0;
7519 else if (*op_string == IMMEDIATE_PREFIX)
7521 ++op_string;
7522 if (i.types[this_operand].bitfield.jumpabsolute)
7524 as_bad (_("immediate operand illegal with absolute jump"));
7525 return 0;
7527 if (!i386_immediate (op_string))
7528 return 0;
7530 else if (is_digit_char (*op_string)
7531 || is_identifier_char (*op_string)
7532 || *op_string == '(')
7534 /* This is a memory reference of some sort. */
7535 char *base_string;
7537 /* Start and end of displacement string expression (if found). */
7538 char *displacement_string_start;
7539 char *displacement_string_end;
7541 do_memory_reference:
7542 if ((i.mem_operands == 1
7543 && !current_templates->start->opcode_modifier.isstring)
7544 || i.mem_operands == 2)
7546 as_bad (_("too many memory references for `%s'"),
7547 current_templates->start->name);
7548 return 0;
7551 /* Check for base index form. We detect the base index form by
7552 looking for an ')' at the end of the operand, searching
7553 for the '(' matching it, and finding a REGISTER_PREFIX or ','
7554 after the '('. */
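/* For example, in "-4(%ebp,%esi,4)" the displacement expression is
   "-4", the base register %ebp, the index %esi, and the scale 4
   (log2_scale_factor 2).  */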
7555 base_string = op_string + strlen (op_string);
7557 --base_string;
7558 if (is_space_char (*base_string))
7559 --base_string;
7561 /* If we only have a displacement, set up for it to be parsed later. */
7562 displacement_string_start = op_string;
7563 displacement_string_end = base_string + 1;
7565 if (*base_string == ')')
7567 char *temp_string;
7568 unsigned int parens_balanced = 1;
7569 /* We've already checked that the number of left & right ()'s is
7570 equal, so this loop will not be infinite. */
7573 base_string--;
7574 if (*base_string == ')')
7575 parens_balanced++;
7576 if (*base_string == '(')
7577 parens_balanced--;
7579 while (parens_balanced);
7581 temp_string = base_string;
7583 /* Skip past '(' and whitespace. */
7584 ++base_string;
7585 if (is_space_char (*base_string))
7586 ++base_string;
7588 if (*base_string == ','
7589 || ((i.base_reg = parse_register (base_string, &end_op))
7590 != NULL))
7592 displacement_string_end = temp_string;
7594 i.types[this_operand].bitfield.baseindex = 1;
7596 if (i.base_reg)
7598 base_string = end_op;
7599 if (is_space_char (*base_string))
7600 ++base_string;
7603 /* There may be an index reg or scale factor here. */
7604 if (*base_string == ',')
7606 ++base_string;
7607 if (is_space_char (*base_string))
7608 ++base_string;
7610 if ((i.index_reg = parse_register (base_string, &end_op))
7611 != NULL)
7613 base_string = end_op;
7614 if (is_space_char (*base_string))
7615 ++base_string;
7616 if (*base_string == ',')
7618 ++base_string;
7619 if (is_space_char (*base_string))
7620 ++base_string;
7622 else if (*base_string != ')')
7624 as_bad (_("expecting `,' or `)' "
7625 "after index register in `%s'"),
7626 operand_string);
7627 return 0;
7630 else if (*base_string == REGISTER_PREFIX)
7632 as_bad (_("bad register name `%s'"), base_string);
7633 return 0;
7636 /* Check for scale factor. */
7637 if (*base_string != ')')
7639 char *end_scale = i386_scale (base_string);
7641 if (!end_scale)
7642 return 0;
7644 base_string = end_scale;
7645 if (is_space_char (*base_string))
7646 ++base_string;
7647 if (*base_string != ')')
7649 as_bad (_("expecting `)' "
7650 "after scale factor in `%s'"),
7651 operand_string);
7652 return 0;
7655 else if (!i.index_reg)
7657 as_bad (_("expecting index register or scale factor "
7658 "after `,'; got '%c'"),
7659 *base_string);
7660 return 0;
7663 else if (*base_string != ')')
7665 as_bad (_("expecting `,' or `)' "
7666 "after base register in `%s'"),
7667 operand_string);
7668 return 0;
7671 else if (*base_string == REGISTER_PREFIX)
7673 as_bad (_("bad register name `%s'"), base_string);
7674 return 0;
7678 /* If there's an expression beginning the operand, parse it,
7679 assuming displacement_string_start and
7680 displacement_string_end are meaningful. */
7681 if (displacement_string_start != displacement_string_end)
7683 if (!i386_displacement (displacement_string_start,
7684 displacement_string_end))
7685 return 0;
7688 /* Special case for (%dx) while doing input/output op. */
7689 if (i.base_reg
7690 && operand_type_equal (&i.base_reg->reg_type,
7691 &reg16_inoutportreg)
7692 && i.index_reg == 0
7693 && i.log2_scale_factor == 0
7694 && i.seg[i.mem_operands] == 0
7695 && !operand_type_check (i.types[this_operand], disp))
7697 i.types[this_operand] = inoutportreg;
7698 return 1;
7701 if (i386_index_check (operand_string) == 0)
7702 return 0;
7703 i.types[this_operand].bitfield.mem = 1;
7704 i.mem_operands++;
7706 else
7708 /* It's not a memory operand; argh! */
7709 as_bad (_("invalid char %s beginning operand %d `%s'"),
7710 output_invalid (*op_string),
7711 this_operand + 1,
7712 op_string);
7713 return 0;
7715 return 1; /* Normal return. */
7718 /* md_estimate_size_before_relax()
7720 Called just before relax() for rs_machine_dependent frags. The x86
7721 assembler uses these frags to handle variable size jump
7722 instructions.
7724 Any symbol that is now undefined will not become defined.
7725 Return the correct fr_subtype in the frag.
7726 Return the initial "guess for variable size of frag" to caller.
7727 The guess is actually the growth beyond the fixed part. Whatever
7728 we do to grow the fixed or variable part contributes to our
7729 returned value. */
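/* For instance, a "jmp" to an external symbol cannot be relaxed: the
   short 0xEB form below is rewritten to 0xE9 with a 4-byte (or, in
   16-bit code, 2-byte) PC-relative fixup and the frag is waned.  A
   conditional 0x7N jump is likewise promoted to 0x0F,0x8N unless
   no_cond_jump_promotion is in effect, in which case only a 1-byte
   pcrel fix is emitted.  */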
7731 int
7732 md_estimate_size_before_relax (fragS *fragP, segT segment)
7734 /* We've already got fragP->fr_subtype right; all we have to do is
7735 check for un-relaxable symbols. On an ELF system, we can't relax
7736 an externally visible symbol, because it may be overridden by a
7737 shared library. */
7738 if (S_GET_SEGMENT (fragP->fr_symbol) != segment
7739 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7740 || (IS_ELF
7741 && (S_IS_EXTERNAL (fragP->fr_symbol)
7742 || S_IS_WEAK (fragP->fr_symbol)
7743 || ((symbol_get_bfdsym (fragP->fr_symbol)->flags
7744 & BSF_GNU_INDIRECT_FUNCTION))))
7745 #endif
7746 #if defined (OBJ_COFF) && defined (TE_PE)
7747 || (OUTPUT_FLAVOR == bfd_target_coff_flavour
7748 && S_IS_WEAK (fragP->fr_symbol))
7749 #endif
7752 /* Symbol is undefined in this segment, or we need to keep a
7753 reloc so that weak symbols can be overridden. */
7754 int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
7755 enum bfd_reloc_code_real reloc_type;
7756 unsigned char *opcode;
7757 int old_fr_fix;
7759 if (fragP->fr_var != NO_RELOC)
7760 reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
7761 else if (size == 2)
7762 reloc_type = BFD_RELOC_16_PCREL;
7763 else
7764 reloc_type = BFD_RELOC_32_PCREL;
7766 old_fr_fix = fragP->fr_fix;
7767 opcode = (unsigned char *) fragP->fr_opcode;
7769 switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
7771 case UNCOND_JUMP:
7772 /* Make jmp (0xeb) a (d)word displacement jump. */
7773 opcode[0] = 0xe9;
7774 fragP->fr_fix += size;
7775 fix_new (fragP, old_fr_fix, size,
7776 fragP->fr_symbol,
7777 fragP->fr_offset, 1,
7778 reloc_type);
7779 break;
7781 case COND_JUMP86:
7782 if (size == 2
7783 && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
7785 /* Negate the condition, and branch past an
7786 unconditional jump. */
7787 opcode[0] ^= 1;
7788 opcode[1] = 3;
7789 /* Insert an unconditional jump. */
7790 opcode[2] = 0xe9;
7791 /* We added two extra opcode bytes, and have a two byte
7792 offset. */
7793 fragP->fr_fix += 2 + 2;
7794 fix_new (fragP, old_fr_fix + 2, 2,
7795 fragP->fr_symbol,
7796 fragP->fr_offset, 1,
7797 reloc_type);
7798 break;
7800 /* Fall through. */
7802 case COND_JUMP:
7803 if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
7805 fixS *fixP;
7807 fragP->fr_fix += 1;
7808 fixP = fix_new (fragP, old_fr_fix, 1,
7809 fragP->fr_symbol,
7810 fragP->fr_offset, 1,
7811 BFD_RELOC_8_PCREL);
7812 fixP->fx_signed = 1;
7813 break;
7816 /* This changes the byte-displacement jump 0x7N
7817 to the (d)word-displacement jump 0x0f,0x8N. */
7818 opcode[1] = opcode[0] + 0x10;
7819 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7820 /* We've added an opcode byte. */
7821 fragP->fr_fix += 1 + size;
7822 fix_new (fragP, old_fr_fix + 1, size,
7823 fragP->fr_symbol,
7824 fragP->fr_offset, 1,
7825 reloc_type);
7826 break;
7828 default:
7829 BAD_CASE (fragP->fr_subtype);
7830 break;
7832 frag_wane (fragP);
7833 return fragP->fr_fix - old_fr_fix;
7836 /* Guess size depending on current relax state. Initially the relax
7837 state will correspond to a short jump and we return 1, because
7838 the variable part of the frag (the branch offset) is one byte
7839 long. However, we can relax a section more than once and in that
7840 case we must either set fr_subtype back to the unrelaxed state,
7841 or return the value for the appropriate branch. */
7842 return md_relax_table[fragP->fr_subtype].rlx_length;
7845 /* Called after relax() is finished.
7847 In: Address of frag.
7848 fr_type == rs_machine_dependent.
7849 fr_subtype is what the address relaxed to.
7851 Out: Any fixSs and constants are set up.
7852 Caller will turn frag into a ".space 0". */
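/* Example (illustrative): a conditional jump relaxed to
   ENCODE_RELAX_STATE (COND_JUMP, BIG) grows by 5 bytes (0x0F,0x8N
   plus a 4-byte displacement); the value stored is the target minus
   the address of the following instruction, which is why "extension"
   is subtracted below.  */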
7854 void
7855 md_convert_frag (bfd *abfd ATTRIBUTE_UNUSED, segT sec ATTRIBUTE_UNUSED,
7856 fragS *fragP)
7858 unsigned char *opcode;
7859 unsigned char *where_to_put_displacement = NULL;
7860 offsetT target_address;
7861 offsetT opcode_address;
7862 unsigned int extension = 0;
7863 offsetT displacement_from_opcode_start;
7865 opcode = (unsigned char *) fragP->fr_opcode;
7867 /* Address we want to reach in file space. */
7868 target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset;
7870 /* Address opcode resides at in file space. */
7871 opcode_address = fragP->fr_address + fragP->fr_fix;
7873 /* Displacement from opcode start to fill into instruction. */
7874 displacement_from_opcode_start = target_address - opcode_address;
7876 if ((fragP->fr_subtype & BIG) == 0)
7878 /* Don't have to change opcode. */
7879 extension = 1; /* 1 opcode + 1 displacement */
7880 where_to_put_displacement = &opcode[1];
7882 else
7884 if (no_cond_jump_promotion
7885 && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
7886 as_warn_where (fragP->fr_file, fragP->fr_line,
7887 _("long jump required"));
7889 switch (fragP->fr_subtype)
7891 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
7892 extension = 4; /* 1 opcode + 4 displacement */
7893 opcode[0] = 0xe9;
7894 where_to_put_displacement = &opcode[1];
7895 break;
7897 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
7898 extension = 2; /* 1 opcode + 2 displacement */
7899 opcode[0] = 0xe9;
7900 where_to_put_displacement = &opcode[1];
7901 break;
7903 case ENCODE_RELAX_STATE (COND_JUMP, BIG):
7904 case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
7905 extension = 5; /* 2 opcode + 4 displacement */
7906 opcode[1] = opcode[0] + 0x10;
7907 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7908 where_to_put_displacement = &opcode[2];
7909 break;
7911 case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
7912 extension = 3; /* 2 opcode + 2 displacement */
7913 opcode[1] = opcode[0] + 0x10;
7914 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7915 where_to_put_displacement = &opcode[2];
7916 break;
7918 case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
7919 extension = 4;
7920 opcode[0] ^= 1;
7921 opcode[1] = 3;
7922 opcode[2] = 0xe9;
7923 where_to_put_displacement = &opcode[3];
7924 break;
7926 default:
7927 BAD_CASE (fragP->fr_subtype);
7928 break;
7932 /* If the size is less than four we are sure that the operand fits,
7933 but if it's 4, then it could be that the displacement is larger
7934 than +/- 2GB. */
7935 if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
7936 && object_64bit
7937 && ((addressT) (displacement_from_opcode_start - extension
7938 + ((addressT) 1 << 31))
7939 > (((addressT) 2 << 31) - 1)))
7941 as_bad_where (fragP->fr_file, fragP->fr_line,
7942 _("jump target out of range"));
7943 /* Make us emit 0. */
7944 displacement_from_opcode_start = extension;
7946 /* Now put displacement after opcode. */
7947 md_number_to_chars ((char *) where_to_put_displacement,
7948 (valueT) (displacement_from_opcode_start - extension),
7949 DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
7950 fragP->fr_fix += extension;
7953 /* Apply a fixup (fixP) to segment data, once it has been determined
7954 by our caller that we have all the info we need to fix it up.
7956 Parameter valP is the pointer to the value of the bits.
7958 On the 386, immediates, displacements, and data pointers are all in
7959 the same (little-endian) format, so we don't need to care about which
7960 we are handling. */
7962 void
7963 md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED)
7965 char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
7966 valueT value = *valP;
7968 #if !defined (TE_Mach)
7969 if (fixP->fx_pcrel)
7971 switch (fixP->fx_r_type)
7973 default:
7974 break;
7976 case BFD_RELOC_64:
7977 fixP->fx_r_type = BFD_RELOC_64_PCREL;
7978 break;
7979 case BFD_RELOC_32:
7980 case BFD_RELOC_X86_64_32S:
7981 fixP->fx_r_type = BFD_RELOC_32_PCREL;
7982 break;
7983 case BFD_RELOC_16:
7984 fixP->fx_r_type = BFD_RELOC_16_PCREL;
7985 break;
7986 case BFD_RELOC_8:
7987 fixP->fx_r_type = BFD_RELOC_8_PCREL;
7988 break;
7992 if (fixP->fx_addsy != NULL
7993 && (fixP->fx_r_type == BFD_RELOC_32_PCREL
7994 || fixP->fx_r_type == BFD_RELOC_64_PCREL
7995 || fixP->fx_r_type == BFD_RELOC_16_PCREL
7996 || fixP->fx_r_type == BFD_RELOC_8_PCREL)
7997 && !use_rela_relocations)
7999 /* This is a hack. There should be a better way to handle this.
8000 This covers for the fact that bfd_install_relocation will
8001 subtract the current location (for partial_inplace, PC relative
8002 relocations); see more below. */
8003 #ifndef OBJ_AOUT
8004 if (IS_ELF
8005 #ifdef TE_PE
8006 || OUTPUT_FLAVOR == bfd_target_coff_flavour
8007 #endif
8009 value += fixP->fx_where + fixP->fx_frag->fr_address;
8010 #endif
8011 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8012 if (IS_ELF)
8014 segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);
8016 if ((sym_seg == seg
8017 || (symbol_section_p (fixP->fx_addsy)
8018 && sym_seg != absolute_section))
8019 && !generic_force_reloc (fixP))
8021 /* Yes, we add the values in twice. This is because
8022 bfd_install_relocation subtracts them out again. I think
8023 bfd_install_relocation is broken, but I don't dare change
8024 it. FIXME. */
8025 value += fixP->fx_where + fixP->fx_frag->fr_address;
8028 #endif
8029 #if defined (OBJ_COFF) && defined (TE_PE)
8030 /* For some reason, the PE format does not store a
8031 section address offset for a PC relative symbol. */
8032 if (S_GET_SEGMENT (fixP->fx_addsy) != seg
8033 || S_IS_WEAK (fixP->fx_addsy))
8034 value += md_pcrel_from (fixP);
8035 #endif
8037 #if defined (OBJ_COFF) && defined (TE_PE)
8038 if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
8040 value -= S_GET_VALUE (fixP->fx_addsy);
8042 #endif
8044 /* Fix a few things - the dynamic linker expects certain values here,
8045 and we must not disappoint it. */
8046 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8047 if (IS_ELF && fixP->fx_addsy)
8048 switch (fixP->fx_r_type)
8050 case BFD_RELOC_386_PLT32:
8051 case BFD_RELOC_X86_64_PLT32:
8052 /* Make the jump instruction point to the address of the operand. At
8053 runtime we merely add the offset to the actual PLT entry. */
8054 value = -4;
8055 break;
8057 case BFD_RELOC_386_TLS_GD:
8058 case BFD_RELOC_386_TLS_LDM:
8059 case BFD_RELOC_386_TLS_IE_32:
8060 case BFD_RELOC_386_TLS_IE:
8061 case BFD_RELOC_386_TLS_GOTIE:
8062 case BFD_RELOC_386_TLS_GOTDESC:
8063 case BFD_RELOC_X86_64_TLSGD:
8064 case BFD_RELOC_X86_64_TLSLD:
8065 case BFD_RELOC_X86_64_GOTTPOFF:
8066 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
8067 value = 0; /* Fully resolved at runtime. No addend. */
8068 /* Fallthrough */
8069 case BFD_RELOC_386_TLS_LE:
8070 case BFD_RELOC_386_TLS_LDO_32:
8071 case BFD_RELOC_386_TLS_LE_32:
8072 case BFD_RELOC_X86_64_DTPOFF32:
8073 case BFD_RELOC_X86_64_DTPOFF64:
8074 case BFD_RELOC_X86_64_TPOFF32:
8075 case BFD_RELOC_X86_64_TPOFF64:
8076 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8077 break;
8079 case BFD_RELOC_386_TLS_DESC_CALL:
8080 case BFD_RELOC_X86_64_TLSDESC_CALL:
8081 value = 0; /* Fully resolved at runtime. No addend. */
8082 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8083 fixP->fx_done = 0;
8084 return;
8086 case BFD_RELOC_386_GOT32:
8087 case BFD_RELOC_X86_64_GOT32:
8088 value = 0; /* Fully resolved at runtime. No addend. */
8089 break;
8091 case BFD_RELOC_VTABLE_INHERIT:
8092 case BFD_RELOC_VTABLE_ENTRY:
8093 fixP->fx_done = 0;
8094 return;
8096 default:
8097 break;
8099 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
8100 *valP = value;
8101 #endif /* !defined (TE_Mach) */
8103 /* Are we finished with this relocation now? */
8104 if (fixP->fx_addsy == NULL)
8105 fixP->fx_done = 1;
8106 #if defined (OBJ_COFF) && defined (TE_PE)
8107 else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
8109 fixP->fx_done = 0;
8110 /* Remember value for tc_gen_reloc. */
8111 fixP->fx_addnumber = value;
8112 /* Clear out the frag for now. */
8113 value = 0;
8115 #endif
8116 else if (use_rela_relocations)
8118 fixP->fx_no_overflow = 1;
8119 /* Remember value for tc_gen_reloc. */
8120 fixP->fx_addnumber = value;
8121 value = 0;
8124 md_number_to_chars (p, value, fixP->fx_size);
8127 char *
8128 md_atof (int type, char *litP, int *sizeP)
8130 /* This outputs the LITTLENUMs in REVERSE order;
8131 in accord with the little-endian 386. */
8132 return ieee_md_atof (type, litP, sizeP, FALSE);
8135 static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];
8137 static char *
8138 output_invalid (int c)
8140 if (ISPRINT (c))
8141 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
8142 "'%c'", c);
8143 else
8144 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
8145 "(0x%x)", (unsigned char) c);
8146 return output_invalid_buf;
8149 /* REG_STRING starts *before* REGISTER_PREFIX. */
8151 static const reg_entry *
8152 parse_real_register (char *reg_string, char **end_op)
8154 char *s = reg_string;
8155 char *p;
8156 char reg_name_given[MAX_REG_NAME_SIZE + 1];
8157 const reg_entry *r;
8159 /* Skip possible REGISTER_PREFIX and possible whitespace. */
8160 if (*s == REGISTER_PREFIX)
8161 ++s;
8163 if (is_space_char (*s))
8164 ++s;
8166 p = reg_name_given;
8167 while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
8169 if (p >= reg_name_given + MAX_REG_NAME_SIZE)
8170 return (const reg_entry *) NULL;
8171 s++;
8174 /* For naked regs, make sure that we are not dealing with an identifier.
8175 This prevents confusing an identifier like `eax_var' with register
8176 `eax'. */
8177 if (allow_naked_reg && identifier_chars[(unsigned char) *s])
8178 return (const reg_entry *) NULL;
8180 *end_op = s;
8182 r = (const reg_entry *) hash_find (reg_hash, reg_name_given);
8184 /* Handle floating point regs, allowing spaces in the (i) part. */
8185 if (r == i386_regtab /* %st is first entry of table */)
8187 if (is_space_char (*s))
8188 ++s;
8189 if (*s == '(')
8191 ++s;
8192 if (is_space_char (*s))
8193 ++s;
8194 if (*s >= '0' && *s <= '7')
8196 int fpr = *s - '0';
8197 ++s;
8198 if (is_space_char (*s))
8199 ++s;
8200 if (*s == ')')
8202 *end_op = s + 1;
8203 r = (const reg_entry *) hash_find (reg_hash, "st(0)");
8204 know (r);
8205 return r + fpr;
8208 /* We have "%st(" then garbage. */
8209 return (const reg_entry *) NULL;
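/* Example: "%st(3)" first matches the bare "st" table entry above,
   then the "(3)" is parsed here and the entry for st(0) plus 3 is
   returned, relying on st(0)..st(7) being consecutive in
   i386_regtab.  */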
8213 if (r == NULL || allow_pseudo_reg)
8214 return r;
8216 if (operand_type_all_zero (&r->reg_type))
8217 return (const reg_entry *) NULL;
8219 if ((r->reg_type.bitfield.reg32
8220 || r->reg_type.bitfield.sreg3
8221 || r->reg_type.bitfield.control
8222 || r->reg_type.bitfield.debug
8223 || r->reg_type.bitfield.test)
8224 && !cpu_arch_flags.bitfield.cpui386)
8225 return (const reg_entry *) NULL;
8227 if (r->reg_type.bitfield.floatreg
8228 && !cpu_arch_flags.bitfield.cpu8087
8229 && !cpu_arch_flags.bitfield.cpu287
8230 && !cpu_arch_flags.bitfield.cpu387)
8231 return (const reg_entry *) NULL;
8233 if (r->reg_type.bitfield.regmmx && !cpu_arch_flags.bitfield.cpummx)
8234 return (const reg_entry *) NULL;
8236 if (r->reg_type.bitfield.regxmm && !cpu_arch_flags.bitfield.cpusse)
8237 return (const reg_entry *) NULL;
8239 if (r->reg_type.bitfield.regymm && !cpu_arch_flags.bitfield.cpuavx)
8240 return (const reg_entry *) NULL;
8242 /* Don't allow the fake index registers unless allow_index_reg is non-zero. */
8243 if (!allow_index_reg
8244 && (r->reg_num == RegEiz || r->reg_num == RegRiz))
8245 return (const reg_entry *) NULL;
8247 if (((r->reg_flags & (RegRex64 | RegRex))
8248 || r->reg_type.bitfield.reg64)
8249 && (!cpu_arch_flags.bitfield.cpulm
8250 || !operand_type_equal (&r->reg_type, &control))
8251 && flag_code != CODE_64BIT)
8252 return (const reg_entry *) NULL;
8254 if (r->reg_type.bitfield.sreg3 && r->reg_num == RegFlat && !intel_syntax)
8255 return (const reg_entry *) NULL;
8257 return r;
8260 /* REG_STRING starts *before* REGISTER_PREFIX. */
8262 static const reg_entry *
8263 parse_register (char *reg_string, char **end_op)
8265 const reg_entry *r;
8267 if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
8268 r = parse_real_register (reg_string, end_op);
8269 else
8270 r = NULL;
8271 if (!r)
8273 char *save = input_line_pointer;
8274 char c;
8275 symbolS *symbolP;
8277 input_line_pointer = reg_string;
8278 c = get_symbol_end ();
8279 symbolP = symbol_find (reg_string);
8280 if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
8282 const expressionS *e = symbol_get_value_expression (symbolP);
8284 know (e->X_op == O_register);
8285 know (e->X_add_number >= 0
8286 && (valueT) e->X_add_number < i386_regtab_size);
8287 r = i386_regtab + e->X_add_number;
8288 *end_op = input_line_pointer;
8290 *input_line_pointer = c;
8291 input_line_pointer = save;
8293 return r;
8296 int
8297 i386_parse_name (char *name, expressionS *e, char *nextcharP)
8299 const reg_entry *r;
8300 char *end = input_line_pointer;
8302 *end = *nextcharP;
8303 r = parse_register (name, &input_line_pointer);
8304 if (r && end <= input_line_pointer)
8306 *nextcharP = *input_line_pointer;
8307 *input_line_pointer = 0;
8308 e->X_op = O_register;
8309 e->X_add_number = r - i386_regtab;
8310 return 1;
8312 input_line_pointer = end;
8313 *end = 0;
8314 return intel_syntax ? i386_intel_parse_name (name, e) : 0;
8317 void
8318 md_operand (expressionS *e)
8320 char *end;
8321 const reg_entry *r;
8323 switch (*input_line_pointer)
8325 case REGISTER_PREFIX:
8326 r = parse_real_register (input_line_pointer, &end);
8327 if (r)
8329 e->X_op = O_register;
8330 e->X_add_number = r - i386_regtab;
8331 input_line_pointer = end;
8333 break;
8335 case '[':
8336 gas_assert (intel_syntax);
8337 end = input_line_pointer++;
8338 expression (e);
8339 if (*input_line_pointer == ']')
8341 ++input_line_pointer;
8342 e->X_op_symbol = make_expr_symbol (e);
8343 e->X_add_symbol = NULL;
8344 e->X_add_number = 0;
8345 e->X_op = O_index;
8347 else
8349 e->X_op = O_absent;
8350 input_line_pointer = end;
8352 break;
8357 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8358 const char *md_shortopts = "kVQ:sqn";
8359 #else
8360 const char *md_shortopts = "qn";
8361 #endif
8363 #define OPTION_32 (OPTION_MD_BASE + 0)
8364 #define OPTION_64 (OPTION_MD_BASE + 1)
8365 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
8366 #define OPTION_MARCH (OPTION_MD_BASE + 3)
8367 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
8368 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
8369 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
8370 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
8371 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
8372 #define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
8373 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
8374 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
8375 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 12)
8376 #define OPTION_X32 (OPTION_MD_BASE + 13)
8378 struct option md_longopts[] =
8380 {"32", no_argument, NULL, OPTION_32},
8381 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8382 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8383 {"64", no_argument, NULL, OPTION_64},
8384 #endif
8385 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8386 {"x32", no_argument, NULL, OPTION_X32},
8387 #endif
8388 {"divide", no_argument, NULL, OPTION_DIVIDE},
8389 {"march", required_argument, NULL, OPTION_MARCH},
8390 {"mtune", required_argument, NULL, OPTION_MTUNE},
8391 {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
8392 {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
8393 {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
8394 {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
8395 {"mold-gcc", no_argument, NULL, OPTION_MOLD_GCC},
8396 {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
8397 {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
8398 {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
8399 {NULL, no_argument, NULL, 0}
8401 size_t md_longopts_size = sizeof (md_longopts);
8403 int
8404 md_parse_option (int c, char *arg)
8406 unsigned int j;
8407 char *arch, *next;
8409 switch (c)
8411 case 'n':
8412 optimize_align_code = 0;
8413 break;
8415 case 'q':
8416 quiet_warnings = 1;
8417 break;
8419 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8420 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
8421 should be emitted or not. FIXME: Not implemented. */
8422 case 'Q':
8423 break;
8425 /* -V: SVR4 argument to print version ID. */
8426 case 'V':
8427 print_version_id ();
8428 break;
8430 /* -k: Ignore for FreeBSD compatibility. */
8431 case 'k':
8432 break;
8434 case 's':
8435 /* -s: On i386 Solaris, this tells the native assembler to use
8436 .stab instead of .stab.excl. We always use .stab anyhow. */
8437 break;
8438 #endif
8439 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8440 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
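/* --64 (and --x32 below) only make sense when a matching object format
   was compiled into BFD, so scan bfd_target_list () and fail up front
   if none is found.  */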
8441 case OPTION_64:
8443 const char **list, **l;
8445 list = bfd_target_list ();
8446 for (l = list; *l != NULL; l++)
8447 if (CONST_STRNEQ (*l, "elf64-x86-64")
8448 || strcmp (*l, "coff-x86-64") == 0
8449 || strcmp (*l, "pe-x86-64") == 0
8450 || strcmp (*l, "pei-x86-64") == 0
8451 || strcmp (*l, "mach-o-x86-64") == 0)
8453 default_arch = "x86_64";
8454 break;
8456 if (*l == NULL)
8457 as_fatal (_("no compiled in support for x86_64"));
8458 free (list);
8460 break;
8461 #endif
8463 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8464 case OPTION_X32:
8465 if (IS_ELF)
8467 const char **list, **l;
8469 list = bfd_target_list ();
8470 for (l = list; *l != NULL; l++)
8471 if (CONST_STRNEQ (*l, "elf32-x86-64"))
8473 default_arch = "x86_64:32";
8474 break;
8476 if (*l == NULL)
8477 as_fatal (_("no compiled in support for 32bit x86_64"));
8478 free (list);
8480 else
8481 as_fatal (_("32bit x86_64 is only supported for ELF"));
8482 break;
8483 #endif
8485 case OPTION_32:
8486 default_arch = "i386";
8487 break;
8489 case OPTION_DIVIDE:
8490 #ifdef SVR4_COMMENT_CHARS
8492 char *n, *t;
8493 const char *s;
8495 n = (char *) xmalloc (strlen (i386_comment_chars) + 1);
8496 t = n;
8497 for (s = i386_comment_chars; *s != '\0'; s++)
8498 if (*s != '/')
8499 *t++ = *s;
8500 *t = '\0';
8501 i386_comment_chars = n;
8503 #endif
8504 break;
8506 case OPTION_MARCH:
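/* The argument is a CPU optionally followed by `+'-separated ISA
   extensions, e.g. -march=corei7+avx.  Plain names select a processor;
   names stored with a leading dot in cpu_arch add an extension (or
   remove one, for the negated entries).  */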
8507 arch = xstrdup (arg);
8508 do
8510 if (*arch == '.')
8511 as_fatal (_("invalid -march= option: `%s'"), arg);
8512 next = strchr (arch, '+');
8513 if (next)
8514 *next++ = '\0';
8515 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8517 if (strcmp (arch, cpu_arch [j].name) == 0)
8519 /* Processor. */
8520 if (! cpu_arch[j].flags.bitfield.cpui386)
8521 continue;
8523 cpu_arch_name = cpu_arch[j].name;
8524 cpu_sub_arch_name = NULL;
8525 cpu_arch_flags = cpu_arch[j].flags;
8526 cpu_arch_isa = cpu_arch[j].type;
8527 cpu_arch_isa_flags = cpu_arch[j].flags;
8528 if (!cpu_arch_tune_set)
8530 cpu_arch_tune = cpu_arch_isa;
8531 cpu_arch_tune_flags = cpu_arch_isa_flags;
8533 break;
8535 else if (*cpu_arch [j].name == '.'
8536 && strcmp (arch, cpu_arch [j].name + 1) == 0)
8538 /* ISA extension. */
8539 i386_cpu_flags flags;
8541 if (!cpu_arch[j].negated)
8542 flags = cpu_flags_or (cpu_arch_flags,
8543 cpu_arch[j].flags);
8544 else
8545 flags = cpu_flags_and_not (cpu_arch_flags,
8546 cpu_arch[j].flags);
8547 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
8549 if (cpu_sub_arch_name)
8551 char *name = cpu_sub_arch_name;
8552 cpu_sub_arch_name = concat (name,
8553 cpu_arch[j].name,
8554 (const char *) NULL);
8555 free (name);
8557 else
8558 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
8559 cpu_arch_flags = flags;
8560 cpu_arch_isa_flags = flags;
8562 break;
8566 if (j >= ARRAY_SIZE (cpu_arch))
8567 as_fatal (_("invalid -march= option: `%s'"), arg);
8569 arch = next;
8571 while (next != NULL);
8572 break;
8574 case OPTION_MTUNE:
8575 if (*arg == '.')
8576 as_fatal (_("invalid -mtune= option: `%s'"), arg);
8577 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8579 if (strcmp (arg, cpu_arch [j].name) == 0)
8581 cpu_arch_tune_set = 1;
8582 cpu_arch_tune = cpu_arch [j].type;
8583 cpu_arch_tune_flags = cpu_arch[j].flags;
8584 break;
8587 if (j >= ARRAY_SIZE (cpu_arch))
8588 as_fatal (_("invalid -mtune= option: `%s'"), arg);
8589 break;
8591 case OPTION_MMNEMONIC:
8592 if (strcasecmp (arg, "att") == 0)
8593 intel_mnemonic = 0;
8594 else if (strcasecmp (arg, "intel") == 0)
8595 intel_mnemonic = 1;
8596 else
8597 as_fatal (_("invalid -mmnemonic= option: `%s'"), arg);
8598 break;
8600 case OPTION_MSYNTAX:
8601 if (strcasecmp (arg, "att") == 0)
8602 intel_syntax = 0;
8603 else if (strcasecmp (arg, "intel") == 0)
8604 intel_syntax = 1;
8605 else
8606 as_fatal (_("invalid -msyntax= option: `%s'"), arg);
8607 break;
8609 case OPTION_MINDEX_REG:
8610 allow_index_reg = 1;
8611 break;
8613 case OPTION_MNAKED_REG:
8614 allow_naked_reg = 1;
8615 break;
8617 case OPTION_MOLD_GCC:
8618 old_gcc = 1;
8619 break;
8621 case OPTION_MSSE2AVX:
8622 sse2avx = 1;
8623 break;
8625 case OPTION_MSSE_CHECK:
8626 if (strcasecmp (arg, "error") == 0)
8627 sse_check = sse_check_error;
8628 else if (strcasecmp (arg, "warning") == 0)
8629 sse_check = sse_check_warning;
8630 else if (strcasecmp (arg, "none") == 0)
8631 sse_check = sse_check_none;
8632 else
8633 as_fatal (_("invalid -msse-check= option: `%s'"), arg);
8634 break;
8636 case OPTION_MAVXSCALAR:
8637 if (strcasecmp (arg, "128") == 0)
8638 avxscalar = vex128;
8639 else if (strcasecmp (arg, "256") == 0)
8640 avxscalar = vex256;
8641 else
8642 as_fatal (_("invalid -mavxscalar= option: `%s'"), arg);
8643 break;
8645 default:
8646 return 0;
8648 return 1;
8651 #define MESSAGE_TEMPLATE \
8652 "                                                                                "
8654 static void
8655 show_arch (FILE *stream, int ext, int check)
8657 static char message[] = MESSAGE_TEMPLATE;
8658 char *start = message + 27;
8659 char *p;
8660 int size = sizeof (MESSAGE_TEMPLATE);
8661 int left;
8662 const char *name;
8663 int len;
8664 unsigned int j;
8666 p = start;
8667 left = size - (start - message);
8668 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8670 /* Should it be skipped? */
8671 if (cpu_arch [j].skip)
8672 continue;
8674 name = cpu_arch [j].name;
8675 len = cpu_arch [j].len;
8676 if (*name == '.')
8678 /* It is an extension. Skip if we aren't asked to show it. */
8679 if (ext)
8681 name++;
8682 len--;
8684 else
8685 continue;
8687 else if (ext)
8689 /* It is a processor. Skip if we only show extensions. */
8690 continue;
8692 else if (check && ! cpu_arch[j].flags.bitfield.cpui386)
8694 /* It is an impossible processor - skip. */
8695 continue;
8698 /* Reserve 2 spaces for ", " or ",\0" */
8699 left -= len + 2;
8701 /* Check if there is any room. */
8702 if (left >= 0)
8704 if (p != start)
8706 *p++ = ',';
8707 *p++ = ' ';
8709 p = mempcpy (p, name, len);
8711 else
8713 /* Output the current message now and start a new one. */
8714 *p++ = ',';
8715 *p = '\0';
8716 fprintf (stream, "%s\n", message);
8717 p = start;
8718 left = size - (start - message) - len - 2;
8720 gas_assert (left >= 0);
8722 p = mempcpy (p, name, len);
8726 *p = '\0';
8727 fprintf (stream, "%s\n", message);
8730 void
8731 md_show_usage (FILE *stream)
8733 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8734 fprintf (stream, _("\
8735 -Q ignored\n\
8736 -V print assembler version number\n\
8737 -k ignored\n"));
8738 #endif
8739 fprintf (stream, _("\
8740 -n Do not optimize code alignment\n\
8741 -q quieten some warnings\n"));
8742 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8743 fprintf (stream, _("\
8744 -s ignored\n"));
8745 #endif
8746 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8747 || defined (TE_PE) || defined (TE_PEP))
8748 fprintf (stream, _("\
8749 --32/--64/--x32 generate 32bit/64bit/x32 code\n"));
8750 #endif
8751 #ifdef SVR4_COMMENT_CHARS
8752 fprintf (stream, _("\
8753 --divide do not treat `/' as a comment character\n"));
8754 #else
8755 fprintf (stream, _("\
8756 --divide ignored\n"));
8757 #endif
8758 fprintf (stream, _("\
8759 -march=CPU[,+EXTENSION...]\n\
8760 generate code for CPU and EXTENSION, CPU is one of:\n"));
8761 show_arch (stream, 0, 1);
8762 fprintf (stream, _("\
8763 EXTENSION is combination of:\n"));
8764 show_arch (stream, 1, 0);
8765 fprintf (stream, _("\
8766 -mtune=CPU optimize for CPU, CPU is one of:\n"));
8767 show_arch (stream, 0, 0);
8768 fprintf (stream, _("\
8769 -msse2avx encode SSE instructions with VEX prefix\n"));
8770 fprintf (stream, _("\
8771 -msse-check=[none|error|warning]\n\
8772 check SSE instructions\n"));
8773 fprintf (stream, _("\
8774 -mavxscalar=[128|256] encode scalar AVX instructions with specific vector\n\
8775 length\n"));
8776 fprintf (stream, _("\
8777 -mmnemonic=[att|intel] use AT&T/Intel mnemonic\n"));
8778 fprintf (stream, _("\
8779 -msyntax=[att|intel] use AT&T/Intel syntax\n"));
8780 fprintf (stream, _("\
8781 -mindex-reg support pseudo index registers\n"));
8782 fprintf (stream, _("\
8783 -mnaked-reg don't require `%%' prefix for registers\n"));
8784 fprintf (stream, _("\
8785 -mold-gcc support old (<= 2.8.1) versions of gcc\n"));
8788 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
8789 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8790 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8792 /* Pick the target format to use. */
8794 const char *
8795 i386_target_format (void)
8797 if (!strncmp (default_arch, "x86_64", 6))
8799 update_code_flag (CODE_64BIT, 1);
8800 if (default_arch[6] == '\0')
8801 x86_elf_abi = X86_64_ABI;
8802 else
8803 x86_elf_abi = X86_64_X32_ABI;
8805 else if (!strcmp (default_arch, "i386"))
8806 update_code_flag (CODE_32BIT, 1);
8807 else
8808 as_fatal (_("unknown architecture"));
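/* If no ISA or tuning flags have been established yet (e.g. via
   -march= or -mtune=), default them from cpu_arch[0] for 32-bit code
   or cpu_arch[1] for 64-bit code.  */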
8810 if (cpu_flags_all_zero (&cpu_arch_isa_flags))
8811 cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
8812 if (cpu_flags_all_zero (&cpu_arch_tune_flags))
8813 cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;
8815 switch (OUTPUT_FLAVOR)
8817 #if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
8818 case bfd_target_aout_flavour:
8819 return AOUT_TARGET_FORMAT;
8820 #endif
8821 #if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
8822 # if defined (TE_PE) || defined (TE_PEP)
8823 case bfd_target_coff_flavour:
8824 return flag_code == CODE_64BIT ? "pe-x86-64" : "pe-i386";
8825 # elif defined (TE_GO32)
8826 case bfd_target_coff_flavour:
8827 return "coff-go32";
8828 # else
8829 case bfd_target_coff_flavour:
8830 return "coff-i386";
8831 # endif
8832 #endif
8833 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
8834 case bfd_target_elf_flavour:
8836 const char *format;
8838 switch (x86_elf_abi)
8840 default:
8841 format = ELF_TARGET_FORMAT;
8842 break;
8843 case X86_64_ABI:
8844 use_rela_relocations = 1;
8845 object_64bit = 1;
8846 format = ELF_TARGET_FORMAT64;
8847 break;
8848 case X86_64_X32_ABI:
8849 use_rela_relocations = 1;
8850 object_64bit = 1;
8851 disallow_64bit_reloc = 1;
8852 format = ELF_TARGET_FORMAT32;
8853 break;
8855 if (cpu_arch_isa == PROCESSOR_L1OM)
8857 if (x86_elf_abi != X86_64_ABI)
8858 as_fatal (_("Intel L1OM is 64bit only"));
8859 return ELF_TARGET_L1OM_FORMAT;
8861 if (cpu_arch_isa == PROCESSOR_K1OM)
8863 if (x86_elf_abi != X86_64_ABI)
8864 as_fatal (_("Intel K1OM is 64bit only"));
8865 return ELF_TARGET_K1OM_FORMAT;
8867 else
8868 return format;
8870 #endif
8871 #if defined (OBJ_MACH_O)
8872 case bfd_target_mach_o_flavour:
8873 if (flag_code == CODE_64BIT)
8875 use_rela_relocations = 1;
8876 object_64bit = 1;
8877 return "mach-o-x86-64";
8879 else
8880 return "mach-o-i386";
8881 #endif
8882 default:
8883 abort ();
8884 return NULL;
8888 #endif /* OBJ_MAYBE_ more than one */
8890 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))
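/* Emit a .note section of type NT_ARCH naming the selected
   architecture.  The note uses the standard ELF layout: namesz,
   descsz, type, then the name string, padded to a 4-byte boundary.  */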
8891 void
8892 i386_elf_emit_arch_note (void)
8894 if (IS_ELF && cpu_arch_name != NULL)
8896 char *p;
8897 asection *seg = now_seg;
8898 subsegT subseg = now_subseg;
8899 Elf_Internal_Note i_note;
8900 Elf_External_Note e_note;
8901 asection *note_secp;
8902 int len;
8904 /* Create the .note section. */
8905 note_secp = subseg_new (".note", 0);
8906 bfd_set_section_flags (stdoutput,
8907 note_secp,
8908 SEC_HAS_CONTENTS | SEC_READONLY);
8910 /* Process the arch string. */
8911 len = strlen (cpu_arch_name);
8913 i_note.namesz = len + 1;
8914 i_note.descsz = 0;
8915 i_note.type = NT_ARCH;
8916 p = frag_more (sizeof (e_note.namesz));
8917 md_number_to_chars (p, (valueT) i_note.namesz, sizeof (e_note.namesz));
8918 p = frag_more (sizeof (e_note.descsz));
8919 md_number_to_chars (p, (valueT) i_note.descsz, sizeof (e_note.descsz));
8920 p = frag_more (sizeof (e_note.type));
8921 md_number_to_chars (p, (valueT) i_note.type, sizeof (e_note.type));
8922 p = frag_more (len + 1);
8923 strcpy (p, cpu_arch_name);
8925 frag_align (2, 0, 0);
8927 subseg_set (seg, subseg);
8930 #endif
8932 symbolS *
8933 md_undefined_symbol (char *name)
8935 if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
8936 && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
8937 && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
8938 && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
8940 if (!GOT_symbol)
8942 if (symbol_find (name))
8943 as_bad (_("GOT already in symbol table"));
8944 GOT_symbol = symbol_new (name, undefined_section,
8945 (valueT) 0, &zero_address_frag);
8947 return GOT_symbol;
8949 return 0;
8952 /* Round up a section size to the appropriate boundary. */
8954 valueT
8955 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
8957 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
8958 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
8960 /* For a.out, force the section size to be aligned. If we don't do
8961 this, BFD will align it for us, but it will not write out the
8962 final bytes of the section. This may be a bug in BFD, but it is
8963 easier to fix it here since that is how the other a.out targets
8964 work. */
8965 int align;
8967 align = bfd_get_section_alignment (stdoutput, segment);
8968 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
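/* The expression above rounds SIZE up to a multiple of 1 << ALIGN;
   e.g. with ALIGN == 2, a size of 13 becomes (13 + 3) & ~3 == 16.  */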
8970 #endif
8972 return size;
8975 /* On the i386, PC-relative offsets are relative to the start of the
8976 next instruction. That is, the address of the offset, plus its
8977 size, since the offset is always the last part of the insn. */
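/* For example, a 4-byte pc-relative fixup 10 bytes into its frag is
   taken relative to fr_address + 14, the address of the first byte of
   the following instruction.  */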
8979 long
8980 md_pcrel_from (fixS *fixP)
8982 return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
8985 #ifndef I386COFF
8987 static void
8988 s_bss (int ignore ATTRIBUTE_UNUSED)
8990 int temp;
8992 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8993 if (IS_ELF)
8994 obj_elf_section_change_hook ();
8995 #endif
8996 temp = get_absolute_expression ();
8997 subseg_set (bss_section, (subsegT) temp);
8998 demand_empty_rest_of_line ();
9001 #endif
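/* A fixup whose subtrahend is _GLOBAL_OFFSET_TABLE_ (an expression of
   the form `sym - _GLOBAL_OFFSET_TABLE_') is converted here into the
   appropriate GOT-based relocation and the subtrahend dropped.  */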
9003 void
9004 i386_validate_fix (fixS *fixp)
9006 if (fixp->fx_subsy && fixp->fx_subsy == GOT_symbol)
9008 if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
9010 if (!object_64bit)
9011 abort ();
9012 fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
9014 else
9016 if (!object_64bit)
9017 fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
9018 else
9019 fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
9021 fixp->fx_subsy = 0;
9025 arelent *
9026 tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
9028 arelent *rel;
9029 bfd_reloc_code_real_type code;
9031 switch (fixp->fx_r_type)
9033 case BFD_RELOC_X86_64_PLT32:
9034 case BFD_RELOC_X86_64_GOT32:
9035 case BFD_RELOC_X86_64_GOTPCREL:
9036 case BFD_RELOC_386_PLT32:
9037 case BFD_RELOC_386_GOT32:
9038 case BFD_RELOC_386_GOTOFF:
9039 case BFD_RELOC_386_GOTPC:
9040 case BFD_RELOC_386_TLS_GD:
9041 case BFD_RELOC_386_TLS_LDM:
9042 case BFD_RELOC_386_TLS_LDO_32:
9043 case BFD_RELOC_386_TLS_IE_32:
9044 case BFD_RELOC_386_TLS_IE:
9045 case BFD_RELOC_386_TLS_GOTIE:
9046 case BFD_RELOC_386_TLS_LE_32:
9047 case BFD_RELOC_386_TLS_LE:
9048 case BFD_RELOC_386_TLS_GOTDESC:
9049 case BFD_RELOC_386_TLS_DESC_CALL:
9050 case BFD_RELOC_X86_64_TLSGD:
9051 case BFD_RELOC_X86_64_TLSLD:
9052 case BFD_RELOC_X86_64_DTPOFF32:
9053 case BFD_RELOC_X86_64_DTPOFF64:
9054 case BFD_RELOC_X86_64_GOTTPOFF:
9055 case BFD_RELOC_X86_64_TPOFF32:
9056 case BFD_RELOC_X86_64_TPOFF64:
9057 case BFD_RELOC_X86_64_GOTOFF64:
9058 case BFD_RELOC_X86_64_GOTPC32:
9059 case BFD_RELOC_X86_64_GOT64:
9060 case BFD_RELOC_X86_64_GOTPCREL64:
9061 case BFD_RELOC_X86_64_GOTPC64:
9062 case BFD_RELOC_X86_64_GOTPLT64:
9063 case BFD_RELOC_X86_64_PLTOFF64:
9064 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
9065 case BFD_RELOC_X86_64_TLSDESC_CALL:
9066 case BFD_RELOC_RVA:
9067 case BFD_RELOC_VTABLE_ENTRY:
9068 case BFD_RELOC_VTABLE_INHERIT:
9069 #ifdef TE_PE
9070 case BFD_RELOC_32_SECREL:
9071 #endif
9072 code = fixp->fx_r_type;
9073 break;
9074 case BFD_RELOC_X86_64_32S:
9075 if (!fixp->fx_pcrel)
9077 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
9078 code = fixp->fx_r_type;
9079 break;
9081 default:
9082 if (fixp->fx_pcrel)
9084 switch (fixp->fx_size)
9086 default:
9087 as_bad_where (fixp->fx_file, fixp->fx_line,
9088 _("can not do %d byte pc-relative relocation"),
9089 fixp->fx_size);
9090 code = BFD_RELOC_32_PCREL;
9091 break;
9092 case 1: code = BFD_RELOC_8_PCREL; break;
9093 case 2: code = BFD_RELOC_16_PCREL; break;
9094 case 4: code = BFD_RELOC_32_PCREL; break;
9095 #ifdef BFD64
9096 case 8: code = BFD_RELOC_64_PCREL; break;
9097 #endif
9100 else
9102 switch (fixp->fx_size)
9104 default:
9105 as_bad_where (fixp->fx_file, fixp->fx_line,
9106 _("can not do %d byte relocation"),
9107 fixp->fx_size);
9108 code = BFD_RELOC_32;
9109 break;
9110 case 1: code = BFD_RELOC_8; break;
9111 case 2: code = BFD_RELOC_16; break;
9112 case 4: code = BFD_RELOC_32; break;
9113 #ifdef BFD64
9114 case 8: code = BFD_RELOC_64; break;
9115 #endif
9118 break;
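/* A direct reference to _GLOBAL_OFFSET_TABLE_ itself is turned into a
   GOTPC relocation, which resolves to the displacement from the place
   of the relocation to the GOT.  */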
9121 if ((code == BFD_RELOC_32
9122 || code == BFD_RELOC_32_PCREL
9123 || code == BFD_RELOC_X86_64_32S)
9124 && GOT_symbol
9125 && fixp->fx_addsy == GOT_symbol)
9127 if (!object_64bit)
9128 code = BFD_RELOC_386_GOTPC;
9129 else
9130 code = BFD_RELOC_X86_64_GOTPC32;
9132 if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
9133 && GOT_symbol
9134 && fixp->fx_addsy == GOT_symbol)
9136 code = BFD_RELOC_X86_64_GOTPC64;
9139 rel = (arelent *) xmalloc (sizeof (arelent));
9140 rel->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
9141 *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9143 rel->address = fixp->fx_frag->fr_address + fixp->fx_where;
9145 if (!use_rela_relocations)
9147 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
9148 vtable entry to be used in the relocation's section offset. */
9149 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
9150 rel->address = fixp->fx_offset;
9151 #if defined (OBJ_COFF) && defined (TE_PE)
9152 else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
9153 rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
9154 else
9155 #endif
9156 rel->addend = 0;
9158 /* Use rela relocations in 64-bit mode. */
9159 else
9161 if (disallow_64bit_reloc)
9162 switch (code)
9164 case BFD_RELOC_X86_64_DTPOFF64:
9165 case BFD_RELOC_X86_64_TPOFF64:
9166 case BFD_RELOC_64_PCREL:
9167 case BFD_RELOC_X86_64_GOTOFF64:
9168 case BFD_RELOC_X86_64_GOT64:
9169 case BFD_RELOC_X86_64_GOTPCREL64:
9170 case BFD_RELOC_X86_64_GOTPC64:
9171 case BFD_RELOC_X86_64_GOTPLT64:
9172 case BFD_RELOC_X86_64_PLTOFF64:
9173 as_bad_where (fixp->fx_file, fixp->fx_line,
9174 _("cannot represent relocation type %s in x32 mode"),
9175 bfd_get_reloc_code_name (code));
9176 break;
9177 default:
9178 break;
9181 if (!fixp->fx_pcrel)
9182 rel->addend = fixp->fx_offset;
9183 else
9184 switch (code)
9186 case BFD_RELOC_X86_64_PLT32:
9187 case BFD_RELOC_X86_64_GOT32:
9188 case BFD_RELOC_X86_64_GOTPCREL:
9189 case BFD_RELOC_X86_64_TLSGD:
9190 case BFD_RELOC_X86_64_TLSLD:
9191 case BFD_RELOC_X86_64_GOTTPOFF:
9192 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
9193 case BFD_RELOC_X86_64_TLSDESC_CALL:
9194 rel->addend = fixp->fx_offset - fixp->fx_size;
9195 break;
9196 default:
9197 rel->addend = (section->vma
9198 - fixp->fx_size
9199 + fixp->fx_addnumber
9200 + md_pcrel_from (fixp));
9201 break;
9205 rel->howto = bfd_reloc_type_lookup (stdoutput, code);
9206 if (rel->howto == NULL)
9208 as_bad_where (fixp->fx_file, fixp->fx_line,
9209 _("cannot represent relocation type %s"),
9210 bfd_get_reloc_code_name (code));
9211 /* Set howto to a garbage value so that we can keep going. */
9212 rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
9213 gas_assert (rel->howto != NULL);
9216 return rel;
9219 #include "tc-i386-intel.c"
9221 void
9222 tc_x86_parse_to_dw2regnum (expressionS *exp)
9224 int saved_naked_reg;
9225 char saved_register_dot;
9227 saved_naked_reg = allow_naked_reg;
9228 allow_naked_reg = 1;
9229 saved_register_dot = register_chars['.'];
9230 register_chars['.'] = '.';
9231 allow_pseudo_reg = 1;
9232 expression_and_evaluate (exp);
9233 allow_pseudo_reg = 0;
9234 register_chars['.'] = saved_register_dot;
9235 allow_naked_reg = saved_naked_reg;
9237 if (exp->X_op == O_register && exp->X_add_number >= 0)
9239 if ((addressT) exp->X_add_number < i386_regtab_size)
9241 exp->X_op = O_constant;
9242 exp->X_add_number = i386_regtab[exp->X_add_number]
9243 .dw2_regnum[flag_code >> 1];
9245 else
9246 exp->X_op = O_illegal;
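/* Record the initial CFI state of a function: the CFA is the stack
   pointer plus the size of the return address just pushed by CALL
   (i.e. -x86_cie_data_alignment, 4 or 8 bytes), and the return address
   itself is saved at CFA + x86_cie_data_alignment.  */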
9250 void
9251 tc_x86_frame_initial_instructions (void)
9253 static unsigned int sp_regno[2];
9255 if (!sp_regno[flag_code >> 1])
9257 char *saved_input = input_line_pointer;
9258 char sp[][4] = {"esp", "rsp"};
9259 expressionS exp;
9261 input_line_pointer = sp[flag_code >> 1];
9262 tc_x86_parse_to_dw2regnum (&exp);
9263 gas_assert (exp.X_op == O_constant);
9264 sp_regno[flag_code >> 1] = exp.X_add_number;
9265 input_line_pointer = saved_input;
9268 cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
9269 cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
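/* DWARF address size: x32 (ELF32 on x86-64) uses 4-byte addresses in
   debug info even though the processor runs in 64-bit mode; everything
   else follows the BFD address size.  */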
9272 int
9273 x86_dwarf2_addr_size (void)
9275 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
9276 if (x86_elf_abi == X86_64_X32_ABI)
9277 return 4;
9278 #endif
9279 return bfd_arch_bits_per_address (stdoutput) / 8;
9282 int
9283 i386_elf_section_type (const char *str, size_t len)
9285 if (flag_code == CODE_64BIT
9286 && len == sizeof ("unwind") - 1
9287 && strncmp (str, "unwind", 6) == 0)
9288 return SHT_X86_64_UNWIND;
9290 return -1;
9293 #ifdef TE_SOLARIS
9294 void
9295 i386_solaris_fix_up_eh_frame (segT sec)
9297 if (flag_code == CODE_64BIT)
9298 elf_section_type (sec) = SHT_X86_64_UNWIND;
9300 #endif
9302 #ifdef TE_PE
9303 void
9304 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
9306 expressionS exp;
9308 exp.X_op = O_secrel;
9309 exp.X_add_symbol = symbol;
9310 exp.X_add_number = 0;
9311 emit_expr (&exp, size);
9313 #endif
9315 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9316 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
9318 bfd_vma
9319 x86_64_section_letter (int letter, char **ptr_msg)
9321 if (flag_code == CODE_64BIT)
9323 if (letter == 'l')
9324 return SHF_X86_64_LARGE;
9326 *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
9328 else
9329 *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
9330 return -1;
9333 bfd_vma
9334 x86_64_section_word (char *str, size_t len)
9336 if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
9337 return SHF_X86_64_LARGE;
9339 return -1;
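/* Implement the .largecomm directive.  In 64-bit mode the symbol is
   placed in the large common section (local symbols go into a .lbss
   bss-style section) so it may live outside the small data model;
   otherwise fall back to an ordinary .comm and warn.  */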
9342 static void
9343 handle_large_common (int small ATTRIBUTE_UNUSED)
9345 if (flag_code != CODE_64BIT)
9347 s_comm_internal (0, elf_common_parse);
9348 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
9350 else
9352 static segT lbss_section;
9353 asection *saved_com_section_ptr = elf_com_section_ptr;
9354 asection *saved_bss_section = bss_section;
9356 if (lbss_section == NULL)
9358 flagword applicable;
9359 segT seg = now_seg;
9360 subsegT subseg = now_subseg;
9362 /* The .lbss section is for local .largecomm symbols. */
9363 lbss_section = subseg_new (".lbss", 0);
9364 applicable = bfd_applicable_section_flags (stdoutput);
9365 bfd_set_section_flags (stdoutput, lbss_section,
9366 applicable & SEC_ALLOC);
9367 seg_info (lbss_section)->bss = 1;
9369 subseg_set (seg, subseg);
9372 elf_com_section_ptr = &_bfd_elf_large_com_section;
9373 bss_section = lbss_section;
9375 s_comm_internal (0, elf_common_parse);
9377 elf_com_section_ptr = saved_com_section_ptr;
9378 bss_section = saved_bss_section;
9381 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */