1 /* tc-arm.c -- Assemble for the ARM
2 Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
3 2004, 2005, 2006, 2007, 2008, 2009
4 Free Software Foundation, Inc.
5 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
6 Modified by David Taylor (dtaylor@armltd.co.uk)
7 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
8 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
9 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
11 This file is part of GAS, the GNU Assembler.
13 GAS is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 3, or (at your option)
16 any later version.
18 GAS is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
23 You should have received a copy of the GNU General Public License
24 along with GAS; see the file COPYING. If not, write to the Free
25 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
26 02110-1301, USA. */
28 #include "as.h"
29 #include <limits.h>
30 #include <stdarg.h>
31 #define NO_RELOC 0
32 #include "safe-ctype.h"
33 #include "subsegs.h"
34 #include "obstack.h"
36 #include "opcode/arm.h"
38 #ifdef OBJ_ELF
39 #include "elf/arm.h"
40 #include "dw2gencfi.h"
41 #endif
43 #include "dwarf2dbg.h"
45 #ifdef OBJ_ELF
46 /* Must be at least the size of the largest unwind opcode (currently two). */
47 #define ARM_OPCODE_CHUNK_SIZE 8
49 /* This structure holds the unwinding state. */
51 static struct
53 symbolS * proc_start;
54 symbolS * table_entry;
55 symbolS * personality_routine;
56 int personality_index;
57 /* The segment containing the function. */
58 segT saved_seg;
59 subsegT saved_subseg;
60 /* Opcodes generated from this function. */
61 unsigned char * opcodes;
62 int opcode_count;
63 int opcode_alloc;
64 /* The number of bytes pushed to the stack. */
65 offsetT frame_size;
66 /* We don't add stack adjustment opcodes immediately so that we can merge
67 multiple adjustments. We can also omit the final adjustment
68 when using a frame pointer. */
69 offsetT pending_offset;
70 /* These two fields are set by both unwind_movsp and unwind_setfp. They
71 hold the reg+offset to use when restoring sp from a frame pointer. */
72 offsetT fp_offset;
73 int fp_reg;
74 /* Nonzero if an unwind_setfp directive has been seen. */
75 unsigned fp_used:1;
76 /* Nonzero if the last opcode restores sp from fp_reg. */
77 unsigned sp_restored:1;
78 } unwind;
80 /* Bit N indicates that an R_ARM_NONE relocation has been output for
81 __aeabi_unwind_cpp_prN already if set. This enables dependencies to be
82 emitted only once per section, to save unnecessary bloat. */
83 static unsigned int marked_pr_dependency = 0;
85 #endif /* OBJ_ELF */
87 /* Results from operand parsing worker functions. */
89 typedef enum
91 PARSE_OPERAND_SUCCESS,
92 PARSE_OPERAND_FAIL,
93 PARSE_OPERAND_FAIL_NO_BACKTRACK
94 } parse_operand_result;
96 enum arm_float_abi
98 ARM_FLOAT_ABI_HARD,
99 ARM_FLOAT_ABI_SOFTFP,
100 ARM_FLOAT_ABI_SOFT
103 /* Types of processor to assemble for. */
104 #ifndef CPU_DEFAULT
105 #if defined __XSCALE__
106 #define CPU_DEFAULT ARM_ARCH_XSCALE
107 #else
108 #if defined __thumb__
109 #define CPU_DEFAULT ARM_ARCH_V5T
110 #endif
111 #endif
112 #endif
114 #ifndef FPU_DEFAULT
115 # ifdef TE_LINUX
116 # define FPU_DEFAULT FPU_ARCH_FPA
117 # elif defined (TE_NetBSD)
118 # ifdef OBJ_ELF
119 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
120 # else
121 /* Legacy a.out format. */
122 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
123 # endif
124 # elif defined (TE_VXWORKS)
125 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
126 # else
127 /* For backwards compatibility, default to FPA. */
128 # define FPU_DEFAULT FPU_ARCH_FPA
129 # endif
130 #endif /* ifndef FPU_DEFAULT */
132 #define streq(a, b) (strcmp (a, b) == 0)
134 static arm_feature_set cpu_variant;
135 static arm_feature_set arm_arch_used;
136 static arm_feature_set thumb_arch_used;
138 /* Flags stored in private area of BFD structure. */
139 static int uses_apcs_26 = FALSE;
140 static int atpcs = FALSE;
141 static int support_interwork = FALSE;
142 static int uses_apcs_float = FALSE;
143 static int pic_code = FALSE;
144 static int fix_v4bx = FALSE;
145 /* Warn on using deprecated features. */
146 static int warn_on_deprecated = TRUE;
149 /* Variables that we set while parsing command-line options. Once all
150 options have been read we re-process these values to set the real
151 assembly flags. */
152 static const arm_feature_set *legacy_cpu = NULL;
153 static const arm_feature_set *legacy_fpu = NULL;
155 static const arm_feature_set *mcpu_cpu_opt = NULL;
156 static const arm_feature_set *mcpu_fpu_opt = NULL;
157 static const arm_feature_set *march_cpu_opt = NULL;
158 static const arm_feature_set *march_fpu_opt = NULL;
159 static const arm_feature_set *mfpu_opt = NULL;
160 static const arm_feature_set *object_arch = NULL;
162 /* Constants for known architecture features. */
163 static const arm_feature_set fpu_default = FPU_DEFAULT;
164 static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
165 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
166 static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
167 static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
168 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
169 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
170 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
171 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
173 #ifdef CPU_DEFAULT
174 static const arm_feature_set cpu_default = CPU_DEFAULT;
175 #endif
177 static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0);
178 static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V1, 0);
179 static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0);
180 static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0);
181 static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0);
182 static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0);
183 static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0);
184 static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0);
185 static const arm_feature_set arm_ext_v4t_5 =
186 ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0);
187 static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0);
188 static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0);
189 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0);
190 static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0);
191 static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0);
192 static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0);
193 static const arm_feature_set arm_ext_v6z = ARM_FEATURE (ARM_EXT_V6Z, 0);
194 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0);
195 static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0);
196 static const arm_feature_set arm_ext_barrier = ARM_FEATURE (ARM_EXT_BARRIER, 0);
197 static const arm_feature_set arm_ext_msr = ARM_FEATURE (ARM_EXT_THUMB_MSR, 0);
198 static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0);
199 static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0);
200 static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0);
201 static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0);
202 static const arm_feature_set arm_ext_m =
203 ARM_FEATURE (ARM_EXT_V6M | ARM_EXT_V7M, 0);
205 static const arm_feature_set arm_arch_any = ARM_ANY;
206 static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1);
207 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
208 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
210 static const arm_feature_set arm_cext_iwmmxt2 =
211 ARM_FEATURE (0, ARM_CEXT_IWMMXT2);
212 static const arm_feature_set arm_cext_iwmmxt =
213 ARM_FEATURE (0, ARM_CEXT_IWMMXT);
214 static const arm_feature_set arm_cext_xscale =
215 ARM_FEATURE (0, ARM_CEXT_XSCALE);
216 static const arm_feature_set arm_cext_maverick =
217 ARM_FEATURE (0, ARM_CEXT_MAVERICK);
218 static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1);
219 static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2);
220 static const arm_feature_set fpu_vfp_ext_v1xd =
221 ARM_FEATURE (0, FPU_VFP_EXT_V1xD);
222 static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1);
223 static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2);
224 static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3);
225 static const arm_feature_set fpu_vfp_ext_d32 =
226 ARM_FEATURE (0, FPU_VFP_EXT_D32);
227 static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1);
228 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
229 ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
230 static const arm_feature_set fpu_neon_fp16 = ARM_FEATURE (0, FPU_NEON_FP16);
232 static int mfloat_abi_opt = -1;
233 /* Record user cpu selection for object attributes. */
234 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
235 /* Must be long enough to hold any of the names in arm_cpus. */
236 static char selected_cpu_name[16];
237 #ifdef OBJ_ELF
238 # ifdef EABI_DEFAULT
239 static int meabi_flags = EABI_DEFAULT;
240 # else
241 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
242 # endif
244 static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
246 bfd_boolean
247 arm_is_eabi (void)
249 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
251 #endif
253 #ifdef OBJ_ELF
254 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
255 symbolS * GOT_symbol;
256 #endif
258 /* 0: assemble for ARM,
259 1: assemble for Thumb,
260 2: assemble for Thumb even though target CPU does not support thumb
261 instructions. */
262 static int thumb_mode = 0;
263 /* A value distinct from the possible values for thumb_mode that we
264 can use to record whether thumb_mode has been copied into the
265 tc_frag_data field of a frag. */
266 #define MODE_RECORDED (1 << 4)
268 /* Specifies the intrinsic IT insn behavior mode. */
269 enum implicit_it_mode
271 IMPLICIT_IT_MODE_NEVER = 0x00,
272 IMPLICIT_IT_MODE_ARM = 0x01,
273 IMPLICIT_IT_MODE_THUMB = 0x02,
274 IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
276 static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
278 /* If unified_syntax is true, we are processing the new unified
279 ARM/Thumb syntax. Important differences from the old ARM mode:
281 - Immediate operands do not require a # prefix.
282 - Conditional affixes always appear at the end of the
283 instruction. (For backward compatibility, those instructions
284 that formerly had them in the middle, continue to accept them
285 there.)
286 - The IT instruction may appear, and if it does is validated
287 against subsequent conditional affixes. It does not generate
288 machine code.
290 Important differences from the old Thumb mode:
292 - Immediate operands do not require a # prefix.
293 - Most of the V6T2 instructions are only available in unified mode.
294 - The .N and .W suffixes are recognized and honored (it is an error
295 if they cannot be honored).
296 - All instructions set the flags if and only if they have an 's' affix.
297 - Conditional affixes may be used. They are validated against
298 preceding IT instructions. Unlike ARM mode, you cannot use a
299 conditional affix except in the scope of an IT instruction. */
301 static bfd_boolean unified_syntax = FALSE;
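/* For illustration, a few examples of the differences listed above
   (illustrative spellings, following the rules described, not taken from
   code elsewhere in this file):

     old (divided) ARM syntax:   addeqs  r0, r1, #1   @ cond before 's'
     unified syntax:             addseq  r0, r1, #1   @ 's' affix, then cond
     unified immediate:          mov     r0, 42       @ '#' prefix optional
     width control (Thumb-2):    add.w   r0, r1, r2   @ error if '.w' cannot
                                                         be honored  */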
303 enum neon_el_type
305 NT_invtype,
306 NT_untyped,
307 NT_integer,
308 NT_float,
309 NT_poly,
310 NT_signed,
311 NT_unsigned
314 struct neon_type_el
316 enum neon_el_type type;
317 unsigned size;
320 #define NEON_MAX_TYPE_ELS 4
322 struct neon_type
324 struct neon_type_el el[NEON_MAX_TYPE_ELS];
325 unsigned elems;
328 enum it_instruction_type
330 OUTSIDE_IT_INSN,
331 INSIDE_IT_INSN,
332 INSIDE_IT_LAST_INSN,
333 IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
334 if inside, should be the last one. */
335 NEUTRAL_IT_INSN, /* This could be either inside or outside,
336 i.e. BKPT and NOP. */
337 IT_INSN /* The IT insn has been parsed. */
340 struct arm_it
342 const char * error;
343 unsigned long instruction;
344 int size;
345 int size_req;
346 int cond;
347 /* "uncond_value" is set to the value in place of the conditional field in
348 unconditional versions of the instruction, or -1 if nothing is
349 appropriate. */
350 int uncond_value;
351 struct neon_type vectype;
352 /* Set to the opcode if the instruction needs relaxation.
353 Zero if the instruction is not relaxed. */
354 unsigned long relax;
355 struct
357 bfd_reloc_code_real_type type;
358 expressionS exp;
359 int pc_rel;
360 } reloc;
362 enum it_instruction_type it_insn_type;
364 struct
366 unsigned reg;
367 signed int imm;
368 struct neon_type_el vectype;
369 unsigned present : 1; /* Operand present. */
370 unsigned isreg : 1; /* Operand was a register. */
371 unsigned immisreg : 1; /* .imm field is a second register. */
372 unsigned isscalar : 1; /* Operand is a (Neon) scalar. */
373 unsigned immisalign : 1; /* Immediate is an alignment specifier. */
374 unsigned immisfloat : 1; /* Immediate was parsed as a float. */
375 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
376 instructions. This allows us to disambiguate ARM <-> vector insns. */
377 unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits. */
378 unsigned isvec : 1; /* Is a single, double or quad VFP/Neon reg. */
379 unsigned isquad : 1; /* Operand is Neon quad-precision register. */
380 unsigned issingle : 1; /* Operand is VFP single-precision register. */
381 unsigned hasreloc : 1; /* Operand has relocation suffix. */
382 unsigned writeback : 1; /* Operand has trailing ! */
383 unsigned preind : 1; /* Preindexed address. */
384 unsigned postind : 1; /* Postindexed address. */
385 unsigned negative : 1; /* Index register was negated. */
386 unsigned shifted : 1; /* Shift applied to operation. */
387 unsigned shift_kind : 3; /* Shift operation (enum shift_kind). */
388 } operands[6];
391 static struct arm_it inst;
393 #define NUM_FLOAT_VALS 8
395 const char * fp_const[] =
397 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
400 /* Number of littlenums required to hold an extended precision number. */
401 #define MAX_LITTLENUMS 6
403 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
405 #define FAIL (-1)
406 #define SUCCESS (0)
408 #define SUFF_S 1
409 #define SUFF_D 2
410 #define SUFF_E 3
411 #define SUFF_P 4
413 #define CP_T_X 0x00008000
414 #define CP_T_Y 0x00400000
416 #define CONDS_BIT 0x00100000
417 #define LOAD_BIT 0x00100000
419 #define DOUBLE_LOAD_FLAG 0x00000001
421 struct asm_cond
423 const char * template;
424 unsigned long value;
427 #define COND_ALWAYS 0xE
429 struct asm_psr
431 const char * template;
432 unsigned long field;
435 struct asm_barrier_opt
437 const char * template;
438 unsigned long value;
441 /* The bit that distinguishes CPSR and SPSR. */
442 #define SPSR_BIT (1 << 22)
444 /* The individual PSR flag bits. */
445 #define PSR_c (1 << 16)
446 #define PSR_x (1 << 17)
447 #define PSR_s (1 << 18)
448 #define PSR_f (1 << 19)
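/* For example, an MSR operand such as "cpsr_fc" is represented as
   PSR_f | PSR_c, and "spsr_fc" as SPSR_BIT | PSR_f | PSR_c; these bits
   correspond to the field-mask bits (19:16) and the R bit (22) of the
   MSR instruction encoding.  */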
450 struct reloc_entry
452 char * name;
453 bfd_reloc_code_real_type reloc;
456 enum vfp_reg_pos
458 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
459 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
462 enum vfp_ldstm_type
464 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
467 /* Bits for DEFINED field in neon_typed_alias. */
468 #define NTA_HASTYPE 1
469 #define NTA_HASINDEX 2
471 struct neon_typed_alias
473 unsigned char defined;
474 unsigned char index;
475 struct neon_type_el eltype;
478 /* ARM register categories. This includes coprocessor numbers and various
479 architecture extensions' registers. */
480 enum arm_reg_type
482 REG_TYPE_RN,
483 REG_TYPE_CP,
484 REG_TYPE_CN,
485 REG_TYPE_FN,
486 REG_TYPE_VFS,
487 REG_TYPE_VFD,
488 REG_TYPE_NQ,
489 REG_TYPE_VFSD,
490 REG_TYPE_NDQ,
491 REG_TYPE_NSDQ,
492 REG_TYPE_VFC,
493 REG_TYPE_MVF,
494 REG_TYPE_MVD,
495 REG_TYPE_MVFX,
496 REG_TYPE_MVDX,
497 REG_TYPE_MVAX,
498 REG_TYPE_DSPSC,
499 REG_TYPE_MMXWR,
500 REG_TYPE_MMXWC,
501 REG_TYPE_MMXWCG,
502 REG_TYPE_XSCALE,
505 /* Structure for a hash table entry for a register.
506 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
507 information which states whether a vector type or index is specified (for a
508 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
509 struct reg_entry
511 const char * name;
512 unsigned char number;
513 unsigned char type;
514 unsigned char builtin;
515 struct neon_typed_alias * neon;
518 /* Diagnostics used when we don't get a register of the expected type. */
519 const char * const reg_expected_msgs[] =
521 N_("ARM register expected"),
522 N_("bad or missing co-processor number"),
523 N_("co-processor register expected"),
524 N_("FPA register expected"),
525 N_("VFP single precision register expected"),
526 N_("VFP/Neon double precision register expected"),
527 N_("Neon quad precision register expected"),
528 N_("VFP single or double precision register expected"),
529 N_("Neon double or quad precision register expected"),
530 N_("VFP single, double or Neon quad precision register expected"),
531 N_("VFP system register expected"),
532 N_("Maverick MVF register expected"),
533 N_("Maverick MVD register expected"),
534 N_("Maverick MVFX register expected"),
535 N_("Maverick MVDX register expected"),
536 N_("Maverick MVAX register expected"),
537 N_("Maverick DSPSC register expected"),
538 N_("iWMMXt data register expected"),
539 N_("iWMMXt control register expected"),
540 N_("iWMMXt scalar register expected"),
541 N_("XScale accumulator register expected"),
544 /* Some well known registers that we refer to directly elsewhere. */
545 #define REG_SP 13
546 #define REG_LR 14
547 #define REG_PC 15
549 /* ARM instructions take 4 bytes in the object file, Thumb instructions
550 take 2: */
551 #define INSN_SIZE 4
553 struct asm_opcode
555 /* Basic string to match. */
556 const char * template;
558 /* Parameters to instruction. */
559 unsigned char operands[8];
561 /* Conditional tag - see opcode_lookup. */
562 unsigned int tag : 4;
564 /* Basic instruction code. */
565 unsigned int avalue : 28;
567 /* Thumb-format instruction code. */
568 unsigned int tvalue;
570 /* Which architecture variant provides this instruction. */
571 const arm_feature_set * avariant;
572 const arm_feature_set * tvariant;
574 /* Function to call to encode instruction in ARM format. */
575 void (* aencode) (void);
577 /* Function to call to encode instruction in Thumb format. */
578 void (* tencode) (void);
581 /* Defines for various bits that we will want to toggle. */
582 #define INST_IMMEDIATE 0x02000000
583 #define OFFSET_REG 0x02000000
584 #define HWOFFSET_IMM 0x00400000
585 #define SHIFT_BY_REG 0x00000010
586 #define PRE_INDEX 0x01000000
587 #define INDEX_UP 0x00800000
588 #define WRITE_BACK 0x00200000
589 #define LDM_TYPE_2_OR_3 0x00400000
590 #define CPSI_MMOD 0x00020000
592 #define LITERAL_MASK 0xf000f000
593 #define OPCODE_MASK 0xfe1fffff
594 #define V4_STR_BIT 0x00000020
596 #define T2_SUBS_PC_LR 0xf3de8f00
598 #define DATA_OP_SHIFT 21
600 #define T2_OPCODE_MASK 0xfe1fffff
601 #define T2_DATA_OP_SHIFT 21
603 /* Codes to distinguish the arithmetic instructions. */
604 #define OPCODE_AND 0
605 #define OPCODE_EOR 1
606 #define OPCODE_SUB 2
607 #define OPCODE_RSB 3
608 #define OPCODE_ADD 4
609 #define OPCODE_ADC 5
610 #define OPCODE_SBC 6
611 #define OPCODE_RSC 7
612 #define OPCODE_TST 8
613 #define OPCODE_TEQ 9
614 #define OPCODE_CMP 10
615 #define OPCODE_CMN 11
616 #define OPCODE_ORR 12
617 #define OPCODE_MOV 13
618 #define OPCODE_BIC 14
619 #define OPCODE_MVN 15
621 #define T2_OPCODE_AND 0
622 #define T2_OPCODE_BIC 1
623 #define T2_OPCODE_ORR 2
624 #define T2_OPCODE_ORN 3
625 #define T2_OPCODE_EOR 4
626 #define T2_OPCODE_ADD 8
627 #define T2_OPCODE_ADC 10
628 #define T2_OPCODE_SBC 11
629 #define T2_OPCODE_SUB 13
630 #define T2_OPCODE_RSB 14
632 #define T_OPCODE_MUL 0x4340
633 #define T_OPCODE_TST 0x4200
634 #define T_OPCODE_CMN 0x42c0
635 #define T_OPCODE_NEG 0x4240
636 #define T_OPCODE_MVN 0x43c0
638 #define T_OPCODE_ADD_R3 0x1800
639 #define T_OPCODE_SUB_R3 0x1a00
640 #define T_OPCODE_ADD_HI 0x4400
641 #define T_OPCODE_ADD_ST 0xb000
642 #define T_OPCODE_SUB_ST 0xb080
643 #define T_OPCODE_ADD_SP 0xa800
644 #define T_OPCODE_ADD_PC 0xa000
645 #define T_OPCODE_ADD_I8 0x3000
646 #define T_OPCODE_SUB_I8 0x3800
647 #define T_OPCODE_ADD_I3 0x1c00
648 #define T_OPCODE_SUB_I3 0x1e00
650 #define T_OPCODE_ASR_R 0x4100
651 #define T_OPCODE_LSL_R 0x4080
652 #define T_OPCODE_LSR_R 0x40c0
653 #define T_OPCODE_ROR_R 0x41c0
654 #define T_OPCODE_ASR_I 0x1000
655 #define T_OPCODE_LSL_I 0x0000
656 #define T_OPCODE_LSR_I 0x0800
658 #define T_OPCODE_MOV_I8 0x2000
659 #define T_OPCODE_CMP_I8 0x2800
660 #define T_OPCODE_CMP_LR 0x4280
661 #define T_OPCODE_MOV_HR 0x4600
662 #define T_OPCODE_CMP_HR 0x4500
664 #define T_OPCODE_LDR_PC 0x4800
665 #define T_OPCODE_LDR_SP 0x9800
666 #define T_OPCODE_STR_SP 0x9000
667 #define T_OPCODE_LDR_IW 0x6800
668 #define T_OPCODE_STR_IW 0x6000
669 #define T_OPCODE_LDR_IH 0x8800
670 #define T_OPCODE_STR_IH 0x8000
671 #define T_OPCODE_LDR_IB 0x7800
672 #define T_OPCODE_STR_IB 0x7000
673 #define T_OPCODE_LDR_RW 0x5800
674 #define T_OPCODE_STR_RW 0x5000
675 #define T_OPCODE_LDR_RH 0x5a00
676 #define T_OPCODE_STR_RH 0x5200
677 #define T_OPCODE_LDR_RB 0x5c00
678 #define T_OPCODE_STR_RB 0x5400
680 #define T_OPCODE_PUSH 0xb400
681 #define T_OPCODE_POP 0xbc00
683 #define T_OPCODE_BRANCH 0xe000
685 #define THUMB_SIZE 2 /* Size of thumb instruction. */
686 #define THUMB_PP_PC_LR 0x0100
687 #define THUMB_LOAD_BIT 0x0800
688 #define THUMB2_LOAD_BIT 0x00100000
690 #define BAD_ARGS _("bad arguments to instruction")
691 #define BAD_SP _("r13 not allowed here")
692 #define BAD_PC _("r15 not allowed here")
693 #define BAD_COND _("instruction cannot be conditional")
694 #define BAD_OVERLAP _("registers may not be the same")
695 #define BAD_HIREG _("lo register required")
696 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
697 #define BAD_ADDR_MODE _("instruction does not accept this addressing mode")
698 #define BAD_BRANCH _("branch must be last instruction in IT block")
699 #define BAD_NOT_IT _("instruction not allowed in IT block")
700 #define BAD_FPU _("selected FPU does not support instruction")
701 #define BAD_OUT_IT _("thumb conditional instruction should be in IT block")
702 #define BAD_IT_COND _("incorrect condition in IT block")
703 #define BAD_IT_IT _("IT falling in the range of a previous IT block")
704 #define MISSING_FNSTART _("missing .fnstart before unwinding directive")
706 static struct hash_control * arm_ops_hsh;
707 static struct hash_control * arm_cond_hsh;
708 static struct hash_control * arm_shift_hsh;
709 static struct hash_control * arm_psr_hsh;
710 static struct hash_control * arm_v7m_psr_hsh;
711 static struct hash_control * arm_reg_hsh;
712 static struct hash_control * arm_reloc_hsh;
713 static struct hash_control * arm_barrier_opt_hsh;
715 /* Stuff needed to resolve the label ambiguity
718 label: <insn>
719 may differ from:
721 label:
722 <insn> */
724 symbolS * last_label_seen;
725 static int label_is_thumb_function_name = FALSE;
727 /* Literal pool structure. Held on a per-section
728 and per-sub-section basis. */
730 #define MAX_LITERAL_POOL_SIZE 1024
731 typedef struct literal_pool
733 expressionS literals [MAX_LITERAL_POOL_SIZE];
734 unsigned int next_free_entry;
735 unsigned int id;
736 symbolS * symbol;
737 segT section;
738 subsegT sub_section;
739 struct literal_pool * next;
740 } literal_pool;
742 /* Pointer to a linked list of literal pools. */
743 literal_pool * list_of_pools = NULL;
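/* For illustration: a pseudo-load such as

     ldr  r0, =0x12345678

   typically has its constant placed in the literal pool of the current
   section/sub-section; the pool is emitted at the next .ltorg directive
   (or when it is otherwise flushed) and the ldr becomes a pc-relative
   load from that pool entry.  */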
745 #ifdef OBJ_ELF
746 # define now_it seg_info (now_seg)->tc_segment_info_data.current_it
747 #else
748 static struct current_it now_it;
749 #endif
751 static inline int
752 now_it_compatible (int cond)
754 return (cond & ~1) == (now_it.cc & ~1);
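/* For example, if the current IT block condition is EQ (0x0), then both
   EQ (0x0) and its inverse NE (0x1) compare equal here once the low bit
   is masked off, so either is considered compatible.  */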
757 static inline int
758 conditional_insn (void)
760 return inst.cond != COND_ALWAYS;
763 static int in_it_block (void);
765 static int handle_it_state (void);
767 static void force_automatic_it_block_close (void);
769 static void it_fsm_post_encode (void);
771 #define set_it_insn_type(type) \
772 do \
774 inst.it_insn_type = type; \
775 if (handle_it_state () == FAIL) \
776 return; \
778 while (0)
780 #define set_it_insn_type_nonvoid(type, failret) \
781 do \
783 inst.it_insn_type = type; \
784 if (handle_it_state () == FAIL) \
785 return failret; \
787 while(0)
789 #define set_it_insn_type_last() \
790 do \
792 if (inst.cond == COND_ALWAYS) \
793 set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
794 else \
795 set_it_insn_type (INSIDE_IT_LAST_INSN); \
797 while (0)
799 /* Pure syntax. */
801 /* This array holds the chars that always start a comment. If the
802 pre-processor is disabled, these aren't very useful. */
803 const char comment_chars[] = "@";
805 /* This array holds the chars that only start a comment at the beginning of
806 a line. If the line seems to have the form '# 123 filename'
807 .line and .file directives will appear in the pre-processed output. */
808 /* Note that input_file.c hand checks for '#' at the beginning of the
809 first line of the input file. This is because the compiler outputs
810 #NO_APP at the beginning of its output. */
811 /* Also note that comments like this one will always work. */
812 const char line_comment_chars[] = "#";
814 const char line_separator_chars[] = ";";
816 /* Chars that can be used to separate mant
817 from exp in floating point numbers. */
818 const char EXP_CHARS[] = "eE";
820 /* Chars that mean this number is a floating point constant. */
821 /* As in 0f12.456 */
822 /* or 0d1.2345e12 */
824 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
826 /* Prefix characters that indicate the start of an immediate
827 value. */
828 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
830 /* Separator character handling. */
832 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
834 static inline int
835 skip_past_char (char ** str, char c)
837 if (**str == c)
839 (*str)++;
840 return SUCCESS;
842 else
843 return FAIL;
846 #define skip_past_comma(str) skip_past_char (str, ',')
848 /* Arithmetic expressions (possibly involving symbols). */
850 /* Return TRUE if anything in the expression is a bignum. */
852 static int
853 walk_no_bignums (symbolS * sp)
855 if (symbol_get_value_expression (sp)->X_op == O_big)
856 return 1;
858 if (symbol_get_value_expression (sp)->X_add_symbol)
860 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
861 || (symbol_get_value_expression (sp)->X_op_symbol
862 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
865 return 0;
868 static int in_my_get_expression = 0;
870 /* Third argument to my_get_expression. */
871 #define GE_NO_PREFIX 0
872 #define GE_IMM_PREFIX 1
873 #define GE_OPT_PREFIX 2
874 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
875 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
876 #define GE_OPT_PREFIX_BIG 3
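/* For illustration: with GE_IMM_PREFIX, "#4" (or "$4") is accepted while a
   bare "4" is rejected with "immediate expression requires a # prefix";
   with GE_OPT_PREFIX the '#'/'$' may be present or omitted.  In unified
   syntax every mode except GE_OPT_PREFIX_BIG is treated as GE_OPT_PREFIX
   (see my_get_expression below).  */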
878 static int
879 my_get_expression (expressionS * ep, char ** str, int prefix_mode)
881 char * save_in;
882 segT seg;
884 /* In unified syntax, all prefixes are optional. */
885 if (unified_syntax)
886 prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
887 : GE_OPT_PREFIX;
889 switch (prefix_mode)
891 case GE_NO_PREFIX: break;
892 case GE_IMM_PREFIX:
893 if (!is_immediate_prefix (**str))
895 inst.error = _("immediate expression requires a # prefix");
896 return FAIL;
898 (*str)++;
899 break;
900 case GE_OPT_PREFIX:
901 case GE_OPT_PREFIX_BIG:
902 if (is_immediate_prefix (**str))
903 (*str)++;
904 break;
905 default: abort ();
908 memset (ep, 0, sizeof (expressionS));
910 save_in = input_line_pointer;
911 input_line_pointer = *str;
912 in_my_get_expression = 1;
913 seg = expression (ep);
914 in_my_get_expression = 0;
916 if (ep->X_op == O_illegal)
918 /* We found a bad expression in md_operand(). */
919 *str = input_line_pointer;
920 input_line_pointer = save_in;
921 if (inst.error == NULL)
922 inst.error = _("bad expression");
923 return 1;
926 #ifdef OBJ_AOUT
927 if (seg != absolute_section
928 && seg != text_section
929 && seg != data_section
930 && seg != bss_section
931 && seg != undefined_section)
933 inst.error = _("bad segment");
934 *str = input_line_pointer;
935 input_line_pointer = save_in;
936 return 1;
938 #endif
940 /* Get rid of any bignums now, so that we don't generate an error for which
941 we can't establish a line number later on. Big numbers are never valid
942 in instructions, which is where this routine is always called. */
943 if (prefix_mode != GE_OPT_PREFIX_BIG
944 && (ep->X_op == O_big
945 || (ep->X_add_symbol
946 && (walk_no_bignums (ep->X_add_symbol)
947 || (ep->X_op_symbol
948 && walk_no_bignums (ep->X_op_symbol))))))
950 inst.error = _("invalid constant");
951 *str = input_line_pointer;
952 input_line_pointer = save_in;
953 return 1;
956 *str = input_line_pointer;
957 input_line_pointer = save_in;
958 return 0;
961 /* Turn a string in input_line_pointer into a floating point constant
962 of type TYPE, and store the appropriate bytes in *LITP. The number
963 of LITTLENUMS emitted is stored in *SIZEP. An error message is
964 returned, or NULL on OK.
966 Note that fp constants aren't represented in the normal way on the ARM.
967 In big endian mode, things are as expected. However, in little endian
968 mode fp constants are big-endian word-wise, and little-endian byte-wise
969 within the words. For example, (double) 1.1 in big endian mode is
970 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
971 the byte sequence 99 99 f1 3f 9a 99 99 99.
973 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
975 char *
976 md_atof (int type, char * litP, int * sizeP)
978 int prec;
979 LITTLENUM_TYPE words[MAX_LITTLENUMS];
980 char *t;
981 int i;
983 switch (type)
985 case 'f':
986 case 'F':
987 case 's':
988 case 'S':
989 prec = 2;
990 break;
992 case 'd':
993 case 'D':
994 case 'r':
995 case 'R':
996 prec = 4;
997 break;
999 case 'x':
1000 case 'X':
1001 prec = 5;
1002 break;
1004 case 'p':
1005 case 'P':
1006 prec = 5;
1007 break;
1009 default:
1010 *sizeP = 0;
1011 return _("Unrecognized or unsupported floating point constant");
1014 t = atof_ieee (input_line_pointer, type, words);
1015 if (t)
1016 input_line_pointer = t;
1017 *sizeP = prec * sizeof (LITTLENUM_TYPE);
1019 if (target_big_endian)
1021 for (i = 0; i < prec; i++)
1023 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1024 litP += sizeof (LITTLENUM_TYPE);
1027 else
1029 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
1030 for (i = prec - 1; i >= 0; i--)
1032 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1033 litP += sizeof (LITTLENUM_TYPE);
1035 else
1036 /* For a 4 byte float the order of elements in `words' is 1 0.
1037 For an 8 byte float the order is 1 0 3 2. */
1038 for (i = 0; i < prec; i += 2)
1040 md_number_to_chars (litP, (valueT) words[i + 1],
1041 sizeof (LITTLENUM_TYPE));
1042 md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
1043 (valueT) words[i], sizeof (LITTLENUM_TYPE));
1044 litP += 2 * sizeof (LITTLENUM_TYPE);
1048 return NULL;
1051 /* We handle all bad expressions here, so that we can report the faulty
1052 instruction in the error message. */
1053 void
1054 md_operand (expressionS * expr)
1056 if (in_my_get_expression)
1057 expr->X_op = O_illegal;
1060 /* Immediate values. */
1062 /* Generic immediate-value read function for use in directives.
1063 Accepts anything that 'expression' can fold to a constant.
1064 *val receives the number. */
1065 #ifdef OBJ_ELF
1066 static int
1067 immediate_for_directive (int *val)
1069 expressionS exp;
1070 exp.X_op = O_illegal;
1072 if (is_immediate_prefix (*input_line_pointer))
1074 input_line_pointer++;
1075 expression (&exp);
1078 if (exp.X_op != O_constant)
1080 as_bad (_("expected #constant"));
1081 ignore_rest_of_line ();
1082 return FAIL;
1084 *val = exp.X_add_number;
1085 return SUCCESS;
1087 #endif
1089 /* Register parsing. */
1091 /* Generic register parser. CCP points to what should be the
1092 beginning of a register name. If it is indeed a valid register
1093 name, advance CCP over it and return the reg_entry structure;
1094 otherwise return NULL. Does not issue diagnostics. */
1096 static struct reg_entry *
1097 arm_reg_parse_multi (char **ccp)
1099 char *start = *ccp;
1100 char *p;
1101 struct reg_entry *reg;
1103 #ifdef REGISTER_PREFIX
1104 if (*start != REGISTER_PREFIX)
1105 return NULL;
1106 start++;
1107 #endif
1108 #ifdef OPTIONAL_REGISTER_PREFIX
1109 if (*start == OPTIONAL_REGISTER_PREFIX)
1110 start++;
1111 #endif
1113 p = start;
1114 if (!ISALPHA (*p) || !is_name_beginner (*p))
1115 return NULL;
1117 do
1118 p++;
1119 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1121 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1123 if (!reg)
1124 return NULL;
1126 *ccp = p;
1127 return reg;
1130 static int
1131 arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
1132 enum arm_reg_type type)
1134 /* Alternative syntaxes are accepted for a few register classes. */
1135 switch (type)
1137 case REG_TYPE_MVF:
1138 case REG_TYPE_MVD:
1139 case REG_TYPE_MVFX:
1140 case REG_TYPE_MVDX:
1141 /* Generic coprocessor register names are allowed for these. */
1142 if (reg && reg->type == REG_TYPE_CN)
1143 return reg->number;
1144 break;
1146 case REG_TYPE_CP:
1147 /* For backward compatibility, a bare number is valid here. */
1149 unsigned long processor = strtoul (start, ccp, 10);
1150 if (*ccp != start && processor <= 15)
1151 return processor;
1154 case REG_TYPE_MMXWC:
1155 /* WC includes WCG. ??? I'm not sure this is true for all
1156 instructions that take WC registers. */
1157 if (reg && reg->type == REG_TYPE_MMXWCG)
1158 return reg->number;
1159 break;
1161 default:
1162 break;
1165 return FAIL;
1168 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1169 return value is the register number or FAIL. */
1171 static int
1172 arm_reg_parse (char **ccp, enum arm_reg_type type)
1174 char *start = *ccp;
1175 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1176 int ret;
1178 /* Do not allow a scalar (reg+index) to parse as a register. */
1179 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1180 return FAIL;
1182 if (reg && reg->type == type)
1183 return reg->number;
1185 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1186 return ret;
1188 *ccp = start;
1189 return FAIL;
1192 /* Parse a Neon type specifier. *STR should point at the leading '.'
1193 character. Does no verification at this stage that the type fits the opcode
1194 properly. E.g.,
1196 .i32.i32.s16
1197 .s32.f32
1198 .u16
1200 Can all be legally parsed by this function.
1202 Fills in neon_type struct pointer with parsed information, and updates STR
1203 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1204 type, FAIL if not. */
1206 static int
1207 parse_neon_type (struct neon_type *type, char **str)
1209 char *ptr = *str;
1211 if (type)
1212 type->elems = 0;
1214 while (type->elems < NEON_MAX_TYPE_ELS)
1216 enum neon_el_type thistype = NT_untyped;
1217 unsigned thissize = -1u;
1219 if (*ptr != '.')
1220 break;
1222 ptr++;
1224 /* Just a size without an explicit type. */
1225 if (ISDIGIT (*ptr))
1226 goto parsesize;
1228 switch (TOLOWER (*ptr))
1230 case 'i': thistype = NT_integer; break;
1231 case 'f': thistype = NT_float; break;
1232 case 'p': thistype = NT_poly; break;
1233 case 's': thistype = NT_signed; break;
1234 case 'u': thistype = NT_unsigned; break;
1235 case 'd':
1236 thistype = NT_float;
1237 thissize = 64;
1238 ptr++;
1239 goto done;
1240 default:
1241 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1242 return FAIL;
1245 ptr++;
1247 /* .f is an abbreviation for .f32. */
1248 if (thistype == NT_float && !ISDIGIT (*ptr))
1249 thissize = 32;
1250 else
1252 parsesize:
1253 thissize = strtoul (ptr, &ptr, 10);
1255 if (thissize != 8 && thissize != 16 && thissize != 32
1256 && thissize != 64)
1258 as_bad (_("bad size %d in type specifier"), thissize);
1259 return FAIL;
1263 done:
1264 if (type)
1266 type->el[type->elems].type = thistype;
1267 type->el[type->elems].size = thissize;
1268 type->elems++;
1272 /* Empty/missing type is not a successful parse. */
1273 if (type->elems == 0)
1274 return FAIL;
1276 *str = ptr;
1278 return SUCCESS;
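/* For example, parsing ".s32.f32" with the function above yields
   type->elems == 2 with el[0] == { NT_signed, 32 } and
   el[1] == { NT_float, 32 }; a bare ".f" is shorthand for ".f32".  */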
1281 /* Errors may be set multiple times during parsing or bit encoding
1282 (particularly in the Neon bits), but usually the earliest error which is set
1283 will be the most meaningful. Avoid overwriting it with later (cascading)
1284 errors by calling this function. */
1286 static void
1287 first_error (const char *err)
1289 if (!inst.error)
1290 inst.error = err;
1293 /* Parse a single type, e.g. ".s32", leading period included. */
1294 static int
1295 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1297 char *str = *ccp;
1298 struct neon_type optype;
1300 if (*str == '.')
1302 if (parse_neon_type (&optype, &str) == SUCCESS)
1304 if (optype.elems == 1)
1305 *vectype = optype.el[0];
1306 else
1308 first_error (_("only one type should be specified for operand"));
1309 return FAIL;
1312 else
1314 first_error (_("vector type expected"));
1315 return FAIL;
1318 else
1319 return FAIL;
1321 *ccp = str;
1323 return SUCCESS;
1326 /* Special meanings for indices (which have a range of 0-7), which will fit into
1327 a 4-bit integer. */
1329 #define NEON_ALL_LANES 15
1330 #define NEON_INTERLEAVE_LANES 14
1332 /* Parse either a register or a scalar, with an optional type. Return the
1333 register number, and optionally fill in the actual type of the register
1334 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1335 type/index information in *TYPEINFO. */
1337 static int
1338 parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
1339 enum arm_reg_type *rtype,
1340 struct neon_typed_alias *typeinfo)
1342 char *str = *ccp;
1343 struct reg_entry *reg = arm_reg_parse_multi (&str);
1344 struct neon_typed_alias atype;
1345 struct neon_type_el parsetype;
1347 atype.defined = 0;
1348 atype.index = -1;
1349 atype.eltype.type = NT_invtype;
1350 atype.eltype.size = -1;
1352 /* Try alternate syntax for some types of register. Note these are mutually
1353 exclusive with the Neon syntax extensions. */
1354 if (reg == NULL)
1356 int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
1357 if (altreg != FAIL)
1358 *ccp = str;
1359 if (typeinfo)
1360 *typeinfo = atype;
1361 return altreg;
1364 /* Undo polymorphism when a set of register types may be accepted. */
1365 if ((type == REG_TYPE_NDQ
1366 && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
1367 || (type == REG_TYPE_VFSD
1368 && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
1369 || (type == REG_TYPE_NSDQ
1370 && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
1371 || reg->type == REG_TYPE_NQ))
1372 || (type == REG_TYPE_MMXWC
1373 && (reg->type == REG_TYPE_MMXWCG)))
1374 type = reg->type;
1376 if (type != reg->type)
1377 return FAIL;
1379 if (reg->neon)
1380 atype = *reg->neon;
1382 if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
1384 if ((atype.defined & NTA_HASTYPE) != 0)
1386 first_error (_("can't redefine type for operand"));
1387 return FAIL;
1389 atype.defined |= NTA_HASTYPE;
1390 atype.eltype = parsetype;
1393 if (skip_past_char (&str, '[') == SUCCESS)
1395 if (type != REG_TYPE_VFD)
1397 first_error (_("only D registers may be indexed"));
1398 return FAIL;
1401 if ((atype.defined & NTA_HASINDEX) != 0)
1403 first_error (_("can't change index for operand"));
1404 return FAIL;
1407 atype.defined |= NTA_HASINDEX;
1409 if (skip_past_char (&str, ']') == SUCCESS)
1410 atype.index = NEON_ALL_LANES;
1411 else
1413 expressionS exp;
1415 my_get_expression (&exp, &str, GE_NO_PREFIX);
1417 if (exp.X_op != O_constant)
1419 first_error (_("constant expression required"));
1420 return FAIL;
1423 if (skip_past_char (&str, ']') == FAIL)
1424 return FAIL;
1426 atype.index = exp.X_add_number;
1430 if (typeinfo)
1431 *typeinfo = atype;
1433 if (rtype)
1434 *rtype = type;
1436 *ccp = str;
1438 return reg->number;
1441 /* Like arm_reg_parse, but also allow the following extra features:
1442 - If RTYPE is non-zero, return the (possibly restricted) type of the
1443 register (e.g. Neon double or quad reg when either has been requested).
1444 - If this is a Neon vector type with additional type information, fill
1445 in the struct pointed to by VECTYPE (if non-NULL).
1446 This function will fault on encountering a scalar. */
1448 static int
1449 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1450 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1452 struct neon_typed_alias atype;
1453 char *str = *ccp;
1454 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1456 if (reg == FAIL)
1457 return FAIL;
1459 /* Do not allow a scalar (reg+index) to parse as a register. */
1460 if ((atype.defined & NTA_HASINDEX) != 0)
1462 first_error (_("register operand expected, but got scalar"));
1463 return FAIL;
1466 if (vectype)
1467 *vectype = atype.eltype;
1469 *ccp = str;
1471 return reg;
1474 #define NEON_SCALAR_REG(X) ((X) >> 4)
1475 #define NEON_SCALAR_INDEX(X) ((X) & 15)
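/* For example, parse_scalar (below) encodes "d5[2]" as 5 * 16 + 2 == 82,
   so NEON_SCALAR_REG (82) == 5 and NEON_SCALAR_INDEX (82) == 2.  */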
1477 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1478 have enough information to be able to do a good job bounds-checking. So, we
1479 just do easy checks here, and do further checks later. */
1481 static int
1482 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1484 int reg;
1485 char *str = *ccp;
1486 struct neon_typed_alias atype;
1488 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1490 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1491 return FAIL;
1493 if (atype.index == NEON_ALL_LANES)
1495 first_error (_("scalar must have an index"));
1496 return FAIL;
1498 else if (atype.index >= 64 / elsize)
1500 first_error (_("scalar index out of range"));
1501 return FAIL;
1504 if (type)
1505 *type = atype.eltype;
1507 *ccp = str;
1509 return reg * 16 + atype.index;
1512 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1514 static long
1515 parse_reg_list (char ** strp)
1517 char * str = * strp;
1518 long range = 0;
1519 int another_range;
1521 /* We come back here if we get ranges concatenated by '+' or '|'. */
1524 another_range = 0;
1526 if (*str == '{')
1528 int in_range = 0;
1529 int cur_reg = -1;
1531 str++;
1534 int reg;
1536 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1538 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1539 return FAIL;
1542 if (in_range)
1544 int i;
1546 if (reg <= cur_reg)
1548 first_error (_("bad range in register list"));
1549 return FAIL;
1552 for (i = cur_reg + 1; i < reg; i++)
1554 if (range & (1 << i))
1555 as_tsktsk
1556 (_("Warning: duplicated register (r%d) in register list"),
1558 else
1559 range |= 1 << i;
1561 in_range = 0;
1564 if (range & (1 << reg))
1565 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1566 reg);
1567 else if (reg <= cur_reg)
1568 as_tsktsk (_("Warning: register range not in ascending order"));
1570 range |= 1 << reg;
1571 cur_reg = reg;
1573 while (skip_past_comma (&str) != FAIL
1574 || (in_range = 1, *str++ == '-'));
1575 str--;
1577 if (*str++ != '}')
1579 first_error (_("missing `}'"));
1580 return FAIL;
1583 else
1585 expressionS expr;
1587 if (my_get_expression (&expr, &str, GE_NO_PREFIX))
1588 return FAIL;
1590 if (expr.X_op == O_constant)
1592 if (expr.X_add_number
1593 != (expr.X_add_number & 0x0000ffff))
1595 inst.error = _("invalid register mask");
1596 return FAIL;
1599 if ((range & expr.X_add_number) != 0)
1601 int regno = range & expr.X_add_number;
1603 regno &= -regno;
1604 regno = (1 << regno) - 1;
1605 as_tsktsk
1606 (_("Warning: duplicated register (r%d) in register list"),
1607 regno);
1610 range |= expr.X_add_number;
1612 else
1614 if (inst.reloc.type != 0)
1616 inst.error = _("expression too complex");
1617 return FAIL;
1620 memcpy (&inst.reloc.exp, &expr, sizeof (expressionS));
1621 inst.reloc.type = BFD_RELOC_ARM_MULTI;
1622 inst.reloc.pc_rel = 0;
1626 if (*str == '|' || *str == '+')
1628 str++;
1629 another_range = 1;
1632 while (another_range);
1634 *strp = str;
1635 return range;
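/* For example, "{r0-r3,lr}" parses to the bitmask 0x400f: bits 0-3 for
   r0-r3 plus bit 14 for lr (r14).  */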
1638 /* Types of registers in a list. */
1640 enum reg_list_els
1642 REGLIST_VFP_S,
1643 REGLIST_VFP_D,
1644 REGLIST_NEON_D
1647 /* Parse a VFP register list. If the string is invalid return FAIL.
1648 Otherwise return the number of registers, and set PBASE to the first
1649 register. Parses registers of type ETYPE.
1650 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1651 - Q registers can be used to specify pairs of D registers
1652 - { } can be omitted from around a singleton register list
1653 FIXME: This is not implemented, as it would require backtracking in
1654 some cases, e.g.:
1655 vtbl.8 d3,d4,d5
1656 This could be done (the meaning isn't really ambiguous), but doesn't
1657 fit in well with the current parsing framework.
1658 - 32 D registers may be used (also true for VFPv3).
1659 FIXME: Types are ignored in these register lists, which is probably a
1660 bug. */
1662 static int
1663 parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
1665 char *str = *ccp;
1666 int base_reg;
1667 int new_base;
1668 enum arm_reg_type regtype = 0;
1669 int max_regs = 0;
1670 int count = 0;
1671 int warned = 0;
1672 unsigned long mask = 0;
1673 int i;
1675 if (*str != '{')
1677 inst.error = _("expecting {");
1678 return FAIL;
1681 str++;
1683 switch (etype)
1685 case REGLIST_VFP_S:
1686 regtype = REG_TYPE_VFS;
1687 max_regs = 32;
1688 break;
1690 case REGLIST_VFP_D:
1691 regtype = REG_TYPE_VFD;
1692 break;
1694 case REGLIST_NEON_D:
1695 regtype = REG_TYPE_NDQ;
1696 break;
1699 if (etype != REGLIST_VFP_S)
1701 /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */
1702 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
1704 max_regs = 32;
1705 if (thumb_mode)
1706 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
1707 fpu_vfp_ext_d32);
1708 else
1709 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
1710 fpu_vfp_ext_d32);
1712 else
1713 max_regs = 16;
1716 base_reg = max_regs;
1720 int setmask = 1, addregs = 1;
1722 new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);
1724 if (new_base == FAIL)
1726 first_error (_(reg_expected_msgs[regtype]));
1727 return FAIL;
1730 if (new_base >= max_regs)
1732 first_error (_("register out of range in list"));
1733 return FAIL;
1736 /* Note: a value of 2 * n is returned for the register Q<n>. */
1737 if (regtype == REG_TYPE_NQ)
1739 setmask = 3;
1740 addregs = 2;
1743 if (new_base < base_reg)
1744 base_reg = new_base;
1746 if (mask & (setmask << new_base))
1748 first_error (_("invalid register list"));
1749 return FAIL;
1752 if ((mask >> new_base) != 0 && ! warned)
1754 as_tsktsk (_("register list not in ascending order"));
1755 warned = 1;
1758 mask |= setmask << new_base;
1759 count += addregs;
1761 if (*str == '-') /* We have the start of a range expression */
1763 int high_range;
1765 str++;
1767 if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
1768 == FAIL)
1770 inst.error = gettext (reg_expected_msgs[regtype]);
1771 return FAIL;
1774 if (high_range >= max_regs)
1776 first_error (_("register out of range in list"));
1777 return FAIL;
1780 if (regtype == REG_TYPE_NQ)
1781 high_range = high_range + 1;
1783 if (high_range <= new_base)
1785 inst.error = _("register range not in ascending order");
1786 return FAIL;
1789 for (new_base += addregs; new_base <= high_range; new_base += addregs)
1791 if (mask & (setmask << new_base))
1793 inst.error = _("invalid register list");
1794 return FAIL;
1797 mask |= setmask << new_base;
1798 count += addregs;
1802 while (skip_past_comma (&str) != FAIL);
1804 str++;
1806 /* Sanity check -- should have raised a parse error above. */
1807 if (count == 0 || count > max_regs)
1808 abort ();
1810 *pbase = base_reg;
1812 /* Final test -- the registers must be consecutive. */
1813 mask >>= base_reg;
1814 for (i = 0; i < count; i++)
1816 if ((mask & (1u << i)) == 0)
1818 inst.error = _("non-contiguous register range");
1819 return FAIL;
1823 *ccp = str;
1825 return count;
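/* For example, "{d8-d10}" with etype == REGLIST_VFP_D sets *pbase to 8 and
   returns 3, while a gappy list such as "{d8,d10}" is rejected by the
   final test with "non-contiguous register range".  */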
1828 /* True if two alias types are the same. */
1830 static bfd_boolean
1831 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1833 if (!a && !b)
1834 return TRUE;
1836 if (!a || !b)
1837 return FALSE;
1839 if (a->defined != b->defined)
1840 return FALSE;
1842 if ((a->defined & NTA_HASTYPE) != 0
1843 && (a->eltype.type != b->eltype.type
1844 || a->eltype.size != b->eltype.size))
1845 return FALSE;
1847 if ((a->defined & NTA_HASINDEX) != 0
1848 && (a->index != b->index))
1849 return FALSE;
1851 return TRUE;
1854 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1855 The base register is put in *PBASE.
1856 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1857 the return value.
1858 The register stride (minus one) is put in bit 4 of the return value.
1859 Bits [6:5] encode the list length (minus one).
1860 The type of the list elements is put in *ELTYPE, if non-NULL. */
1862 #define NEON_LANE(X) ((X) & 0xf)
1863 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1864 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
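/* Worked example: the list "{d0[1],d2[1]}" gives base_reg == 0, lane 1, a
   register stride of 2 and a length of 2, so the function below returns
   1 | ((2 - 1) << 4) | ((2 - 1) << 5) == 0x31; NEON_LANE, NEON_REG_STRIDE
   and NEON_REGLIST_LENGTH then recover 1, 2 and 2 respectively.  */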
1866 static int
1867 parse_neon_el_struct_list (char **str, unsigned *pbase,
1868 struct neon_type_el *eltype)
1870 char *ptr = *str;
1871 int base_reg = -1;
1872 int reg_incr = -1;
1873 int count = 0;
1874 int lane = -1;
1875 int leading_brace = 0;
1876 enum arm_reg_type rtype = REG_TYPE_NDQ;
1877 int addregs = 1;
1878 const char *const incr_error = _("register stride must be 1 or 2");
1879 const char *const type_error = _("mismatched element/structure types in list");
1880 struct neon_typed_alias firsttype;
1882 if (skip_past_char (&ptr, '{') == SUCCESS)
1883 leading_brace = 1;
1887 struct neon_typed_alias atype;
1888 int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);
1890 if (getreg == FAIL)
1892 first_error (_(reg_expected_msgs[rtype]));
1893 return FAIL;
1896 if (base_reg == -1)
1898 base_reg = getreg;
1899 if (rtype == REG_TYPE_NQ)
1901 reg_incr = 1;
1902 addregs = 2;
1904 firsttype = atype;
1906 else if (reg_incr == -1)
1908 reg_incr = getreg - base_reg;
1909 if (reg_incr < 1 || reg_incr > 2)
1911 first_error (_(incr_error));
1912 return FAIL;
1915 else if (getreg != base_reg + reg_incr * count)
1917 first_error (_(incr_error));
1918 return FAIL;
1921 if (! neon_alias_types_same (&atype, &firsttype))
1923 first_error (_(type_error));
1924 return FAIL;
1927 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
1928 modes. */
1929 if (ptr[0] == '-')
1931 struct neon_typed_alias htype;
1932 int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
1933 if (lane == -1)
1934 lane = NEON_INTERLEAVE_LANES;
1935 else if (lane != NEON_INTERLEAVE_LANES)
1937 first_error (_(type_error));
1938 return FAIL;
1940 if (reg_incr == -1)
1941 reg_incr = 1;
1942 else if (reg_incr != 1)
1944 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
1945 return FAIL;
1947 ptr++;
1948 hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
1949 if (hireg == FAIL)
1951 first_error (_(reg_expected_msgs[rtype]));
1952 return FAIL;
1954 if (! neon_alias_types_same (&htype, &firsttype))
1956 first_error (_(type_error));
1957 return FAIL;
1959 count += hireg + dregs - getreg;
1960 continue;
1963 /* If we're using Q registers, we can't use [] or [n] syntax. */
1964 if (rtype == REG_TYPE_NQ)
1966 count += 2;
1967 continue;
1970 if ((atype.defined & NTA_HASINDEX) != 0)
1972 if (lane == -1)
1973 lane = atype.index;
1974 else if (lane != atype.index)
1976 first_error (_(type_error));
1977 return FAIL;
1980 else if (lane == -1)
1981 lane = NEON_INTERLEAVE_LANES;
1982 else if (lane != NEON_INTERLEAVE_LANES)
1984 first_error (_(type_error));
1985 return FAIL;
1987 count++;
1989 while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);
1991 /* No lane set by [x]. We must be interleaving structures. */
1992 if (lane == -1)
1993 lane = NEON_INTERLEAVE_LANES;
1995 /* Sanity check. */
1996 if (lane == -1 || base_reg == -1 || count < 1 || count > 4
1997 || (count > 1 && reg_incr == -1))
1999 first_error (_("error parsing element/structure list"));
2000 return FAIL;
2003 if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
2005 first_error (_("expected }"));
2006 return FAIL;
2009 if (reg_incr == -1)
2010 reg_incr = 1;
2012 if (eltype)
2013 *eltype = firsttype.eltype;
2015 *pbase = base_reg;
2016 *str = ptr;
2018 return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
2021 /* Parse an explicit relocation suffix on an expression. This is
2022 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2023 arm_reloc_hsh contains no entries, so this function can only
2024 succeed if there is no () after the word. Returns -1 on error,
2025 BFD_RELOC_UNUSED if there wasn't any suffix. */
2026 static int
2027 parse_reloc (char **str)
2029 struct reloc_entry *r;
2030 char *p, *q;
2032 if (**str != '(')
2033 return BFD_RELOC_UNUSED;
2035 p = *str + 1;
2036 q = p;
2038 while (*q && *q != ')' && *q != ',')
2039 q++;
2040 if (*q != ')')
2041 return -1;
2043 if ((r = hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2044 return -1;
2046 *str = q + 1;
2047 return r->reloc;
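/* For illustration: under OBJ_ELF an operand written as "sym(GOTOFF)"
   would have its "(GOTOFF)" suffix looked up here, assuming that name is
   registered in arm_reloc_hsh elsewhere in this file; with no suffix the
   function returns BFD_RELOC_UNUSED and the expression is handled
   normally.  */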
2050 /* Directives: register aliases. */
2052 static struct reg_entry *
2053 insert_reg_alias (char *str, int number, int type)
2055 struct reg_entry *new;
2056 const char *name;
2058 if ((new = hash_find (arm_reg_hsh, str)) != 0)
2060 if (new->builtin)
2061 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2063 /* Only warn about a redefinition if it's not defined as the
2064 same register. */
2065 else if (new->number != number || new->type != type)
2066 as_warn (_("ignoring redefinition of register alias '%s'"), str);
2068 return NULL;
2071 name = xstrdup (str);
2072 new = xmalloc (sizeof (struct reg_entry));
2074 new->name = name;
2075 new->number = number;
2076 new->type = type;
2077 new->builtin = FALSE;
2078 new->neon = NULL;
2080 if (hash_insert (arm_reg_hsh, name, (void *) new))
2081 abort ();
2083 return new;
2086 static void
2087 insert_neon_reg_alias (char *str, int number, int type,
2088 struct neon_typed_alias *atype)
2090 struct reg_entry *reg = insert_reg_alias (str, number, type);
2092 if (!reg)
2094 first_error (_("attempt to redefine typed alias"));
2095 return;
2098 if (atype)
2100 reg->neon = xmalloc (sizeof (struct neon_typed_alias));
2101 *reg->neon = *atype;
2105 /* Look for the .req directive. This is of the form:
2107 new_register_name .req existing_register_name
2109 If we find one, or if it looks sufficiently like one that we want to
2110 handle any error here, return TRUE. Otherwise return FALSE. */
2112 static bfd_boolean
2113 create_register_alias (char * newname, char *p)
2115 struct reg_entry *old;
2116 char *oldname, *nbuf;
2117 size_t nlen;
2119 /* The input scrubber ensures that whitespace after the mnemonic is
2120 collapsed to single spaces. */
2121 oldname = p;
2122 if (strncmp (oldname, " .req ", 6) != 0)
2123 return FALSE;
2125 oldname += 6;
2126 if (*oldname == '\0')
2127 return FALSE;
2129 old = hash_find (arm_reg_hsh, oldname);
2130 if (!old)
2132 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
2133 return TRUE;
2136 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2137 the desired alias name, and p points to its end. If not, then
2138 the desired alias name is in the global original_case_string. */
2139 #ifdef TC_CASE_SENSITIVE
2140 nlen = p - newname;
2141 #else
2142 newname = original_case_string;
2143 nlen = strlen (newname);
2144 #endif
2146 nbuf = alloca (nlen + 1);
2147 memcpy (nbuf, newname, nlen);
2148 nbuf[nlen] = '\0';
2150 /* Create aliases under the new name as stated; an all-lowercase
2151 version of the new name; and an all-uppercase version of the new
2152 name. */
2153 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
2155 for (p = nbuf; *p; p++)
2156 *p = TOUPPER (*p);
2158 if (strncmp (nbuf, newname, nlen))
2160 /* If this attempt to create an additional alias fails, do not bother
2161 trying to create the all-lower case alias. We will fail and issue
2162 a second, duplicate error message. This situation arises when the
2163 programmer does something like:
2164 foo .req r0
2165 Foo .req r1
2166 The second .req creates the "Foo" alias but then fails to create
2167 the artificial FOO alias because it has already been created by the
2168 first .req. */
2169 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
2170 return TRUE;
2173 for (p = nbuf; *p; p++)
2174 *p = TOLOWER (*p);
2176 if (strncmp (nbuf, newname, nlen))
2177 insert_reg_alias (nbuf, old->number, old->type);
2180 return TRUE;
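/* For example, "acc .req r4" creates the aliases "acc" and "ACC" (the
name as written plus its all-upper and all-lower case forms), so a later
"add acc, acc, r0" assembles exactly like "add r4, r4, r0".  */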
2183 /* Create a Neon typed/indexed register alias using directives, e.g.:
2184 X .dn d5.s32[1]
2185 Y .qn 6.s16
2186 Z .dn d7
2187 T .dn Z[0]
2188 These typed registers can be used instead of the types specified after the
2189 Neon mnemonic, so long as all operands given have types. Types can also be
2190 specified directly, e.g.:
2191 vadd d0.s32, d1.s32, d2.s32 */
2193 static bfd_boolean
2194 create_neon_reg_alias (char *newname, char *p)
2196 enum arm_reg_type basetype;
2197 struct reg_entry *basereg;
2198 struct reg_entry mybasereg;
2199 struct neon_type ntype;
2200 struct neon_typed_alias typeinfo;
2201 char *namebuf, *nameend;
2202 int namelen;
2204 typeinfo.defined = 0;
2205 typeinfo.eltype.type = NT_invtype;
2206 typeinfo.eltype.size = -1;
2207 typeinfo.index = -1;
2209 nameend = p;
2211 if (strncmp (p, " .dn ", 5) == 0)
2212 basetype = REG_TYPE_VFD;
2213 else if (strncmp (p, " .qn ", 5) == 0)
2214 basetype = REG_TYPE_NQ;
2215 else
2216 return FALSE;
2218 p += 5;
2220 if (*p == '\0')
2221 return FALSE;
2223 basereg = arm_reg_parse_multi (&p);
2225 if (basereg && basereg->type != basetype)
2227 as_bad (_("bad type for register"));
2228 return FALSE;
2231 if (basereg == NULL)
2233 expressionS exp;
2234 /* Try parsing as an integer. */
2235 my_get_expression (&exp, &p, GE_NO_PREFIX);
2236 if (exp.X_op != O_constant)
2238 as_bad (_("expression must be constant"));
2239 return FALSE;
2241 basereg = &mybasereg;
2242 basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
2243 : exp.X_add_number;
2244 basereg->neon = 0;
2247 if (basereg->neon)
2248 typeinfo = *basereg->neon;
2250 if (parse_neon_type (&ntype, &p) == SUCCESS)
2252 /* We got a type. */
2253 if (typeinfo.defined & NTA_HASTYPE)
2255 as_bad (_("can't redefine the type of a register alias"));
2256 return FALSE;
2259 typeinfo.defined |= NTA_HASTYPE;
2260 if (ntype.elems != 1)
2262 as_bad (_("you must specify a single type only"));
2263 return FALSE;
2265 typeinfo.eltype = ntype.el[0];
2268 if (skip_past_char (&p, '[') == SUCCESS)
2270 expressionS exp;
2271 /* We got a scalar index. */
2273 if (typeinfo.defined & NTA_HASINDEX)
2275 as_bad (_("can't redefine the index of a scalar alias"));
2276 return FALSE;
2279 my_get_expression (&exp, &p, GE_NO_PREFIX);
2281 if (exp.X_op != O_constant)
2283 as_bad (_("scalar index must be constant"));
2284 return FALSE;
2287 typeinfo.defined |= NTA_HASINDEX;
2288 typeinfo.index = exp.X_add_number;
2290 if (skip_past_char (&p, ']') == FAIL)
2292 as_bad (_("expecting ]"));
2293 return FALSE;
2297 namelen = nameend - newname;
2298 namebuf = alloca (namelen + 1);
2299 strncpy (namebuf, newname, namelen);
2300 namebuf[namelen] = '\0';
2302 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2303 typeinfo.defined != 0 ? &typeinfo : NULL);
2305 /* Insert name in all uppercase. */
2306 for (p = namebuf; *p; p++)
2307 *p = TOUPPER (*p);
2309 if (strncmp (namebuf, newname, namelen))
2310 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2311 typeinfo.defined != 0 ? &typeinfo : NULL);
2313 /* Insert name in all lowercase. */
2314 for (p = namebuf; *p; p++)
2315 *p = TOLOWER (*p);
2317 if (strncmp (namebuf, newname, namelen))
2318 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2319 typeinfo.defined != 0 ? &typeinfo : NULL);
2321 return TRUE;
2324 /* Should never be called, as .req goes between the alias and the
2325 register name, not at the beginning of the line. */
2327 static void
2328 s_req (int a ATTRIBUTE_UNUSED)
2330 as_bad (_("invalid syntax for .req directive"));
2333 static void
2334 s_dn (int a ATTRIBUTE_UNUSED)
2336 as_bad (_("invalid syntax for .dn directive"));
2339 static void
2340 s_qn (int a ATTRIBUTE_UNUSED)
2342 as_bad (_("invalid syntax for .qn directive"));
2345 /* The .unreq directive deletes an alias which was previously defined
2346 by .req. For example:
2348 my_alias .req r11
2349 .unreq my_alias */
2351 static void
2352 s_unreq (int a ATTRIBUTE_UNUSED)
2354 char * name;
2355 char saved_char;
2357 name = input_line_pointer;
2359 while (*input_line_pointer != 0
2360 && *input_line_pointer != ' '
2361 && *input_line_pointer != '\n')
2362 ++input_line_pointer;
2364 saved_char = *input_line_pointer;
2365 *input_line_pointer = 0;
2367 if (!*name)
2368 as_bad (_("invalid syntax for .unreq directive"));
2369 else
2371 struct reg_entry *reg = hash_find (arm_reg_hsh, name);
2373 if (!reg)
2374 as_bad (_("unknown register alias '%s'"), name);
2375 else if (reg->builtin)
2376 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
2377 name);
2378 else
2380 char * p;
2381 char * nbuf;
2383 hash_delete (arm_reg_hsh, name, FALSE);
2384 free ((char *) reg->name);
2385 if (reg->neon)
2386 free (reg->neon);
2387 free (reg);
2389 /* Also locate the all upper case and all lower case versions.
2390 Do not complain if we cannot find one or the other as it
2391 was probably deleted above. */
2393 nbuf = strdup (name);
2394 for (p = nbuf; *p; p++)
2395 *p = TOUPPER (*p);
2396 reg = hash_find (arm_reg_hsh, nbuf);
2397 if (reg)
2399 hash_delete (arm_reg_hsh, nbuf, FALSE);
2400 free ((char *) reg->name);
2401 if (reg->neon)
2402 free (reg->neon);
2403 free (reg);
2406 for (p = nbuf; *p; p++)
2407 *p = TOLOWER (*p);
2408 reg = hash_find (arm_reg_hsh, nbuf);
2409 if (reg)
2411 hash_delete (arm_reg_hsh, nbuf, FALSE);
2412 free ((char *) reg->name);
2413 if (reg->neon)
2414 free (reg->neon);
2415 free (reg);
2418 free (nbuf);
2422 *input_line_pointer = saved_char;
2423 demand_empty_rest_of_line ();
2426 /* Directives: Instruction set selection. */
2428 #ifdef OBJ_ELF
2429 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2430 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2431 Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
2432 and $d had type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
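/* A rough illustration of where the mapping symbols end up:
.arm
f: add r0, r0, r1 @ "$a" emitted at the start of ARM code
.thumb
g: adds r0, #1 @ "$t" emitted at the switch to Thumb code
.word 0x12345678 @ "$d" emitted at the start of inline data  */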
2434 static enum mstate mapstate = MAP_UNDEFINED;
2436 /* Create a new mapping symbol for the transition to STATE. */
2438 static void
2439 make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
2441 symbolS * symbolP;
2442 const char * symname;
2443 int type;
2445 switch (state)
2447 case MAP_DATA:
2448 symname = "$d";
2449 type = BSF_NO_FLAGS;
2450 break;
2451 case MAP_ARM:
2452 symname = "$a";
2453 type = BSF_NO_FLAGS;
2454 break;
2455 case MAP_THUMB:
2456 symname = "$t";
2457 type = BSF_NO_FLAGS;
2458 break;
2459 default:
2460 abort ();
2463 symbolP = symbol_new (symname, now_seg, value, frag);
2464 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
2466 switch (state)
2468 case MAP_ARM:
2469 THUMB_SET_FUNC (symbolP, 0);
2470 ARM_SET_THUMB (symbolP, 0);
2471 ARM_SET_INTERWORK (symbolP, support_interwork);
2472 break;
2474 case MAP_THUMB:
2475 THUMB_SET_FUNC (symbolP, 1);
2476 ARM_SET_THUMB (symbolP, 1);
2477 ARM_SET_INTERWORK (symbolP, support_interwork);
2478 break;
2480 case MAP_DATA:
2481 default:
2482 break;
2485 /* Save the mapping symbols for future reference. Also check that
2486 we do not place two mapping symbols at the same offset within a
2487 frag. We'll handle overlap between frags in
2488 check_mapping_symbols. */
2489 if (value == 0)
2491 know (frag->tc_frag_data.first_map == NULL);
2492 frag->tc_frag_data.first_map = symbolP;
2494 if (frag->tc_frag_data.last_map != NULL)
2495 know (S_GET_VALUE (frag->tc_frag_data.last_map) < S_GET_VALUE (symbolP));
2496 frag->tc_frag_data.last_map = symbolP;
2499 /* We must sometimes convert a region marked as code to data during
2500 code alignment, if an odd number of bytes have to be padded. The
2501 code mapping symbol is pushed to an aligned address. */
2503 static void
2504 insert_data_mapping_symbol (enum mstate state,
2505 valueT value, fragS *frag, offsetT bytes)
2507 /* If there was already a mapping symbol, remove it. */
2508 if (frag->tc_frag_data.last_map != NULL
2509 && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
2511 symbolS *symp = frag->tc_frag_data.last_map;
2513 if (value == 0)
2515 know (frag->tc_frag_data.first_map == symp);
2516 frag->tc_frag_data.first_map = NULL;
2518 frag->tc_frag_data.last_map = NULL;
2519 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
2522 make_mapping_symbol (MAP_DATA, value, frag);
2523 make_mapping_symbol (state, value + bytes, frag);
2526 static void mapping_state_2 (enum mstate state, int max_chars);
2528 /* Set the mapping state to STATE. Only call this when about to
2529 emit some STATE bytes to the file. */
2531 void
2532 mapping_state (enum mstate state)
2534 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
2536 if (mapstate == state)
2537 /* The mapping symbol has already been emitted.
2538 There is nothing else to do. */
2539 return;
2540 else if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
2541 /* This case will be evaluated later in the next else. */
2542 return;
2543 else if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
2544 || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
2546 /* Only add the symbol if the offset is > 0:
2547 if we're at the first frag, check its size is > 0;
2548 if we're not at the first frag, then for sure
2549 the offset is > 0. */
2550 struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
2551 const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);
2553 if (add_symbol)
2554 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
2557 mapping_state_2 (state, 0);
2558 #undef TRANSITION
2561 /* Same as mapping_state, but MAX_CHARS bytes have already been
2562 allocated. Put the mapping symbol that far back. */
2564 static void
2565 mapping_state_2 (enum mstate state, int max_chars)
2567 if (mapstate == state)
2568 /* The mapping symbol has already been emitted.
2569 There is nothing else to do. */
2570 return;
2572 mapstate = state;
2573 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
2574 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
2576 #else
2577 #define mapping_state(x) /* nothing */
2578 #define mapping_state_2(x, y) /* nothing */
2579 #endif
2581 /* Find the real, Thumb encoded start of a Thumb function. */
2583 #ifdef OBJ_COFF
2584 static symbolS *
2585 find_real_start (symbolS * symbolP)
2587 char * real_start;
2588 const char * name = S_GET_NAME (symbolP);
2589 symbolS * new_target;
2591 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2592 #define STUB_NAME ".real_start_of"
2594 if (name == NULL)
2595 abort ();
2597 /* The compiler may generate BL instructions to local labels because
2598 it needs to perform a branch to a far away location. These labels
2599 do not have a corresponding ".real_start_of" label. We check
2600 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2601 the ".real_start_of" convention for nonlocal branches. */
2602 if (S_IS_LOCAL (symbolP) || name[0] == '.')
2603 return symbolP;
2605 real_start = ACONCAT ((STUB_NAME, name, NULL));
2606 new_target = symbol_find (real_start);
2608 if (new_target == NULL)
2610 as_warn (_("Failed to find real start of function: %s\n"), name);
2611 new_target = symbolP;
2614 return new_target;
2616 #endif
2618 static void
2619 opcode_select (int width)
2621 switch (width)
2623 case 16:
2624 if (! thumb_mode)
2626 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2627 as_bad (_("selected processor does not support THUMB opcodes"));
2629 thumb_mode = 1;
2630 /* No need to force the alignment, since we will have been
2631 coming from ARM mode, which is word-aligned. */
2632 record_alignment (now_seg, 1);
2634 break;
2636 case 32:
2637 if (thumb_mode)
2639 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2640 as_bad (_("selected processor does not support ARM opcodes"));
2642 thumb_mode = 0;
2644 if (!need_pass_2)
2645 frag_align (2, 0, 0);
2647 record_alignment (now_seg, 1);
2649 break;
2651 default:
2652 as_bad (_("invalid instruction size selected (%d)"), width);
2656 static void
2657 s_arm (int ignore ATTRIBUTE_UNUSED)
2659 opcode_select (32);
2660 demand_empty_rest_of_line ();
2663 static void
2664 s_thumb (int ignore ATTRIBUTE_UNUSED)
2666 opcode_select (16);
2667 demand_empty_rest_of_line ();
2670 static void
2671 s_code (int unused ATTRIBUTE_UNUSED)
2673 int temp;
2675 temp = get_absolute_expression ();
2676 switch (temp)
2678 case 16:
2679 case 32:
2680 opcode_select (temp);
2681 break;
2683 default:
2684 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2688 static void
2689 s_force_thumb (int ignore ATTRIBUTE_UNUSED)
2691 /* If we are not already in thumb mode go into it, EVEN if
2692 the target processor does not support thumb instructions.
2693 This is used by gcc/config/arm/lib1funcs.asm for example
2694 to compile interworking support functions even if the
2695 target processor should not support interworking. */
2696 if (! thumb_mode)
2698 thumb_mode = 2;
2699 record_alignment (now_seg, 1);
2702 demand_empty_rest_of_line ();
2705 static void
2706 s_thumb_func (int ignore ATTRIBUTE_UNUSED)
2708 s_thumb (0);
2710 /* The following label is the name/address of the start of a Thumb function.
2711 We need to know this for the interworking support. */
2712 label_is_thumb_function_name = TRUE;
2715 /* Perform a .set directive, but also mark the alias as
2716 being a thumb function. */
2718 static void
2719 s_thumb_set (int equiv)
2721 /* XXX the following is a duplicate of the code for s_set() in read.c
2722 We cannot just call that code as we need to get at the symbol that
2723 is created. */
2724 char * name;
2725 char delim;
2726 char * end_name;
2727 symbolS * symbolP;
2729 /* Especial apologies for the random logic:
2730 This just grew, and could be parsed much more simply!
2731 Dean - in haste. */
2732 name = input_line_pointer;
2733 delim = get_symbol_end ();
2734 end_name = input_line_pointer;
2735 *end_name = delim;
2737 if (*input_line_pointer != ',')
2739 *end_name = 0;
2740 as_bad (_("expected comma after name \"%s\""), name);
2741 *end_name = delim;
2742 ignore_rest_of_line ();
2743 return;
2746 input_line_pointer++;
2747 *end_name = 0;
2749 if (name[0] == '.' && name[1] == '\0')
2751 /* XXX - this should not happen to .thumb_set. */
2752 abort ();
2755 if ((symbolP = symbol_find (name)) == NULL
2756 && (symbolP = md_undefined_symbol (name)) == NULL)
2758 #ifndef NO_LISTING
2759 /* When doing symbol listings, play games with dummy fragments living
2760 outside the normal fragment chain to record the file and line info
2761 for this symbol. */
2762 if (listing & LISTING_SYMBOLS)
2764 extern struct list_info_struct * listing_tail;
2765 fragS * dummy_frag = xmalloc (sizeof (fragS));
2767 memset (dummy_frag, 0, sizeof (fragS));
2768 dummy_frag->fr_type = rs_fill;
2769 dummy_frag->line = listing_tail;
2770 symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
2771 dummy_frag->fr_symbol = symbolP;
2773 else
2774 #endif
2775 symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);
2777 #ifdef OBJ_COFF
2778 /* "set" symbols are local unless otherwise specified. */
2779 SF_SET_LOCAL (symbolP);
2780 #endif /* OBJ_COFF */
2781 } /* Make a new symbol. */
2783 symbol_table_insert (symbolP);
2785 * end_name = delim;
2787 if (equiv
2788 && S_IS_DEFINED (symbolP)
2789 && S_GET_SEGMENT (symbolP) != reg_section)
2790 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));
2792 pseudo_set (symbolP);
2794 demand_empty_rest_of_line ();
2796 /* XXX Now we come to the Thumb specific bit of code. */
2798 THUMB_SET_FUNC (symbolP, 1);
2799 ARM_SET_THUMB (symbolP, 1);
2800 #if defined OBJ_ELF || defined OBJ_COFF
2801 ARM_SET_INTERWORK (symbolP, support_interwork);
2802 #endif
2805 /* Directives: Mode selection. */
2807 /* .syntax [unified|divided] - choose the new unified syntax
2808 (same for Arm and Thumb encoding, modulo slight differences in what
2809 can be represented) or the old divergent syntax for each mode. */
2810 static void
2811 s_syntax (int unused ATTRIBUTE_UNUSED)
2813 char *name, delim;
2815 name = input_line_pointer;
2816 delim = get_symbol_end ();
2818 if (!strcasecmp (name, "unified"))
2819 unified_syntax = TRUE;
2820 else if (!strcasecmp (name, "divided"))
2821 unified_syntax = FALSE;
2822 else
2824 as_bad (_("unrecognized syntax mode \"%s\""), name);
2825 return;
2827 *input_line_pointer = delim;
2828 demand_empty_rest_of_line ();
2831 /* Directives: sectioning and alignment. */
2833 /* Same as s_align_ptwo but align 0 => align 2. */
2835 static void
2836 s_align (int unused ATTRIBUTE_UNUSED)
2838 int temp;
2839 bfd_boolean fill_p;
2840 long temp_fill;
2841 long max_alignment = 15;
2843 temp = get_absolute_expression ();
2844 if (temp > max_alignment)
2845 as_bad (_("alignment too large: %d assumed"), temp = max_alignment);
2846 else if (temp < 0)
2848 as_bad (_("alignment negative. 0 assumed."));
2849 temp = 0;
2852 if (*input_line_pointer == ',')
2854 input_line_pointer++;
2855 temp_fill = get_absolute_expression ();
2856 fill_p = TRUE;
2858 else
2860 fill_p = FALSE;
2861 temp_fill = 0;
2864 if (!temp)
2865 temp = 2;
2867 /* Only make a frag if we HAVE to. */
2868 if (temp && !need_pass_2)
2870 if (!fill_p && subseg_text_p (now_seg))
2871 frag_align_code (temp, 0);
2872 else
2873 frag_align (temp, (int) temp_fill, 0);
2875 demand_empty_rest_of_line ();
2877 record_alignment (now_seg, temp);
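/* For example, ".align 3" pads to an 8-byte boundary, while ".align 0"
is treated as ".align 2" (4-byte alignment) so that code stays word
aligned.  */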
2880 static void
2881 s_bss (int ignore ATTRIBUTE_UNUSED)
2883 /* We don't support putting frags in the BSS segment; we fake it by
2884 marking in_bss, then looking at s_skip for clues. */
2885 subseg_set (bss_section, 0);
2886 demand_empty_rest_of_line ();
2888 #ifdef md_elf_section_change_hook
2889 md_elf_section_change_hook ();
2890 #endif
2893 static void
2894 s_even (int ignore ATTRIBUTE_UNUSED)
2896 /* Never make a frag if we expect an extra pass. */
2897 if (!need_pass_2)
2898 frag_align (1, 0, 0);
2900 record_alignment (now_seg, 1);
2902 demand_empty_rest_of_line ();
2905 /* Directives: Literal pools. */
2907 static literal_pool *
2908 find_literal_pool (void)
2910 literal_pool * pool;
2912 for (pool = list_of_pools; pool != NULL; pool = pool->next)
2914 if (pool->section == now_seg
2915 && pool->sub_section == now_subseg)
2916 break;
2919 return pool;
2922 static literal_pool *
2923 find_or_make_literal_pool (void)
2925 /* Next literal pool ID number. */
2926 static unsigned int latest_pool_num = 1;
2927 literal_pool * pool;
2929 pool = find_literal_pool ();
2931 if (pool == NULL)
2933 /* Create a new pool. */
2934 pool = xmalloc (sizeof (* pool));
2935 if (! pool)
2936 return NULL;
2938 pool->next_free_entry = 0;
2939 pool->section = now_seg;
2940 pool->sub_section = now_subseg;
2941 pool->next = list_of_pools;
2942 pool->symbol = NULL;
2944 /* Add it to the list. */
2945 list_of_pools = pool;
2948 /* New pools, and emptied pools, will have a NULL symbol. */
2949 if (pool->symbol == NULL)
2951 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
2952 (valueT) 0, &zero_address_frag);
2953 pool->id = latest_pool_num ++;
2956 /* Done. */
2957 return pool;
2960 /* Add the literal in the global 'inst'
2961 structure to the relevant literal pool. */
2963 static int
2964 add_to_lit_pool (void)
2966 literal_pool * pool;
2967 unsigned int entry;
2969 pool = find_or_make_literal_pool ();
2971 /* Check if this literal value is already in the pool. */
2972 for (entry = 0; entry < pool->next_free_entry; entry ++)
2974 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
2975 && (inst.reloc.exp.X_op == O_constant)
2976 && (pool->literals[entry].X_add_number
2977 == inst.reloc.exp.X_add_number)
2978 && (pool->literals[entry].X_unsigned
2979 == inst.reloc.exp.X_unsigned))
2980 break;
2982 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
2983 && (inst.reloc.exp.X_op == O_symbol)
2984 && (pool->literals[entry].X_add_number
2985 == inst.reloc.exp.X_add_number)
2986 && (pool->literals[entry].X_add_symbol
2987 == inst.reloc.exp.X_add_symbol)
2988 && (pool->literals[entry].X_op_symbol
2989 == inst.reloc.exp.X_op_symbol))
2990 break;
2993 /* Do we need to create a new entry? */
2994 if (entry == pool->next_free_entry)
2996 if (entry >= MAX_LITERAL_POOL_SIZE)
2998 inst.error = _("literal pool overflow");
2999 return FAIL;
3002 pool->literals[entry] = inst.reloc.exp;
3003 pool->next_free_entry += 1;
3006 inst.reloc.exp.X_op = O_symbol;
3007 inst.reloc.exp.X_add_number = ((int) entry) * 4;
3008 inst.reloc.exp.X_add_symbol = pool->symbol;
3010 return SUCCESS;
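/* For example, a pseudo-instruction such as
ldr r0, =0x12345678
whose constant cannot be encoded as an immediate typically ends up here:
the constant is appended to the current section's pool (or an existing
identical entry is reused) and the load becomes a PC-relative ldr from
the pool; the pool itself is emitted by a later .ltorg or .pool
directive.  */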
3013 /* Can't use symbol_new here, so have to create a symbol and then at
3014 a later date assign it a value. That's what these functions do. */
3016 static void
3017 symbol_locate (symbolS * symbolP,
3018 const char * name, /* It is copied, the caller can modify. */
3019 segT segment, /* Segment identifier (SEG_<something>). */
3020 valueT valu, /* Symbol value. */
3021 fragS * frag) /* Associated fragment. */
3023 unsigned int name_length;
3024 char * preserved_copy_of_name;
3026 name_length = strlen (name) + 1; /* +1 for \0. */
3027 obstack_grow (&notes, name, name_length);
3028 preserved_copy_of_name = obstack_finish (&notes);
3030 #ifdef tc_canonicalize_symbol_name
3031 preserved_copy_of_name =
3032 tc_canonicalize_symbol_name (preserved_copy_of_name);
3033 #endif
3035 S_SET_NAME (symbolP, preserved_copy_of_name);
3037 S_SET_SEGMENT (symbolP, segment);
3038 S_SET_VALUE (symbolP, valu);
3039 symbol_clear_list_pointers (symbolP);
3041 symbol_set_frag (symbolP, frag);
3043 /* Link to end of symbol chain. */
3045 extern int symbol_table_frozen;
3047 if (symbol_table_frozen)
3048 abort ();
3051 symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);
3053 obj_symbol_new_hook (symbolP);
3055 #ifdef tc_symbol_new_hook
3056 tc_symbol_new_hook (symbolP);
3057 #endif
3059 #ifdef DEBUG_SYMS
3060 verify_symbol_chain (symbol_rootP, symbol_lastP);
3061 #endif /* DEBUG_SYMS */
3065 static void
3066 s_ltorg (int ignored ATTRIBUTE_UNUSED)
3068 unsigned int entry;
3069 literal_pool * pool;
3070 char sym_name[20];
3072 pool = find_literal_pool ();
3073 if (pool == NULL
3074 || pool->symbol == NULL
3075 || pool->next_free_entry == 0)
3076 return;
3078 mapping_state (MAP_DATA);
3080 /* Align the pool, since word accesses will be made to it.
3081 Only make a frag if we have to. */
3082 if (!need_pass_2)
3083 frag_align (2, 0, 0);
3085 record_alignment (now_seg, 2);
3087 sprintf (sym_name, "$$lit_\002%x", pool->id);
3089 symbol_locate (pool->symbol, sym_name, now_seg,
3090 (valueT) frag_now_fix (), frag_now);
3091 symbol_table_insert (pool->symbol);
3093 ARM_SET_THUMB (pool->symbol, thumb_mode);
3095 #if defined OBJ_COFF || defined OBJ_ELF
3096 ARM_SET_INTERWORK (pool->symbol, support_interwork);
3097 #endif
3099 for (entry = 0; entry < pool->next_free_entry; entry ++)
3100 /* First output the expression in the instruction to the pool. */
3101 emit_expr (&(pool->literals[entry]), 4); /* .word */
3103 /* Mark the pool as empty. */
3104 pool->next_free_entry = 0;
3105 pool->symbol = NULL;
3108 #ifdef OBJ_ELF
3109 /* Forward declarations for functions below, in the MD interface
3110 section. */
3111 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3112 static valueT create_unwind_entry (int);
3113 static void start_unwind_section (const segT, int);
3114 static void add_unwind_opcode (valueT, int);
3115 static void flush_pending_unwind (void);
3117 /* Directives: Data. */
3119 static void
3120 s_arm_elf_cons (int nbytes)
3122 expressionS exp;
3124 #ifdef md_flush_pending_output
3125 md_flush_pending_output ();
3126 #endif
3128 if (is_it_end_of_statement ())
3130 demand_empty_rest_of_line ();
3131 return;
3134 #ifdef md_cons_align
3135 md_cons_align (nbytes);
3136 #endif
3138 mapping_state (MAP_DATA);
3141 int reloc;
3142 char *base = input_line_pointer;
3144 expression (& exp);
3146 if (exp.X_op != O_symbol)
3147 emit_expr (&exp, (unsigned int) nbytes);
3148 else
3150 char *before_reloc = input_line_pointer;
3151 reloc = parse_reloc (&input_line_pointer);
3152 if (reloc == -1)
3154 as_bad (_("unrecognized relocation suffix"));
3155 ignore_rest_of_line ();
3156 return;
3158 else if (reloc == BFD_RELOC_UNUSED)
3159 emit_expr (&exp, (unsigned int) nbytes);
3160 else
3162 reloc_howto_type *howto = bfd_reloc_type_lookup (stdoutput, reloc);
3163 int size = bfd_get_reloc_size (howto);
3165 if (reloc == BFD_RELOC_ARM_PLT32)
3167 as_bad (_("(plt) is only valid on branch targets"));
3168 reloc = BFD_RELOC_UNUSED;
3169 size = 0;
3172 if (size > nbytes)
3173 as_bad (_("%s relocations do not fit in %d bytes"),
3174 howto->name, nbytes);
3175 else
3177 /* We've parsed an expression stopping at O_symbol.
3178 But there may be more expression left now that we
3179 have parsed the relocation marker. Parse it again.
3180 XXX Surely there is a cleaner way to do this. */
3181 char *p = input_line_pointer;
3182 int offset;
3183 char *save_buf = alloca (input_line_pointer - base);
3184 memcpy (save_buf, base, input_line_pointer - base);
3185 memmove (base + (input_line_pointer - before_reloc),
3186 base, before_reloc - base);
3188 input_line_pointer = base + (input_line_pointer-before_reloc);
3189 expression (&exp);
3190 memcpy (base, save_buf, p - base);
3192 offset = nbytes - size;
3193 p = frag_more ((int) nbytes);
3194 fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
3195 size, &exp, 0, reloc);
3200 while (*input_line_pointer++ == ',');
3202 /* Put terminator back into stream. */
3203 input_line_pointer --;
3204 demand_empty_rest_of_line ();
3207 /* Emit an expression containing a 32-bit thumb instruction.
3208 Implementation based on put_thumb32_insn. */
3210 static void
3211 emit_thumb32_expr (expressionS * exp)
3213 expressionS exp_high = *exp;
3215 exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3216 emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3217 exp->X_add_number &= 0xffff;
3218 emit_expr (exp, (unsigned int) THUMB_SIZE);
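/* For example, on a little-endian target ".inst.w 0xf3af8000" (nop.w)
is emitted as the halfword 0xf3af followed by the halfword 0x8000,
matching the memory layout of a real 32-bit Thumb instruction.  */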
3221 /* Guess the instruction size based on the opcode. */
3223 static int
3224 thumb_insn_size (int opcode)
3226 if ((unsigned int) opcode < 0xe800u)
3227 return 2;
3228 else if ((unsigned int) opcode >= 0xe8000000u)
3229 return 4;
3230 else
3231 return 0;
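/* For example, ".inst 0x4770" ("bx lr") is below 0xe800 and is sized as
a 2-byte instruction, while ".inst 0xf3af8000" (nop.w) is at or above
0xe8000000 and is sized as 4 bytes; values in between are ambiguous and
emit_insn asks for .inst.n or .inst.w instead.  */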
3234 static bfd_boolean
3235 emit_insn (expressionS *exp, int nbytes)
3237 int size = 0;
3239 if (exp->X_op == O_constant)
3241 size = nbytes;
3243 if (size == 0)
3244 size = thumb_insn_size (exp->X_add_number);
3246 if (size != 0)
3248 if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
3250 as_bad (_(".inst.n operand too big. "\
3251 "Use .inst.w instead"));
3252 size = 0;
3254 else
3256 if (now_it.state == AUTOMATIC_IT_BLOCK)
3257 set_it_insn_type_nonvoid (OUTSIDE_IT_INSN, 0);
3258 else
3259 set_it_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);
3261 if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
3262 emit_thumb32_expr (exp);
3263 else
3264 emit_expr (exp, (unsigned int) size);
3266 it_fsm_post_encode ();
3269 else
3270 as_bad (_("cannot determine Thumb instruction size. " \
3271 "Use .inst.n/.inst.w instead"));
3273 else
3274 as_bad (_("constant expression required"));
3276 return (size != 0);
3279 /* Like s_arm_elf_cons but do not use md_cons_align and
3280 set the mapping state to MAP_ARM/MAP_THUMB. */
3282 static void
3283 s_arm_elf_inst (int nbytes)
3285 if (is_it_end_of_statement ())
3287 demand_empty_rest_of_line ();
3288 return;
3291 /* Calling mapping_state () here will not change ARM/THUMB,
3292 but will ensure that we are not in DATA state. */
3294 if (thumb_mode)
3295 mapping_state (MAP_THUMB);
3296 else
3298 if (nbytes != 0)
3300 as_bad (_("width suffixes are invalid in ARM mode"));
3301 ignore_rest_of_line ();
3302 return;
3305 nbytes = 4;
3307 mapping_state (MAP_ARM);
3312 expressionS exp;
3314 expression (& exp);
3316 if (! emit_insn (& exp, nbytes))
3318 ignore_rest_of_line ();
3319 return;
3322 while (*input_line_pointer++ == ',');
3324 /* Put terminator back into stream. */
3325 input_line_pointer --;
3326 demand_empty_rest_of_line ();
3329 /* Parse a .rel31 directive. */
3331 static void
3332 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
3334 expressionS exp;
3335 char *p;
3336 valueT highbit;
3338 highbit = 0;
3339 if (*input_line_pointer == '1')
3340 highbit = 0x80000000;
3341 else if (*input_line_pointer != '0')
3342 as_bad (_("expected 0 or 1"));
3344 input_line_pointer++;
3345 if (*input_line_pointer != ',')
3346 as_bad (_("missing comma"));
3347 input_line_pointer++;
3349 #ifdef md_flush_pending_output
3350 md_flush_pending_output ();
3351 #endif
3353 #ifdef md_cons_align
3354 md_cons_align (4);
3355 #endif
3357 mapping_state (MAP_DATA);
3359 expression (&exp);
3361 p = frag_more (4);
3362 md_number_to_chars (p, highbit, 4);
3363 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
3364 BFD_RELOC_ARM_PREL31);
3366 demand_empty_rest_of_line ();
3369 /* Directives: AEABI stack-unwind tables. */
3371 /* Parse an unwind_fnstart directive. Simply records the current location. */
3373 static void
3374 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
3376 demand_empty_rest_of_line ();
3377 if (unwind.proc_start)
3379 as_bad (_("duplicate .fnstart directive"));
3380 return;
3383 /* Mark the start of the function. */
3384 unwind.proc_start = expr_build_dot ();
3386 /* Reset the rest of the unwind info. */
3387 unwind.opcode_count = 0;
3388 unwind.table_entry = NULL;
3389 unwind.personality_routine = NULL;
3390 unwind.personality_index = -1;
3391 unwind.frame_size = 0;
3392 unwind.fp_offset = 0;
3393 unwind.fp_reg = REG_SP;
3394 unwind.fp_used = 0;
3395 unwind.sp_restored = 0;
3399 /* Parse a handlerdata directive. Creates the exception handling table entry
3400 for the function. */
3402 static void
3403 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3405 demand_empty_rest_of_line ();
3406 if (!unwind.proc_start)
3407 as_bad (MISSING_FNSTART);
3409 if (unwind.table_entry)
3410 as_bad (_("duplicate .handlerdata directive"));
3412 create_unwind_entry (1);
3415 /* Parse an unwind_fnend directive. Generates the index table entry. */
3417 static void
3418 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
3420 long where;
3421 char *ptr;
3422 valueT val;
3424 demand_empty_rest_of_line ();
3426 if (!unwind.proc_start)
3428 as_bad (_(".fnend directive without .fnstart"));
3429 return;
3432 /* Add eh table entry. */
3433 if (unwind.table_entry == NULL)
3434 val = create_unwind_entry (0);
3435 else
3436 val = 0;
3438 /* Add index table entry. This is two words. */
3439 start_unwind_section (unwind.saved_seg, 1);
3440 frag_align (2, 0, 0);
3441 record_alignment (now_seg, 2);
3443 ptr = frag_more (8);
3444 where = frag_now_fix () - 8;
3446 /* Self relative offset of the function start. */
3447 fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
3448 BFD_RELOC_ARM_PREL31);
3450 /* Indicate dependency on EHABI-defined personality routines to the
3451 linker, if it hasn't been done already. */
3452 if (unwind.personality_index >= 0 && unwind.personality_index < 3
3453 && !(marked_pr_dependency & (1 << unwind.personality_index)))
3455 static const char *const name[] =
3457 "__aeabi_unwind_cpp_pr0",
3458 "__aeabi_unwind_cpp_pr1",
3459 "__aeabi_unwind_cpp_pr2"
3461 symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
3462 fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
3463 marked_pr_dependency |= 1 << unwind.personality_index;
3464 seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
3465 = marked_pr_dependency;
3468 if (val)
3469 /* Inline exception table entry. */
3470 md_number_to_chars (ptr + 4, val, 4);
3471 else
3472 /* Self relative offset of the table entry. */
3473 fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
3474 BFD_RELOC_ARM_PREL31);
3476 /* Restore the original section. */
3477 subseg_set (unwind.saved_seg, unwind.saved_subseg);
3479 unwind.proc_start = NULL;
3483 /* Parse an unwind_cantunwind directive. */
3485 static void
3486 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3488 demand_empty_rest_of_line ();
3489 if (!unwind.proc_start)
3490 as_bad (MISSING_FNSTART);
3492 if (unwind.personality_routine || unwind.personality_index != -1)
3493 as_bad (_("personality routine specified for cantunwind frame"));
3495 unwind.personality_index = -2;
3499 /* Parse a personalityindex directive. */
3501 static void
3502 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3504 expressionS exp;
3506 if (!unwind.proc_start)
3507 as_bad (MISSING_FNSTART);
3509 if (unwind.personality_routine || unwind.personality_index != -1)
3510 as_bad (_("duplicate .personalityindex directive"));
3512 expression (&exp);
3514 if (exp.X_op != O_constant
3515 || exp.X_add_number < 0 || exp.X_add_number > 15)
3517 as_bad (_("bad personality routine number"));
3518 ignore_rest_of_line ();
3519 return;
3522 unwind.personality_index = exp.X_add_number;
3524 demand_empty_rest_of_line ();
3528 /* Parse a personality directive. */
3530 static void
3531 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
3533 char *name, *p, c;
3535 if (!unwind.proc_start)
3536 as_bad (MISSING_FNSTART);
3538 if (unwind.personality_routine || unwind.personality_index != -1)
3539 as_bad (_("duplicate .personality directive"));
3541 name = input_line_pointer;
3542 c = get_symbol_end ();
3543 p = input_line_pointer;
3544 unwind.personality_routine = symbol_find_or_make (name);
3545 *p = c;
3546 demand_empty_rest_of_line ();
3550 /* Parse a directive saving core registers. */
3552 static void
3553 s_arm_unwind_save_core (void)
3555 valueT op;
3556 long range;
3557 int n;
3559 range = parse_reg_list (&input_line_pointer);
3560 if (range == FAIL)
3562 as_bad (_("expected register list"));
3563 ignore_rest_of_line ();
3564 return;
3567 demand_empty_rest_of_line ();
3569 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3570 into .unwind_save {..., sp...}. We aren't bothered about the value of
3571 ip because it is clobbered by calls. */
3572 if (unwind.sp_restored && unwind.fp_reg == 12
3573 && (range & 0x3000) == 0x1000)
3575 unwind.opcode_count--;
3576 unwind.sp_restored = 0;
3577 range = (range | 0x2000) & ~0x1000;
3578 unwind.pending_offset = 0;
3581 /* Pop r4-r15. */
3582 if (range & 0xfff0)
3584 /* See if we can use the short opcodes. These pop a block of up to 8
3585 registers starting with r4, plus maybe r14. */
3586 for (n = 0; n < 8; n++)
3588 /* Break at the first non-saved register. */
3589 if ((range & (1 << (n + 4))) == 0)
3590 break;
3592 /* See if there are any other bits set. */
3593 if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
3595 /* Use the long form. */
3596 op = 0x8000 | ((range >> 4) & 0xfff);
3597 add_unwind_opcode (op, 2);
3599 else
3601 /* Use the short form. */
3602 if (range & 0x4000)
3603 op = 0xa8; /* Pop r14. */
3604 else
3605 op = 0xa0; /* Do not pop r14. */
3606 op |= (n - 1);
3607 add_unwind_opcode (op, 1);
3611 /* Pop r0-r3. */
3612 if (range & 0xf)
3614 op = 0xb100 | (range & 0xf);
3615 add_unwind_opcode (op, 2);
3618 /* Record the number of bytes pushed. */
3619 for (n = 0; n < 16; n++)
3621 if (range & (1 << n))
3622 unwind.frame_size += 4;
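/* For example, ".save {r4-r7, lr}" fits the short form and becomes the
single opcode 0xab (0xa8 | 3: pop r4-r7 plus r14), whereas
".save {r4, r6}" has a gap in the register block and needs the long form
0x8000 | mask, here 0x8005.  */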
3627 /* Parse a directive saving FPA registers. */
3629 static void
3630 s_arm_unwind_save_fpa (int reg)
3632 expressionS exp;
3633 int num_regs;
3634 valueT op;
3636 /* Get the number of registers to transfer. */
3637 if (skip_past_comma (&input_line_pointer) != FAIL)
3638 expression (&exp);
3639 else
3640 exp.X_op = O_illegal;
3642 if (exp.X_op != O_constant)
3644 as_bad (_("expected , <constant>"));
3645 ignore_rest_of_line ();
3646 return;
3649 num_regs = exp.X_add_number;
3651 if (num_regs < 1 || num_regs > 4)
3653 as_bad (_("number of registers must be in the range [1:4]"));
3654 ignore_rest_of_line ();
3655 return;
3658 demand_empty_rest_of_line ();
3660 if (reg == 4)
3662 /* Short form. */
3663 op = 0xb4 | (num_regs - 1);
3664 add_unwind_opcode (op, 1);
3666 else
3668 /* Long form. */
3669 op = 0xc800 | (reg << 4) | (num_regs - 1);
3670 add_unwind_opcode (op, 2);
3672 unwind.frame_size += num_regs * 12;
3676 /* Parse a directive saving VFP registers for ARMv6 and above. */
3678 static void
3679 s_arm_unwind_save_vfp_armv6 (void)
3681 int count;
3682 unsigned int start;
3683 valueT op;
3684 int num_vfpv3_regs = 0;
3685 int num_regs_below_16;
3687 count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
3688 if (count == FAIL)
3690 as_bad (_("expected register list"));
3691 ignore_rest_of_line ();
3692 return;
3695 demand_empty_rest_of_line ();
3697 /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
3698 than FSTMX/FLDMX-style ones). */
3700 /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31. */
3701 if (start >= 16)
3702 num_vfpv3_regs = count;
3703 else if (start + count > 16)
3704 num_vfpv3_regs = start + count - 16;
3706 if (num_vfpv3_regs > 0)
3708 int start_offset = start > 16 ? start - 16 : 0;
3709 op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
3710 add_unwind_opcode (op, 2);
3713 /* Generate opcode for registers numbered in the range 0 .. 15. */
3714 num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
3715 gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
3716 if (num_regs_below_16 > 0)
3718 op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
3719 add_unwind_opcode (op, 2);
3722 unwind.frame_size += count * 8;
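/* For example, ".vsave {d8-d11}" produces a single 0xc900-form opcode
(start 8, count 4), while a list that crosses d16, such as
".vsave {d14-d18}" on VFPv3, is split into a 0xc800-form opcode for
d16-d18 and a 0xc900-form opcode for d14-d15.  */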
3726 /* Parse a directive saving VFP registers for pre-ARMv6. */
3728 static void
3729 s_arm_unwind_save_vfp (void)
3731 int count;
3732 unsigned int reg;
3733 valueT op;
3735 count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
3736 if (count == FAIL)
3738 as_bad (_("expected register list"));
3739 ignore_rest_of_line ();
3740 return;
3743 demand_empty_rest_of_line ();
3745 if (reg == 8)
3747 /* Short form. */
3748 op = 0xb8 | (count - 1);
3749 add_unwind_opcode (op, 1);
3751 else
3753 /* Long form. */
3754 op = 0xb300 | (reg << 4) | (count - 1);
3755 add_unwind_opcode (op, 2);
3757 unwind.frame_size += count * 8 + 4;
3761 /* Parse a directive saving iWMMXt data registers. */
3763 static void
3764 s_arm_unwind_save_mmxwr (void)
3766 int reg;
3767 int hi_reg;
3768 int i;
3769 unsigned mask = 0;
3770 valueT op;
3772 if (*input_line_pointer == '{')
3773 input_line_pointer++;
3777 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3779 if (reg == FAIL)
3781 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
3782 goto error;
3785 if (mask >> reg)
3786 as_tsktsk (_("register list not in ascending order"));
3787 mask |= 1 << reg;
3789 if (*input_line_pointer == '-')
3791 input_line_pointer++;
3792 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3793 if (hi_reg == FAIL)
3795 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
3796 goto error;
3798 else if (reg >= hi_reg)
3800 as_bad (_("bad register range"));
3801 goto error;
3803 for (; reg < hi_reg; reg++)
3804 mask |= 1 << reg;
3807 while (skip_past_comma (&input_line_pointer) != FAIL);
3809 if (*input_line_pointer == '}')
3810 input_line_pointer++;
3812 demand_empty_rest_of_line ();
3814 /* Generate any deferred opcodes because we're going to be looking at
3815 the list. */
3816 flush_pending_unwind ();
3818 for (i = 0; i < 16; i++)
3820 if (mask & (1 << i))
3821 unwind.frame_size += 8;
3824 /* Attempt to combine with a previous opcode. We do this because gcc
3825 likes to output separate unwind directives for a single block of
3826 registers. */
3827 if (unwind.opcode_count > 0)
3829 i = unwind.opcodes[unwind.opcode_count - 1];
3830 if ((i & 0xf8) == 0xc0)
3832 i &= 7;
3833 /* Only merge if the blocks are contiguous. */
3834 if (i < 6)
3836 if ((mask & 0xfe00) == (1 << 9))
3838 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
3839 unwind.opcode_count--;
3842 else if (i == 6 && unwind.opcode_count >= 2)
3844 i = unwind.opcodes[unwind.opcode_count - 2];
3845 reg = i >> 4;
3846 i &= 0xf;
3848 op = 0xffff << (reg - 1);
3849 if (reg > 0
3850 && ((mask & op) == (1u << (reg - 1))))
3852 op = (1 << (reg + i + 1)) - 1;
3853 op &= ~((1 << reg) - 1);
3854 mask |= op;
3855 unwind.opcode_count -= 2;
3861 hi_reg = 15;
3862 /* We want to generate opcodes in the order the registers have been
3863 saved, i.e. descending order. */
3864 for (reg = 15; reg >= -1; reg--)
3866 /* Save registers in blocks. */
3867 if (reg < 0
3868 || !(mask & (1 << reg)))
3870 /* We found an unsaved reg. Generate opcodes to save the
3871 preceding block. */
3872 if (reg != hi_reg)
3874 if (reg == 9)
3876 /* Short form. */
3877 op = 0xc0 | (hi_reg - 10);
3878 add_unwind_opcode (op, 1);
3880 else
3882 /* Long form. */
3883 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
3884 add_unwind_opcode (op, 2);
3887 hi_reg = reg - 1;
3891 return;
3892 error:
3893 ignore_rest_of_line ();
3896 static void
3897 s_arm_unwind_save_mmxwcg (void)
3899 int reg;
3900 int hi_reg;
3901 unsigned mask = 0;
3902 valueT op;
3904 if (*input_line_pointer == '{')
3905 input_line_pointer++;
3909 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
3911 if (reg == FAIL)
3913 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
3914 goto error;
3917 reg -= 8;
3918 if (mask >> reg)
3919 as_tsktsk (_("register list not in ascending order"));
3920 mask |= 1 << reg;
3922 if (*input_line_pointer == '-')
3924 input_line_pointer++;
3925 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
3926 if (hi_reg == FAIL)
3928 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
3929 goto error;
3931 else if (reg >= hi_reg)
3933 as_bad (_("bad register range"));
3934 goto error;
3936 for (; reg < hi_reg; reg++)
3937 mask |= 1 << reg;
3940 while (skip_past_comma (&input_line_pointer) != FAIL);
3942 if (*input_line_pointer == '}')
3943 input_line_pointer++;
3945 demand_empty_rest_of_line ();
3947 /* Generate any deferred opcodes because we're going to be looking at
3948 the list. */
3949 flush_pending_unwind ();
3951 for (reg = 0; reg < 16; reg++)
3953 if (mask & (1 << reg))
3954 unwind.frame_size += 4;
3956 op = 0xc700 | mask;
3957 add_unwind_opcode (op, 2);
3958 return;
3959 error:
3960 ignore_rest_of_line ();
3964 /* Parse an unwind_save directive.
3965 If the argument is non-zero, this is a .vsave directive. */
3967 static void
3968 s_arm_unwind_save (int arch_v6)
3970 char *peek;
3971 struct reg_entry *reg;
3972 bfd_boolean had_brace = FALSE;
3974 if (!unwind.proc_start)
3975 as_bad (MISSING_FNSTART);
3977 /* Figure out what sort of save we have. */
3978 peek = input_line_pointer;
3980 if (*peek == '{')
3982 had_brace = TRUE;
3983 peek++;
3986 reg = arm_reg_parse_multi (&peek);
3988 if (!reg)
3990 as_bad (_("register expected"));
3991 ignore_rest_of_line ();
3992 return;
3995 switch (reg->type)
3997 case REG_TYPE_FN:
3998 if (had_brace)
4000 as_bad (_("FPA .unwind_save does not take a register list"));
4001 ignore_rest_of_line ();
4002 return;
4004 input_line_pointer = peek;
4005 s_arm_unwind_save_fpa (reg->number);
4006 return;
4008 case REG_TYPE_RN: s_arm_unwind_save_core (); return;
4009 case REG_TYPE_VFD:
4010 if (arch_v6)
4011 s_arm_unwind_save_vfp_armv6 ();
4012 else
4013 s_arm_unwind_save_vfp ();
4014 return;
4015 case REG_TYPE_MMXWR: s_arm_unwind_save_mmxwr (); return;
4016 case REG_TYPE_MMXWCG: s_arm_unwind_save_mmxwcg (); return;
4018 default:
4019 as_bad (_(".unwind_save does not support this kind of register"));
4020 ignore_rest_of_line ();
4025 /* Parse an unwind_movsp directive. */
4027 static void
4028 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
4030 int reg;
4031 valueT op;
4032 int offset;
4034 if (!unwind.proc_start)
4035 as_bad (MISSING_FNSTART);
4037 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4038 if (reg == FAIL)
4040 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
4041 ignore_rest_of_line ();
4042 return;
4045 /* Optional constant. */
4046 if (skip_past_comma (&input_line_pointer) != FAIL)
4048 if (immediate_for_directive (&offset) == FAIL)
4049 return;
4051 else
4052 offset = 0;
4054 demand_empty_rest_of_line ();
4056 if (reg == REG_SP || reg == REG_PC)
4058 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4059 return;
4062 if (unwind.fp_reg != REG_SP)
4063 as_bad (_("unexpected .unwind_movsp directive"));
4065 /* Generate opcode to restore the value. */
4066 op = 0x90 | reg;
4067 add_unwind_opcode (op, 1);
4069 /* Record the information for later. */
4070 unwind.fp_reg = reg;
4071 unwind.fp_offset = unwind.frame_size - offset;
4072 unwind.sp_restored = 1;
4075 /* Parse an unwind_pad directive. */
4077 static void
4078 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4080 int offset;
4082 if (!unwind.proc_start)
4083 as_bad (MISSING_FNSTART);
4085 if (immediate_for_directive (&offset) == FAIL)
4086 return;
4088 if (offset & 3)
4090 as_bad (_("stack increment must be multiple of 4"));
4091 ignore_rest_of_line ();
4092 return;
4095 /* Don't generate any opcodes, just record the details for later. */
4096 unwind.frame_size += offset;
4097 unwind.pending_offset += offset;
4099 demand_empty_rest_of_line ();
4102 /* Parse an unwind_setfp directive. */
4104 static void
4105 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4107 int sp_reg;
4108 int fp_reg;
4109 int offset;
4111 if (!unwind.proc_start)
4112 as_bad (MISSING_FNSTART);
4114 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4115 if (skip_past_comma (&input_line_pointer) == FAIL)
4116 sp_reg = FAIL;
4117 else
4118 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4120 if (fp_reg == FAIL || sp_reg == FAIL)
4122 as_bad (_("expected <reg>, <reg>"));
4123 ignore_rest_of_line ();
4124 return;
4127 /* Optional constant. */
4128 if (skip_past_comma (&input_line_pointer) != FAIL)
4130 if (immediate_for_directive (&offset) == FAIL)
4131 return;
4133 else
4134 offset = 0;
4136 demand_empty_rest_of_line ();
4138 if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4140 as_bad (_("register must be either sp or set by a previous "
4141 "unwind_movsp directive"));
4142 return;
4145 /* Don't generate any opcodes, just record the information for later. */
4146 unwind.fp_reg = fp_reg;
4147 unwind.fp_used = 1;
4148 if (sp_reg == REG_SP)
4149 unwind.fp_offset = unwind.frame_size - offset;
4150 else
4151 unwind.fp_offset -= offset;
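/* For example, a prologue of
push {fp, lr}
add fp, sp, #4
would be annotated with ".setfp fp, sp, #4", recording that the frame
pointer was set to sp plus 4 so that later unwind opcodes can restore sp
from fp.  */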
4154 /* Parse an unwind_raw directive. */
4156 static void
4157 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4159 expressionS exp;
4160 /* This is an arbitrary limit. */
4161 unsigned char op[16];
4162 int count;
4164 if (!unwind.proc_start)
4165 as_bad (MISSING_FNSTART);
4167 expression (&exp);
4168 if (exp.X_op == O_constant
4169 && skip_past_comma (&input_line_pointer) != FAIL)
4171 unwind.frame_size += exp.X_add_number;
4172 expression (&exp);
4174 else
4175 exp.X_op = O_illegal;
4177 if (exp.X_op != O_constant)
4179 as_bad (_("expected <offset>, <opcode>"));
4180 ignore_rest_of_line ();
4181 return;
4184 count = 0;
4186 /* Parse the opcode. */
4187 for (;;)
4189 if (count >= 16)
4191 as_bad (_("unwind opcode too long"));
4192 ignore_rest_of_line ();
4194 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4196 as_bad (_("invalid unwind opcode"));
4197 ignore_rest_of_line ();
4198 return;
4200 op[count++] = exp.X_add_number;
4202 /* Parse the next byte. */
4203 if (skip_past_comma (&input_line_pointer) == FAIL)
4204 break;
4206 expression (&exp);
4209 /* Add the opcode bytes in reverse order. */
4210 while (count--)
4211 add_unwind_opcode (op[count], 1);
4213 demand_empty_rest_of_line ();
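/* For example, ".unwind_raw 4, 0xb1, 0x01" adds 4 bytes to the recorded
frame size and emits the raw opcode pair 0xb1 0x01 (pop {r0}), the same
encoding s_arm_unwind_save_core produces for that register.  */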
4217 /* Parse a .eabi_attribute directive. */
4219 static void
4220 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4222 int tag = s_vendor_attribute (OBJ_ATTR_PROC);
4224 if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4225 attributes_set_explicitly[tag] = 1;
4227 #endif /* OBJ_ELF */
4229 static void s_arm_arch (int);
4230 static void s_arm_object_arch (int);
4231 static void s_arm_cpu (int);
4232 static void s_arm_fpu (int);
4234 #ifdef TE_PE
4236 static void
4237 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
4239 expressionS exp;
4243 expression (&exp);
4244 if (exp.X_op == O_symbol)
4245 exp.X_op = O_secrel;
4247 emit_expr (&exp, 4);
4249 while (*input_line_pointer++ == ',');
4251 input_line_pointer--;
4252 demand_empty_rest_of_line ();
4254 #endif /* TE_PE */
4256 /* This table describes all the machine specific pseudo-ops the assembler
4257 has to support. The fields are:
4258 pseudo-op name without dot
4259 function to call to execute this pseudo-op
4260 Integer arg to pass to the function. */
4262 const pseudo_typeS md_pseudo_table[] =
4264 /* Never called because '.req' does not start a line. */
4265 { "req", s_req, 0 },
4266 /* Following two are likewise never called. */
4267 { "dn", s_dn, 0 },
4268 { "qn", s_qn, 0 },
4269 { "unreq", s_unreq, 0 },
4270 { "bss", s_bss, 0 },
4271 { "align", s_align, 0 },
4272 { "arm", s_arm, 0 },
4273 { "thumb", s_thumb, 0 },
4274 { "code", s_code, 0 },
4275 { "force_thumb", s_force_thumb, 0 },
4276 { "thumb_func", s_thumb_func, 0 },
4277 { "thumb_set", s_thumb_set, 0 },
4278 { "even", s_even, 0 },
4279 { "ltorg", s_ltorg, 0 },
4280 { "pool", s_ltorg, 0 },
4281 { "syntax", s_syntax, 0 },
4282 { "cpu", s_arm_cpu, 0 },
4283 { "arch", s_arm_arch, 0 },
4284 { "object_arch", s_arm_object_arch, 0 },
4285 { "fpu", s_arm_fpu, 0 },
4286 #ifdef OBJ_ELF
4287 { "word", s_arm_elf_cons, 4 },
4288 { "long", s_arm_elf_cons, 4 },
4289 { "inst.n", s_arm_elf_inst, 2 },
4290 { "inst.w", s_arm_elf_inst, 4 },
4291 { "inst", s_arm_elf_inst, 0 },
4292 { "rel31", s_arm_rel31, 0 },
4293 { "fnstart", s_arm_unwind_fnstart, 0 },
4294 { "fnend", s_arm_unwind_fnend, 0 },
4295 { "cantunwind", s_arm_unwind_cantunwind, 0 },
4296 { "personality", s_arm_unwind_personality, 0 },
4297 { "personalityindex", s_arm_unwind_personalityindex, 0 },
4298 { "handlerdata", s_arm_unwind_handlerdata, 0 },
4299 { "save", s_arm_unwind_save, 0 },
4300 { "vsave", s_arm_unwind_save, 1 },
4301 { "movsp", s_arm_unwind_movsp, 0 },
4302 { "pad", s_arm_unwind_pad, 0 },
4303 { "setfp", s_arm_unwind_setfp, 0 },
4304 { "unwind_raw", s_arm_unwind_raw, 0 },
4305 { "eabi_attribute", s_arm_eabi_attribute, 0 },
4306 #else
4307 { "word", cons, 4},
4309 /* These are used for dwarf. */
4310 {"2byte", cons, 2},
4311 {"4byte", cons, 4},
4312 {"8byte", cons, 8},
4313 /* These are used for dwarf2. */
4314 { "file", (void (*) (int)) dwarf2_directive_file, 0 },
4315 { "loc", dwarf2_directive_loc, 0 },
4316 { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
4317 #endif
4318 { "extend", float_cons, 'x' },
4319 { "ldouble", float_cons, 'x' },
4320 { "packed", float_cons, 'p' },
4321 #ifdef TE_PE
4322 {"secrel32", pe_directive_secrel, 0},
4323 #endif
4324 { 0, 0, 0 }
4327 /* Parser functions used exclusively in instruction operands. */
4329 /* Generic immediate-value read function for use in insn parsing.
4330 STR points to the beginning of the immediate (the leading #);
4331 VAL receives the value; if the value is outside [MIN, MAX]
4332 issue an error. PREFIX_OPT is true if the immediate prefix is
4333 optional. */
4335 static int
4336 parse_immediate (char **str, int *val, int min, int max,
4337 bfd_boolean prefix_opt)
4339 expressionS exp;
4340 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4341 if (exp.X_op != O_constant)
4343 inst.error = _("constant expression required");
4344 return FAIL;
4347 if (exp.X_add_number < min || exp.X_add_number > max)
4349 inst.error = _("immediate value out of range");
4350 return FAIL;
4353 *val = exp.X_add_number;
4354 return SUCCESS;
4357 /* Less-generic immediate-value read function with the possibility of loading a
4358 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4359 instructions. Puts the result directly in inst.operands[i]. */
4361 static int
4362 parse_big_immediate (char **str, int i)
4364 expressionS exp;
4365 char *ptr = *str;
4367 my_get_expression (&exp, &ptr, GE_OPT_PREFIX_BIG);
4369 if (exp.X_op == O_constant)
4371 inst.operands[i].imm = exp.X_add_number & 0xffffffff;
4372 /* If we're on a 64-bit host, then a 64-bit number can be returned using
4373 O_constant. We have to be careful not to break compilation for
4374 32-bit X_add_number, though. */
4375 if ((exp.X_add_number & ~0xffffffffl) != 0)
4377 /* X >> 32 is illegal if sizeof (exp.X_add_number) == 4. */
4378 inst.operands[i].reg = ((exp.X_add_number >> 16) >> 16) & 0xffffffff;
4379 inst.operands[i].regisimm = 1;
4382 else if (exp.X_op == O_big
4383 && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 32
4384 && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number <= 64)
4386 unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;
4387 /* Bignums have their least significant bits in
4388 generic_bignum[0]. Make sure we put 32 bits in imm and
4389 32 bits in reg, in a (hopefully) portable way. */
4390 gas_assert (parts != 0);
4391 inst.operands[i].imm = 0;
4392 for (j = 0; j < parts; j++, idx++)
4393 inst.operands[i].imm |= generic_bignum[idx]
4394 << (LITTLENUM_NUMBER_OF_BITS * j);
4395 inst.operands[i].reg = 0;
4396 for (j = 0; j < parts; j++, idx++)
4397 inst.operands[i].reg |= generic_bignum[idx]
4398 << (LITTLENUM_NUMBER_OF_BITS * j);
4399 inst.operands[i].regisimm = 1;
4401 else
4402 return FAIL;
4404 *str = ptr;
4406 return SUCCESS;
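/* For example, "vmov.i64 d0, #0xff0000ff0000ffff" needs all 64 bits of
the immediate: the low 32 bits land in inst.operands[i].imm, the high 32
bits in inst.operands[i].reg, and regisimm marks the pair as one
immediate rather than an immediate plus a register.  */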
4409 /* Returns the pseudo-register number of an FPA immediate constant,
4410 or FAIL if there isn't a valid constant here. */
4412 static int
4413 parse_fpa_immediate (char ** str)
4415 LITTLENUM_TYPE words[MAX_LITTLENUMS];
4416 char * save_in;
4417 expressionS exp;
4418 int i;
4419 int j;
4421 /* First try and match exact strings; this is to guarantee
4422 that some formats will work even for cross assembly. */
4424 for (i = 0; fp_const[i]; i++)
4426 if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
4428 char *start = *str;
4430 *str += strlen (fp_const[i]);
4431 if (is_end_of_line[(unsigned char) **str])
4432 return i + 8;
4433 *str = start;
4437 /* Just because we didn't get a match doesn't mean that the constant
4438 isn't valid, just that it is in a format that we don't
4439 automatically recognize. Try parsing it with the standard
4440 expression routines. */
4442 memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));
4444 /* Look for a raw floating point number. */
4445 if ((save_in = atof_ieee (*str, 'x', words)) != NULL
4446 && is_end_of_line[(unsigned char) *save_in])
4448 for (i = 0; i < NUM_FLOAT_VALS; i++)
4450 for (j = 0; j < MAX_LITTLENUMS; j++)
4452 if (words[j] != fp_values[i][j])
4453 break;
4456 if (j == MAX_LITTLENUMS)
4458 *str = save_in;
4459 return i + 8;
4464 /* Try and parse a more complex expression; this will probably fail
4465 unless the code uses a floating point prefix (e.g. "0f"). */
4466 save_in = input_line_pointer;
4467 input_line_pointer = *str;
4468 if (expression (&exp) == absolute_section
4469 && exp.X_op == O_big
4470 && exp.X_add_number < 0)
4472 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4473 Ditto for 15. */
4474 if (gen_to_words (words, 5, (long) 15) == 0)
4476 for (i = 0; i < NUM_FLOAT_VALS; i++)
4478 for (j = 0; j < MAX_LITTLENUMS; j++)
4480 if (words[j] != fp_values[i][j])
4481 break;
4484 if (j == MAX_LITTLENUMS)
4486 *str = input_line_pointer;
4487 input_line_pointer = save_in;
4488 return i + 8;
4494 *str = input_line_pointer;
4495 input_line_pointer = save_in;
4496 inst.error = _("invalid FPA immediate expression");
4497 return FAIL;
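/* For example, in an FPA instruction such as "mvfd f0, #1.0" the
immediate matches one of the eight FPA constants (0, 1, 2, 3, 4, 5, 0.5
and 10) listed in fp_const, so this function returns that entry's index
plus 8; anything else yields FAIL and an "invalid FPA immediate"
error.  */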
4500 /* Returns 1 if a number has "quarter-precision" float format
4501 0baBbbbbbc defgh000 00000000 00000000. */
4503 static int
4504 is_quarter_float (unsigned imm)
4506 int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
4507 return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
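/* For example, 1.0f (0x3f800000) and 2.5f (0x40200000) have all of
their low 19 bits clear and an exponent of the aBbbbbb form, so they are
accepted, whereas 0.1f (0x3dcccccd) has nonzero low-order mantissa bits
and is rejected.  */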
4510 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4511 0baBbbbbbc defgh000 00000000 00000000.
4512 The zero and minus-zero cases need special handling, since they can't be
4513 encoded in the "quarter-precision" float format, but can nonetheless be
4514 loaded as integer constants. */
4516 static unsigned
4517 parse_qfloat_immediate (char **ccp, int *immed)
4519 char *str = *ccp;
4520 char *fpnum;
4521 LITTLENUM_TYPE words[MAX_LITTLENUMS];
4522 int found_fpchar = 0;
4524 skip_past_char (&str, '#');
4526 /* We must not accidentally parse an integer as a floating-point number. Make
4527 sure that the value we parse is not an integer by checking for special
4528 characters '.' or 'e'.
4529 FIXME: This is a horrible hack, but doing better is tricky because type
4530 information isn't in a very usable state at parse time. */
4531 fpnum = str;
4532 skip_whitespace (fpnum);
4534 if (strncmp (fpnum, "0x", 2) == 0)
4535 return FAIL;
4536 else
4538 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
4539 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
4541 found_fpchar = 1;
4542 break;
4545 if (!found_fpchar)
4546 return FAIL;
4549 if ((str = atof_ieee (str, 's', words)) != NULL)
4551 unsigned fpword = 0;
4552 int i;
4554 /* Our FP word must be 32 bits (single-precision FP). */
4555 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
4557 fpword <<= LITTLENUM_NUMBER_OF_BITS;
4558 fpword |= words[i];
4561 if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
4562 *immed = fpword;
4563 else
4564 return FAIL;
4566 *ccp = str;
4568 return SUCCESS;
4571 return FAIL;
4574 /* Shift operands. */
4575 enum shift_kind
4577 SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
4580 struct asm_shift_name
4582 const char *name;
4583 enum shift_kind kind;
4586 /* Third argument to parse_shift. */
4587 enum parse_shift_mode
4589 NO_SHIFT_RESTRICT, /* Any kind of shift is accepted. */
4590 SHIFT_IMMEDIATE, /* Shift operand must be an immediate. */
4591 SHIFT_LSL_OR_ASR_IMMEDIATE, /* Shift must be LSL or ASR immediate. */
4592 SHIFT_ASR_IMMEDIATE, /* Shift must be ASR immediate. */
4593 SHIFT_LSL_IMMEDIATE, /* Shift must be LSL immediate. */
4596 /* Parse a <shift> specifier on an ARM data processing instruction.
4597 This has three forms:
4599 (LSL|LSR|ASL|ASR|ROR) Rs
4600 (LSL|LSR|ASL|ASR|ROR) #imm
4601 RRX
4603 Note that ASL is assimilated to LSL in the instruction encoding, and
4604 RRX to ROR #0 (which cannot be written as such). */
4606 static int
4607 parse_shift (char **str, int i, enum parse_shift_mode mode)
4609 const struct asm_shift_name *shift_name;
4610 enum shift_kind shift;
4611 char *s = *str;
4612 char *p = s;
4613 int reg;
4615 for (p = *str; ISALPHA (*p); p++)
4618 if (p == *str)
4620 inst.error = _("shift expression expected");
4621 return FAIL;
4624 shift_name = hash_find_n (arm_shift_hsh, *str, p - *str);
4626 if (shift_name == NULL)
4628 inst.error = _("shift expression expected");
4629 return FAIL;
4632 shift = shift_name->kind;
4634 switch (mode)
4636 case NO_SHIFT_RESTRICT:
4637 case SHIFT_IMMEDIATE: break;
4639 case SHIFT_LSL_OR_ASR_IMMEDIATE:
4640 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
4642 inst.error = _("'LSL' or 'ASR' required");
4643 return FAIL;
4645 break;
4647 case SHIFT_LSL_IMMEDIATE:
4648 if (shift != SHIFT_LSL)
4650 inst.error = _("'LSL' required");
4651 return FAIL;
4653 break;
4655 case SHIFT_ASR_IMMEDIATE:
4656 if (shift != SHIFT_ASR)
4658 inst.error = _("'ASR' required");
4659 return FAIL;
4661 break;
4663 default: abort ();
4666 if (shift != SHIFT_RRX)
4668 /* Whitespace can appear here if the next thing is a bare digit. */
4669 skip_whitespace (p);
4671 if (mode == NO_SHIFT_RESTRICT
4672 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4674 inst.operands[i].imm = reg;
4675 inst.operands[i].immisreg = 1;
4677 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4678 return FAIL;
4680 inst.operands[i].shift_kind = shift;
4681 inst.operands[i].shifted = 1;
4682 *str = p;
4683 return SUCCESS;
4686 /* Parse a <shifter_operand> for an ARM data processing instruction:
4688 #<immediate>
4689 #<immediate>, <rotate>
4690 <Rm>
4691 <Rm>, <shift>
4693 where <shift> is defined by parse_shift above, and <rotate> is a
4694 multiple of 2 between 0 and 30. Validation of immediate operands
4695 is deferred to md_apply_fix. */
4697 static int
4698 parse_shifter_operand (char **str, int i)
4700 int value;
4701 expressionS expr;
4703 if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
4705 inst.operands[i].reg = value;
4706 inst.operands[i].isreg = 1;
4708 /* parse_shift will override this if appropriate */
4709 inst.reloc.exp.X_op = O_constant;
4710 inst.reloc.exp.X_add_number = 0;
4712 if (skip_past_comma (str) == FAIL)
4713 return SUCCESS;
4715 /* Shift operation on register. */
4716 return parse_shift (str, i, NO_SHIFT_RESTRICT);
4719 if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
4720 return FAIL;
4722 if (skip_past_comma (str) == SUCCESS)
4724 /* #x, y -- ie explicit rotation by Y. */
4725 if (my_get_expression (&expr, str, GE_NO_PREFIX))
4726 return FAIL;
4728 if (expr.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
4730 inst.error = _("constant expression expected");
4731 return FAIL;
4734 value = expr.X_add_number;
4735 if (value < 0 || value > 30 || value % 2 != 0)
4737 inst.error = _("invalid rotation");
4738 return FAIL;
4740 if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
4742 inst.error = _("invalid constant");
4743 return FAIL;
4746 /* Convert to decoded value. md_apply_fix will put it back. */
4747 inst.reloc.exp.X_add_number
4748 = (((inst.reloc.exp.X_add_number << (32 - value))
4749 | (inst.reloc.exp.X_add_number >> value)) & 0xffffffff);
4752 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
4753 inst.reloc.pc_rel = 0;
4754 return SUCCESS;
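/* Worked example (standalone sketch, not part of tc-arm.c): the "#x, y"
   conversion above rotates the 8-bit constant right by the given even
   amount, so "#0xff, 8" is stored as 0xff000000 and md_apply_fix later
   re-encodes it as value 0xff with rotation 8.  */

#include <stdio.h>

static unsigned int
decode_rotated_imm (unsigned int imm, unsigned int rot)
{
  /* Same arithmetic as above, with the rot == 0 case handled explicitly
     so the shift amounts stay within 0..31.  */
  if (rot == 0)
    return imm & 0xffffffff;
  return ((imm << (32 - rot)) | (imm >> rot)) & 0xffffffff;
}

int
main (void)
{
  printf ("#0xff, 8  -> 0x%08x\n", decode_rotated_imm (0xff, 8));  /* 0xff000000 */
  printf ("#0x3f, 30 -> 0x%08x\n", decode_rotated_imm (0x3f, 30)); /* 0x000000fc */
  return 0;
}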
4757 /* Group relocation information. Each entry in the table contains the
4758 textual name of the relocation, as it may appear in assembler source,
4759 where it must be followed by a colon.
4760 Along with this textual name are the relocation codes to be used if
4761 the corresponding instruction is an ALU instruction (ADD or SUB only),
4762 an LDR, an LDRS, or an LDC. */
4764 struct group_reloc_table_entry
4766 const char *name;
4767 int alu_code;
4768 int ldr_code;
4769 int ldrs_code;
4770 int ldc_code;
4773 typedef enum
4775 /* Varieties of non-ALU group relocation. */
4777 GROUP_LDR,
4778 GROUP_LDRS,
4779 GROUP_LDC
4780 } group_reloc_type;
4782 static struct group_reloc_table_entry group_reloc_table[] =
4783 { /* Program counter relative: */
4784 { "pc_g0_nc",
4785 BFD_RELOC_ARM_ALU_PC_G0_NC, /* ALU */
4786 0, /* LDR */
4787 0, /* LDRS */
4788 0 }, /* LDC */
4789 { "pc_g0",
4790 BFD_RELOC_ARM_ALU_PC_G0, /* ALU */
4791 BFD_RELOC_ARM_LDR_PC_G0, /* LDR */
4792 BFD_RELOC_ARM_LDRS_PC_G0, /* LDRS */
4793 BFD_RELOC_ARM_LDC_PC_G0 }, /* LDC */
4794 { "pc_g1_nc",
4795 BFD_RELOC_ARM_ALU_PC_G1_NC, /* ALU */
4796 0, /* LDR */
4797 0, /* LDRS */
4798 0 }, /* LDC */
4799 { "pc_g1",
4800 BFD_RELOC_ARM_ALU_PC_G1, /* ALU */
4801 BFD_RELOC_ARM_LDR_PC_G1, /* LDR */
4802 BFD_RELOC_ARM_LDRS_PC_G1, /* LDRS */
4803 BFD_RELOC_ARM_LDC_PC_G1 }, /* LDC */
4804 { "pc_g2",
4805 BFD_RELOC_ARM_ALU_PC_G2, /* ALU */
4806 BFD_RELOC_ARM_LDR_PC_G2, /* LDR */
4807 BFD_RELOC_ARM_LDRS_PC_G2, /* LDRS */
4808 BFD_RELOC_ARM_LDC_PC_G2 }, /* LDC */
4809 /* Section base relative */
4810 { "sb_g0_nc",
4811 BFD_RELOC_ARM_ALU_SB_G0_NC, /* ALU */
4812 0, /* LDR */
4813 0, /* LDRS */
4814 0 }, /* LDC */
4815 { "sb_g0",
4816 BFD_RELOC_ARM_ALU_SB_G0, /* ALU */
4817 BFD_RELOC_ARM_LDR_SB_G0, /* LDR */
4818 BFD_RELOC_ARM_LDRS_SB_G0, /* LDRS */
4819 BFD_RELOC_ARM_LDC_SB_G0 }, /* LDC */
4820 { "sb_g1_nc",
4821 BFD_RELOC_ARM_ALU_SB_G1_NC, /* ALU */
4822 0, /* LDR */
4823 0, /* LDRS */
4824 0 }, /* LDC */
4825 { "sb_g1",
4826 BFD_RELOC_ARM_ALU_SB_G1, /* ALU */
4827 BFD_RELOC_ARM_LDR_SB_G1, /* LDR */
4828 BFD_RELOC_ARM_LDRS_SB_G1, /* LDRS */
4829 BFD_RELOC_ARM_LDC_SB_G1 }, /* LDC */
4830 { "sb_g2",
4831 BFD_RELOC_ARM_ALU_SB_G2, /* ALU */
4832 BFD_RELOC_ARM_LDR_SB_G2, /* LDR */
4833 BFD_RELOC_ARM_LDRS_SB_G2, /* LDRS */
4834 BFD_RELOC_ARM_LDC_SB_G2 } }; /* LDC */
4836 /* Given the address of a pointer pointing to the textual name of a group
4837 relocation as it may appear in assembler source, attempt to find its details
4838 in group_reloc_table. The pointer will be updated to the character after
4839 the trailing colon. On failure, FAIL will be returned; SUCCESS
4840 otherwise. On success, *entry will be updated to point at the relevant
4841 group_reloc_table entry. */
4843 static int
4844 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
4846 unsigned int i;
4847 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
4849 int length = strlen (group_reloc_table[i].name);
4851 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
4852 && (*str)[length] == ':')
4854 *out = &group_reloc_table[i];
4855 *str += (length + 1);
4856 return SUCCESS;
4860 return FAIL;
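/* Illustration (standalone sketch with a made-up two-entry table, not the
   real group_reloc_table): the lookup above is a case-insensitive prefix
   match that requires a trailing colon and then advances past it.  */

#include <stdio.h>
#include <string.h>
#include <strings.h>

struct entry { const char *name; int code; };

static const struct entry table[] = {
  { "pc_g0", 1 },
  { "sb_g1", 2 },
};

static const struct entry *
find_entry (const char **str)
{
  unsigned int i;
  for (i = 0; i < sizeof table / sizeof table[0]; i++)
    {
      size_t len = strlen (table[i].name);
      if (strncasecmp (table[i].name, *str, len) == 0 && (*str)[len] == ':')
        {
          *str += len + 1;   /* Step past the name and the colon.  */
          return &table[i];
        }
    }
  return NULL;
}

int
main (void)
{
  const char *s = "PC_G0:some_symbol";
  const struct entry *e = find_entry (&s);

  if (e)
    printf ("matched code %d, rest \"%s\"\n", e->code, s);
  else
    printf ("no match\n");
  return 0;
}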
4863 /* Parse a <shifter_operand> for an ARM data processing instruction
4864 (as for parse_shifter_operand) where group relocations are allowed:
4866 #<immediate>
4867 #<immediate>, <rotate>
4868 #:<group_reloc>:<expression>
4869 <Rm>
4870 <Rm>, <shift>
4872 where <group_reloc> is one of the strings defined in group_reloc_table.
4873 The hashes are optional.
4875 Everything else is as for parse_shifter_operand. */
4877 static parse_operand_result
4878 parse_shifter_operand_group_reloc (char **str, int i)
4880 /* Determine if we have the sequence of characters #: or just :
4881 coming next. If we do, then we check for a group relocation.
4882 If we don't, punt the whole lot to parse_shifter_operand. */
4884 if (((*str)[0] == '#' && (*str)[1] == ':')
4885 || (*str)[0] == ':')
4887 struct group_reloc_table_entry *entry;
4889 if ((*str)[0] == '#')
4890 (*str) += 2;
4891 else
4892 (*str)++;
4894 /* Try to parse a group relocation. Anything else is an error. */
4895 if (find_group_reloc_table_entry (str, &entry) == FAIL)
4897 inst.error = _("unknown group relocation");
4898 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4901 /* We now have the group relocation table entry corresponding to
4902 the name in the assembler source. Next, we parse the expression. */
4903 if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
4904 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4906 /* Record the relocation type (always the ALU variant here). */
4907 inst.reloc.type = entry->alu_code;
4908 gas_assert (inst.reloc.type != 0);
4910 return PARSE_OPERAND_SUCCESS;
4912 else
4913 return parse_shifter_operand (str, i) == SUCCESS
4914 ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
4916 /* Never reached. */
4919 /* Parse all forms of an ARM address expression. Information is written
4920 to inst.operands[i] and/or inst.reloc.
4922 Preindexed addressing (.preind=1):
4924 [Rn, #offset] .reg=Rn .reloc.exp=offset
4925 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4926 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4927 .shift_kind=shift .reloc.exp=shift_imm
4929 These three may have a trailing ! which causes .writeback to be set also.
4931 Postindexed addressing (.postind=1, .writeback=1):
4933 [Rn], #offset .reg=Rn .reloc.exp=offset
4934 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4935 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4936 .shift_kind=shift .reloc.exp=shift_imm
4938 Unindexed addressing (.preind=0, .postind=0):
4940 [Rn], {option} .reg=Rn .imm=option .immisreg=0
4942 Other:
4944 [Rn]{!} shorthand for [Rn,#0]{!}
4945 =immediate .isreg=0 .reloc.exp=immediate
4946 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
4948 It is the caller's responsibility to check for addressing modes not
4949 supported by the instruction, and to set inst.reloc.type. */
4951 static parse_operand_result
4952 parse_address_main (char **str, int i, int group_relocations,
4953 group_reloc_type group_type)
4955 char *p = *str;
4956 int reg;
4958 if (skip_past_char (&p, '[') == FAIL)
4960 if (skip_past_char (&p, '=') == FAIL)
4962 /* bare address - translate to PC-relative offset */
4963 inst.reloc.pc_rel = 1;
4964 inst.operands[i].reg = REG_PC;
4965 inst.operands[i].isreg = 1;
4966 inst.operands[i].preind = 1;
4968 /* else a load-constant pseudo op, no special treatment needed here */
4970 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
4971 return PARSE_OPERAND_FAIL;
4973 *str = p;
4974 return PARSE_OPERAND_SUCCESS;
4977 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
4979 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
4980 return PARSE_OPERAND_FAIL;
4982 inst.operands[i].reg = reg;
4983 inst.operands[i].isreg = 1;
4985 if (skip_past_comma (&p) == SUCCESS)
4987 inst.operands[i].preind = 1;
4989 if (*p == '+') p++;
4990 else if (*p == '-') p++, inst.operands[i].negative = 1;
4992 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4994 inst.operands[i].imm = reg;
4995 inst.operands[i].immisreg = 1;
4997 if (skip_past_comma (&p) == SUCCESS)
4998 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
4999 return PARSE_OPERAND_FAIL;
5001 else if (skip_past_char (&p, ':') == SUCCESS)
5003 /* FIXME: '@' should be used here, but it's filtered out by generic
5004 code before we get to see it here. This may be subject to
5005 change. */
5006 expressionS exp;
5007 my_get_expression (&exp, &p, GE_NO_PREFIX);
5008 if (exp.X_op != O_constant)
5010 inst.error = _("alignment must be constant");
5011 return PARSE_OPERAND_FAIL;
5013 inst.operands[i].imm = exp.X_add_number << 8;
5014 inst.operands[i].immisalign = 1;
5015 /* Alignments are not pre-indexes. */
5016 inst.operands[i].preind = 0;
5018 else
5020 if (inst.operands[i].negative)
5022 inst.operands[i].negative = 0;
5023 p--;
5026 if (group_relocations
5027 && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
5029 struct group_reloc_table_entry *entry;
5031 /* Skip over the #: or : sequence. */
5032 if (*p == '#')
5033 p += 2;
5034 else
5035 p++;
5037 /* Try to parse a group relocation. Anything else is an
5038 error. */
5039 if (find_group_reloc_table_entry (&p, &entry) == FAIL)
5041 inst.error = _("unknown group relocation");
5042 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5045 /* We now have the group relocation table entry corresponding to
5046 the name in the assembler source. Next, we parse the
5047 expression. */
5048 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
5049 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5051 /* Record the relocation type. */
5052 switch (group_type)
5054 case GROUP_LDR:
5055 inst.reloc.type = entry->ldr_code;
5056 break;
5058 case GROUP_LDRS:
5059 inst.reloc.type = entry->ldrs_code;
5060 break;
5062 case GROUP_LDC:
5063 inst.reloc.type = entry->ldc_code;
5064 break;
5066 default:
5067 gas_assert (0);
5070 if (inst.reloc.type == 0)
5072 inst.error = _("this group relocation is not allowed on this instruction");
5073 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5076 else
5077 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5078 return PARSE_OPERAND_FAIL;
5082 if (skip_past_char (&p, ']') == FAIL)
5084 inst.error = _("']' expected");
5085 return PARSE_OPERAND_FAIL;
5088 if (skip_past_char (&p, '!') == SUCCESS)
5089 inst.operands[i].writeback = 1;
5091 else if (skip_past_comma (&p) == SUCCESS)
5093 if (skip_past_char (&p, '{') == SUCCESS)
5095 /* [Rn], {expr} - unindexed, with option */
5096 if (parse_immediate (&p, &inst.operands[i].imm,
5097 0, 255, TRUE) == FAIL)
5098 return PARSE_OPERAND_FAIL;
5100 if (skip_past_char (&p, '}') == FAIL)
5102 inst.error = _("'}' expected at end of 'option' field");
5103 return PARSE_OPERAND_FAIL;
5105 if (inst.operands[i].preind)
5107 inst.error = _("cannot combine index with option");
5108 return PARSE_OPERAND_FAIL;
5110 *str = p;
5111 return PARSE_OPERAND_SUCCESS;
5113 else
5115 inst.operands[i].postind = 1;
5116 inst.operands[i].writeback = 1;
5118 if (inst.operands[i].preind)
5120 inst.error = _("cannot combine pre- and post-indexing");
5121 return PARSE_OPERAND_FAIL;
5124 if (*p == '+') p++;
5125 else if (*p == '-') p++, inst.operands[i].negative = 1;
5127 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5129 /* We might be using the immediate for alignment already. If we
5130 are, OR the register number into the low-order bits. */
5131 if (inst.operands[i].immisalign)
5132 inst.operands[i].imm |= reg;
5133 else
5134 inst.operands[i].imm = reg;
5135 inst.operands[i].immisreg = 1;
5137 if (skip_past_comma (&p) == SUCCESS)
5138 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
5139 return PARSE_OPERAND_FAIL;
5141 else
5143 if (inst.operands[i].negative)
5145 inst.operands[i].negative = 0;
5146 p--;
5148 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5149 return PARSE_OPERAND_FAIL;
5154 /* If at this point neither .preind nor .postind is set, we have a
5155 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
5156 if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
5158 inst.operands[i].preind = 1;
5159 inst.reloc.exp.X_op = O_constant;
5160 inst.reloc.exp.X_add_number = 0;
5162 *str = p;
5163 return PARSE_OPERAND_SUCCESS;
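/* Summary sketch (standalone; the field names simply mirror the comment
   before parse_address_main): how the addressing flags end up for a few
   common operand spellings.  */

#include <stdio.h>

struct addr_flags
{
  const char *text;
  int preind, postind, writeback, immisreg;
};

int
main (void)
{
  static const struct addr_flags ex[] = {
    { "[r1, #8]",          1, 0, 0, 0 },  /* pre-indexed, immediate offset    */
    { "[r1, r2, LSL #2]!", 1, 0, 1, 1 },  /* pre-indexed, register, writeback */
    { "[r1], #8",          0, 1, 1, 0 },  /* post-indexed, immediate offset   */
    { "[r1]",              1, 0, 0, 0 },  /* shorthand for [r1, #0]           */
  };
  unsigned int i;

  for (i = 0; i < sizeof ex / sizeof ex[0]; i++)
    printf ("%-20s preind=%d postind=%d writeback=%d immisreg=%d\n",
            ex[i].text, ex[i].preind, ex[i].postind,
            ex[i].writeback, ex[i].immisreg);
  return 0;
}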
5166 static int
5167 parse_address (char **str, int i)
5169 return parse_address_main (str, i, 0, 0) == PARSE_OPERAND_SUCCESS
5170 ? SUCCESS : FAIL;
5173 static parse_operand_result
5174 parse_address_group_reloc (char **str, int i, group_reloc_type type)
5176 return parse_address_main (str, i, 1, type);
5179 /* Parse an operand for a MOVW or MOVT instruction. */
5180 static int
5181 parse_half (char **str)
5183 char * p;
5185 p = *str;
5186 skip_past_char (&p, '#');
5187 if (strncasecmp (p, ":lower16:", 9) == 0)
5188 inst.reloc.type = BFD_RELOC_ARM_MOVW;
5189 else if (strncasecmp (p, ":upper16:", 9) == 0)
5190 inst.reloc.type = BFD_RELOC_ARM_MOVT;
5192 if (inst.reloc.type != BFD_RELOC_UNUSED)
5194 p += 9;
5195 skip_whitespace (p);
5198 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
5199 return FAIL;
5201 if (inst.reloc.type == BFD_RELOC_UNUSED)
5203 if (inst.reloc.exp.X_op != O_constant)
5205 inst.error = _("constant expression expected");
5206 return FAIL;
5208 if (inst.reloc.exp.X_add_number < 0
5209 || inst.reloc.exp.X_add_number > 0xffff)
5211 inst.error = _("immediate value out of range");
5212 return FAIL;
5215 *str = p;
5216 return SUCCESS;
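/* Illustration (standalone sketch; the constant is an arbitrary example):
   the :lower16:/:upper16: operands parsed above are how a full 32-bit
   value is loaded with a MOVW/MOVT pair, e.g.
       movw  r0, #:lower16:some_symbol
       movt  r0, #:upper16:some_symbol
   The arithmetic each half-relocation performs on the resolved value:  */

#include <stdio.h>

int
main (void)
{
  unsigned int value = 0x12345678;        /* resolved value of some_symbol   */
  unsigned int lower16 = value & 0xffff;  /* what :lower16: supplies to MOVW */
  unsigned int upper16 = value >> 16;     /* what :upper16: supplies to MOVT */

  printf ("movw #0x%04x, movt #0x%04x\n", lower16, upper16);
  return 0;
}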
5219 /* Miscellaneous. */
5221 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
5222 or a bitmask suitable to be or-ed into the ARM msr instruction. */
5223 static int
5224 parse_psr (char **str)
5226 char *p;
5227 unsigned long psr_field;
5228 const struct asm_psr *psr;
5229 char *start;
5231 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
5232 feature for ease of use and backwards compatibility. */
5233 p = *str;
5234 if (strncasecmp (p, "SPSR", 4) == 0)
5235 psr_field = SPSR_BIT;
5236 else if (strncasecmp (p, "CPSR", 4) == 0)
5237 psr_field = 0;
5238 else
5240 start = p;
5242 p++;
5243 while (ISALNUM (*p) || *p == '_');
5245 psr = hash_find_n (arm_v7m_psr_hsh, start, p - start);
5246 if (!psr)
5247 return FAIL;
5249 *str = p;
5250 return psr->field;
5253 p += 4;
5254 if (*p == '_')
5256 /* A suffix follows. */
5257 p++;
5258 start = p;
5261 p++;
5262 while (ISALNUM (*p) || *p == '_');
5264 psr = hash_find_n (arm_psr_hsh, start, p - start);
5265 if (!psr)
5266 goto error;
5268 psr_field |= psr->field;
5270 else
5272 if (ISALNUM (*p))
5273 goto error; /* Garbage after "[CS]PSR". */
5275 psr_field |= (PSR_c | PSR_f);
5277 *str = p;
5278 return psr_field;
5280 error:
5281 inst.error = _("flag for {c}psr instruction expected");
5282 return FAIL;
5285 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
5286 value suitable for splatting into the AIF field of the instruction. */
5288 static int
5289 parse_cps_flags (char **str)
5291 int val = 0;
5292 int saw_a_flag = 0;
5293 char *s = *str;
5295 for (;;)
5296 switch (*s++)
5298 case '\0': case ',':
5299 goto done;
5301 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
5302 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
5303 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
5305 default:
5306 inst.error = _("unrecognized CPS flag");
5307 return FAIL;
5310 done:
5311 if (saw_a_flag == 0)
5313 inst.error = _("missing CPS flags");
5314 return FAIL;
5317 *str = s - 1;
5318 return val;
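/* Illustration (standalone sketch, helper name made up): the flag letters
   accepted above map onto the three AIF bits, a = 4, i = 2, f = 1.  */

#include <stdio.h>
#include <ctype.h>

static int
cps_flags (const char *s)
{
  int val = 0;
  for (; *s; s++)
    switch (tolower ((unsigned char) *s))
      {
      case 'a': val |= 0x4; break;
      case 'i': val |= 0x2; break;
      case 'f': val |= 0x1; break;
      default: return -1;
      }
  return val;
}

int
main (void)
{
  /* Prints: aif -> 7, ai -> 6, if -> 3  */
  printf ("aif -> %d, ai -> %d, if -> %d\n",
          cps_flags ("aif"), cps_flags ("ai"), cps_flags ("if"));
  return 0;
}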
5321 /* Parse an endian specifier ("BE" or "LE", case insensitive);
5322 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
5324 static int
5325 parse_endian_specifier (char **str)
5327 int little_endian;
5328 char *s = *str;
5330 if (strncasecmp (s, "BE", 2))
5331 little_endian = 0;
5332 else if (strncasecmp (s, "LE", 2))
5333 little_endian = 1;
5334 else
5336 inst.error = _("valid endian specifiers are be or le");
5337 return FAIL;
5340 if (ISALNUM (s[2]) || s[2] == '_')
5342 inst.error = _("valid endian specifiers are be or le");
5343 return FAIL;
5346 *str = s + 2;
5347 return little_endian;
5350 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
5351 value suitable for poking into the rotate field of an sxt or sxta
5352 instruction, or FAIL on error. */
5354 static int
5355 parse_ror (char **str)
5357 int rot;
5358 char *s = *str;
5360 if (strncasecmp (s, "ROR", 3) == 0)
5361 s += 3;
5362 else
5364 inst.error = _("missing rotation field after comma");
5365 return FAIL;
5368 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
5369 return FAIL;
5371 switch (rot)
5373 case 0: *str = s; return 0x0;
5374 case 8: *str = s; return 0x1;
5375 case 16: *str = s; return 0x2;
5376 case 24: *str = s; return 0x3;
5378 default:
5379 inst.error = _("rotation can only be 0, 8, 16, or 24");
5380 return FAIL;
5384 /* Parse a conditional code (from conds[] below). The value returned is in the
5385 range 0 .. 14, or FAIL. */
5386 static int
5387 parse_cond (char **str)
5389 char *q;
5390 const struct asm_cond *c;
5391 int n;
5392 /* Condition codes are always 2 characters, so matching up to
5393 3 characters is sufficient. */
5394 char cond[3];
5396 q = *str;
5397 n = 0;
5398 while (ISALPHA (*q) && n < 3)
5400 cond[n] = TOLOWER (*q);
5401 q++;
5402 n++;
5405 c = hash_find_n (arm_cond_hsh, cond, n);
5406 if (!c)
5408 inst.error = _("condition required");
5409 return FAIL;
5412 *str = q;
5413 return c->value;
5416 /* Parse an option for a barrier instruction. Returns the encoding for the
5417 option, or FAIL. */
5418 static int
5419 parse_barrier (char **str)
5421 char *p, *q;
5422 const struct asm_barrier_opt *o;
5424 p = q = *str;
5425 while (ISALPHA (*q))
5426 q++;
5428 o = hash_find_n (arm_barrier_opt_hsh, p, q - p);
5429 if (!o)
5430 return FAIL;
5432 *str = q;
5433 return o->value;
5436 /* Parse the operands of a table branch instruction. Similar to a memory
5437 operand. */
5438 static int
5439 parse_tb (char **str)
5441 char * p = *str;
5442 int reg;
5444 if (skip_past_char (&p, '[') == FAIL)
5446 inst.error = _("'[' expected");
5447 return FAIL;
5450 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5452 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5453 return FAIL;
5455 inst.operands[0].reg = reg;
5457 if (skip_past_comma (&p) == FAIL)
5459 inst.error = _("',' expected");
5460 return FAIL;
5463 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5465 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5466 return FAIL;
5468 inst.operands[0].imm = reg;
5470 if (skip_past_comma (&p) == SUCCESS)
5472 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
5473 return FAIL;
5474 if (inst.reloc.exp.X_add_number != 1)
5476 inst.error = _("invalid shift");
5477 return FAIL;
5479 inst.operands[0].shifted = 1;
5482 if (skip_past_char (&p, ']') == FAIL)
5484 inst.error = _("']' expected");
5485 return FAIL;
5487 *str = p;
5488 return SUCCESS;
5491 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
5492 information on the types the operands can take and how they are encoded.
5493 Up to four operands may be read; this function handles setting the
5494 ".present" field for each read operand itself.
5495 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
5496 else returns FAIL. */
5498 static int
5499 parse_neon_mov (char **str, int *which_operand)
5501 int i = *which_operand, val;
5502 enum arm_reg_type rtype;
5503 char *ptr = *str;
5504 struct neon_type_el optype;
5506 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
5508 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
5509 inst.operands[i].reg = val;
5510 inst.operands[i].isscalar = 1;
5511 inst.operands[i].vectype = optype;
5512 inst.operands[i++].present = 1;
5514 if (skip_past_comma (&ptr) == FAIL)
5515 goto wanted_comma;
5517 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5518 goto wanted_arm;
5520 inst.operands[i].reg = val;
5521 inst.operands[i].isreg = 1;
5522 inst.operands[i].present = 1;
5524 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
5525 != FAIL)
5527 /* Cases 0, 1, 2, 3, 5 (D only). */
5528 if (skip_past_comma (&ptr) == FAIL)
5529 goto wanted_comma;
5531 inst.operands[i].reg = val;
5532 inst.operands[i].isreg = 1;
5533 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
5534 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5535 inst.operands[i].isvec = 1;
5536 inst.operands[i].vectype = optype;
5537 inst.operands[i++].present = 1;
5539 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5541 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
5542 Case 13: VMOV <Sd>, <Rm> */
5543 inst.operands[i].reg = val;
5544 inst.operands[i].isreg = 1;
5545 inst.operands[i].present = 1;
5547 if (rtype == REG_TYPE_NQ)
5549 first_error (_("can't use Neon quad register here"));
5550 return FAIL;
5552 else if (rtype != REG_TYPE_VFS)
5554 i++;
5555 if (skip_past_comma (&ptr) == FAIL)
5556 goto wanted_comma;
5557 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5558 goto wanted_arm;
5559 inst.operands[i].reg = val;
5560 inst.operands[i].isreg = 1;
5561 inst.operands[i].present = 1;
5564 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
5565 &optype)) != FAIL)
5567 /* Case 0: VMOV<c><q> <Qd>, <Qm>
5568 Case 1: VMOV<c><q> <Dd>, <Dm>
5569 Case 8: VMOV.F32 <Sd>, <Sm>
5570 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */
5572 inst.operands[i].reg = val;
5573 inst.operands[i].isreg = 1;
5574 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
5575 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5576 inst.operands[i].isvec = 1;
5577 inst.operands[i].vectype = optype;
5578 inst.operands[i].present = 1;
5580 if (skip_past_comma (&ptr) == SUCCESS)
5582 /* Case 15. */
5583 i++;
5585 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5586 goto wanted_arm;
5588 inst.operands[i].reg = val;
5589 inst.operands[i].isreg = 1;
5590 inst.operands[i++].present = 1;
5592 if (skip_past_comma (&ptr) == FAIL)
5593 goto wanted_comma;
5595 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5596 goto wanted_arm;
5598 inst.operands[i].reg = val;
5599 inst.operands[i].isreg = 1;
5600 inst.operands[i++].present = 1;
5603 else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
5604 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
5605 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
5606 Case 10: VMOV.F32 <Sd>, #<imm>
5607 Case 11: VMOV.F64 <Dd>, #<imm> */
5608 inst.operands[i].immisfloat = 1;
5609 else if (parse_big_immediate (&ptr, i) == SUCCESS)
5610 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
5611 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
5613 else
5615 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
5616 return FAIL;
5619 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5621 /* Cases 6, 7. */
5622 inst.operands[i].reg = val;
5623 inst.operands[i].isreg = 1;
5624 inst.operands[i++].present = 1;
5626 if (skip_past_comma (&ptr) == FAIL)
5627 goto wanted_comma;
5629 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
5631 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
5632 inst.operands[i].reg = val;
5633 inst.operands[i].isscalar = 1;
5634 inst.operands[i].present = 1;
5635 inst.operands[i].vectype = optype;
5637 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5639 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
5640 inst.operands[i].reg = val;
5641 inst.operands[i].isreg = 1;
5642 inst.operands[i++].present = 1;
5644 if (skip_past_comma (&ptr) == FAIL)
5645 goto wanted_comma;
5647 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
5648 == FAIL)
5650 first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
5651 return FAIL;
5654 inst.operands[i].reg = val;
5655 inst.operands[i].isreg = 1;
5656 inst.operands[i].isvec = 1;
5657 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5658 inst.operands[i].vectype = optype;
5659 inst.operands[i].present = 1;
5661 if (rtype == REG_TYPE_VFS)
5663 /* Case 14. */
5664 i++;
5665 if (skip_past_comma (&ptr) == FAIL)
5666 goto wanted_comma;
5667 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
5668 &optype)) == FAIL)
5670 first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
5671 return FAIL;
5673 inst.operands[i].reg = val;
5674 inst.operands[i].isreg = 1;
5675 inst.operands[i].isvec = 1;
5676 inst.operands[i].issingle = 1;
5677 inst.operands[i].vectype = optype;
5678 inst.operands[i].present = 1;
5681 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
5682 != FAIL)
5684 /* Case 13. */
5685 inst.operands[i].reg = val;
5686 inst.operands[i].isreg = 1;
5687 inst.operands[i].isvec = 1;
5688 inst.operands[i].issingle = 1;
5689 inst.operands[i].vectype = optype;
5690 inst.operands[i++].present = 1;
5693 else
5695 first_error (_("parse error"));
5696 return FAIL;
5699 /* Successfully parsed the operands. Update args. */
5700 *which_operand = i;
5701 *str = ptr;
5702 return SUCCESS;
5704 wanted_comma:
5705 first_error (_("expected comma"));
5706 return FAIL;
5708 wanted_arm:
5709 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
5710 return FAIL;
5713 /* Matcher codes for parse_operands. */
5714 enum operand_parse_code
5716 OP_stop, /* end of line */
5718 OP_RR, /* ARM register */
5719 OP_RRnpc, /* ARM register, not r15 */
5720 OP_RRnpcb, /* ARM register, not r15, in square brackets */
5721 OP_RRw, /* ARM register, not r15, optional trailing ! */
5722 OP_RCP, /* Coprocessor number */
5723 OP_RCN, /* Coprocessor register */
5724 OP_RF, /* FPA register */
5725 OP_RVS, /* VFP single precision register */
5726 OP_RVD, /* VFP double precision register (0..15) */
5727 OP_RND, /* Neon double precision register (0..31) */
5728 OP_RNQ, /* Neon quad precision register */
5729 OP_RVSD, /* VFP single or double precision register */
5730 OP_RNDQ, /* Neon double or quad precision register */
5731 OP_RNSDQ, /* Neon single, double or quad precision register */
5732 OP_RNSC, /* Neon scalar D[X] */
5733 OP_RVC, /* VFP control register */
5734 OP_RMF, /* Maverick F register */
5735 OP_RMD, /* Maverick D register */
5736 OP_RMFX, /* Maverick FX register */
5737 OP_RMDX, /* Maverick DX register */
5738 OP_RMAX, /* Maverick AX register */
5739 OP_RMDS, /* Maverick DSPSC register */
5740 OP_RIWR, /* iWMMXt wR register */
5741 OP_RIWC, /* iWMMXt wC register */
5742 OP_RIWG, /* iWMMXt wCG register */
5743 OP_RXA, /* XScale accumulator register */
5745 OP_REGLST, /* ARM register list */
5746 OP_VRSLST, /* VFP single-precision register list */
5747 OP_VRDLST, /* VFP double-precision register list */
5748 OP_VRSDLST, /* VFP single or double-precision register list (& quad) */
5749 OP_NRDLST, /* Neon double-precision register list (d0-d31, qN aliases) */
5750 OP_NSTRLST, /* Neon element/structure list */
5752 OP_NILO, /* Neon immediate/logic operands 2 or 2+3. (VBIC, VORR...) */
5753 OP_RNDQ_I0, /* Neon D or Q reg, or immediate zero. */
5754 OP_RVSD_I0, /* VFP S or D reg, or immediate zero. */
5755 OP_RR_RNSC, /* ARM reg or Neon scalar. */
5756 OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar. */
5757 OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar. */
5758 OP_RND_RNSC, /* Neon D reg, or Neon scalar. */
5759 OP_VMOV, /* Neon VMOV operands. */
5760 OP_RNDQ_IMVNb,/* Neon D or Q reg, or immediate good for VMVN. */
5761 OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift. */
5762 OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2. */
5764 OP_I0, /* immediate zero */
5765 OP_I7, /* immediate value 0 .. 7 */
5766 OP_I15, /* 0 .. 15 */
5767 OP_I16, /* 1 .. 16 */
5768 OP_I16z, /* 0 .. 16 */
5769 OP_I31, /* 0 .. 31 */
5770 OP_I31w, /* 0 .. 31, optional trailing ! */
5771 OP_I32, /* 1 .. 32 */
5772 OP_I32z, /* 0 .. 32 */
5773 OP_I63, /* 0 .. 63 */
5774 OP_I63s, /* -64 .. 63 */
5775 OP_I64, /* 1 .. 64 */
5776 OP_I64z, /* 0 .. 64 */
5777 OP_I255, /* 0 .. 255 */
5779 OP_I4b, /* immediate, prefix optional, 1 .. 4 */
5780 OP_I7b, /* 0 .. 7 */
5781 OP_I15b, /* 0 .. 15 */
5782 OP_I31b, /* 0 .. 31 */
5784 OP_SH, /* shifter operand */
5785 OP_SHG, /* shifter operand with possible group relocation */
5786 OP_ADDR, /* Memory address expression (any mode) */
5787 OP_ADDRGLDR, /* Mem addr expr (any mode) with possible LDR group reloc */
5788 OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
5789 OP_ADDRGLDC, /* Mem addr expr (any mode) with possible LDC group reloc */
5790 OP_EXP, /* arbitrary expression */
5791 OP_EXPi, /* same, with optional immediate prefix */
5792 OP_EXPr, /* same, with optional relocation suffix */
5793 OP_HALF, /* 0 .. 65535 or low/high reloc. */
5795 OP_CPSF, /* CPS flags */
5796 OP_ENDI, /* Endianness specifier */
5797 OP_PSR, /* CPSR/SPSR mask for msr */
5798 OP_COND, /* conditional code */
5799 OP_TB, /* Table branch. */
5801 OP_RVC_PSR, /* CPSR/SPSR mask for msr, or VFP control register. */
5802 OP_APSR_RR, /* ARM register or "APSR_nzcv". */
5804 OP_RRnpc_I0, /* ARM register or literal 0 */
5805 OP_RR_EXr, /* ARM register or expression with opt. reloc suff. */
5806 OP_RR_EXi, /* ARM register or expression with imm prefix */
5807 OP_RF_IF, /* FPA register or immediate */
5808 OP_RIWR_RIWC, /* iWMMXt R or C reg */
5809 OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */
5811 /* Optional operands. */
5812 OP_oI7b, /* immediate, prefix optional, 0 .. 7 */
5813 OP_oI31b, /* 0 .. 31 */
5814 OP_oI32b, /* 1 .. 32 */
5815 OP_oIffffb, /* 0 .. 65535 */
5816 OP_oI255c, /* curly-brace enclosed, 0 .. 255 */
5818 OP_oRR, /* ARM register */
5819 OP_oRRnpc, /* ARM register, not the PC */
5820 OP_oRRw, /* ARM register, not r15, optional trailing ! */
5821 OP_oRND, /* Optional Neon double precision register */
5822 OP_oRNQ, /* Optional Neon quad precision register */
5823 OP_oRNDQ, /* Optional Neon double or quad precision register */
5824 OP_oRNSDQ, /* Optional single, double or quad precision vector register */
5825 OP_oSHll, /* LSL immediate */
5826 OP_oSHar, /* ASR immediate */
5827 OP_oSHllar, /* LSL or ASR immediate */
5828 OP_oROR, /* ROR 0/8/16/24 */
5829 OP_oBARRIER, /* Option argument for a barrier instruction. */
5831 OP_FIRST_OPTIONAL = OP_oI7b
5834 /* Generic instruction operand parser. This does no encoding and no
5835 semantic validation; it merely squirrels values away in the inst
5836 structure. Returns SUCCESS or FAIL depending on whether the
5837 specified grammar matched. */
5838 static int
5839 parse_operands (char *str, const unsigned char *pattern)
5841 unsigned const char *upat = pattern;
5842 char *backtrack_pos = 0;
5843 const char *backtrack_error = 0;
5844 int i, val, backtrack_index = 0;
5845 enum arm_reg_type rtype;
5846 parse_operand_result result;
5848 #define po_char_or_fail(chr) \
5849 do \
5851 if (skip_past_char (&str, chr) == FAIL) \
5852 goto bad_args; \
5854 while (0)
5856 #define po_reg_or_fail(regtype) \
5857 do \
5859 val = arm_typed_reg_parse (& str, regtype, & rtype, \
5860 & inst.operands[i].vectype); \
5861 if (val == FAIL) \
5863 first_error (_(reg_expected_msgs[regtype])); \
5864 goto failure; \
5866 inst.operands[i].reg = val; \
5867 inst.operands[i].isreg = 1; \
5868 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
5869 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
5870 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
5871 || rtype == REG_TYPE_VFD \
5872 || rtype == REG_TYPE_NQ); \
5874 while (0)
5876 #define po_reg_or_goto(regtype, label) \
5877 do \
5879 val = arm_typed_reg_parse (& str, regtype, & rtype, \
5880 & inst.operands[i].vectype); \
5881 if (val == FAIL) \
5882 goto label; \
5884 inst.operands[i].reg = val; \
5885 inst.operands[i].isreg = 1; \
5886 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
5887 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
5888 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
5889 || rtype == REG_TYPE_VFD \
5890 || rtype == REG_TYPE_NQ); \
5892 while (0)
5894 #define po_imm_or_fail(min, max, popt) \
5895 do \
5897 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
5898 goto failure; \
5899 inst.operands[i].imm = val; \
5901 while (0)
5903 #define po_scalar_or_goto(elsz, label) \
5904 do \
5906 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
5907 if (val == FAIL) \
5908 goto label; \
5909 inst.operands[i].reg = val; \
5910 inst.operands[i].isscalar = 1; \
5912 while (0)
5914 #define po_misc_or_fail(expr) \
5915 do \
5917 if (expr) \
5918 goto failure; \
5920 while (0)
5922 #define po_misc_or_fail_no_backtrack(expr) \
5923 do \
5925 result = expr; \
5926 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
5927 backtrack_pos = 0; \
5928 if (result != PARSE_OPERAND_SUCCESS) \
5929 goto failure; \
5931 while (0)
5933 skip_whitespace (str);
5935 for (i = 0; upat[i] != OP_stop; i++)
5937 if (upat[i] >= OP_FIRST_OPTIONAL)
5939 /* Remember where we are in case we need to backtrack. */
5940 gas_assert (!backtrack_pos);
5941 backtrack_pos = str;
5942 backtrack_error = inst.error;
5943 backtrack_index = i;
5946 if (i > 0 && (i > 1 || inst.operands[0].present))
5947 po_char_or_fail (',');
5949 switch (upat[i])
5951 /* Registers */
5952 case OP_oRRnpc:
5953 case OP_RRnpc:
5954 case OP_oRR:
5955 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
5956 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
5957 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
5958 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
5959 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
5960 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
5961 case OP_oRND:
5962 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
5963 case OP_RVC:
5964 po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
5965 break;
5966 /* Also accept generic coprocessor regs for unknown registers. */
5967 coproc_reg:
5968 po_reg_or_fail (REG_TYPE_CN);
5969 break;
5970 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
5971 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
5972 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
5973 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
5974 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
5975 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
5976 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
5977 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
5978 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
5979 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
5980 case OP_oRNQ:
5981 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
5982 case OP_oRNDQ:
5983 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
5984 case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD); break;
5985 case OP_oRNSDQ:
5986 case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ); break;
5988 /* Neon scalar. Using an element size of 8 means that some invalid
5989 scalars are accepted here, so deal with those in later code. */
5990 case OP_RNSC: po_scalar_or_goto (8, failure); break;
5992 /* WARNING: We can expand to two operands here. This has the potential
5993 to totally confuse the backtracking mechanism! It will be OK at
5994 least as long as we don't try to use optional args as well,
5995 though. */
5996 case OP_NILO:
5998 po_reg_or_goto (REG_TYPE_NDQ, try_imm);
5999 inst.operands[i].present = 1;
6000 i++;
6001 skip_past_comma (&str);
6002 po_reg_or_goto (REG_TYPE_NDQ, one_reg_only);
6003 break;
6004 one_reg_only:
6005 /* Optional register operand was omitted. Unfortunately, it's in
6006 operands[i-1] and we need it to be in inst.operands[i]. Fix that
6007 here (this is a bit grotty). */
6008 inst.operands[i] = inst.operands[i-1];
6009 inst.operands[i-1].present = 0;
6010 break;
6011 try_imm:
6012 /* There's a possibility of getting a 64-bit immediate here, so
6013 we need special handling. */
6014 if (parse_big_immediate (&str, i) == FAIL)
6016 inst.error = _("immediate value is out of range");
6017 goto failure;
6020 break;
6022 case OP_RNDQ_I0:
6024 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
6025 break;
6026 try_imm0:
6027 po_imm_or_fail (0, 0, TRUE);
6029 break;
6031 case OP_RVSD_I0:
6032 po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
6033 break;
6035 case OP_RR_RNSC:
6037 po_scalar_or_goto (8, try_rr);
6038 break;
6039 try_rr:
6040 po_reg_or_fail (REG_TYPE_RN);
6042 break;
6044 case OP_RNSDQ_RNSC:
6046 po_scalar_or_goto (8, try_nsdq);
6047 break;
6048 try_nsdq:
6049 po_reg_or_fail (REG_TYPE_NSDQ);
6051 break;
6053 case OP_RNDQ_RNSC:
6055 po_scalar_or_goto (8, try_ndq);
6056 break;
6057 try_ndq:
6058 po_reg_or_fail (REG_TYPE_NDQ);
6060 break;
6062 case OP_RND_RNSC:
6064 po_scalar_or_goto (8, try_vfd);
6065 break;
6066 try_vfd:
6067 po_reg_or_fail (REG_TYPE_VFD);
6069 break;
6071 case OP_VMOV:
6072 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
6073 not careful then bad things might happen. */
6074 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
6075 break;
6077 case OP_RNDQ_IMVNb:
6079 po_reg_or_goto (REG_TYPE_NDQ, try_mvnimm);
6080 break;
6081 try_mvnimm:
6082 /* There's a possibility of getting a 64-bit immediate here, so
6083 we need special handling. */
6084 if (parse_big_immediate (&str, i) == FAIL)
6086 inst.error = _("immediate value is out of range");
6087 goto failure;
6090 break;
6092 case OP_RNDQ_I63b:
6094 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
6095 break;
6096 try_shimm:
6097 po_imm_or_fail (0, 63, TRUE);
6099 break;
6101 case OP_RRnpcb:
6102 po_char_or_fail ('[');
6103 po_reg_or_fail (REG_TYPE_RN);
6104 po_char_or_fail (']');
6105 break;
6107 case OP_RRw:
6108 case OP_oRRw:
6109 po_reg_or_fail (REG_TYPE_RN);
6110 if (skip_past_char (&str, '!') == SUCCESS)
6111 inst.operands[i].writeback = 1;
6112 break;
6114 /* Immediates */
6115 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
6116 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
6117 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
6118 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
6119 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
6120 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
6121 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
6122 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
6123 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
6124 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
6125 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
6126 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
6128 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
6129 case OP_oI7b:
6130 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
6131 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
6132 case OP_oI31b:
6133 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
6134 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
6135 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
6137 /* Immediate variants */
6138 case OP_oI255c:
6139 po_char_or_fail ('{');
6140 po_imm_or_fail (0, 255, TRUE);
6141 po_char_or_fail ('}');
6142 break;
6144 case OP_I31w:
6145 /* The expression parser chokes on a trailing !, so we have
6146 to find it first and zap it. */
6148 char *s = str;
6149 while (*s && *s != ',')
6150 s++;
6151 if (s[-1] == '!')
6153 s[-1] = '\0';
6154 inst.operands[i].writeback = 1;
6156 po_imm_or_fail (0, 31, TRUE);
6157 if (str == s - 1)
6158 str = s;
6160 break;
6162 /* Expressions */
6163 case OP_EXPi: EXPi:
6164 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6165 GE_OPT_PREFIX));
6166 break;
6168 case OP_EXP:
6169 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6170 GE_NO_PREFIX));
6171 break;
6173 case OP_EXPr: EXPr:
6174 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6175 GE_NO_PREFIX));
6176 if (inst.reloc.exp.X_op == O_symbol)
6178 val = parse_reloc (&str);
6179 if (val == -1)
6181 inst.error = _("unrecognized relocation suffix");
6182 goto failure;
6184 else if (val != BFD_RELOC_UNUSED)
6186 inst.operands[i].imm = val;
6187 inst.operands[i].hasreloc = 1;
6190 break;
6192 /* Operand for MOVW or MOVT. */
6193 case OP_HALF:
6194 po_misc_or_fail (parse_half (&str));
6195 break;
6197 /* Register or expression. */
6198 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
6199 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
6201 /* Register or immediate. */
6202 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
6203 I0: po_imm_or_fail (0, 0, FALSE); break;
6205 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
6207 if (!is_immediate_prefix (*str))
6208 goto bad_args;
6209 str++;
6210 val = parse_fpa_immediate (&str);
6211 if (val == FAIL)
6212 goto failure;
6213 /* FPA immediates are encoded as registers 8-15.
6214 parse_fpa_immediate has already applied the offset. */
6215 inst.operands[i].reg = val;
6216 inst.operands[i].isreg = 1;
6217 break;
6219 case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
6220 I32z: po_imm_or_fail (0, 32, FALSE); break;
6222 /* Two kinds of register. */
6223 case OP_RIWR_RIWC:
6225 struct reg_entry *rege = arm_reg_parse_multi (&str);
6226 if (!rege
6227 || (rege->type != REG_TYPE_MMXWR
6228 && rege->type != REG_TYPE_MMXWC
6229 && rege->type != REG_TYPE_MMXWCG))
6231 inst.error = _("iWMMXt data or control register expected");
6232 goto failure;
6234 inst.operands[i].reg = rege->number;
6235 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
6237 break;
6239 case OP_RIWC_RIWG:
6241 struct reg_entry *rege = arm_reg_parse_multi (&str);
6242 if (!rege
6243 || (rege->type != REG_TYPE_MMXWC
6244 && rege->type != REG_TYPE_MMXWCG))
6246 inst.error = _("iWMMXt control register expected");
6247 goto failure;
6249 inst.operands[i].reg = rege->number;
6250 inst.operands[i].isreg = 1;
6252 break;
6254 /* Misc */
6255 case OP_CPSF: val = parse_cps_flags (&str); break;
6256 case OP_ENDI: val = parse_endian_specifier (&str); break;
6257 case OP_oROR: val = parse_ror (&str); break;
6258 case OP_PSR: val = parse_psr (&str); break;
6259 case OP_COND: val = parse_cond (&str); break;
6260 case OP_oBARRIER:val = parse_barrier (&str); break;
6262 case OP_RVC_PSR:
6263 po_reg_or_goto (REG_TYPE_VFC, try_psr);
6264 inst.operands[i].isvec = 1; /* Mark VFP control reg as vector. */
6265 break;
6266 try_psr:
6267 val = parse_psr (&str);
6268 break;
6270 case OP_APSR_RR:
6271 po_reg_or_goto (REG_TYPE_RN, try_apsr);
6272 break;
6273 try_apsr:
6274 /* Parse "APSR_nzcv" operand (for FMSTAT-equivalent MRS
6275 instruction). */
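/* Each of c/n/z/v below sets one bit in FOUND; a repeated flag or any
   other character forces FOUND to 16, so only a complete set of all
   four flags, in any order, satisfies the FOUND == 15 check that
   follows.  */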
6276 if (strncasecmp (str, "APSR_", 5) == 0)
6278 unsigned found = 0;
6279 str += 5;
6280 while (found < 15)
6281 switch (*str++)
6283 case 'c': found = (found & 1) ? 16 : found | 1; break;
6284 case 'n': found = (found & 2) ? 16 : found | 2; break;
6285 case 'z': found = (found & 4) ? 16 : found | 4; break;
6286 case 'v': found = (found & 8) ? 16 : found | 8; break;
6287 default: found = 16;
6289 if (found != 15)
6290 goto failure;
6291 inst.operands[i].isvec = 1;
6293 else
6294 goto failure;
6295 break;
6297 case OP_TB:
6298 po_misc_or_fail (parse_tb (&str));
6299 break;
6301 /* Register lists. */
6302 case OP_REGLST:
6303 val = parse_reg_list (&str);
6304 if (*str == '^')
6306 inst.operands[1].writeback = 1;
6307 str++;
6309 break;
6311 case OP_VRSLST:
6312 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
6313 break;
6315 case OP_VRDLST:
6316 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
6317 break;
6319 case OP_VRSDLST:
6320 /* Allow Q registers too. */
6321 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
6322 REGLIST_NEON_D);
6323 if (val == FAIL)
6325 inst.error = NULL;
6326 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
6327 REGLIST_VFP_S);
6328 inst.operands[i].issingle = 1;
6330 break;
6332 case OP_NRDLST:
6333 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
6334 REGLIST_NEON_D);
6335 break;
6337 case OP_NSTRLST:
6338 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
6339 &inst.operands[i].vectype);
6340 break;
6342 /* Addressing modes */
6343 case OP_ADDR:
6344 po_misc_or_fail (parse_address (&str, i));
6345 break;
6347 case OP_ADDRGLDR:
6348 po_misc_or_fail_no_backtrack (
6349 parse_address_group_reloc (&str, i, GROUP_LDR));
6350 break;
6352 case OP_ADDRGLDRS:
6353 po_misc_or_fail_no_backtrack (
6354 parse_address_group_reloc (&str, i, GROUP_LDRS));
6355 break;
6357 case OP_ADDRGLDC:
6358 po_misc_or_fail_no_backtrack (
6359 parse_address_group_reloc (&str, i, GROUP_LDC));
6360 break;
6362 case OP_SH:
6363 po_misc_or_fail (parse_shifter_operand (&str, i));
6364 break;
6366 case OP_SHG:
6367 po_misc_or_fail_no_backtrack (
6368 parse_shifter_operand_group_reloc (&str, i));
6369 break;
6371 case OP_oSHll:
6372 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
6373 break;
6375 case OP_oSHar:
6376 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
6377 break;
6379 case OP_oSHllar:
6380 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
6381 break;
6383 default:
6384 as_fatal (_("unhandled operand code %d"), upat[i]);
6387 /* Various value-based sanity checks and shared operations. We
6388 do not signal immediate failures for the register constraints;
6389 this allows a syntax error to take precedence. */
6390 switch (upat[i])
6392 case OP_oRRnpc:
6393 case OP_RRnpc:
6394 case OP_RRnpcb:
6395 case OP_RRw:
6396 case OP_oRRw:
6397 case OP_RRnpc_I0:
6398 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
6399 inst.error = BAD_PC;
6400 break;
6402 case OP_CPSF:
6403 case OP_ENDI:
6404 case OP_oROR:
6405 case OP_PSR:
6406 case OP_RVC_PSR:
6407 case OP_COND:
6408 case OP_oBARRIER:
6409 case OP_REGLST:
6410 case OP_VRSLST:
6411 case OP_VRDLST:
6412 case OP_VRSDLST:
6413 case OP_NRDLST:
6414 case OP_NSTRLST:
6415 if (val == FAIL)
6416 goto failure;
6417 inst.operands[i].imm = val;
6418 break;
6420 default:
6421 break;
6424 /* If we get here, this operand was successfully parsed. */
6425 inst.operands[i].present = 1;
6426 continue;
6428 bad_args:
6429 inst.error = BAD_ARGS;
6431 failure:
6432 if (!backtrack_pos)
6434 /* The parse routine should already have set inst.error, but set a
6435 default here just in case. */
6436 if (!inst.error)
6437 inst.error = _("syntax error");
6438 return FAIL;
6441 /* Do not backtrack over a trailing optional argument that
6442 absorbed some text. We will only fail again, with the
6443 'garbage following instruction' error message, which is
6444 probably less helpful than the current one. */
6445 if (backtrack_index == i && backtrack_pos != str
6446 && upat[i+1] == OP_stop)
6448 if (!inst.error)
6449 inst.error = _("syntax error");
6450 return FAIL;
6453 /* Try again, skipping the optional argument at backtrack_pos. */
6454 str = backtrack_pos;
6455 inst.error = backtrack_error;
6456 inst.operands[backtrack_index].present = 0;
6457 i = backtrack_index;
6458 backtrack_pos = 0;
6461 /* Check that we have parsed all the arguments. */
6462 if (*str != '\0' && !inst.error)
6463 inst.error = _("garbage following instruction");
6465 return inst.error ? FAIL : SUCCESS;
6468 #undef po_char_or_fail
6469 #undef po_reg_or_fail
6470 #undef po_reg_or_goto
6471 #undef po_imm_or_fail
6472 #undef po_scalar_or_goto
6474 /* Shorthand macro for instruction encoding functions issuing errors. */
6475 #define constraint(expr, err) \
6476 do \
6478 if (expr) \
6480 inst.error = err; \
6481 return; \
6484 while (0)
6486 /* Reject "bad registers" for Thumb-2 instructions. Many Thumb-2
6487 instructions are unpredictable if these registers are used. This
6488 is the BadReg predicate in ARM's Thumb-2 documentation. */
6489 #define reject_bad_reg(reg) \
6490 do \
6491 if (reg == REG_SP || reg == REG_PC) \
6493 inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC; \
6494 return; \
6496 while (0)
6498 /* If REG is R13 (the stack pointer), warn that its use is
6499 deprecated. */
6500 #define warn_deprecated_sp(reg) \
6501 do \
6502 if (warn_on_deprecated && reg == REG_SP) \
6503 as_warn (_("use of r13 is deprecated")); \
6504 while (0)
6506 /* Functions for operand encoding. ARM, then Thumb. */
6508 #define rotate_left(v, n) (v << n | v >> (32 - n))
6510 /* If VAL can be encoded in the immediate field of an ARM instruction,
6511 return the encoded form. Otherwise, return FAIL. */
6513 static unsigned int
6514 encode_arm_immediate (unsigned int val)
6516 unsigned int a, i;
6518 for (i = 0; i < 32; i += 2)
6519 if ((a = rotate_left (val, i)) <= 0xff)
6520 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
6522 return FAIL;
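/* Illustration (standalone sketch, not part of tc-arm.c): the same search,
   with the rotation written so the i == 0 case avoids a 32-bit shift.
   It shows which constants fit the "8-bit value rotated right by an even
   amount" form and what 12-bit encoding they receive.  */

#include <stdio.h>

static unsigned int
rol32 (unsigned int v, unsigned int n)
{
  return n ? ((v << n) | (v >> (32 - n))) : v;
}

static int
arm_immediate (unsigned int val)
{
  unsigned int i;
  for (i = 0; i < 32; i += 2)
    {
      unsigned int a = rol32 (val, i);
      if (a <= 0xff)
        return (int) (a | (i << 7));   /* [rotation, 8-bit constant] */
    }
  return -1;   /* not encodable as an ARM immediate */
}

int
main (void)
{
  printf ("0x000000ff -> 0x%03x\n", arm_immediate (0x000000ff)); /* 0x0ff */
  printf ("0xff000000 -> 0x%03x\n", arm_immediate (0xff000000)); /* 0x4ff */
  printf ("0x00ab0000 -> 0x%03x\n", arm_immediate (0x00ab0000)); /* 0x8ab */
  printf ("0x00000101 -> %d\n",     arm_immediate (0x00000101)); /* -1    */
  return 0;
}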
6525 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
6526 return the encoded form. Otherwise, return FAIL. */
6527 static unsigned int
6528 encode_thumb32_immediate (unsigned int val)
6530 unsigned int a, i;
6532 if (val <= 0xff)
6533 return val;
6535 for (i = 1; i <= 24; i++)
6537 a = val >> i;
6538 if ((val & ~(0xff << i)) == 0)
6539 return ((val >> i) & 0x7f) | ((32 - i) << 7);
6542 a = val & 0xff;
6543 if (val == ((a << 16) | a))
6544 return 0x100 | a;
6545 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
6546 return 0x300 | a;
6548 a = val & 0xff00;
6549 if (val == ((a << 16) | a))
6550 return 0x200 | (a >> 8);
6552 return FAIL;
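/* Illustration (standalone sketch): besides the shifted 8-bit form, the
   Thumb-2 modified immediate has the three byte-replication patterns
   tested at the end of the function above.  */

#include <stdio.h>

static int
thumb32_replicated (unsigned int val)
{
  unsigned int a = val & 0xff;

  if (val == ((a << 16) | a))
    return (int) (0x100 | a);                 /* 0x00XY00XY */
  if (val == ((a << 24) | (a << 16) | (a << 8) | a))
    return (int) (0x300 | a);                 /* 0xXYXYXYXY */

  a = val & 0xff00;
  if (val == ((a << 16) | a))
    return (int) (0x200 | (a >> 8));          /* 0xXY00XY00 */

  return -1;
}

int
main (void)
{
  printf ("0x00ab00ab -> 0x%03x\n", thumb32_replicated (0x00ab00ab)); /* 0x1ab */
  printf ("0xab00ab00 -> 0x%03x\n", thumb32_replicated (0xab00ab00)); /* 0x2ab */
  printf ("0xabababab -> 0x%03x\n", thumb32_replicated (0xabababab)); /* 0x3ab */
  printf ("0x00ab00cd -> %d\n",     thumb32_replicated (0x00ab00cd)); /* -1    */
  return 0;
}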
6554 /* Encode a VFP SP or DP register number into inst.instruction. */
6556 static void
6557 encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
6559 if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
6560 && reg > 15)
6562 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
6564 if (thumb_mode)
6565 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
6566 fpu_vfp_ext_d32);
6567 else
6568 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
6569 fpu_vfp_ext_d32);
6571 else
6573 first_error (_("D register out of range for selected VFP version"));
6574 return;
6578 switch (pos)
6580 case VFP_REG_Sd:
6581 inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
6582 break;
6584 case VFP_REG_Sn:
6585 inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
6586 break;
6588 case VFP_REG_Sm:
6589 inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
6590 break;
6592 case VFP_REG_Dd:
6593 inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
6594 break;
6596 case VFP_REG_Dn:
6597 inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
6598 break;
6600 case VFP_REG_Dm:
6601 inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
6602 break;
6604 default:
6605 abort ();
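/* Illustration (standalone sketch, helper names made up): where the bits of
   a concrete register number land for the destination positions above --
   an S register contributes its top four bits at [15:12] and its low bit
   at 22, a D register its low four bits at [15:12] and bit 4 at 22.  */

#include <stdio.h>

static unsigned int
vfp_sd_field (unsigned int sreg)
{
  return ((sreg >> 1) << 12) | ((sreg & 1) << 22);
}

static unsigned int
vfp_dd_field (unsigned int dreg)
{
  return ((dreg & 15) << 12) | ((dreg >> 4) << 22);
}

int
main (void)
{
  printf ("s17 -> 0x%08x\n", vfp_sd_field (17));  /* 0x00408000 */
  printf ("d20 -> 0x%08x\n", vfp_dd_field (20));  /* 0x00404000 */
  return 0;
}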
6609 /* Encode a <shift> in an ARM-format instruction. The immediate,
6610 if any, is handled by md_apply_fix. */
6611 static void
6612 encode_arm_shift (int i)
6614 if (inst.operands[i].shift_kind == SHIFT_RRX)
6615 inst.instruction |= SHIFT_ROR << 5;
6616 else
6618 inst.instruction |= inst.operands[i].shift_kind << 5;
6619 if (inst.operands[i].immisreg)
6621 inst.instruction |= SHIFT_BY_REG;
6622 inst.instruction |= inst.operands[i].imm << 8;
6624 else
6625 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
6629 static void
6630 encode_arm_shifter_operand (int i)
6632 if (inst.operands[i].isreg)
6634 inst.instruction |= inst.operands[i].reg;
6635 encode_arm_shift (i);
6637 else
6638 inst.instruction |= INST_IMMEDIATE;
6641 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
6642 static void
6643 encode_arm_addr_mode_common (int i, bfd_boolean is_t)
6645 gas_assert (inst.operands[i].isreg);
6646 inst.instruction |= inst.operands[i].reg << 16;
6648 if (inst.operands[i].preind)
6650 if (is_t)
6652 inst.error = _("instruction does not accept preindexed addressing");
6653 return;
6655 inst.instruction |= PRE_INDEX;
6656 if (inst.operands[i].writeback)
6657 inst.instruction |= WRITE_BACK;
6660 else if (inst.operands[i].postind)
6662 gas_assert (inst.operands[i].writeback);
6663 if (is_t)
6664 inst.instruction |= WRITE_BACK;
6666 else /* unindexed - only for coprocessor */
6668 inst.error = _("instruction does not accept unindexed addressing");
6669 return;
6672 if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
6673 && (((inst.instruction & 0x000f0000) >> 16)
6674 == ((inst.instruction & 0x0000f000) >> 12)))
6675 as_warn ((inst.instruction & LOAD_BIT)
6676 ? _("destination register same as write-back base")
6677 : _("source register same as write-back base"));
6680 /* inst.operands[i] was set up by parse_address. Encode it into an
6681 ARM-format mode 2 load or store instruction. If is_t is true,
6682 reject forms that cannot be used with a T instruction (i.e. not
6683 post-indexed). */
6684 static void
6685 encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
6687 encode_arm_addr_mode_common (i, is_t);
6689 if (inst.operands[i].immisreg)
6691 inst.instruction |= INST_IMMEDIATE; /* yes, this is backwards */
6692 inst.instruction |= inst.operands[i].imm;
6693 if (!inst.operands[i].negative)
6694 inst.instruction |= INDEX_UP;
6695 if (inst.operands[i].shifted)
6697 if (inst.operands[i].shift_kind == SHIFT_RRX)
6698 inst.instruction |= SHIFT_ROR << 5;
6699 else
6701 inst.instruction |= inst.operands[i].shift_kind << 5;
6702 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
6706 else /* immediate offset in inst.reloc */
6708 if (inst.reloc.type == BFD_RELOC_UNUSED)
6709 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
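/* For illustration: an operand written as [r1, r2, lsl #2]! takes the
   immisreg path above -- r1 lands in bits 19:16 via
   encode_arm_addr_mode_common, r2 in bits 3:0, INST_IMMEDIATE flags a
   register (rather than immediate) offset, INDEX_UP is set because the
   offset is not negated, and the LSL amount is filled in later by
   md_apply_fix through BFD_RELOC_ARM_SHIFT_IMM.  A plain [r1, #4]
   takes the immediate path and resolves through
   BFD_RELOC_ARM_OFFSET_IMM instead.  */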
6713 /* inst.operands[i] was set up by parse_address. Encode it into an
6714 ARM-format mode 3 load or store instruction. Reject forms that
6715 cannot be used with such instructions. If is_t is true, reject
6716 forms that cannot be used with a T instruction (i.e. not
6717 post-indexed). */
6718 static void
6719 encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
6721 if (inst.operands[i].immisreg && inst.operands[i].shifted)
6723 inst.error = _("instruction does not accept scaled register index");
6724 return;
6727 encode_arm_addr_mode_common (i, is_t);
6729 if (inst.operands[i].immisreg)
6731 inst.instruction |= inst.operands[i].imm;
6732 if (!inst.operands[i].negative)
6733 inst.instruction |= INDEX_UP;
6735 else /* immediate offset in inst.reloc */
6737 inst.instruction |= HWOFFSET_IMM;
6738 if (inst.reloc.type == BFD_RELOC_UNUSED)
6739 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
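/* For illustration: [r1, r2] in a mode 3 operand puts r2 in bits 3:0
   with INDEX_UP set, whereas [r1, #6] takes the immediate path, where
   HWOFFSET_IMM marks the immediate form and the 8-bit offset is later
   split into its high and low nibbles by the BFD_RELOC_ARM_OFFSET_IMM8
   fixup.  A scaled index such as [r1, r2, lsl #1] is rejected up
   front, since mode 3 has no shifter.  */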
6743 /* inst.operands[i] was set up by parse_address. Encode it into an
6744 ARM-format instruction. Reject all forms which cannot be encoded
6745 into a coprocessor load/store instruction. If wb_ok is false,
6746 reject use of writeback; if unind_ok is false, reject use of
6747 unindexed addressing. If reloc_override is not 0, use it instead
6748 of BFD_RELOC_ARM_CP_OFF_IMM, unless the initial relocation is a group one
6749 (in which case it is preserved). */
6751 static int
6752 encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
6754 inst.instruction |= inst.operands[i].reg << 16;
6756 gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));
6758 if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
6760 gas_assert (!inst.operands[i].writeback);
6761 if (!unind_ok)
6763 inst.error = _("instruction does not support unindexed addressing");
6764 return FAIL;
6766 inst.instruction |= inst.operands[i].imm;
6767 inst.instruction |= INDEX_UP;
6768 return SUCCESS;
6771 if (inst.operands[i].preind)
6772 inst.instruction |= PRE_INDEX;
6774 if (inst.operands[i].writeback)
6776 if (inst.operands[i].reg == REG_PC)
6778 inst.error = _("pc may not be used with write-back");
6779 return FAIL;
6781 if (!wb_ok)
6783 inst.error = _("instruction does not support writeback");
6784 return FAIL;
6786 inst.instruction |= WRITE_BACK;
6789 if (reloc_override)
6790 inst.reloc.type = reloc_override;
6791 else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
6792 || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
6793 && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
6795 if (thumb_mode)
6796 inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
6797 else
6798 inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
6801 return SUCCESS;
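/* Summary of the cases above, with illustrative operands: [r4], {8}
   (unindexed) stores the 8-bit option in bits 7:0 and is only accepted
   when unind_ok; [r4, #16] and [r4, #16]! go through PRE_INDEX and,
   for the latter, WRITE_BACK (rejected when the base is pc); the
   offset itself is deferred to a BFD_RELOC_ARM_CP_OFF_IMM fixup (or
   its T32 counterpart) unless the operand already carried a group
   relocation or reloc_override supplied something else.  */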
6804 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
6805 Determine whether it can be performed with a move instruction; if
6806 it can, convert inst.instruction to that move instruction and
6807 return TRUE; if it can't, convert inst.instruction to a literal-pool
6808 load and return FALSE. If this is not a valid thing to do in the
6809 current context, set inst.error and return TRUE.
6811 inst.operands[i] describes the destination register. */
6813 static bfd_boolean
6814 move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3)
6816 unsigned long tbit;
6818 if (thumb_p)
6819 tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
6820 else
6821 tbit = LOAD_BIT;
6823 if ((inst.instruction & tbit) == 0)
6825 inst.error = _("invalid pseudo operation");
6826 return TRUE;
6828 if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol)
6830 inst.error = _("constant expression expected");
6831 return TRUE;
6833 if (inst.reloc.exp.X_op == O_constant)
6835 if (thumb_p)
6837 if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
6839 /* This can be done with a mov(1) instruction. */
6840 inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
6841 inst.instruction |= inst.reloc.exp.X_add_number;
6842 return TRUE;
6845 else
6847 int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
6848 if (value != FAIL)
6850 /* This can be done with a mov instruction. */
6851 inst.instruction &= LITERAL_MASK;
6852 inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
6853 inst.instruction |= value & 0xfff;
6854 return TRUE;
6857 value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
6858 if (value != FAIL)
6860 /* This can be done with a mvn instruction. */
6861 inst.instruction &= LITERAL_MASK;
6862 inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
6863 inst.instruction |= value & 0xfff;
6864 return TRUE;
6869 if (add_to_lit_pool () == FAIL)
6871 inst.error = _("literal pool insertion failed");
6872 return TRUE;
6874 inst.operands[1].reg = REG_PC;
6875 inst.operands[1].isreg = 1;
6876 inst.operands[1].preind = 1;
6877 inst.reloc.pc_rel = 1;
6878 inst.reloc.type = (thumb_p
6879 ? BFD_RELOC_ARM_THUMB_OFFSET
6880 : (mode_3
6881 ? BFD_RELOC_ARM_HWLITERAL
6882 : BFD_RELOC_ARM_LITERAL));
6883 return FALSE;
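/* Examples of the conversion above (ARM state, constant operands):
   "ldr r0, =0xff000000" becomes "mov r0, #0xff000000" because the
   value is a valid rotated 8-bit immediate; "ldr r0, =0xffff00ff"
   becomes "mvn r0, #0xff00" because the complement is encodable;
   something like "ldr r0, =0x12345678" satisfies neither test and
   falls through to the literal pool, turning into a pc-relative load
   fixed up via BFD_RELOC_ARM_LITERAL.  */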
6886 /* Functions for instruction encoding, sorted by sub-architecture.
6887 First some generics; their names are taken from the conventional
6888 bit positions for register arguments in ARM format instructions. */
6890 static void
6891 do_noargs (void)
6895 static void
6896 do_rd (void)
6898 inst.instruction |= inst.operands[0].reg << 12;
6901 static void
6902 do_rd_rm (void)
6904 inst.instruction |= inst.operands[0].reg << 12;
6905 inst.instruction |= inst.operands[1].reg;
6908 static void
6909 do_rd_rn (void)
6911 inst.instruction |= inst.operands[0].reg << 12;
6912 inst.instruction |= inst.operands[1].reg << 16;
6915 static void
6916 do_rn_rd (void)
6918 inst.instruction |= inst.operands[0].reg << 16;
6919 inst.instruction |= inst.operands[1].reg << 12;
6922 static void
6923 do_rd_rm_rn (void)
6925 unsigned Rn = inst.operands[2].reg;
6926 /* Enforce restrictions on SWP instruction. */
6927 if ((inst.instruction & 0x0fbfffff) == 0x01000090)
6928 constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
6929 _("Rn must not overlap other operands"));
6930 inst.instruction |= inst.operands[0].reg << 12;
6931 inst.instruction |= inst.operands[1].reg;
6932 inst.instruction |= Rn << 16;
6935 static void
6936 do_rd_rn_rm (void)
6938 inst.instruction |= inst.operands[0].reg << 12;
6939 inst.instruction |= inst.operands[1].reg << 16;
6940 inst.instruction |= inst.operands[2].reg;
6943 static void
6944 do_rm_rd_rn (void)
6946 inst.instruction |= inst.operands[0].reg;
6947 inst.instruction |= inst.operands[1].reg << 12;
6948 inst.instruction |= inst.operands[2].reg << 16;
6951 static void
6952 do_imm0 (void)
6954 inst.instruction |= inst.operands[0].imm;
6957 static void
6958 do_rd_cpaddr (void)
6960 inst.instruction |= inst.operands[0].reg << 12;
6961 encode_arm_cp_address (1, TRUE, TRUE, 0);
6964 /* ARM instructions, in alphabetical order by function name (except
6965 that wrapper functions appear immediately after the function they
6966 wrap). */
6968 /* This is a pseudo-op of the form "adr rd, label" to be converted
6969 into a relative address of the form "add rd, pc, #label-.-8". */
6971 static void
6972 do_adr (void)
6974 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
6976 /* Frag hacking will turn this into a sub instruction if the offset turns
6977 out to be negative. */
6978 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
6979 inst.reloc.pc_rel = 1;
6980 inst.reloc.exp.X_add_number -= 8;
6983 /* This is a pseudo-op of the form "adrl rd, label" to be converted
6984 into a relative address of the form:
6985 add rd, pc, #low(label-.-8)
6986 add rd, rd, #high(label-.-8). */
6988 static void
6989 do_adrl (void)
6991 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
6993 /* Frag hacking will turn this into a sub instruction if the offset turns
6994 out to be negative. */
6995 inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
6996 inst.reloc.pc_rel = 1;
6997 inst.size = INSN_SIZE * 2;
6998 inst.reloc.exp.X_add_number -= 8;
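/* In both pseudo-ops the -8 accounts for the pc read-ahead of the
   first generated instruction.  For adrl, inst.size is doubled because
   the BFD_RELOC_ARM_ADRL_IMMEDIATE fixup expands into the
   two-instruction sequence shown in the comment, splitting the offset
   across two rotated 8-bit immediates (or sub instructions when the
   offset turns out to be negative, as noted above).  */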
7001 static void
7002 do_arit (void)
7004 if (!inst.operands[1].present)
7005 inst.operands[1].reg = inst.operands[0].reg;
7006 inst.instruction |= inst.operands[0].reg << 12;
7007 inst.instruction |= inst.operands[1].reg << 16;
7008 encode_arm_shifter_operand (2);
7011 static void
7012 do_barrier (void)
7014 if (inst.operands[0].present)
7016 constraint ((inst.instruction & 0xf0) != 0x40
7017 && inst.operands[0].imm != 0xf,
7018 _("bad barrier type"));
7019 inst.instruction |= inst.operands[0].imm;
7021 else
7022 inst.instruction |= 0xf;
7025 static void
7026 do_bfc (void)
7028 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
7029 constraint (msb > 32, _("bit-field extends past end of register"));
7030 /* The instruction encoding stores the LSB and MSB,
7031 not the LSB and width. */
7032 inst.instruction |= inst.operands[0].reg << 12;
7033 inst.instruction |= inst.operands[1].imm << 7;
7034 inst.instruction |= (msb - 1) << 16;
7037 static void
7038 do_bfi (void)
7040 unsigned int msb;
7042 /* #0 in second position is alternative syntax for bfc, which is
7043 the same instruction but with REG_PC in the Rm field. */
7044 if (!inst.operands[1].isreg)
7045 inst.operands[1].reg = REG_PC;
7047 msb = inst.operands[2].imm + inst.operands[3].imm;
7048 constraint (msb > 32, _("bit-field extends past end of register"));
7049 /* The instruction encoding stores the LSB and MSB,
7050 not the LSB and width. */
7051 inst.instruction |= inst.operands[0].reg << 12;
7052 inst.instruction |= inst.operands[1].reg;
7053 inst.instruction |= inst.operands[2].imm << 7;
7054 inst.instruction |= (msb - 1) << 16;
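/* Worked example: "bfi r0, r1, #8, #4" arrives here with lsb = 8 and
   width = 4, so msb = 12; the encoding stores the lsb (8) at bits 11:7
   and msb - 1 = 11 at bits 20:16, i.e. the architectural LSB/MSB form
   rather than the LSB/width form used in the assembler syntax.  */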
7057 static void
7058 do_bfx (void)
7060 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
7061 _("bit-field extends past end of register"));
7062 inst.instruction |= inst.operands[0].reg << 12;
7063 inst.instruction |= inst.operands[1].reg;
7064 inst.instruction |= inst.operands[2].imm << 7;
7065 inst.instruction |= (inst.operands[3].imm - 1) << 16;
7068 /* ARM V5 breakpoint instruction (argument parse)
7069 BKPT <16 bit unsigned immediate>
7070 Instruction is not conditional.
7071 The bit pattern given in insns[] has the COND_ALWAYS condition,
7072 and it is an error if the caller tried to override that. */
7074 static void
7075 do_bkpt (void)
7077 /* Top 12 of 16 bits to bits 19:8. */
7078 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
7080 /* Bottom 4 of 16 bits to bits 3:0. */
7081 inst.instruction |= inst.operands[0].imm & 0xf;
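/* Worked example: "bkpt 0xbeef" splits the immediate so that 0xbee
   (the top twelve bits) lands in bits 19:8 and 0xf (the bottom four
   bits) in bits 3:0; reading the two fields back together recovers the
   original 16-bit value.  */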
7084 static void
7085 encode_branch (int default_reloc)
7087 if (inst.operands[0].hasreloc)
7089 constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32,
7090 _("the only suffix valid here is '(plt)'"));
7091 inst.reloc.type = BFD_RELOC_ARM_PLT32;
7093 else
7095 inst.reloc.type = default_reloc;
7097 inst.reloc.pc_rel = 1;
7100 static void
7101 do_branch (void)
7103 #ifdef OBJ_ELF
7104 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
7105 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
7106 else
7107 #endif
7108 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
7111 static void
7112 do_bl (void)
7114 #ifdef OBJ_ELF
7115 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
7117 if (inst.cond == COND_ALWAYS)
7118 encode_branch (BFD_RELOC_ARM_PCREL_CALL);
7119 else
7120 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
7122 else
7123 #endif
7124 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
7127 /* ARM V5 branch-link-exchange instruction (argument parse)
7128 BLX <target_addr> ie BLX(1)
7129 BLX{<condition>} <Rm> ie BLX(2)
7130 Unfortunately, there are two different opcodes for this mnemonic.
7131 So, the insns[].value is not used, and the code here zaps values
7132 into inst.instruction.
7133 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
7135 static void
7136 do_blx (void)
7138 if (inst.operands[0].isreg)
7140 /* Arg is a register; the opcode provided by insns[] is correct.
7141 It is not illegal to do "blx pc", just useless. */
7142 if (inst.operands[0].reg == REG_PC)
7143 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
7145 inst.instruction |= inst.operands[0].reg;
7147 else
7149 /* Arg is an address; this instruction cannot be executed
7150 conditionally, and the opcode must be adjusted.
7151 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
7152 where we generate a BFD_RELOC_ARM_PCREL_CALL instead. */
7153 constraint (inst.cond != COND_ALWAYS, BAD_COND);
7154 inst.instruction = 0xfa000000;
7155 encode_branch (BFD_RELOC_ARM_PCREL_BLX);
7159 static void
7160 do_bx (void)
7162 bfd_boolean want_reloc;
7164 if (inst.operands[0].reg == REG_PC)
7165 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
7167 inst.instruction |= inst.operands[0].reg;
7168 /* Output R_ARM_V4BX relocations if it is an EABI object that looks like
7169 it is for ARMv4t or earlier. */
7170 want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
7171 if (object_arch && !ARM_CPU_HAS_FEATURE (*object_arch, arm_ext_v5))
7172 want_reloc = TRUE;
7174 #ifdef OBJ_ELF
7175 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
7176 #endif
7177 want_reloc = FALSE;
7179 if (want_reloc)
7180 inst.reloc.type = BFD_RELOC_ARM_V4BX;
7184 /* ARM v5TEJ. Jump to Jazelle code. */
7186 static void
7187 do_bxj (void)
7189 if (inst.operands[0].reg == REG_PC)
7190 as_tsktsk (_("use of r15 in bxj is not really useful"));
7192 inst.instruction |= inst.operands[0].reg;
7195 /* Co-processor data operation:
7196 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
7197 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
7198 static void
7199 do_cdp (void)
7201 inst.instruction |= inst.operands[0].reg << 8;
7202 inst.instruction |= inst.operands[1].imm << 20;
7203 inst.instruction |= inst.operands[2].reg << 12;
7204 inst.instruction |= inst.operands[3].reg << 16;
7205 inst.instruction |= inst.operands[4].reg;
7206 inst.instruction |= inst.operands[5].imm << 5;
7209 static void
7210 do_cmp (void)
7212 inst.instruction |= inst.operands[0].reg << 16;
7213 encode_arm_shifter_operand (1);
7216 /* Transfer between coprocessor and ARM registers.
7217 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
7218 MRC2
7219 MCR{cond}
7220 MCR2
7222 No special properties. */
7224 static void
7225 do_co_reg (void)
7227 unsigned Rd;
7229 Rd = inst.operands[2].reg;
7230 if (thumb_mode)
7232 if (inst.instruction == 0xee000010
7233 || inst.instruction == 0xfe000010)
7234 /* MCR, MCR2 */
7235 reject_bad_reg (Rd);
7236 else
7237 /* MRC, MRC2 */
7238 constraint (Rd == REG_SP, BAD_SP);
7240 else
7242 /* MCR */
7243 if (inst.instruction == 0xe000010)
7244 constraint (Rd == REG_PC, BAD_PC);
7248 inst.instruction |= inst.operands[0].reg << 8;
7249 inst.instruction |= inst.operands[1].imm << 21;
7250 inst.instruction |= Rd << 12;
7251 inst.instruction |= inst.operands[3].reg << 16;
7252 inst.instruction |= inst.operands[4].reg;
7253 inst.instruction |= inst.operands[5].imm << 5;
7256 /* Transfer between coprocessor register and pair of ARM registers.
7257 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
7258 MCRR2
7259 MRRC{cond}
7260 MRRC2
7262 Two XScale instructions are special cases of these:
7264 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
7265 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
7267 Result unpredictable if Rd or Rn is R15. */
7269 static void
7270 do_co_reg2c (void)
7272 unsigned Rd, Rn;
7274 Rd = inst.operands[2].reg;
7275 Rn = inst.operands[3].reg;
7277 if (thumb_mode)
7279 reject_bad_reg (Rd);
7280 reject_bad_reg (Rn);
7282 else
7284 constraint (Rd == REG_PC, BAD_PC);
7285 constraint (Rn == REG_PC, BAD_PC);
7288 inst.instruction |= inst.operands[0].reg << 8;
7289 inst.instruction |= inst.operands[1].imm << 4;
7290 inst.instruction |= Rd << 12;
7291 inst.instruction |= Rn << 16;
7292 inst.instruction |= inst.operands[4].reg;
7295 static void
7296 do_cpsi (void)
7298 inst.instruction |= inst.operands[0].imm << 6;
7299 if (inst.operands[1].present)
7301 inst.instruction |= CPSI_MMOD;
7302 inst.instruction |= inst.operands[1].imm;
7306 static void
7307 do_dbg (void)
7309 inst.instruction |= inst.operands[0].imm;
7312 static void
7313 do_it (void)
7315 /* There is no IT instruction in ARM mode. We
7316 process it to do the validation as if in
7317 thumb mode, just in case the code gets
7318 assembled for thumb using the unified syntax. */
7320 inst.size = 0;
7321 if (unified_syntax)
7323 set_it_insn_type (IT_INSN);
7324 now_it.mask = (inst.instruction & 0xf) | 0x10;
7325 now_it.cc = inst.operands[0].imm;
7329 static void
7330 do_ldmstm (void)
7332 int base_reg = inst.operands[0].reg;
7333 int range = inst.operands[1].imm;
7335 inst.instruction |= base_reg << 16;
7336 inst.instruction |= range;
7338 if (inst.operands[1].writeback)
7339 inst.instruction |= LDM_TYPE_2_OR_3;
7341 if (inst.operands[0].writeback)
7343 inst.instruction |= WRITE_BACK;
7344 /* Check for unpredictable uses of writeback. */
7345 if (inst.instruction & LOAD_BIT)
7347 /* Not allowed in LDM type 2. */
7348 if ((inst.instruction & LDM_TYPE_2_OR_3)
7349 && ((range & (1 << REG_PC)) == 0))
7350 as_warn (_("writeback of base register is UNPREDICTABLE"));
7351 /* Only allowed if base reg not in list for other types. */
7352 else if (range & (1 << base_reg))
7353 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
7355 else /* STM. */
7357 /* Not allowed for type 2. */
7358 if (inst.instruction & LDM_TYPE_2_OR_3)
7359 as_warn (_("writeback of base register is UNPREDICTABLE"));
7360 /* Only allowed if base reg not in list, or first in list. */
7361 else if ((range & (1 << base_reg))
7362 && (range & ((1 << base_reg) - 1)))
7363 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
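/* Examples of the diagnostics above: "ldmia r0!, {r0-r3}" is an LDM
   with writeback whose base also appears in the register list, so it
   draws the "base register when in register list" warning;
   "stmia r1!, {r0, r1}" warns because a register lower than the base
   is also stored, while "stmia r0!, {r0-r3}" passes because the base
   is the lowest register in the list.  */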
7368 /* ARMv5TE load-consecutive (argument parse)
7369 Mode is like LDRH.
7371 LDRccD R, mode
7372 STRccD R, mode. */
7374 static void
7375 do_ldrd (void)
7377 constraint (inst.operands[0].reg % 2 != 0,
7378 _("first destination register must be even"));
7379 constraint (inst.operands[1].present
7380 && inst.operands[1].reg != inst.operands[0].reg + 1,
7381 _("can only load two consecutive registers"));
7382 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
7383 constraint (!inst.operands[2].isreg, _("'[' expected"));
7385 if (!inst.operands[1].present)
7386 inst.operands[1].reg = inst.operands[0].reg + 1;
7388 if (inst.instruction & LOAD_BIT)
7390 /* encode_arm_addr_mode_3 will diagnose overlap between the base
7391 register and the first register written; we have to diagnose
7392 overlap between the base and the second register written here. */
7394 if (inst.operands[2].reg == inst.operands[1].reg
7395 && (inst.operands[2].writeback || inst.operands[2].postind))
7396 as_warn (_("base register written back, and overlaps "
7397 "second destination register"));
7399 /* For an index-register load, the index register must not overlap the
7400 destination (even if not write-back). */
7401 else if (inst.operands[2].immisreg
7402 && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
7403 || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
7404 as_warn (_("index register overlaps destination register"));
7407 inst.instruction |= inst.operands[0].reg << 12;
7408 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
7411 static void
7412 do_ldrex (void)
7414 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
7415 || inst.operands[1].postind || inst.operands[1].writeback
7416 || inst.operands[1].immisreg || inst.operands[1].shifted
7417 || inst.operands[1].negative
7418 /* This can arise if the programmer has written
7419 strex rN, rM, foo
7420 or if they have mistakenly used a register name as the last
7421 operand, e.g.:
7422 strex rN, rM, rX
7423 It is very difficult to distinguish between these two cases
7424 because "rX" might actually be a label, i.e. the register
7425 name has been occluded by a symbol of the same name. So we
7426 just generate a general 'bad addressing mode' type error
7427 message and leave it up to the programmer to discover the
7428 true cause and fix their mistake. */
7429 || (inst.operands[1].reg == REG_PC),
7430 BAD_ADDR_MODE);
7432 constraint (inst.reloc.exp.X_op != O_constant
7433 || inst.reloc.exp.X_add_number != 0,
7434 _("offset must be zero in ARM encoding"));
7436 inst.instruction |= inst.operands[0].reg << 12;
7437 inst.instruction |= inst.operands[1].reg << 16;
7438 inst.reloc.type = BFD_RELOC_UNUSED;
7441 static void
7442 do_ldrexd (void)
7444 constraint (inst.operands[0].reg % 2 != 0,
7445 _("even register required"));
7446 constraint (inst.operands[1].present
7447 && inst.operands[1].reg != inst.operands[0].reg + 1,
7448 _("can only load two consecutive registers"));
7449 /* If op 1 were present and equal to PC, this function wouldn't
7450 have been called in the first place. */
7451 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
7453 inst.instruction |= inst.operands[0].reg << 12;
7454 inst.instruction |= inst.operands[2].reg << 16;
7457 static void
7458 do_ldst (void)
7460 inst.instruction |= inst.operands[0].reg << 12;
7461 if (!inst.operands[1].isreg)
7462 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/FALSE))
7463 return;
7464 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
7467 static void
7468 do_ldstt (void)
7470 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
7471 reject [Rn,...]. */
7472 if (inst.operands[1].preind)
7474 constraint (inst.reloc.exp.X_op != O_constant
7475 || inst.reloc.exp.X_add_number != 0,
7476 _("this instruction requires a post-indexed address"));
7478 inst.operands[1].preind = 0;
7479 inst.operands[1].postind = 1;
7480 inst.operands[1].writeback = 1;
7482 inst.instruction |= inst.operands[0].reg << 12;
7483 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
7486 /* Halfword and signed-byte load/store operations. */
7488 static void
7489 do_ldstv4 (void)
7491 inst.instruction |= inst.operands[0].reg << 12;
7492 if (!inst.operands[1].isreg)
7493 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/TRUE))
7494 return;
7495 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
7498 static void
7499 do_ldsttv4 (void)
7501 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
7502 reject [Rn,...]. */
7503 if (inst.operands[1].preind)
7505 constraint (inst.reloc.exp.X_op != O_constant
7506 || inst.reloc.exp.X_add_number != 0,
7507 _("this instruction requires a post-indexed address"));
7509 inst.operands[1].preind = 0;
7510 inst.operands[1].postind = 1;
7511 inst.operands[1].writeback = 1;
7513 inst.instruction |= inst.operands[0].reg << 12;
7514 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
7517 /* Co-processor register load/store.
7518 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
7519 static void
7520 do_lstc (void)
7522 inst.instruction |= inst.operands[0].reg << 8;
7523 inst.instruction |= inst.operands[1].reg << 12;
7524 encode_arm_cp_address (2, TRUE, TRUE, 0);
7527 static void
7528 do_mlas (void)
7530 /* This restriction does not apply to mls (nor to mla in v6 or later). */
7531 if (inst.operands[0].reg == inst.operands[1].reg
7532 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
7533 && !(inst.instruction & 0x00400000))
7534 as_tsktsk (_("Rd and Rm should be different in mla"));
7536 inst.instruction |= inst.operands[0].reg << 16;
7537 inst.instruction |= inst.operands[1].reg;
7538 inst.instruction |= inst.operands[2].reg << 8;
7539 inst.instruction |= inst.operands[3].reg << 12;
7542 static void
7543 do_mov (void)
7545 inst.instruction |= inst.operands[0].reg << 12;
7546 encode_arm_shifter_operand (1);
7549 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
7550 static void
7551 do_mov16 (void)
7553 bfd_vma imm;
7554 bfd_boolean top;
7556 top = (inst.instruction & 0x00400000) != 0;
7557 constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
7558 _(":lower16: not allowed in this instruction"));
7559 constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
7560 _(":upper16: not allowed in this instruction"));
7561 inst.instruction |= inst.operands[0].reg << 12;
7562 if (inst.reloc.type == BFD_RELOC_UNUSED)
7564 imm = inst.reloc.exp.X_add_number;
7565 /* The value is in two pieces: 0:11, 16:19. */
7566 inst.instruction |= (imm & 0x00000fff);
7567 inst.instruction |= (imm & 0x0000f000) << 4;
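/* Worked example: "movw r0, #0xabcd" with no relocation pending is
   split here into 0xbcd (bits 11:0) and 0xa (bits 19:16).  When the
   operand is written with :lower16: or :upper16:, inst.reloc.type is
   already BFD_RELOC_ARM_MOVW/MOVT and the value is left to the fixup,
   subject to the top/bottom checks above.  */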
7571 static void do_vfp_nsyn_opcode (const char *);
7573 static int
7574 do_vfp_nsyn_mrs (void)
7576 if (inst.operands[0].isvec)
7578 if (inst.operands[1].reg != 1)
7579 first_error (_("operand 1 must be FPSCR"));
7580 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
7581 memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
7582 do_vfp_nsyn_opcode ("fmstat");
7584 else if (inst.operands[1].isvec)
7585 do_vfp_nsyn_opcode ("fmrx");
7586 else
7587 return FAIL;
7589 return SUCCESS;
7592 static int
7593 do_vfp_nsyn_msr (void)
7595 if (inst.operands[0].isvec)
7596 do_vfp_nsyn_opcode ("fmxr");
7597 else
7598 return FAIL;
7600 return SUCCESS;
7603 static void
7604 do_mrs (void)
7606 if (do_vfp_nsyn_mrs () == SUCCESS)
7607 return;
7609 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
7610 constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
7611 != (PSR_c|PSR_f),
7612 _("'CPSR' or 'SPSR' expected"));
7613 inst.instruction |= inst.operands[0].reg << 12;
7614 inst.instruction |= (inst.operands[1].imm & SPSR_BIT);
7617 /* Two possible forms:
7618 "{C|S}PSR_<field>, Rm",
7619 "{C|S}PSR_f, #expression". */
7621 static void
7622 do_msr (void)
7624 if (do_vfp_nsyn_msr () == SUCCESS)
7625 return;
7627 inst.instruction |= inst.operands[0].imm;
7628 if (inst.operands[1].isreg)
7629 inst.instruction |= inst.operands[1].reg;
7630 else
7632 inst.instruction |= INST_IMMEDIATE;
7633 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
7634 inst.reloc.pc_rel = 0;
7638 static void
7639 do_mul (void)
7641 if (!inst.operands[2].present)
7642 inst.operands[2].reg = inst.operands[0].reg;
7643 inst.instruction |= inst.operands[0].reg << 16;
7644 inst.instruction |= inst.operands[1].reg;
7645 inst.instruction |= inst.operands[2].reg << 8;
7647 if (inst.operands[0].reg == inst.operands[1].reg
7648 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
7649 as_tsktsk (_("Rd and Rm should be different in mul"));
7652 /* Long Multiply Parser
7653 UMULL RdLo, RdHi, Rm, Rs
7654 SMULL RdLo, RdHi, Rm, Rs
7655 UMLAL RdLo, RdHi, Rm, Rs
7656 SMLAL RdLo, RdHi, Rm, Rs. */
7658 static void
7659 do_mull (void)
7661 inst.instruction |= inst.operands[0].reg << 12;
7662 inst.instruction |= inst.operands[1].reg << 16;
7663 inst.instruction |= inst.operands[2].reg;
7664 inst.instruction |= inst.operands[3].reg << 8;
7666 /* rdhi and rdlo must be different. */
7667 if (inst.operands[0].reg == inst.operands[1].reg)
7668 as_tsktsk (_("rdhi and rdlo must be different"));
7670 /* rdhi, rdlo and rm must all be different before armv6. */
7671 if ((inst.operands[0].reg == inst.operands[2].reg
7672 || inst.operands[1].reg == inst.operands[2].reg)
7673 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
7674 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
7677 static void
7678 do_nop (void)
7680 if (inst.operands[0].present
7681 || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
7683 /* Architectural NOP hints are CPSR sets with no bits selected. */
7684 inst.instruction &= 0xf0000000;
7685 inst.instruction |= 0x0320f000;
7686 if (inst.operands[0].present)
7687 inst.instruction |= inst.operands[0].imm;
7691 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
7692 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
7693 Condition defaults to COND_ALWAYS.
7694 Error if Rd, Rn or Rm are R15. */
7696 static void
7697 do_pkhbt (void)
7699 inst.instruction |= inst.operands[0].reg << 12;
7700 inst.instruction |= inst.operands[1].reg << 16;
7701 inst.instruction |= inst.operands[2].reg;
7702 if (inst.operands[3].present)
7703 encode_arm_shift (3);
7706 /* ARM V6 PKHTB (Argument Parse). */
7708 static void
7709 do_pkhtb (void)
7711 if (!inst.operands[3].present)
7713 /* If the shift specifier is omitted, turn the instruction
7714 into pkhbt rd, rm, rn. */
7715 inst.instruction &= 0xfff00010;
7716 inst.instruction |= inst.operands[0].reg << 12;
7717 inst.instruction |= inst.operands[1].reg;
7718 inst.instruction |= inst.operands[2].reg << 16;
7720 else
7722 inst.instruction |= inst.operands[0].reg << 12;
7723 inst.instruction |= inst.operands[1].reg << 16;
7724 inst.instruction |= inst.operands[2].reg;
7725 encode_arm_shift (3);
7729 /* ARMv5TE: Preload-Cache
7731 PLD <addr_mode>
7733 Syntactically, like LDR with B=1, W=0, L=1. */
7735 static void
7736 do_pld (void)
7738 constraint (!inst.operands[0].isreg,
7739 _("'[' expected after PLD mnemonic"));
7740 constraint (inst.operands[0].postind,
7741 _("post-indexed expression used in preload instruction"));
7742 constraint (inst.operands[0].writeback,
7743 _("writeback used in preload instruction"));
7744 constraint (!inst.operands[0].preind,
7745 _("unindexed addressing used in preload instruction"));
7746 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
7749 /* ARMv7: PLI <addr_mode> */
7750 static void
7751 do_pli (void)
7753 constraint (!inst.operands[0].isreg,
7754 _("'[' expected after PLI mnemonic"));
7755 constraint (inst.operands[0].postind,
7756 _("post-indexed expression used in preload instruction"));
7757 constraint (inst.operands[0].writeback,
7758 _("writeback used in preload instruction"));
7759 constraint (!inst.operands[0].preind,
7760 _("unindexed addressing used in preload instruction"));
7761 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
7762 inst.instruction &= ~PRE_INDEX;
7765 static void
7766 do_push_pop (void)
7768 inst.operands[1] = inst.operands[0];
7769 memset (&inst.operands[0], 0, sizeof inst.operands[0]);
7770 inst.operands[0].isreg = 1;
7771 inst.operands[0].writeback = 1;
7772 inst.operands[0].reg = REG_SP;
7773 do_ldmstm ();
7776 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
7777 word at the specified address and the following word
7778 respectively.
7779 Unconditionally executed.
7780 Error if Rn is R15. */
7782 static void
7783 do_rfe (void)
7785 inst.instruction |= inst.operands[0].reg << 16;
7786 if (inst.operands[0].writeback)
7787 inst.instruction |= WRITE_BACK;
7790 /* ARM V6 ssat (argument parse). */
7792 static void
7793 do_ssat (void)
7795 inst.instruction |= inst.operands[0].reg << 12;
7796 inst.instruction |= (inst.operands[1].imm - 1) << 16;
7797 inst.instruction |= inst.operands[2].reg;
7799 if (inst.operands[3].present)
7800 encode_arm_shift (3);
7803 /* ARM V6 usat (argument parse). */
7805 static void
7806 do_usat (void)
7808 inst.instruction |= inst.operands[0].reg << 12;
7809 inst.instruction |= inst.operands[1].imm << 16;
7810 inst.instruction |= inst.operands[2].reg;
7812 if (inst.operands[3].present)
7813 encode_arm_shift (3);
7816 /* ARM V6 ssat16 (argument parse). */
7818 static void
7819 do_ssat16 (void)
7821 inst.instruction |= inst.operands[0].reg << 12;
7822 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
7823 inst.instruction |= inst.operands[2].reg;
7826 static void
7827 do_usat16 (void)
7829 inst.instruction |= inst.operands[0].reg << 12;
7830 inst.instruction |= inst.operands[1].imm << 16;
7831 inst.instruction |= inst.operands[2].reg;
7834 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
7835 preserving the other bits.
7837 setend <endian_specifier>, where <endian_specifier> is either
7838 BE or LE. */
7840 static void
7841 do_setend (void)
7843 if (inst.operands[0].imm)
7844 inst.instruction |= 0x200;
7847 static void
7848 do_shift (void)
7850 unsigned int Rm = (inst.operands[1].present
7851 ? inst.operands[1].reg
7852 : inst.operands[0].reg);
7854 inst.instruction |= inst.operands[0].reg << 12;
7855 inst.instruction |= Rm;
7856 if (inst.operands[2].isreg) /* Rd, {Rm,} Rs */
7858 inst.instruction |= inst.operands[2].reg << 8;
7859 inst.instruction |= SHIFT_BY_REG;
7861 else
7862 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7865 static void
7866 do_smc (void)
7868 inst.reloc.type = BFD_RELOC_ARM_SMC;
7869 inst.reloc.pc_rel = 0;
7872 static void
7873 do_swi (void)
7875 inst.reloc.type = BFD_RELOC_ARM_SWI;
7876 inst.reloc.pc_rel = 0;
7879 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
7880 SMLAxy{cond} Rd,Rm,Rs,Rn
7881 SMLAWy{cond} Rd,Rm,Rs,Rn
7882 Error if any register is R15. */
7884 static void
7885 do_smla (void)
7887 inst.instruction |= inst.operands[0].reg << 16;
7888 inst.instruction |= inst.operands[1].reg;
7889 inst.instruction |= inst.operands[2].reg << 8;
7890 inst.instruction |= inst.operands[3].reg << 12;
7893 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
7894 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
7895 Error if any register is R15.
7896 Warning if Rdlo == Rdhi. */
7898 static void
7899 do_smlal (void)
7901 inst.instruction |= inst.operands[0].reg << 12;
7902 inst.instruction |= inst.operands[1].reg << 16;
7903 inst.instruction |= inst.operands[2].reg;
7904 inst.instruction |= inst.operands[3].reg << 8;
7906 if (inst.operands[0].reg == inst.operands[1].reg)
7907 as_tsktsk (_("rdhi and rdlo must be different"));
7910 /* ARM V5E (El Segundo) signed-multiply (argument parse)
7911 SMULxy{cond} Rd,Rm,Rs
7912 Error if any register is R15. */
7914 static void
7915 do_smul (void)
7917 inst.instruction |= inst.operands[0].reg << 16;
7918 inst.instruction |= inst.operands[1].reg;
7919 inst.instruction |= inst.operands[2].reg << 8;
7922 /* ARM V6 srs (argument parse). The variable fields in the encoding are
7923 the same for both ARM and Thumb-2. */
7925 static void
7926 do_srs (void)
7928 int reg;
7930 if (inst.operands[0].present)
7932 reg = inst.operands[0].reg;
7933 constraint (reg != REG_SP, _("SRS base register must be r13"));
7935 else
7936 reg = REG_SP;
7938 inst.instruction |= reg << 16;
7939 inst.instruction |= inst.operands[1].imm;
7940 if (inst.operands[0].writeback || inst.operands[1].writeback)
7941 inst.instruction |= WRITE_BACK;
7944 /* ARM V6 strex (argument parse). */
7946 static void
7947 do_strex (void)
7949 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
7950 || inst.operands[2].postind || inst.operands[2].writeback
7951 || inst.operands[2].immisreg || inst.operands[2].shifted
7952 || inst.operands[2].negative
7953 /* See comment in do_ldrex(). */
7954 || (inst.operands[2].reg == REG_PC),
7955 BAD_ADDR_MODE);
7957 constraint (inst.operands[0].reg == inst.operands[1].reg
7958 || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
7960 constraint (inst.reloc.exp.X_op != O_constant
7961 || inst.reloc.exp.X_add_number != 0,
7962 _("offset must be zero in ARM encoding"));
7964 inst.instruction |= inst.operands[0].reg << 12;
7965 inst.instruction |= inst.operands[1].reg;
7966 inst.instruction |= inst.operands[2].reg << 16;
7967 inst.reloc.type = BFD_RELOC_UNUSED;
7970 static void
7971 do_strexd (void)
7973 constraint (inst.operands[1].reg % 2 != 0,
7974 _("even register required"));
7975 constraint (inst.operands[2].present
7976 && inst.operands[2].reg != inst.operands[1].reg + 1,
7977 _("can only store two consecutive registers"));
7978 /* If op 2 were present and equal to PC, this function wouldn't
7979 have been called in the first place. */
7980 constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));
7982 constraint (inst.operands[0].reg == inst.operands[1].reg
7983 || inst.operands[0].reg == inst.operands[1].reg + 1
7984 || inst.operands[0].reg == inst.operands[3].reg,
7985 BAD_OVERLAP);
7987 inst.instruction |= inst.operands[0].reg << 12;
7988 inst.instruction |= inst.operands[1].reg;
7989 inst.instruction |= inst.operands[3].reg << 16;
7992 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
7993 extends it to 32 bits, and adds the result to a value in another
7994 register. You can specify a rotation by 0, 8, 16, or 24 bits
7995 before extracting the 16-bit value.
7996 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
7997 Condition defaults to COND_ALWAYS.
7998 Error if any register is R15. */
8000 static void
8001 do_sxtah (void)
8003 inst.instruction |= inst.operands[0].reg << 12;
8004 inst.instruction |= inst.operands[1].reg << 16;
8005 inst.instruction |= inst.operands[2].reg;
8006 inst.instruction |= inst.operands[3].imm << 10;
8009 /* ARM V6 SXTH.
8011 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
8012 Condition defaults to COND_ALWAYS.
8013 Error if any register is R15. */
8015 static void
8016 do_sxth (void)
8018 inst.instruction |= inst.operands[0].reg << 12;
8019 inst.instruction |= inst.operands[1].reg;
8020 inst.instruction |= inst.operands[2].imm << 10;
8023 /* VFP instructions. In a logical order: SP variant first, monad
8024 before dyad, arithmetic then move then load/store. */
8026 static void
8027 do_vfp_sp_monadic (void)
8029 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8030 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
8033 static void
8034 do_vfp_sp_dyadic (void)
8036 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8037 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
8038 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
8041 static void
8042 do_vfp_sp_compare_z (void)
8044 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8047 static void
8048 do_vfp_dp_sp_cvt (void)
8050 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8051 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
8054 static void
8055 do_vfp_sp_dp_cvt (void)
8057 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8058 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
8061 static void
8062 do_vfp_reg_from_sp (void)
8064 inst.instruction |= inst.operands[0].reg << 12;
8065 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
8068 static void
8069 do_vfp_reg2_from_sp2 (void)
8071 constraint (inst.operands[2].imm != 2,
8072 _("only two consecutive VFP SP registers allowed here"));
8073 inst.instruction |= inst.operands[0].reg << 12;
8074 inst.instruction |= inst.operands[1].reg << 16;
8075 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
8078 static void
8079 do_vfp_sp_from_reg (void)
8081 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
8082 inst.instruction |= inst.operands[1].reg << 12;
8085 static void
8086 do_vfp_sp2_from_reg2 (void)
8088 constraint (inst.operands[0].imm != 2,
8089 _("only two consecutive VFP SP registers allowed here"));
8090 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
8091 inst.instruction |= inst.operands[1].reg << 12;
8092 inst.instruction |= inst.operands[2].reg << 16;
8095 static void
8096 do_vfp_sp_ldst (void)
8098 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8099 encode_arm_cp_address (1, FALSE, TRUE, 0);
8102 static void
8103 do_vfp_dp_ldst (void)
8105 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8106 encode_arm_cp_address (1, FALSE, TRUE, 0);
8110 static void
8111 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
8113 if (inst.operands[0].writeback)
8114 inst.instruction |= WRITE_BACK;
8115 else
8116 constraint (ldstm_type != VFP_LDSTMIA,
8117 _("this addressing mode requires base-register writeback"));
8118 inst.instruction |= inst.operands[0].reg << 16;
8119 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
8120 inst.instruction |= inst.operands[1].imm;
8123 static void
8124 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
8126 int count;
8128 if (inst.operands[0].writeback)
8129 inst.instruction |= WRITE_BACK;
8130 else
8131 constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
8132 _("this addressing mode requires base-register writeback"));
8134 inst.instruction |= inst.operands[0].reg << 16;
8135 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
8137 count = inst.operands[1].imm << 1;
8138 if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
8139 count += 1;
8141 inst.instruction |= count;
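/* The immediate is doubled because each D register occupies two words
   in the transfer; the extra 1 added for VFP_LDSTMIAX/VFP_LDSTMDBX
   makes the count odd, which is what marks the unknown-precision
   FLDMX/FSTMX form.  E.g. transferring four D registers gives a count
   field of 8 for fldmiad but 9 for fldmiax.  */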
8144 static void
8145 do_vfp_sp_ldstmia (void)
8147 vfp_sp_ldstm (VFP_LDSTMIA);
8150 static void
8151 do_vfp_sp_ldstmdb (void)
8153 vfp_sp_ldstm (VFP_LDSTMDB);
8156 static void
8157 do_vfp_dp_ldstmia (void)
8159 vfp_dp_ldstm (VFP_LDSTMIA);
8162 static void
8163 do_vfp_dp_ldstmdb (void)
8165 vfp_dp_ldstm (VFP_LDSTMDB);
8168 static void
8169 do_vfp_xp_ldstmia (void)
8171 vfp_dp_ldstm (VFP_LDSTMIAX);
8174 static void
8175 do_vfp_xp_ldstmdb (void)
8177 vfp_dp_ldstm (VFP_LDSTMDBX);
8180 static void
8181 do_vfp_dp_rd_rm (void)
8183 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8184 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
8187 static void
8188 do_vfp_dp_rn_rd (void)
8190 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
8191 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
8194 static void
8195 do_vfp_dp_rd_rn (void)
8197 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8198 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
8201 static void
8202 do_vfp_dp_rd_rn_rm (void)
8204 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8205 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
8206 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
8209 static void
8210 do_vfp_dp_rd (void)
8212 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8215 static void
8216 do_vfp_dp_rm_rd_rn (void)
8218 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
8219 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
8220 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
8223 /* VFPv3 instructions. */
8224 static void
8225 do_vfp_sp_const (void)
8227 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8228 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
8229 inst.instruction |= (inst.operands[1].imm & 0x0f);
8232 static void
8233 do_vfp_dp_const (void)
8235 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8236 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
8237 inst.instruction |= (inst.operands[1].imm & 0x0f);
8240 static void
8241 vfp_conv (int srcsize)
8243 unsigned immbits = srcsize - inst.operands[1].imm;
8244 inst.instruction |= (immbits & 1) << 5;
8245 inst.instruction |= (immbits >> 1);
8248 static void
8249 do_vfp_sp_conv_16 (void)
8251 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8252 vfp_conv (16);
8255 static void
8256 do_vfp_dp_conv_16 (void)
8258 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8259 vfp_conv (16);
8262 static void
8263 do_vfp_sp_conv_32 (void)
8265 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8266 vfp_conv (32);
8269 static void
8270 do_vfp_dp_conv_32 (void)
8272 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8273 vfp_conv (32);
8276 /* FPA instructions. Also in a logical order. */
8278 static void
8279 do_fpa_cmp (void)
8281 inst.instruction |= inst.operands[0].reg << 16;
8282 inst.instruction |= inst.operands[1].reg;
8285 static void
8286 do_fpa_ldmstm (void)
8288 inst.instruction |= inst.operands[0].reg << 12;
8289 switch (inst.operands[1].imm)
8291 case 1: inst.instruction |= CP_T_X; break;
8292 case 2: inst.instruction |= CP_T_Y; break;
8293 case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
8294 case 4: break;
8295 default: abort ();
8298 if (inst.instruction & (PRE_INDEX | INDEX_UP))
8300 /* The instruction specified "ea" or "fd", so we can only accept
8301 [Rn]{!}. The instruction does not really support stacking or
8302 unstacking, so we have to emulate these by setting appropriate
8303 bits and offsets. */
8304 constraint (inst.reloc.exp.X_op != O_constant
8305 || inst.reloc.exp.X_add_number != 0,
8306 _("this instruction does not support indexing"));
8308 if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
8309 inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;
8311 if (!(inst.instruction & INDEX_UP))
8312 inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;
8314 if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
8316 inst.operands[2].preind = 0;
8317 inst.operands[2].postind = 1;
8321 encode_arm_cp_address (2, TRUE, TRUE, 0);
8324 /* iWMMXt instructions: strictly in alphabetical order. */
8326 static void
8327 do_iwmmxt_tandorc (void)
8329 constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
8332 static void
8333 do_iwmmxt_textrc (void)
8335 inst.instruction |= inst.operands[0].reg << 12;
8336 inst.instruction |= inst.operands[1].imm;
8339 static void
8340 do_iwmmxt_textrm (void)
8342 inst.instruction |= inst.operands[0].reg << 12;
8343 inst.instruction |= inst.operands[1].reg << 16;
8344 inst.instruction |= inst.operands[2].imm;
8347 static void
8348 do_iwmmxt_tinsr (void)
8350 inst.instruction |= inst.operands[0].reg << 16;
8351 inst.instruction |= inst.operands[1].reg << 12;
8352 inst.instruction |= inst.operands[2].imm;
8355 static void
8356 do_iwmmxt_tmia (void)
8358 inst.instruction |= inst.operands[0].reg << 5;
8359 inst.instruction |= inst.operands[1].reg;
8360 inst.instruction |= inst.operands[2].reg << 12;
8363 static void
8364 do_iwmmxt_waligni (void)
8366 inst.instruction |= inst.operands[0].reg << 12;
8367 inst.instruction |= inst.operands[1].reg << 16;
8368 inst.instruction |= inst.operands[2].reg;
8369 inst.instruction |= inst.operands[3].imm << 20;
8372 static void
8373 do_iwmmxt_wmerge (void)
8375 inst.instruction |= inst.operands[0].reg << 12;
8376 inst.instruction |= inst.operands[1].reg << 16;
8377 inst.instruction |= inst.operands[2].reg;
8378 inst.instruction |= inst.operands[3].imm << 21;
8381 static void
8382 do_iwmmxt_wmov (void)
8384 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
8385 inst.instruction |= inst.operands[0].reg << 12;
8386 inst.instruction |= inst.operands[1].reg << 16;
8387 inst.instruction |= inst.operands[1].reg;
8390 static void
8391 do_iwmmxt_wldstbh (void)
8393 int reloc;
8394 inst.instruction |= inst.operands[0].reg << 12;
8395 if (thumb_mode)
8396 reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
8397 else
8398 reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
8399 encode_arm_cp_address (1, TRUE, FALSE, reloc);
8402 static void
8403 do_iwmmxt_wldstw (void)
8405 /* RIWR_RIWC clears .isreg for a control register. */
8406 if (!inst.operands[0].isreg)
8408 constraint (inst.cond != COND_ALWAYS, BAD_COND);
8409 inst.instruction |= 0xf0000000;
8412 inst.instruction |= inst.operands[0].reg << 12;
8413 encode_arm_cp_address (1, TRUE, TRUE, 0);
8416 static void
8417 do_iwmmxt_wldstd (void)
8419 inst.instruction |= inst.operands[0].reg << 12;
8420 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
8421 && inst.operands[1].immisreg)
8423 inst.instruction &= ~0x1a000ff;
8424 inst.instruction |= (0xf << 28);
8425 if (inst.operands[1].preind)
8426 inst.instruction |= PRE_INDEX;
8427 if (!inst.operands[1].negative)
8428 inst.instruction |= INDEX_UP;
8429 if (inst.operands[1].writeback)
8430 inst.instruction |= WRITE_BACK;
8431 inst.instruction |= inst.operands[1].reg << 16;
8432 inst.instruction |= inst.reloc.exp.X_add_number << 4;
8433 inst.instruction |= inst.operands[1].imm;
8435 else
8436 encode_arm_cp_address (1, TRUE, FALSE, 0);
8439 static void
8440 do_iwmmxt_wshufh (void)
8442 inst.instruction |= inst.operands[0].reg << 12;
8443 inst.instruction |= inst.operands[1].reg << 16;
8444 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
8445 inst.instruction |= (inst.operands[2].imm & 0x0f);
8448 static void
8449 do_iwmmxt_wzero (void)
8451 /* WZERO reg is an alias for WANDN reg, reg, reg. */
8452 inst.instruction |= inst.operands[0].reg;
8453 inst.instruction |= inst.operands[0].reg << 12;
8454 inst.instruction |= inst.operands[0].reg << 16;
8457 static void
8458 do_iwmmxt_wrwrwr_or_imm5 (void)
8460 if (inst.operands[2].isreg)
8461 do_rd_rn_rm ();
8462 else {
8463 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
8464 _("immediate operand requires iWMMXt2"));
8465 do_rd_rn ();
8466 if (inst.operands[2].imm == 0)
8468 switch ((inst.instruction >> 20) & 0xf)
8470 case 4:
8471 case 5:
8472 case 6:
8473 case 7:
8474 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
8475 inst.operands[2].imm = 16;
8476 inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
8477 break;
8478 case 8:
8479 case 9:
8480 case 10:
8481 case 11:
8482 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
8483 inst.operands[2].imm = 32;
8484 inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
8485 break;
8486 case 12:
8487 case 13:
8488 case 14:
8489 case 15:
8491 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
8492 unsigned long wrn;
8493 wrn = (inst.instruction >> 16) & 0xf;
8494 inst.instruction &= 0xff0fff0f;
8495 inst.instruction |= wrn;
8496 /* Bail out here; the instruction is now assembled. */
8497 return;
8501 /* Map 32 -> 0, etc. */
8502 inst.operands[2].imm &= 0x1f;
8503 inst.instruction |= (0xf << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
8507 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
8508 operations first, then control, shift, and load/store. */
8510 /* Insns like "foo X,Y,Z". */
8512 static void
8513 do_mav_triple (void)
8515 inst.instruction |= inst.operands[0].reg << 16;
8516 inst.instruction |= inst.operands[1].reg;
8517 inst.instruction |= inst.operands[2].reg << 12;
8520 /* Insns like "foo W,X,Y,Z".
8521 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
8523 static void
8524 do_mav_quad (void)
8526 inst.instruction |= inst.operands[0].reg << 5;
8527 inst.instruction |= inst.operands[1].reg << 12;
8528 inst.instruction |= inst.operands[2].reg << 16;
8529 inst.instruction |= inst.operands[3].reg;
8532 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
8533 static void
8534 do_mav_dspsc (void)
8536 inst.instruction |= inst.operands[1].reg << 12;
8539 /* Maverick shift immediate instructions.
8540 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
8541 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
8543 static void
8544 do_mav_shift (void)
8546 int imm = inst.operands[2].imm;
8548 inst.instruction |= inst.operands[0].reg << 12;
8549 inst.instruction |= inst.operands[1].reg << 16;
8551 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
8552 Bits 5-7 of the insn should have bits 4-6 of the immediate.
8553 Bit 4 should be 0. */
8554 imm = (imm & 0xf) | ((imm & 0x70) << 1);
8556 inst.instruction |= imm;
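/* Worked example: a shift immediate of 60 is remapped by the line
   above to (60 & 0xf) | ((60 & 0x70) << 1) == 0x6c, i.e. the low
   nibble 0xc stays in bits 3:0 and bits 4-6 of the immediate (0b011)
   move up to bits 7:5, leaving bit 4 clear as required.  */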
8559 /* XScale instructions. Also sorted arithmetic before move. */
8561 /* Xscale multiply-accumulate (argument parse)
8562 MIAcc acc0,Rm,Rs
8563 MIAPHcc acc0,Rm,Rs
8564 MIAxycc acc0,Rm,Rs. */
8566 static void
8567 do_xsc_mia (void)
8569 inst.instruction |= inst.operands[1].reg;
8570 inst.instruction |= inst.operands[2].reg << 12;
8573 /* Xscale move-accumulator-register (argument parse)
8575 MARcc acc0,RdLo,RdHi. */
8577 static void
8578 do_xsc_mar (void)
8580 inst.instruction |= inst.operands[1].reg << 12;
8581 inst.instruction |= inst.operands[2].reg << 16;
8584 /* Xscale move-register-accumulator (argument parse)
8586 MRAcc RdLo,RdHi,acc0. */
8588 static void
8589 do_xsc_mra (void)
8591 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
8592 inst.instruction |= inst.operands[0].reg << 12;
8593 inst.instruction |= inst.operands[1].reg << 16;
8596 /* Encoding functions relevant only to Thumb. */
8598 /* inst.operands[i] is a shifted-register operand; encode
8599 it into inst.instruction in the format used by Thumb32. */
8601 static void
8602 encode_thumb32_shifted_operand (int i)
8604 unsigned int value = inst.reloc.exp.X_add_number;
8605 unsigned int shift = inst.operands[i].shift_kind;
8607 constraint (inst.operands[i].immisreg,
8608 _("shift by register not allowed in thumb mode"));
8609 inst.instruction |= inst.operands[i].reg;
8610 if (shift == SHIFT_RRX)
8611 inst.instruction |= SHIFT_ROR << 4;
8612 else
8614 constraint (inst.reloc.exp.X_op != O_constant,
8615 _("expression too complex"));
8617 constraint (value > 32
8618 || (value == 32 && (shift == SHIFT_LSL
8619 || shift == SHIFT_ROR)),
8620 _("shift expression is too large"));
8622 if (value == 0)
8623 shift = SHIFT_LSL;
8624 else if (value == 32)
8625 value = 0;
8627 inst.instruction |= shift << 4;
8628 inst.instruction |= (value & 0x1c) << 10;
8629 inst.instruction |= (value & 0x03) << 6;
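/* For illustration: a shifted operand such as "r2, lsl #3" places r2
   in bits 3:0, the shift type in bits 5:4, and the amount 3 split as
   imm3:imm2 -- (3 & 0x1c) << 10 contributes nothing and (3 & 0x03) << 6
   sets bits 7:6.  "asr #32" is encoded with an amount of 0, and an
   amount of 0 is normalised to LSL, per the two special cases above.  */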
8634 /* inst.operands[i] was set up by parse_address. Encode it into a
8635 Thumb32 format load or store instruction. Reject forms that cannot
8636 be used with such instructions. If is_t is true, reject forms that
8637 cannot be used with a T instruction; if is_d is true, reject forms
8638 that cannot be used with a D instruction. */
8640 static void
8641 encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
8643 bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
8645 constraint (!inst.operands[i].isreg,
8646 _("Instruction does not support =N addresses"));
8648 inst.instruction |= inst.operands[i].reg << 16;
8649 if (inst.operands[i].immisreg)
8651 constraint (is_pc, _("cannot use register index with PC-relative addressing"));
8652 constraint (is_t || is_d, _("cannot use register index with this instruction"));
8653 constraint (inst.operands[i].negative,
8654 _("Thumb does not support negative register indexing"));
8655 constraint (inst.operands[i].postind,
8656 _("Thumb does not support register post-indexing"));
8657 constraint (inst.operands[i].writeback,
8658 _("Thumb does not support register indexing with writeback"));
8659 constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
8660 _("Thumb supports only LSL in shifted register indexing"));
8662 inst.instruction |= inst.operands[i].imm;
8663 if (inst.operands[i].shifted)
8665 constraint (inst.reloc.exp.X_op != O_constant,
8666 _("expression too complex"));
8667 constraint (inst.reloc.exp.X_add_number < 0
8668 || inst.reloc.exp.X_add_number > 3,
8669 _("shift out of range"));
8670 inst.instruction |= inst.reloc.exp.X_add_number << 4;
8672 inst.reloc.type = BFD_RELOC_UNUSED;
8674 else if (inst.operands[i].preind)
8676 constraint (is_pc && inst.operands[i].writeback,
8677 _("cannot use writeback with PC-relative addressing"));
8678 constraint (is_t && inst.operands[i].writeback,
8679 _("cannot use writeback with this instruction"));
8681 if (is_d)
8683 inst.instruction |= 0x01000000;
8684 if (inst.operands[i].writeback)
8685 inst.instruction |= 0x00200000;
8687 else
8689 inst.instruction |= 0x00000c00;
8690 if (inst.operands[i].writeback)
8691 inst.instruction |= 0x00000100;
8693 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
8695 else if (inst.operands[i].postind)
8697 gas_assert (inst.operands[i].writeback);
8698 constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
8699 constraint (is_t, _("cannot use post-indexing with this instruction"));
8701 if (is_d)
8702 inst.instruction |= 0x00200000;
8703 else
8704 inst.instruction |= 0x00000900;
8705 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
8707 else /* unindexed - only for coprocessor */
8708 inst.error = _("instruction does not accept unindexed addressing");
8711 /* Table of Thumb instructions which exist in both 16- and 32-bit
8712 encodings (the latter only in post-V6T2 cores). The index is the
8713 value used in the insns table below. When there is more than one
8714 possible 16-bit encoding for the instruction, this table always
8715 holds variant (1).
8716 Also contains several pseudo-instructions used during relaxation. */
8717 #define T16_32_TAB \
8718 X(adc, 4140, eb400000), \
8719 X(adcs, 4140, eb500000), \
8720 X(add, 1c00, eb000000), \
8721 X(adds, 1c00, eb100000), \
8722 X(addi, 0000, f1000000), \
8723 X(addis, 0000, f1100000), \
8724 X(add_pc,000f, f20f0000), \
8725 X(add_sp,000d, f10d0000), \
8726 X(adr, 000f, f20f0000), \
8727 X(and, 4000, ea000000), \
8728 X(ands, 4000, ea100000), \
8729 X(asr, 1000, fa40f000), \
8730 X(asrs, 1000, fa50f000), \
8731 X(b, e000, f000b000), \
8732 X(bcond, d000, f0008000), \
8733 X(bic, 4380, ea200000), \
8734 X(bics, 4380, ea300000), \
8735 X(cmn, 42c0, eb100f00), \
8736 X(cmp, 2800, ebb00f00), \
8737 X(cpsie, b660, f3af8400), \
8738 X(cpsid, b670, f3af8600), \
8739 X(cpy, 4600, ea4f0000), \
8740 X(dec_sp,80dd, f1ad0d00), \
8741 X(eor, 4040, ea800000), \
8742 X(eors, 4040, ea900000), \
8743 X(inc_sp,00dd, f10d0d00), \
8744 X(ldmia, c800, e8900000), \
8745 X(ldr, 6800, f8500000), \
8746 X(ldrb, 7800, f8100000), \
8747 X(ldrh, 8800, f8300000), \
8748 X(ldrsb, 5600, f9100000), \
8749 X(ldrsh, 5e00, f9300000), \
8750 X(ldr_pc,4800, f85f0000), \
8751 X(ldr_pc2,4800, f85f0000), \
8752 X(ldr_sp,9800, f85d0000), \
8753 X(lsl, 0000, fa00f000), \
8754 X(lsls, 0000, fa10f000), \
8755 X(lsr, 0800, fa20f000), \
8756 X(lsrs, 0800, fa30f000), \
8757 X(mov, 2000, ea4f0000), \
8758 X(movs, 2000, ea5f0000), \
8759 X(mul, 4340, fb00f000), \
8760 X(muls, 4340, ffffffff), /* no 32b muls */ \
8761 X(mvn, 43c0, ea6f0000), \
8762 X(mvns, 43c0, ea7f0000), \
8763 X(neg, 4240, f1c00000), /* rsb #0 */ \
8764 X(negs, 4240, f1d00000), /* rsbs #0 */ \
8765 X(orr, 4300, ea400000), \
8766 X(orrs, 4300, ea500000), \
8767 X(pop, bc00, e8bd0000), /* ldmia sp!,... */ \
8768 X(push, b400, e92d0000), /* stmdb sp!,... */ \
8769 X(rev, ba00, fa90f080), \
8770 X(rev16, ba40, fa90f090), \
8771 X(revsh, bac0, fa90f0b0), \
8772 X(ror, 41c0, fa60f000), \
8773 X(rors, 41c0, fa70f000), \
8774 X(sbc, 4180, eb600000), \
8775 X(sbcs, 4180, eb700000), \
8776 X(stmia, c000, e8800000), \
8777 X(str, 6000, f8400000), \
8778 X(strb, 7000, f8000000), \
8779 X(strh, 8000, f8200000), \
8780 X(str_sp,9000, f84d0000), \
8781 X(sub, 1e00, eba00000), \
8782 X(subs, 1e00, ebb00000), \
8783 X(subi, 8000, f1a00000), \
8784 X(subis, 8000, f1b00000), \
8785 X(sxtb, b240, fa4ff080), \
8786 X(sxth, b200, fa0ff080), \
8787 X(tst, 4200, ea100f00), \
8788 X(uxtb, b2c0, fa5ff080), \
8789 X(uxth, b280, fa1ff080), \
8790 X(nop, bf00, f3af8000), \
8791 X(yield, bf10, f3af8001), \
8792 X(wfe, bf20, f3af8002), \
8793 X(wfi, bf30, f3af8003), \
8794 X(sev, bf40, f3af8004),
8796 /* To catch errors in encoding functions, the codes are all offset by
8797 0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
8798 as 16-bit instructions. */
8799 #define X(a,b,c) T_MNEM_##a
8800 enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
8801 #undef X
8803 #define X(a,b,c) 0x##b
8804 static const unsigned short thumb_op16[] = { T16_32_TAB };
8805 #define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
8806 #undef X
8808 #define X(a,b,c) 0x##c
8809 static const unsigned int thumb_op32[] = { T16_32_TAB };
8810 #define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
8811 #define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
8812 #undef X
8813 #undef T16_32_TAB
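/* A minimal illustration of how the accessors above behave, using
   values taken directly from the table (kept under #if 0 so it is
   never compiled):  */
#if 0
static void
t16_32_tab_examples (void)
{
  /* "add" has a 1c00 16-bit form and an eb000000 32-bit form.  */
  gas_assert (THUMB_OP16 (T_MNEM_add) == 0x1c00);
  gas_assert (THUMB_OP32 (T_MNEM_add) == 0xeb000000);
  /* The flag-setting variant differs only in bit 20, which is what
     THUMB_SETS_FLAGS tests.  */
  gas_assert (THUMB_OP32 (T_MNEM_adds) == 0xeb100000);
  gas_assert (THUMB_SETS_FLAGS (T_MNEM_adds));
  gas_assert (! THUMB_SETS_FLAGS (T_MNEM_add));
}
#endif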
8815 /* Thumb instruction encoders, in alphabetical order. */
8817 /* ADDW or SUBW. */
8819 static void
8820 do_t_add_sub_w (void)
8822 int Rd, Rn;
8824 Rd = inst.operands[0].reg;
8825 Rn = inst.operands[1].reg;
8827 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this is the
8828 SP-{plus,minus}-immediate form of the instruction. */
8829 reject_bad_reg (Rd);
8831 inst.instruction |= (Rn << 16) | (Rd << 8);
8832 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
8835 /* Parse an add or subtract instruction. We get here with inst.instruction
8836 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
8838 static void
8839 do_t_add_sub (void)
8841 int Rd, Rs, Rn;
8843 Rd = inst.operands[0].reg;
8844 Rs = (inst.operands[1].present
8845 ? inst.operands[1].reg /* Rd, Rs, foo */
8846 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
8848 if (Rd == REG_PC)
8849 set_it_insn_type_last ();
8851 if (unified_syntax)
8853 bfd_boolean flags;
8854 bfd_boolean narrow;
8855 int opcode;
8857 flags = (inst.instruction == T_MNEM_adds
8858 || inst.instruction == T_MNEM_subs);
8859 if (flags)
8860 narrow = !in_it_block ();
8861 else
8862 narrow = in_it_block ();
8863 if (!inst.operands[2].isreg)
8865 int add;
8867 constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
8869 add = (inst.instruction == T_MNEM_add
8870 || inst.instruction == T_MNEM_adds);
8871 opcode = 0;
8872 if (inst.size_req != 4)
8874 /* Attempt to use a narrow opcode, with relaxation if
8875 appropriate. */
8876 if (Rd == REG_SP && Rs == REG_SP && !flags)
8877 opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
8878 else if (Rd <= 7 && Rs == REG_SP && add && !flags)
8879 opcode = T_MNEM_add_sp;
8880 else if (Rd <= 7 && Rs == REG_PC && add && !flags)
8881 opcode = T_MNEM_add_pc;
8882 else if (Rd <= 7 && Rs <= 7 && narrow)
8884 if (flags)
8885 opcode = add ? T_MNEM_addis : T_MNEM_subis;
8886 else
8887 opcode = add ? T_MNEM_addi : T_MNEM_subi;
8889 if (opcode)
8891 inst.instruction = THUMB_OP16(opcode);
8892 inst.instruction |= (Rd << 4) | Rs;
8893 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
8894 if (inst.size_req != 2)
8895 inst.relax = opcode;
8897 else
8898 constraint (inst.size_req == 2, BAD_HIREG);
8900 if (inst.size_req == 4
8901 || (inst.size_req != 2 && !opcode))
8903 if (Rd == REG_PC)
8905 constraint (add, BAD_PC);
8906 constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
8907 _("only SUBS PC, LR, #const allowed"));
8908 constraint (inst.reloc.exp.X_op != O_constant,
8909 _("expression too complex"));
8910 constraint (inst.reloc.exp.X_add_number < 0
8911 || inst.reloc.exp.X_add_number > 0xff,
8912 _("immediate value out of range"));
8913 inst.instruction = T2_SUBS_PC_LR
8914 | inst.reloc.exp.X_add_number;
8915 inst.reloc.type = BFD_RELOC_UNUSED;
8916 return;
8918 else if (Rs == REG_PC)
8920 /* Always use addw/subw. */
8921 inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
8922 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
8924 else
8926 inst.instruction = THUMB_OP32 (inst.instruction);
8927 inst.instruction = (inst.instruction & 0xe1ffffff)
8928 | 0x10000000;
8929 if (flags)
8930 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8931 else
8932 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
8934 inst.instruction |= Rd << 8;
8935 inst.instruction |= Rs << 16;
8938 else
8940 Rn = inst.operands[2].reg;
8941 /* See if we can do this with a 16-bit instruction. */
8942 if (!inst.operands[2].shifted && inst.size_req != 4)
8944 if (Rd > 7 || Rs > 7 || Rn > 7)
8945 narrow = FALSE;
8947 if (narrow)
8949 inst.instruction = ((inst.instruction == T_MNEM_adds
8950 || inst.instruction == T_MNEM_add)
8951 ? T_OPCODE_ADD_R3
8952 : T_OPCODE_SUB_R3);
8953 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
8954 return;
8957 if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
8959 /* Thumb-1 cores (except v6-M) require at least one high
8960 register in a narrow non-flag-setting add. */
8961 if (Rd > 7 || Rn > 7
8962 || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
8963 || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
8965 if (Rd == Rn)
8967 Rn = Rs;
8968 Rs = Rd;
8970 inst.instruction = T_OPCODE_ADD_HI;
8971 inst.instruction |= (Rd & 8) << 4;
8972 inst.instruction |= (Rd & 7);
8973 inst.instruction |= Rn << 3;
8974 return;
8979 constraint (Rd == REG_PC, BAD_PC);
8980 constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
8981 constraint (Rs == REG_PC, BAD_PC);
8982 reject_bad_reg (Rn);
8984 /* If we get here, it can't be done in 16 bits. */
8985 constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
8986 _("shift must be constant"));
8987 inst.instruction = THUMB_OP32 (inst.instruction);
8988 inst.instruction |= Rd << 8;
8989 inst.instruction |= Rs << 16;
8990 encode_thumb32_shifted_operand (2);
8993 else
8995 constraint (inst.instruction == T_MNEM_adds
8996 || inst.instruction == T_MNEM_subs,
8997 BAD_THUMB32);
8999 if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
9001 constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
9002 || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
9003 BAD_HIREG);
9005 inst.instruction = (inst.instruction == T_MNEM_add
9006 ? 0x0000 : 0x8000);
9007 inst.instruction |= (Rd << 4) | Rs;
9008 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
9009 return;
9012 Rn = inst.operands[2].reg;
9013 constraint (inst.operands[2].shifted, _("unshifted register required"));
9015 /* We now have Rd, Rs, and Rn set to registers. */
9016 if (Rd > 7 || Rs > 7 || Rn > 7)
9018 /* Can't do this for SUB. */
9019 constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
9020 inst.instruction = T_OPCODE_ADD_HI;
9021 inst.instruction |= (Rd & 8) << 4;
9022 inst.instruction |= (Rd & 7);
9023 if (Rs == Rd)
9024 inst.instruction |= Rn << 3;
9025 else if (Rn == Rd)
9026 inst.instruction |= Rs << 3;
9027 else
9028 constraint (1, _("dest must overlap one source register"));
9030 else
9032 inst.instruction = (inst.instruction == T_MNEM_add
9033 ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
9034 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
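/* A few examples of the narrow-opcode selection in do_t_add_sub above:
   "add sp, sp, #8" picks T_MNEM_inc_sp, "add r1, sp, #8" picks
   T_MNEM_add_sp, "add r1, pc, #8" picks T_MNEM_add_pc, and
   "adds r1, r2, #1" outside an IT block picks T_MNEM_addis.  Each of
   these may still be relaxed to the 32-bit form if the constant turns
   out not to fit the 16-bit encoding.  */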
9039 static void
9040 do_t_adr (void)
9042 unsigned Rd;
9044 Rd = inst.operands[0].reg;
9045 reject_bad_reg (Rd);
9047 if (unified_syntax && inst.size_req == 0 && Rd <= 7)
9049 /* Defer to section relaxation. */
9050 inst.relax = inst.instruction;
9051 inst.instruction = THUMB_OP16 (inst.instruction);
9052 inst.instruction |= Rd << 4;
9054 else if (unified_syntax && inst.size_req != 2)
9056 /* Generate a 32-bit opcode. */
9057 inst.instruction = THUMB_OP32 (inst.instruction);
9058 inst.instruction |= Rd << 8;
9059 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
9060 inst.reloc.pc_rel = 1;
9062 else
9064 /* Generate a 16-bit opcode. */
9065 inst.instruction = THUMB_OP16 (inst.instruction);
9066 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
9067 inst.reloc.exp.X_add_number -= 4; /* PC relative adjust. */
9068 inst.reloc.pc_rel = 1;
9070 inst.instruction |= Rd << 4;
9074 /* Arithmetic instructions for which there is just one 16-bit
9075 instruction encoding, and it allows only two low registers.
9076 For maximal compatibility with ARM syntax, we allow three register
9077 operands even when Thumb-32 instructions are not available, as long
9078 as the first two are identical. For instance, both "sbc r0,r1" and
9079 "sbc r0,r0,r1" are allowed. */
9080 static void
9081 do_t_arit3 (void)
9083 int Rd, Rs, Rn;
9085 Rd = inst.operands[0].reg;
9086 Rs = (inst.operands[1].present
9087 ? inst.operands[1].reg /* Rd, Rs, foo */
9088 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
9089 Rn = inst.operands[2].reg;
9091 reject_bad_reg (Rd);
9092 reject_bad_reg (Rs);
9093 if (inst.operands[2].isreg)
9094 reject_bad_reg (Rn);
9096 if (unified_syntax)
9098 if (!inst.operands[2].isreg)
9100 /* For an immediate, we always generate a 32-bit opcode;
9101 section relaxation will shrink it later if possible. */
9102 inst.instruction = THUMB_OP32 (inst.instruction);
9103 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9104 inst.instruction |= Rd << 8;
9105 inst.instruction |= Rs << 16;
9106 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9108 else
9110 bfd_boolean narrow;
9112 /* See if we can do this with a 16-bit instruction. */
9113 if (THUMB_SETS_FLAGS (inst.instruction))
9114 narrow = !in_it_block ();
9115 else
9116 narrow = in_it_block ();
9118 if (Rd > 7 || Rn > 7 || Rs > 7)
9119 narrow = FALSE;
9120 if (inst.operands[2].shifted)
9121 narrow = FALSE;
9122 if (inst.size_req == 4)
9123 narrow = FALSE;
9125 if (narrow
9126 && Rd == Rs)
9128 inst.instruction = THUMB_OP16 (inst.instruction);
9129 inst.instruction |= Rd;
9130 inst.instruction |= Rn << 3;
9131 return;
9134 /* If we get here, it can't be done in 16 bits. */
9135 constraint (inst.operands[2].shifted
9136 && inst.operands[2].immisreg,
9137 _("shift must be constant"));
9138 inst.instruction = THUMB_OP32 (inst.instruction);
9139 inst.instruction |= Rd << 8;
9140 inst.instruction |= Rs << 16;
9141 encode_thumb32_shifted_operand (2);
9144 else
9146 /* On its face this is a lie - the instruction does set the
9147 flags. However, the only supported mnemonic in this mode
9148 says it doesn't. */
9149 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
9151 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
9152 _("unshifted register required"));
9153 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
9154 constraint (Rd != Rs,
9155 _("dest and source1 must be the same register"));
9157 inst.instruction = THUMB_OP16 (inst.instruction);
9158 inst.instruction |= Rd;
9159 inst.instruction |= Rn << 3;
9163 /* Similarly, but for instructions where the arithmetic operation is
9164 commutative, so we can allow either of them to be different from
9165 the destination operand in a 16-bit instruction. For instance, all
9166 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
9167 accepted. */
9168 static void
9169 do_t_arit3c (void)
9171 int Rd, Rs, Rn;
9173 Rd = inst.operands[0].reg;
9174 Rs = (inst.operands[1].present
9175 ? inst.operands[1].reg /* Rd, Rs, foo */
9176 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
9177 Rn = inst.operands[2].reg;
9179 reject_bad_reg (Rd);
9180 reject_bad_reg (Rs);
9181 if (inst.operands[2].isreg)
9182 reject_bad_reg (Rn);
9184 if (unified_syntax)
9186 if (!inst.operands[2].isreg)
9188 /* For an immediate, we always generate a 32-bit opcode;
9189 section relaxation will shrink it later if possible. */
9190 inst.instruction = THUMB_OP32 (inst.instruction);
9191 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9192 inst.instruction |= Rd << 8;
9193 inst.instruction |= Rs << 16;
9194 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9196 else
9198 bfd_boolean narrow;
9200 /* See if we can do this with a 16-bit instruction. */
9201 if (THUMB_SETS_FLAGS (inst.instruction))
9202 narrow = !in_it_block ();
9203 else
9204 narrow = in_it_block ();
9206 if (Rd > 7 || Rn > 7 || Rs > 7)
9207 narrow = FALSE;
9208 if (inst.operands[2].shifted)
9209 narrow = FALSE;
9210 if (inst.size_req == 4)
9211 narrow = FALSE;
9213 if (narrow)
9215 if (Rd == Rs)
9217 inst.instruction = THUMB_OP16 (inst.instruction);
9218 inst.instruction |= Rd;
9219 inst.instruction |= Rn << 3;
9220 return;
9222 if (Rd == Rn)
9224 inst.instruction = THUMB_OP16 (inst.instruction);
9225 inst.instruction |= Rd;
9226 inst.instruction |= Rs << 3;
9227 return;
9231 /* If we get here, it can't be done in 16 bits. */
9232 constraint (inst.operands[2].shifted
9233 && inst.operands[2].immisreg,
9234 _("shift must be constant"));
9235 inst.instruction = THUMB_OP32 (inst.instruction);
9236 inst.instruction |= Rd << 8;
9237 inst.instruction |= Rs << 16;
9238 encode_thumb32_shifted_operand (2);
9241 else
9243 /* On its face this is a lie - the instruction does set the
9244 flags. However, the only supported mnemonic in this mode
9245 says it doesn't. */
9246 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
9248 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
9249 _("unshifted register required"));
9250 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
9252 inst.instruction = THUMB_OP16 (inst.instruction);
9253 inst.instruction |= Rd;
9255 if (Rd == Rs)
9256 inst.instruction |= Rn << 3;
9257 else if (Rd == Rn)
9258 inst.instruction |= Rs << 3;
9259 else
9260 constraint (1, _("dest must overlap one source register"));
9264 static void
9265 do_t_barrier (void)
9267 if (inst.operands[0].present)
9269 constraint ((inst.instruction & 0xf0) != 0x40
9270 && inst.operands[0].imm != 0xf,
9271 _("bad barrier type"));
9272 inst.instruction |= inst.operands[0].imm;
9274 else
9275 inst.instruction |= 0xf;
9278 static void
9279 do_t_bfc (void)
9281 unsigned Rd;
9282 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
9283 constraint (msb > 32, _("bit-field extends past end of register"));
9284 /* The instruction encoding stores the LSB and MSB,
9285 not the LSB and width. */
9286 Rd = inst.operands[0].reg;
9287 reject_bad_reg (Rd);
9288 inst.instruction |= Rd << 8;
9289 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
9290 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
9291 inst.instruction |= msb - 1;
9294 static void
9295 do_t_bfi (void)
9297 int Rd, Rn;
9298 unsigned int msb;
9300 Rd = inst.operands[0].reg;
9301 reject_bad_reg (Rd);
9303 /* #0 in second position is alternative syntax for bfc, which is
9304 the same instruction but with REG_PC in the Rm field. */
9305 if (!inst.operands[1].isreg)
9306 Rn = REG_PC;
9307 else
9309 Rn = inst.operands[1].reg;
9310 reject_bad_reg (Rn);
9313 msb = inst.operands[2].imm + inst.operands[3].imm;
9314 constraint (msb > 32, _("bit-field extends past end of register"));
9315 /* The instruction encoding stores the LSB and MSB,
9316 not the LSB and width. */
9317 inst.instruction |= Rd << 8;
9318 inst.instruction |= Rn << 16;
9319 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
9320 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
9321 inst.instruction |= msb - 1;
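/* Worked example of the LSB/MSB encoding used by bfc/bfi above:
   "bfi r0, r1, #4, #8" has lsb = 4 and width = 8, so the value placed
   in the low bits of the instruction is msb - 1 = 11, with the lsb
   split between imm3 (bits 14:12) and imm2 (bits 7:6).  */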
9324 static void
9325 do_t_bfx (void)
9327 unsigned Rd, Rn;
9329 Rd = inst.operands[0].reg;
9330 Rn = inst.operands[1].reg;
9332 reject_bad_reg (Rd);
9333 reject_bad_reg (Rn);
9335 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
9336 _("bit-field extends past end of register"));
9337 inst.instruction |= Rd << 8;
9338 inst.instruction |= Rn << 16;
9339 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
9340 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
9341 inst.instruction |= inst.operands[3].imm - 1;
9344 /* ARM V5 Thumb BLX (argument parse)
9345 BLX <target_addr> which is BLX(1)
9346 BLX <Rm> which is BLX(2)
9347 Unfortunately, there are two different opcodes for this mnemonic.
9348 So, the insns[].value is not used, and the code here zaps values
9349 into inst.instruction.
9351 ??? How to take advantage of the additional two bits of displacement
9352 available in Thumb32 mode? Need new relocation? */
9354 static void
9355 do_t_blx (void)
9357 set_it_insn_type_last ();
9359 if (inst.operands[0].isreg)
9361 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
9362 /* We have a register, so this is BLX(2). */
9363 inst.instruction |= inst.operands[0].reg << 3;
9365 else
9367 /* No register. This must be BLX(1). */
9368 inst.instruction = 0xf000e800;
9369 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BLX;
9370 inst.reloc.pc_rel = 1;
9374 static void
9375 do_t_branch (void)
9377 int opcode;
9378 int cond;
9380 cond = inst.cond;
9381 set_it_insn_type (IF_INSIDE_IT_LAST_INSN);
9383 if (in_it_block ())
9385 /* Conditional branches inside IT blocks are encoded as unconditional
9386 branches. */
9387 cond = COND_ALWAYS;
9389 else
9390 cond = inst.cond;
9392 if (cond != COND_ALWAYS)
9393 opcode = T_MNEM_bcond;
9394 else
9395 opcode = inst.instruction;
9397 if (unified_syntax && inst.size_req == 4)
9399 inst.instruction = THUMB_OP32(opcode);
9400 if (cond == COND_ALWAYS)
9401 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH25;
9402 else
9404 gas_assert (cond != 0xF);
9405 inst.instruction |= cond << 22;
9406 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH20;
9409 else
9411 inst.instruction = THUMB_OP16(opcode);
9412 if (cond == COND_ALWAYS)
9413 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH12;
9414 else
9416 inst.instruction |= cond << 8;
9417 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH9;
9419 /* Allow section relaxation. */
9420 if (unified_syntax && inst.size_req != 2)
9421 inst.relax = opcode;
9424 inst.reloc.pc_rel = 1;
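/* To illustrate the reloc selection in do_t_branch above: a plain
   "b label" gets the 16-bit encoding with BFD_RELOC_THUMB_PCREL_BRANCH12
   and "beq label" gets BRANCH9 (both relaxable under unified syntax),
   while forcing the wide form with "b.w"/"beq.w" selects BRANCH25 and
   BRANCH20 respectively.  */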
9427 static void
9428 do_t_bkpt (void)
9430 constraint (inst.cond != COND_ALWAYS,
9431 _("instruction is always unconditional"));
9432 if (inst.operands[0].present)
9434 constraint (inst.operands[0].imm > 255,
9435 _("immediate value out of range"));
9436 inst.instruction |= inst.operands[0].imm;
9437 set_it_insn_type (NEUTRAL_IT_INSN);
9441 static void
9442 do_t_branch23 (void)
9444 set_it_insn_type_last ();
9445 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
9446 inst.reloc.pc_rel = 1;
9448 #if defined(OBJ_COFF)
9449 /* If the destination of the branch is a defined symbol which does not have
9450 the THUMB_FUNC attribute, then we must be calling a function which has
9451 the (interfacearm) attribute. We look for the Thumb entry point to that
9452 function and change the branch to refer to that function instead. */
9453 if ( inst.reloc.exp.X_op == O_symbol
9454 && inst.reloc.exp.X_add_symbol != NULL
9455 && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
9456 && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
9457 inst.reloc.exp.X_add_symbol =
9458 find_real_start (inst.reloc.exp.X_add_symbol);
9459 #endif
9462 static void
9463 do_t_bx (void)
9465 set_it_insn_type_last ();
9466 inst.instruction |= inst.operands[0].reg << 3;
9467 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
9468 should cause the alignment to be checked once it is known. This is
9469 because BX PC only works if the instruction is word aligned. */
9472 static void
9473 do_t_bxj (void)
9475 int Rm;
9477 set_it_insn_type_last ();
9478 Rm = inst.operands[0].reg;
9479 reject_bad_reg (Rm);
9480 inst.instruction |= Rm << 16;
9483 static void
9484 do_t_clz (void)
9486 unsigned Rd;
9487 unsigned Rm;
9489 Rd = inst.operands[0].reg;
9490 Rm = inst.operands[1].reg;
9492 reject_bad_reg (Rd);
9493 reject_bad_reg (Rm);
9495 inst.instruction |= Rd << 8;
9496 inst.instruction |= Rm << 16;
9497 inst.instruction |= Rm;
9500 static void
9501 do_t_cps (void)
9503 set_it_insn_type (OUTSIDE_IT_INSN);
9504 inst.instruction |= inst.operands[0].imm;
9507 static void
9508 do_t_cpsi (void)
9510 set_it_insn_type (OUTSIDE_IT_INSN);
9511 if (unified_syntax
9512 && (inst.operands[1].present || inst.size_req == 4)
9513 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
9515 unsigned int imod = (inst.instruction & 0x0030) >> 4;
9516 inst.instruction = 0xf3af8000;
9517 inst.instruction |= imod << 9;
9518 inst.instruction |= inst.operands[0].imm << 5;
9519 if (inst.operands[1].present)
9520 inst.instruction |= 0x100 | inst.operands[1].imm;
9522 else
9524 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
9525 && (inst.operands[0].imm & 4),
9526 _("selected processor does not support 'A' form "
9527 "of this instruction"));
9528 constraint (inst.operands[1].present || inst.size_req == 4,
9529 _("Thumb does not support the 2-argument "
9530 "form of this instruction"));
9531 inst.instruction |= inst.operands[0].imm;
9535 /* THUMB CPY instruction (argument parse). */
9537 static void
9538 do_t_cpy (void)
9540 if (inst.size_req == 4)
9542 inst.instruction = THUMB_OP32 (T_MNEM_mov);
9543 inst.instruction |= inst.operands[0].reg << 8;
9544 inst.instruction |= inst.operands[1].reg;
9546 else
9548 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
9549 inst.instruction |= (inst.operands[0].reg & 0x7);
9550 inst.instruction |= inst.operands[1].reg << 3;
9554 static void
9555 do_t_cbz (void)
9557 set_it_insn_type (OUTSIDE_IT_INSN);
9558 constraint (inst.operands[0].reg > 7, BAD_HIREG);
9559 inst.instruction |= inst.operands[0].reg;
9560 inst.reloc.pc_rel = 1;
9561 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
9564 static void
9565 do_t_dbg (void)
9567 inst.instruction |= inst.operands[0].imm;
9570 static void
9571 do_t_div (void)
9573 unsigned Rd, Rn, Rm;
9575 Rd = inst.operands[0].reg;
9576 Rn = (inst.operands[1].present
9577 ? inst.operands[1].reg : Rd);
9578 Rm = inst.operands[2].reg;
9580 reject_bad_reg (Rd);
9581 reject_bad_reg (Rn);
9582 reject_bad_reg (Rm);
9584 inst.instruction |= Rd << 8;
9585 inst.instruction |= Rn << 16;
9586 inst.instruction |= Rm;
9589 static void
9590 do_t_hint (void)
9592 if (unified_syntax && inst.size_req == 4)
9593 inst.instruction = THUMB_OP32 (inst.instruction);
9594 else
9595 inst.instruction = THUMB_OP16 (inst.instruction);
9598 static void
9599 do_t_it (void)
9601 unsigned int cond = inst.operands[0].imm;
9603 set_it_insn_type (IT_INSN);
9604 now_it.mask = (inst.instruction & 0xf) | 0x10;
9605 now_it.cc = cond;
9607 /* If the condition is a negative condition, invert the mask. */
9608 if ((cond & 0x1) == 0x0)
9610 unsigned int mask = inst.instruction & 0x000f;
9612 if ((mask & 0x7) == 0)
9613 /* no conversion needed */;
9614 else if ((mask & 0x3) == 0)
9615 mask ^= 0x8;
9616 else if ((mask & 0x1) == 0)
9617 mask ^= 0xC;
9618 else
9619 mask ^= 0xE;
9621 inst.instruction &= 0xfff0;
9622 inst.instruction |= mask;
9625 inst.instruction |= cond << 4;
9628 /* Helper function used for both push/pop and ldm/stm. */
9629 static void
9630 encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
9632 bfd_boolean load;
9634 load = (inst.instruction & (1 << 20)) != 0;
9636 if (mask & (1 << 13))
9637 inst.error = _("SP not allowed in register list");
9638 if (load)
9640 if (mask & (1 << 15))
9642 if (mask & (1 << 14))
9643 inst.error = _("LR and PC should not both be in register list");
9644 else
9645 set_it_insn_type_last ();
9648 if ((mask & (1 << base)) != 0
9649 && writeback)
9650 as_warn (_("base register should not be in register list "
9651 "when written back"));
9653 else
9655 if (mask & (1 << 15))
9656 inst.error = _("PC not allowed in register list");
9658 if (mask & (1 << base))
9659 as_warn (_("value stored for r%d is UNPREDICTABLE"), base);
9662 if ((mask & (mask - 1)) == 0)
9664 /* Single register transfers implemented as str/ldr. */
9665 if (writeback)
9667 if (inst.instruction & (1 << 23))
9668 inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
9669 else
9670 inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
9672 else
9674 if (inst.instruction & (1 << 23))
9675 inst.instruction = 0x00800000; /* ia -> [base] */
9676 else
9677 inst.instruction = 0x00000c04; /* db -> [base, #-4] */
9680 inst.instruction |= 0xf8400000;
9681 if (load)
9682 inst.instruction |= 0x00100000;
9684 mask = ffs (mask) - 1;
9685 mask <<= 12;
9687 else if (writeback)
9688 inst.instruction |= WRITE_BACK;
9690 inst.instruction |= mask;
9691 inst.instruction |= base << 16;
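/* For example, the single-register path above turns "ldmia r0!, {r1}"
   into the equivalent "ldr r1, [r0], #4" encoding and "stmdb r0!, {r1}"
   into "str r1, [r0, #-4]!", while multi-register lists keep the real
   LDM/STM encoding with WRITE_BACK set when requested.  */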
9694 static void
9695 do_t_ldmstm (void)
9697 /* This really doesn't seem worth it. */
9698 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
9699 _("expression too complex"));
9700 constraint (inst.operands[1].writeback,
9701 _("Thumb load/store multiple does not support {reglist}^"));
9703 if (unified_syntax)
9705 bfd_boolean narrow;
9706 unsigned mask;
9708 narrow = FALSE;
9709 /* See if we can use a 16-bit instruction. */
9710 if (inst.instruction < 0xffff /* not ldmdb/stmdb */
9711 && inst.size_req != 4
9712 && !(inst.operands[1].imm & ~0xff))
9714 mask = 1 << inst.operands[0].reg;
9716 if (inst.operands[0].reg <= 7
9717 && (inst.instruction == T_MNEM_stmia
9718 ? inst.operands[0].writeback
9719 : (inst.operands[0].writeback
9720 == !(inst.operands[1].imm & mask))))
9722 if (inst.instruction == T_MNEM_stmia
9723 && (inst.operands[1].imm & mask)
9724 && (inst.operands[1].imm & (mask - 1)))
9725 as_warn (_("value stored for r%d is UNPREDICTABLE"),
9726 inst.operands[0].reg);
9728 inst.instruction = THUMB_OP16 (inst.instruction);
9729 inst.instruction |= inst.operands[0].reg << 8;
9730 inst.instruction |= inst.operands[1].imm;
9731 narrow = TRUE;
9733 else if (inst.operands[0].reg == REG_SP
9734 && inst.operands[0].writeback)
9736 inst.instruction = THUMB_OP16 (inst.instruction == T_MNEM_stmia
9737 ? T_MNEM_push : T_MNEM_pop);
9738 inst.instruction |= inst.operands[1].imm;
9739 narrow = TRUE;
9743 if (!narrow)
9745 if (inst.instruction < 0xffff)
9746 inst.instruction = THUMB_OP32 (inst.instruction);
9748 encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
9749 inst.operands[0].writeback);
9752 else
9754 constraint (inst.operands[0].reg > 7
9755 || (inst.operands[1].imm & ~0xff), BAD_HIREG);
9756 constraint (inst.instruction != T_MNEM_ldmia
9757 && inst.instruction != T_MNEM_stmia,
9758 _("Thumb-2 instruction only valid in unified syntax"));
9759 if (inst.instruction == T_MNEM_stmia)
9761 if (!inst.operands[0].writeback)
9762 as_warn (_("this instruction will write back the base register"));
9763 if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
9764 && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
9765 as_warn (_("value stored for r%d is UNPREDICTABLE"),
9766 inst.operands[0].reg);
9768 else
9770 if (!inst.operands[0].writeback
9771 && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
9772 as_warn (_("this instruction will write back the base register"));
9773 else if (inst.operands[0].writeback
9774 && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
9775 as_warn (_("this instruction will not write back the base register"));
9778 inst.instruction = THUMB_OP16 (inst.instruction);
9779 inst.instruction |= inst.operands[0].reg << 8;
9780 inst.instruction |= inst.operands[1].imm;
9784 static void
9785 do_t_ldrex (void)
9787 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
9788 || inst.operands[1].postind || inst.operands[1].writeback
9789 || inst.operands[1].immisreg || inst.operands[1].shifted
9790 || inst.operands[1].negative,
9791 BAD_ADDR_MODE);
9793 inst.instruction |= inst.operands[0].reg << 12;
9794 inst.instruction |= inst.operands[1].reg << 16;
9795 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
9798 static void
9799 do_t_ldrexd (void)
9801 if (!inst.operands[1].present)
9803 constraint (inst.operands[0].reg == REG_LR,
9804 _("r14 not allowed as first register "
9805 "when second register is omitted"));
9806 inst.operands[1].reg = inst.operands[0].reg + 1;
9808 constraint (inst.operands[0].reg == inst.operands[1].reg,
9809 BAD_OVERLAP);
9811 inst.instruction |= inst.operands[0].reg << 12;
9812 inst.instruction |= inst.operands[1].reg << 8;
9813 inst.instruction |= inst.operands[2].reg << 16;
9816 static void
9817 do_t_ldst (void)
9819 unsigned long opcode;
9820 int Rn;
9822 if (inst.operands[0].isreg
9823 && !inst.operands[0].preind
9824 && inst.operands[0].reg == REG_PC)
9825 set_it_insn_type_last ();
9827 opcode = inst.instruction;
9828 if (unified_syntax)
9830 if (!inst.operands[1].isreg)
9832 if (opcode <= 0xffff)
9833 inst.instruction = THUMB_OP32 (opcode);
9834 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
9835 return;
9837 if (inst.operands[1].isreg
9838 && !inst.operands[1].writeback
9839 && !inst.operands[1].shifted && !inst.operands[1].postind
9840 && !inst.operands[1].negative && inst.operands[0].reg <= 7
9841 && opcode <= 0xffff
9842 && inst.size_req != 4)
9844 /* Insn may have a 16-bit form. */
9845 Rn = inst.operands[1].reg;
9846 if (inst.operands[1].immisreg)
9848 inst.instruction = THUMB_OP16 (opcode);
9849 /* [Rn, Rik] */
9850 if (Rn <= 7 && inst.operands[1].imm <= 7)
9851 goto op16;
9853 else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
9854 && opcode != T_MNEM_ldrsb)
9855 || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
9856 || (Rn == REG_SP && opcode == T_MNEM_str))
9858 /* [Rn, #const] */
9859 if (Rn > 7)
9861 if (Rn == REG_PC)
9863 if (inst.reloc.pc_rel)
9864 opcode = T_MNEM_ldr_pc2;
9865 else
9866 opcode = T_MNEM_ldr_pc;
9868 else
9870 if (opcode == T_MNEM_ldr)
9871 opcode = T_MNEM_ldr_sp;
9872 else
9873 opcode = T_MNEM_str_sp;
9875 inst.instruction = inst.operands[0].reg << 8;
9877 else
9879 inst.instruction = inst.operands[0].reg;
9880 inst.instruction |= inst.operands[1].reg << 3;
9882 inst.instruction |= THUMB_OP16 (opcode);
9883 if (inst.size_req == 2)
9884 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
9885 else
9886 inst.relax = opcode;
9887 return;
9890 /* Definitely a 32-bit variant. */
9891 inst.instruction = THUMB_OP32 (opcode);
9892 inst.instruction |= inst.operands[0].reg << 12;
9893 encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
9894 return;
9897 constraint (inst.operands[0].reg > 7, BAD_HIREG);
9899 if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
9901 /* Only [Rn,Rm] is acceptable. */
9902 constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
9903 constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
9904 || inst.operands[1].postind || inst.operands[1].shifted
9905 || inst.operands[1].negative,
9906 _("Thumb does not support this addressing mode"));
9907 inst.instruction = THUMB_OP16 (inst.instruction);
9908 goto op16;
9911 inst.instruction = THUMB_OP16 (inst.instruction);
9912 if (!inst.operands[1].isreg)
9913 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
9914 return;
9916 constraint (!inst.operands[1].preind
9917 || inst.operands[1].shifted
9918 || inst.operands[1].writeback,
9919 _("Thumb does not support this addressing mode"));
9920 if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
9922 constraint (inst.instruction & 0x0600,
9923 _("byte or halfword not valid for base register"));
9924 constraint (inst.operands[1].reg == REG_PC
9925 && !(inst.instruction & THUMB_LOAD_BIT),
9926 _("r15 based store not allowed"));
9927 constraint (inst.operands[1].immisreg,
9928 _("invalid base register for register offset"));
9930 if (inst.operands[1].reg == REG_PC)
9931 inst.instruction = T_OPCODE_LDR_PC;
9932 else if (inst.instruction & THUMB_LOAD_BIT)
9933 inst.instruction = T_OPCODE_LDR_SP;
9934 else
9935 inst.instruction = T_OPCODE_STR_SP;
9937 inst.instruction |= inst.operands[0].reg << 8;
9938 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
9939 return;
9942 constraint (inst.operands[1].reg > 7, BAD_HIREG);
9943 if (!inst.operands[1].immisreg)
9945 /* Immediate offset. */
9946 inst.instruction |= inst.operands[0].reg;
9947 inst.instruction |= inst.operands[1].reg << 3;
9948 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
9949 return;
9952 /* Register offset. */
9953 constraint (inst.operands[1].imm > 7, BAD_HIREG);
9954 constraint (inst.operands[1].negative,
9955 _("Thumb does not support this addressing mode"));
9957 op16:
9958 switch (inst.instruction)
9960 case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
9961 case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
9962 case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
9963 case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
9964 case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
9965 case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
9966 case 0x5600 /* ldrsb */:
9967 case 0x5e00 /* ldrsh */: break;
9968 default: abort ();
9971 inst.instruction |= inst.operands[0].reg;
9972 inst.instruction |= inst.operands[1].reg << 3;
9973 inst.instruction |= inst.operands[1].imm << 6;
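/* A few examples of the 16-bit selection in do_t_ldst above:
   "ldr r0, [r1, #4]" and "ldr r0, [r1, r2]" use the low-register
   immediate and register-offset forms, "ldr r0, [sp, #8]" uses the
   SP-relative form (T_MNEM_ldr_sp), and "ldr r0, =const" goes through
   move_or_literal_pool, which may turn it into a move or a PC-relative
   literal-pool load.  */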
9976 static void
9977 do_t_ldstd (void)
9979 if (!inst.operands[1].present)
9981 inst.operands[1].reg = inst.operands[0].reg + 1;
9982 constraint (inst.operands[0].reg == REG_LR,
9983 _("r14 not allowed here"));
9985 inst.instruction |= inst.operands[0].reg << 12;
9986 inst.instruction |= inst.operands[1].reg << 8;
9987 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
9990 static void
9991 do_t_ldstt (void)
9993 inst.instruction |= inst.operands[0].reg << 12;
9994 encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
9997 static void
9998 do_t_mla (void)
10000 unsigned Rd, Rn, Rm, Ra;
10002 Rd = inst.operands[0].reg;
10003 Rn = inst.operands[1].reg;
10004 Rm = inst.operands[2].reg;
10005 Ra = inst.operands[3].reg;
10007 reject_bad_reg (Rd);
10008 reject_bad_reg (Rn);
10009 reject_bad_reg (Rm);
10010 reject_bad_reg (Ra);
10012 inst.instruction |= Rd << 8;
10013 inst.instruction |= Rn << 16;
10014 inst.instruction |= Rm;
10015 inst.instruction |= Ra << 12;
10018 static void
10019 do_t_mlal (void)
10021 unsigned RdLo, RdHi, Rn, Rm;
10023 RdLo = inst.operands[0].reg;
10024 RdHi = inst.operands[1].reg;
10025 Rn = inst.operands[2].reg;
10026 Rm = inst.operands[3].reg;
10028 reject_bad_reg (RdLo);
10029 reject_bad_reg (RdHi);
10030 reject_bad_reg (Rn);
10031 reject_bad_reg (Rm);
10033 inst.instruction |= RdLo << 12;
10034 inst.instruction |= RdHi << 8;
10035 inst.instruction |= Rn << 16;
10036 inst.instruction |= Rm;
10039 static void
10040 do_t_mov_cmp (void)
10042 unsigned Rn, Rm;
10044 Rn = inst.operands[0].reg;
10045 Rm = inst.operands[1].reg;
10047 if (Rn == REG_PC)
10048 set_it_insn_type_last ();
10050 if (unified_syntax)
10052 int r0off = (inst.instruction == T_MNEM_mov
10053 || inst.instruction == T_MNEM_movs) ? 8 : 16;
10054 unsigned long opcode;
10055 bfd_boolean narrow;
10056 bfd_boolean low_regs;
10058 low_regs = (Rn <= 7 && Rm <= 7);
10059 opcode = inst.instruction;
10060 if (in_it_block ())
10061 narrow = opcode != T_MNEM_movs;
10062 else
10063 narrow = opcode != T_MNEM_movs || low_regs;
10064 if (inst.size_req == 4
10065 || inst.operands[1].shifted)
10066 narrow = FALSE;
10068 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
10069 if (opcode == T_MNEM_movs && inst.operands[1].isreg
10070 && !inst.operands[1].shifted
10071 && Rn == REG_PC
10072 && Rm == REG_LR)
10074 inst.instruction = T2_SUBS_PC_LR;
10075 return;
10078 if (opcode == T_MNEM_cmp)
10080 constraint (Rn == REG_PC, BAD_PC);
10081 if (narrow)
10083 /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
10084 but valid. */
10085 warn_deprecated_sp (Rm);
10086 /* R15 was documented as a valid choice for Rm in ARMv6,
10087 but as UNPREDICTABLE in ARMv7. ARM's proprietary
10088 tools reject R15, so we do too. */
10089 constraint (Rm == REG_PC, BAD_PC);
10091 else
10092 reject_bad_reg (Rm);
10094 else if (opcode == T_MNEM_mov
10095 || opcode == T_MNEM_movs)
10097 if (inst.operands[1].isreg)
10099 if (opcode == T_MNEM_movs)
10101 reject_bad_reg (Rn);
10102 reject_bad_reg (Rm);
10104 else if ((Rn == REG_SP || Rn == REG_PC)
10105 && (Rm == REG_SP || Rm == REG_PC))
10106 reject_bad_reg (Rm);
10108 else
10109 reject_bad_reg (Rn);
10112 if (!inst.operands[1].isreg)
10114 /* Immediate operand. */
10115 if (!in_it_block () && opcode == T_MNEM_mov)
10116 narrow = 0;
10117 if (low_regs && narrow)
10119 inst.instruction = THUMB_OP16 (opcode);
10120 inst.instruction |= Rn << 8;
10121 if (inst.size_req == 2)
10122 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
10123 else
10124 inst.relax = opcode;
10126 else
10128 inst.instruction = THUMB_OP32 (inst.instruction);
10129 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
10130 inst.instruction |= Rn << r0off;
10131 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
10134 else if (inst.operands[1].shifted && inst.operands[1].immisreg
10135 && (inst.instruction == T_MNEM_mov
10136 || inst.instruction == T_MNEM_movs))
10138 /* Register shifts are encoded as separate shift instructions. */
10139 bfd_boolean flags = (inst.instruction == T_MNEM_movs);
10141 if (in_it_block ())
10142 narrow = !flags;
10143 else
10144 narrow = flags;
10146 if (inst.size_req == 4)
10147 narrow = FALSE;
10149 if (!low_regs || inst.operands[1].imm > 7)
10150 narrow = FALSE;
10152 if (Rn != Rm)
10153 narrow = FALSE;
10155 switch (inst.operands[1].shift_kind)
10157 case SHIFT_LSL:
10158 opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
10159 break;
10160 case SHIFT_ASR:
10161 opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
10162 break;
10163 case SHIFT_LSR:
10164 opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
10165 break;
10166 case SHIFT_ROR:
10167 opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
10168 break;
10169 default:
10170 abort ();
10173 inst.instruction = opcode;
10174 if (narrow)
10176 inst.instruction |= Rn;
10177 inst.instruction |= inst.operands[1].imm << 3;
10179 else
10181 if (flags)
10182 inst.instruction |= CONDS_BIT;
10184 inst.instruction |= Rn << 8;
10185 inst.instruction |= Rm << 16;
10186 inst.instruction |= inst.operands[1].imm;
10189 else if (!narrow)
10191 /* Some MOV instructions with immediate shift have narrow variants.
10192 Register shifts are handled above. */
10193 if (low_regs && inst.operands[1].shifted
10194 && (inst.instruction == T_MNEM_mov
10195 || inst.instruction == T_MNEM_movs))
10197 if (in_it_block ())
10198 narrow = (inst.instruction == T_MNEM_mov);
10199 else
10200 narrow = (inst.instruction == T_MNEM_movs);
10203 if (narrow)
10205 switch (inst.operands[1].shift_kind)
10207 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
10208 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
10209 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
10210 default: narrow = FALSE; break;
10214 if (narrow)
10216 inst.instruction |= Rn;
10217 inst.instruction |= Rm << 3;
10218 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
10220 else
10222 inst.instruction = THUMB_OP32 (inst.instruction);
10223 inst.instruction |= Rn << r0off;
10224 encode_thumb32_shifted_operand (1);
10227 else
10228 switch (inst.instruction)
10230 case T_MNEM_mov:
10231 inst.instruction = T_OPCODE_MOV_HR;
10232 inst.instruction |= (Rn & 0x8) << 4;
10233 inst.instruction |= (Rn & 0x7);
10234 inst.instruction |= Rm << 3;
10235 break;
10237 case T_MNEM_movs:
10238 /* We know we have low registers at this point.
10239 Generate ADD Rd, Rs, #0. */
10240 inst.instruction = T_OPCODE_ADD_I3;
10241 inst.instruction |= Rn;
10242 inst.instruction |= Rm << 3;
10243 break;
10245 case T_MNEM_cmp:
10246 if (low_regs)
10248 inst.instruction = T_OPCODE_CMP_LR;
10249 inst.instruction |= Rn;
10250 inst.instruction |= Rm << 3;
10252 else
10254 inst.instruction = T_OPCODE_CMP_HR;
10255 inst.instruction |= (Rn & 0x8) << 4;
10256 inst.instruction |= (Rn & 0x7);
10257 inst.instruction |= Rm << 3;
10259 break;
10261 return;
10264 inst.instruction = THUMB_OP16 (inst.instruction);
10265 if (inst.operands[1].isreg)
10267 if (Rn < 8 && Rm < 8)
10269 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
10270 since a MOV instruction produces unpredictable results. */
10271 if (inst.instruction == T_OPCODE_MOV_I8)
10272 inst.instruction = T_OPCODE_ADD_I3;
10273 else
10274 inst.instruction = T_OPCODE_CMP_LR;
10276 inst.instruction |= Rn;
10277 inst.instruction |= Rm << 3;
10279 else
10281 if (inst.instruction == T_OPCODE_MOV_I8)
10282 inst.instruction = T_OPCODE_MOV_HR;
10283 else
10284 inst.instruction = T_OPCODE_CMP_HR;
10285 do_t_cpy ();
10288 else
10290 constraint (Rn > 7,
10291 _("only lo regs allowed with immediate"));
10292 inst.instruction |= Rn << 8;
10293 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
10297 static void
10298 do_t_mov16 (void)
10300 unsigned Rd;
10301 bfd_vma imm;
10302 bfd_boolean top;
10304 top = (inst.instruction & 0x00800000) != 0;
10305 if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
10307 constraint (top, _(":lower16: not allowed this instruction"));
10308 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
10310 else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
10312 constraint (!top, _(":upper16: not allowed this instruction"));
10313 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
10316 Rd = inst.operands[0].reg;
10317 reject_bad_reg (Rd);
10319 inst.instruction |= Rd << 8;
10320 if (inst.reloc.type == BFD_RELOC_UNUSED)
10322 imm = inst.reloc.exp.X_add_number;
10323 inst.instruction |= (imm & 0xf000) << 4;
10324 inst.instruction |= (imm & 0x0800) << 15;
10325 inst.instruction |= (imm & 0x0700) << 4;
10326 inst.instruction |= (imm & 0x00ff);
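/* Worked example of the immediate split above: "movw r0, #0x1234"
   scatters the constant as imm4 = 0x1 (bits 19:16), i = 0 (bit 26),
   imm3 = 0x2 (bits 14:12) and imm8 = 0x34 (bits 7:0).  */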
10330 static void
10331 do_t_mvn_tst (void)
10333 unsigned Rn, Rm;
10335 Rn = inst.operands[0].reg;
10336 Rm = inst.operands[1].reg;
10338 if (inst.instruction == T_MNEM_cmp
10339 || inst.instruction == T_MNEM_cmn)
10340 constraint (Rn == REG_PC, BAD_PC);
10341 else
10342 reject_bad_reg (Rn);
10343 reject_bad_reg (Rm);
10345 if (unified_syntax)
10347 int r0off = (inst.instruction == T_MNEM_mvn
10348 || inst.instruction == T_MNEM_mvns) ? 8 : 16;
10349 bfd_boolean narrow;
10351 if (inst.size_req == 4
10352 || inst.instruction > 0xffff
10353 || inst.operands[1].shifted
10354 || Rn > 7 || Rm > 7)
10355 narrow = FALSE;
10356 else if (inst.instruction == T_MNEM_cmn)
10357 narrow = TRUE;
10358 else if (THUMB_SETS_FLAGS (inst.instruction))
10359 narrow = !in_it_block ();
10360 else
10361 narrow = in_it_block ();
10363 if (!inst.operands[1].isreg)
10365 /* For an immediate, we always generate a 32-bit opcode;
10366 section relaxation will shrink it later if possible. */
10367 if (inst.instruction < 0xffff)
10368 inst.instruction = THUMB_OP32 (inst.instruction);
10369 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
10370 inst.instruction |= Rn << r0off;
10371 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
10373 else
10375 /* See if we can do this with a 16-bit instruction. */
10376 if (narrow)
10378 inst.instruction = THUMB_OP16 (inst.instruction);
10379 inst.instruction |= Rn;
10380 inst.instruction |= Rm << 3;
10382 else
10384 constraint (inst.operands[1].shifted
10385 && inst.operands[1].immisreg,
10386 _("shift must be constant"));
10387 if (inst.instruction < 0xffff)
10388 inst.instruction = THUMB_OP32 (inst.instruction);
10389 inst.instruction |= Rn << r0off;
10390 encode_thumb32_shifted_operand (1);
10394 else
10396 constraint (inst.instruction > 0xffff
10397 || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
10398 constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
10399 _("unshifted register required"));
10400 constraint (Rn > 7 || Rm > 7,
10401 BAD_HIREG);
10403 inst.instruction = THUMB_OP16 (inst.instruction);
10404 inst.instruction |= Rn;
10405 inst.instruction |= Rm << 3;
10409 static void
10410 do_t_mrs (void)
10412 unsigned Rd;
10413 int flags;
10415 if (do_vfp_nsyn_mrs () == SUCCESS)
10416 return;
10418 flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
10419 if (flags == 0)
10421 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m),
10422 _("selected processor does not support "
10423 "requested special purpose register"));
10425 else
10427 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
10428 _("selected processor does not support "
10429 "requested special purpose register"));
10430 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
10431 constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
10432 _("'CPSR' or 'SPSR' expected"));
10435 Rd = inst.operands[0].reg;
10436 reject_bad_reg (Rd);
10438 inst.instruction |= Rd << 8;
10439 inst.instruction |= (flags & SPSR_BIT) >> 2;
10440 inst.instruction |= inst.operands[1].imm & 0xff;
10443 static void
10444 do_t_msr (void)
10446 int flags;
10447 unsigned Rn;
10449 if (do_vfp_nsyn_msr () == SUCCESS)
10450 return;
10452 constraint (!inst.operands[1].isreg,
10453 _("Thumb encoding does not support an immediate here"));
10454 flags = inst.operands[0].imm;
10455 if (flags & ~0xff)
10457 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
10458 _("selected processor does not support "
10459 "requested special purpose register"));
10461 else
10463 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m),
10464 _("selected processor does not support "
10465 "requested special purpose register"));
10466 flags |= PSR_f;
10469 Rn = inst.operands[1].reg;
10470 reject_bad_reg (Rn);
10472 inst.instruction |= (flags & SPSR_BIT) >> 2;
10473 inst.instruction |= (flags & ~SPSR_BIT) >> 8;
10474 inst.instruction |= (flags & 0xff);
10475 inst.instruction |= Rn << 16;
10478 static void
10479 do_t_mul (void)
10481 bfd_boolean narrow;
10482 unsigned Rd, Rn, Rm;
10484 if (!inst.operands[2].present)
10485 inst.operands[2].reg = inst.operands[0].reg;
10487 Rd = inst.operands[0].reg;
10488 Rn = inst.operands[1].reg;
10489 Rm = inst.operands[2].reg;
10491 if (unified_syntax)
10493 if (inst.size_req == 4
10494 || (Rd != Rn
10495 && Rd != Rm)
10496 || Rn > 7
10497 || Rm > 7)
10498 narrow = FALSE;
10499 else if (inst.instruction == T_MNEM_muls)
10500 narrow = !in_it_block ();
10501 else
10502 narrow = in_it_block ();
10504 else
10506 constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
10507 constraint (Rn > 7 || Rm > 7,
10508 BAD_HIREG);
10509 narrow = TRUE;
10512 if (narrow)
10514 /* 16-bit MULS/Conditional MUL. */
10515 inst.instruction = THUMB_OP16 (inst.instruction);
10516 inst.instruction |= Rd;
10518 if (Rd == Rn)
10519 inst.instruction |= Rm << 3;
10520 else if (Rd == Rm)
10521 inst.instruction |= Rn << 3;
10522 else
10523 constraint (1, _("dest must overlap one source register"));
10525 else
10527 constraint (inst.instruction != T_MNEM_mul,
10528 _("Thumb-2 MUL must not set flags"));
10529 /* 32-bit MUL. */
10530 inst.instruction = THUMB_OP32 (inst.instruction);
10531 inst.instruction |= Rd << 8;
10532 inst.instruction |= Rn << 16;
10533 inst.instruction |= Rm << 0;
10535 reject_bad_reg (Rd);
10536 reject_bad_reg (Rn);
10537 reject_bad_reg (Rm);
10541 static void
10542 do_t_mull (void)
10544 unsigned RdLo, RdHi, Rn, Rm;
10546 RdLo = inst.operands[0].reg;
10547 RdHi = inst.operands[1].reg;
10548 Rn = inst.operands[2].reg;
10549 Rm = inst.operands[3].reg;
10551 reject_bad_reg (RdLo);
10552 reject_bad_reg (RdHi);
10553 reject_bad_reg (Rn);
10554 reject_bad_reg (Rm);
10556 inst.instruction |= RdLo << 12;
10557 inst.instruction |= RdHi << 8;
10558 inst.instruction |= Rn << 16;
10559 inst.instruction |= Rm;
10561 if (RdLo == RdHi)
10562 as_tsktsk (_("rdhi and rdlo must be different"));
10565 static void
10566 do_t_nop (void)
10568 set_it_insn_type (NEUTRAL_IT_INSN);
10570 if (unified_syntax)
10572 if (inst.size_req == 4 || inst.operands[0].imm > 15)
10574 inst.instruction = THUMB_OP32 (inst.instruction);
10575 inst.instruction |= inst.operands[0].imm;
10577 else
10579 /* PR9722: Check for Thumb2 availability before
10580 generating a thumb2 nop instruction. */
10581 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2))
10583 inst.instruction = THUMB_OP16 (inst.instruction);
10584 inst.instruction |= inst.operands[0].imm << 4;
10586 else
10587 inst.instruction = 0x46c0;
10590 else
10592 constraint (inst.operands[0].present,
10593 _("Thumb does not support NOP with hints"));
10594 inst.instruction = 0x46c0;
10598 static void
10599 do_t_neg (void)
10601 if (unified_syntax)
10603 bfd_boolean narrow;
10605 if (THUMB_SETS_FLAGS (inst.instruction))
10606 narrow = !in_it_block ();
10607 else
10608 narrow = in_it_block ();
10609 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
10610 narrow = FALSE;
10611 if (inst.size_req == 4)
10612 narrow = FALSE;
10614 if (!narrow)
10616 inst.instruction = THUMB_OP32 (inst.instruction);
10617 inst.instruction |= inst.operands[0].reg << 8;
10618 inst.instruction |= inst.operands[1].reg << 16;
10620 else
10622 inst.instruction = THUMB_OP16 (inst.instruction);
10623 inst.instruction |= inst.operands[0].reg;
10624 inst.instruction |= inst.operands[1].reg << 3;
10627 else
10629 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
10630 BAD_HIREG);
10631 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
10633 inst.instruction = THUMB_OP16 (inst.instruction);
10634 inst.instruction |= inst.operands[0].reg;
10635 inst.instruction |= inst.operands[1].reg << 3;
10639 static void
10640 do_t_orn (void)
10642 unsigned Rd, Rn;
10644 Rd = inst.operands[0].reg;
10645 Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;
10647 reject_bad_reg (Rd);
10648 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
10649 reject_bad_reg (Rn);
10651 inst.instruction |= Rd << 8;
10652 inst.instruction |= Rn << 16;
10654 if (!inst.operands[2].isreg)
10656 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
10657 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
10659 else
10661 unsigned Rm;
10663 Rm = inst.operands[2].reg;
10664 reject_bad_reg (Rm);
10666 constraint (inst.operands[2].shifted
10667 && inst.operands[2].immisreg,
10668 _("shift must be constant"));
10669 encode_thumb32_shifted_operand (2);
10673 static void
10674 do_t_pkhbt (void)
10676 unsigned Rd, Rn, Rm;
10678 Rd = inst.operands[0].reg;
10679 Rn = inst.operands[1].reg;
10680 Rm = inst.operands[2].reg;
10682 reject_bad_reg (Rd);
10683 reject_bad_reg (Rn);
10684 reject_bad_reg (Rm);
10686 inst.instruction |= Rd << 8;
10687 inst.instruction |= Rn << 16;
10688 inst.instruction |= Rm;
10689 if (inst.operands[3].present)
10691 unsigned int val = inst.reloc.exp.X_add_number;
10692 constraint (inst.reloc.exp.X_op != O_constant,
10693 _("expression too complex"));
10694 inst.instruction |= (val & 0x1c) << 10;
10695 inst.instruction |= (val & 0x03) << 6;
10699 static void
10700 do_t_pkhtb (void)
10702 if (!inst.operands[3].present)
10704 unsigned Rtmp;
10706 inst.instruction &= ~0x00000020;
10708 /* PR 10168. Swap the Rm and Rn registers. */
10709 Rtmp = inst.operands[1].reg;
10710 inst.operands[1].reg = inst.operands[2].reg;
10711 inst.operands[2].reg = Rtmp;
10713 do_t_pkhbt ();
10716 static void
10717 do_t_pld (void)
10719 if (inst.operands[0].immisreg)
10720 reject_bad_reg (inst.operands[0].imm);
10722 encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
10725 static void
10726 do_t_push_pop (void)
10728 unsigned mask;
10730 constraint (inst.operands[0].writeback,
10731 _("push/pop do not support {reglist}^"));
10732 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
10733 _("expression too complex"));
10735 mask = inst.operands[0].imm;
10736 if ((mask & ~0xff) == 0)
10737 inst.instruction = THUMB_OP16 (inst.instruction) | mask;
10738 else if ((inst.instruction == T_MNEM_push
10739 && (mask & ~0xff) == 1 << REG_LR)
10740 || (inst.instruction == T_MNEM_pop
10741 && (mask & ~0xff) == 1 << REG_PC))
10743 inst.instruction = THUMB_OP16 (inst.instruction);
10744 inst.instruction |= THUMB_PP_PC_LR;
10745 inst.instruction |= mask & 0xff;
10747 else if (unified_syntax)
10749 inst.instruction = THUMB_OP32 (inst.instruction);
10750 encode_thumb2_ldmstm (13, mask, TRUE);
10752 else
10754 inst.error = _("invalid register list to push/pop instruction");
10755 return;
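/* Examples of the selection in do_t_push_pop above: "push {r0-r7}"
   fits the plain 16-bit encoding, "push {r4, lr}" and "pop {r4, pc}"
   use the 16-bit form with the extra PC/LR bit, and anything else,
   such as "push {r0, r8}", falls back to the 32-bit LDM/STM encoding
   (unified syntax only).  */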
10759 static void
10760 do_t_rbit (void)
10762 unsigned Rd, Rm;
10764 Rd = inst.operands[0].reg;
10765 Rm = inst.operands[1].reg;
10767 reject_bad_reg (Rd);
10768 reject_bad_reg (Rm);
10770 inst.instruction |= Rd << 8;
10771 inst.instruction |= Rm << 16;
10772 inst.instruction |= Rm;
10775 static void
10776 do_t_rev (void)
10778 unsigned Rd, Rm;
10780 Rd = inst.operands[0].reg;
10781 Rm = inst.operands[1].reg;
10783 reject_bad_reg (Rd);
10784 reject_bad_reg (Rm);
10786 if (Rd <= 7 && Rm <= 7
10787 && inst.size_req != 4)
10789 inst.instruction = THUMB_OP16 (inst.instruction);
10790 inst.instruction |= Rd;
10791 inst.instruction |= Rm << 3;
10793 else if (unified_syntax)
10795 inst.instruction = THUMB_OP32 (inst.instruction);
10796 inst.instruction |= Rd << 8;
10797 inst.instruction |= Rm << 16;
10798 inst.instruction |= Rm;
10800 else
10801 inst.error = BAD_HIREG;
10804 static void
10805 do_t_rrx (void)
10807 unsigned Rd, Rm;
10809 Rd = inst.operands[0].reg;
10810 Rm = inst.operands[1].reg;
10812 reject_bad_reg (Rd);
10813 reject_bad_reg (Rm);
10815 inst.instruction |= Rd << 8;
10816 inst.instruction |= Rm;
10819 static void
10820 do_t_rsb (void)
10822 unsigned Rd, Rs;
10824 Rd = inst.operands[0].reg;
10825 Rs = (inst.operands[1].present
10826 ? inst.operands[1].reg /* Rd, Rs, foo */
10827 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
10829 reject_bad_reg (Rd);
10830 reject_bad_reg (Rs);
10831 if (inst.operands[2].isreg)
10832 reject_bad_reg (inst.operands[2].reg);
10834 inst.instruction |= Rd << 8;
10835 inst.instruction |= Rs << 16;
10836 if (!inst.operands[2].isreg)
10838 bfd_boolean narrow;
10840 if ((inst.instruction & 0x00100000) != 0)
10841 narrow = !in_it_block ();
10842 else
10843 narrow = in_it_block ();
10845 if (Rd > 7 || Rs > 7)
10846 narrow = FALSE;
10848 if (inst.size_req == 4 || !unified_syntax)
10849 narrow = FALSE;
10851 if (inst.reloc.exp.X_op != O_constant
10852 || inst.reloc.exp.X_add_number != 0)
10853 narrow = FALSE;
10855 /* Turn rsb #0 into 16-bit neg. We should probably do this via
10856 relaxation, but it doesn't seem worth the hassle. */
10857 if (narrow)
10859 inst.reloc.type = BFD_RELOC_UNUSED;
10860 inst.instruction = THUMB_OP16 (T_MNEM_negs);
10861 inst.instruction |= Rs << 3;
10862 inst.instruction |= Rd;
10864 else
10866 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
10867 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
10870 else
10871 encode_thumb32_shifted_operand (2);
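/* For instance, "rsbs r0, r1, #0" outside an IT block is turned into
   the 16-bit NEGS encoding by the code above, whereas "rsb r0, r1, #1"
   always needs the 32-bit immediate form.  */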
10874 static void
10875 do_t_setend (void)
10877 set_it_insn_type (OUTSIDE_IT_INSN);
10878 if (inst.operands[0].imm)
10879 inst.instruction |= 0x8;
10882 static void
10883 do_t_shift (void)
10885 if (!inst.operands[1].present)
10886 inst.operands[1].reg = inst.operands[0].reg;
10888 if (unified_syntax)
10890 bfd_boolean narrow;
10891 int shift_kind;
10893 switch (inst.instruction)
10895 case T_MNEM_asr:
10896 case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
10897 case T_MNEM_lsl:
10898 case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
10899 case T_MNEM_lsr:
10900 case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
10901 case T_MNEM_ror:
10902 case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
10903 default: abort ();
10906 if (THUMB_SETS_FLAGS (inst.instruction))
10907 narrow = !in_it_block ();
10908 else
10909 narrow = in_it_block ();
10910 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
10911 narrow = FALSE;
10912 if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
10913 narrow = FALSE;
10914 if (inst.operands[2].isreg
10915 && (inst.operands[1].reg != inst.operands[0].reg
10916 || inst.operands[2].reg > 7))
10917 narrow = FALSE;
10918 if (inst.size_req == 4)
10919 narrow = FALSE;
10921 reject_bad_reg (inst.operands[0].reg);
10922 reject_bad_reg (inst.operands[1].reg);
10924 if (!narrow)
10926 if (inst.operands[2].isreg)
10928 reject_bad_reg (inst.operands[2].reg);
10929 inst.instruction = THUMB_OP32 (inst.instruction);
10930 inst.instruction |= inst.operands[0].reg << 8;
10931 inst.instruction |= inst.operands[1].reg << 16;
10932 inst.instruction |= inst.operands[2].reg;
10934 else
10936 inst.operands[1].shifted = 1;
10937 inst.operands[1].shift_kind = shift_kind;
10938 inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
10939 ? T_MNEM_movs : T_MNEM_mov);
10940 inst.instruction |= inst.operands[0].reg << 8;
10941 encode_thumb32_shifted_operand (1);
10942 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
10943 inst.reloc.type = BFD_RELOC_UNUSED;
10946 else
10948 if (inst.operands[2].isreg)
10950 switch (shift_kind)
10952 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
10953 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
10954 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
10955 case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
10956 default: abort ();
10959 inst.instruction |= inst.operands[0].reg;
10960 inst.instruction |= inst.operands[2].reg << 3;
10962 else
10964 switch (shift_kind)
10966 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
10967 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
10968 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
10969 default: abort ();
10971 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
10972 inst.instruction |= inst.operands[0].reg;
10973 inst.instruction |= inst.operands[1].reg << 3;
10977 else
10979 constraint (inst.operands[0].reg > 7
10980 || inst.operands[1].reg > 7, BAD_HIREG);
10981 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
10983 if (inst.operands[2].isreg) /* Rd, {Rs,} Rn */
10985 constraint (inst.operands[2].reg > 7, BAD_HIREG);
10986 constraint (inst.operands[0].reg != inst.operands[1].reg,
10987 _("source1 and dest must be same register"));
10989 switch (inst.instruction)
10991 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
10992 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
10993 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
10994 case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
10995 default: abort ();
10998 inst.instruction |= inst.operands[0].reg;
10999 inst.instruction |= inst.operands[2].reg << 3;
11001 else
11003 switch (inst.instruction)
11005 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
11006 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
11007 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
11008 case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
11009 default: abort ();
11011 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
11012 inst.instruction |= inst.operands[0].reg;
11013 inst.instruction |= inst.operands[1].reg << 3;
11018 static void
11019 do_t_simd (void)
11021 unsigned Rd, Rn, Rm;
11023 Rd = inst.operands[0].reg;
11024 Rn = inst.operands[1].reg;
11025 Rm = inst.operands[2].reg;
11027 reject_bad_reg (Rd);
11028 reject_bad_reg (Rn);
11029 reject_bad_reg (Rm);
11031 inst.instruction |= Rd << 8;
11032 inst.instruction |= Rn << 16;
11033 inst.instruction |= Rm;
11036 static void
11037 do_t_smc (void)
11039 unsigned int value = inst.reloc.exp.X_add_number;
11040 constraint (inst.reloc.exp.X_op != O_constant,
11041 _("expression too complex"));
11042 inst.reloc.type = BFD_RELOC_UNUSED;
11043 inst.instruction |= (value & 0xf000) >> 12;
11044 inst.instruction |= (value & 0x0ff0);
11045 inst.instruction |= (value & 0x000f) << 16;
11048 static void
11049 do_t_ssat_usat (int bias)
11051 unsigned Rd, Rn;
11053 Rd = inst.operands[0].reg;
11054 Rn = inst.operands[2].reg;
11056 reject_bad_reg (Rd);
11057 reject_bad_reg (Rn);
11059 inst.instruction |= Rd << 8;
11060 inst.instruction |= inst.operands[1].imm - bias;
11061 inst.instruction |= Rn << 16;
11063 if (inst.operands[3].present)
11065 offsetT shift_amount = inst.reloc.exp.X_add_number;
11067 inst.reloc.type = BFD_RELOC_UNUSED;
11069 constraint (inst.reloc.exp.X_op != O_constant,
11070 _("expression too complex"));
11072 if (shift_amount != 0)
11074 constraint (shift_amount > 31,
11075 _("shift expression is too large"));
11077 if (inst.operands[3].shift_kind == SHIFT_ASR)
11078 inst.instruction |= 0x00200000; /* sh bit. */
11080 inst.instruction |= (shift_amount & 0x1c) << 10;
11081 inst.instruction |= (shift_amount & 0x03) << 6;
11086 static void
11087 do_t_ssat (void)
11089 do_t_ssat_usat (1);
11092 static void
11093 do_t_ssat16 (void)
11095 unsigned Rd, Rn;
11097 Rd = inst.operands[0].reg;
11098 Rn = inst.operands[2].reg;
11100 reject_bad_reg (Rd);
11101 reject_bad_reg (Rn);
11103 inst.instruction |= Rd << 8;
11104 inst.instruction |= inst.operands[1].imm - 1;
11105 inst.instruction |= Rn << 16;
11108 static void
11109 do_t_strex (void)
11111 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
11112 || inst.operands[2].postind || inst.operands[2].writeback
11113 || inst.operands[2].immisreg || inst.operands[2].shifted
11114 || inst.operands[2].negative,
11115 BAD_ADDR_MODE);
11117 inst.instruction |= inst.operands[0].reg << 8;
11118 inst.instruction |= inst.operands[1].reg << 12;
11119 inst.instruction |= inst.operands[2].reg << 16;
11120 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
11123 static void
11124 do_t_strexd (void)
11126 if (!inst.operands[2].present)
11127 inst.operands[2].reg = inst.operands[1].reg + 1;
11129 constraint (inst.operands[0].reg == inst.operands[1].reg
11130 || inst.operands[0].reg == inst.operands[2].reg
11131 || inst.operands[0].reg == inst.operands[3].reg
11132 || inst.operands[1].reg == inst.operands[2].reg,
11133 BAD_OVERLAP);
11135 inst.instruction |= inst.operands[0].reg;
11136 inst.instruction |= inst.operands[1].reg << 12;
11137 inst.instruction |= inst.operands[2].reg << 8;
11138 inst.instruction |= inst.operands[3].reg << 16;
11141 static void
11142 do_t_sxtah (void)
11144 unsigned Rd, Rn, Rm;
11146 Rd = inst.operands[0].reg;
11147 Rn = inst.operands[1].reg;
11148 Rm = inst.operands[2].reg;
11150 reject_bad_reg (Rd);
11151 reject_bad_reg (Rn);
11152 reject_bad_reg (Rm);
11154 inst.instruction |= Rd << 8;
11155 inst.instruction |= Rn << 16;
11156 inst.instruction |= Rm;
11157 inst.instruction |= inst.operands[3].imm << 4;
11160 static void
11161 do_t_sxth (void)
11163 unsigned Rd, Rm;
11165 Rd = inst.operands[0].reg;
11166 Rm = inst.operands[1].reg;
11168 reject_bad_reg (Rd);
11169 reject_bad_reg (Rm);
11171 if (inst.instruction <= 0xffff
11172 && inst.size_req != 4
11173 && Rd <= 7 && Rm <= 7
11174 && (!inst.operands[2].present || inst.operands[2].imm == 0))
11176 inst.instruction = THUMB_OP16 (inst.instruction);
11177 inst.instruction |= Rd;
11178 inst.instruction |= Rm << 3;
11180 else if (unified_syntax)
11182 if (inst.instruction <= 0xffff)
11183 inst.instruction = THUMB_OP32 (inst.instruction);
11184 inst.instruction |= Rd << 8;
11185 inst.instruction |= Rm;
11186 inst.instruction |= inst.operands[2].imm << 4;
11188 else
11190 constraint (inst.operands[2].present && inst.operands[2].imm != 0,
11191 _("Thumb encoding does not support rotation"));
11192 constraint (1, BAD_HIREG);
11196 static void
11197 do_t_swi (void)
11199 inst.reloc.type = BFD_RELOC_ARM_SWI;
11202 static void
11203 do_t_tb (void)
11205 unsigned Rn, Rm;
11206 int half;
11208 half = (inst.instruction & 0x10) != 0;
11209 set_it_insn_type_last ();
11210 constraint (inst.operands[0].immisreg,
11211 _("instruction requires register index"));
11213 Rn = inst.operands[0].reg;
11214 Rm = inst.operands[0].imm;
11216 constraint (Rn == REG_SP, BAD_SP);
11217 reject_bad_reg (Rm);
11219 constraint (!half && inst.operands[0].shifted,
11220 _("instruction does not allow shifted index"));
11221 inst.instruction |= (Rn << 16) | Rm;
11224 static void
11225 do_t_usat (void)
11227 do_t_ssat_usat (0);
11230 static void
11231 do_t_usat16 (void)
11233 unsigned Rd, Rn;
11235 Rd = inst.operands[0].reg;
11236 Rn = inst.operands[2].reg;
11238 reject_bad_reg (Rd);
11239 reject_bad_reg (Rn);
11241 inst.instruction |= Rd << 8;
11242 inst.instruction |= inst.operands[1].imm;
11243 inst.instruction |= Rn << 16;
11246 /* Neon instruction encoder helpers. */
11248 /* Encodings for the different types for various Neon opcodes. */
11250 /* An "invalid" code for the following tables. */
11251 #define N_INV -1u
11253 struct neon_tab_entry
11255 unsigned integer;
11256 unsigned float_or_poly;
11257 unsigned scalar_or_imm;
11260 /* Map overloaded Neon opcodes to their respective encodings. */
11261 #define NEON_ENC_TAB \
11262 X(vabd, 0x0000700, 0x1200d00, N_INV), \
11263 X(vmax, 0x0000600, 0x0000f00, N_INV), \
11264 X(vmin, 0x0000610, 0x0200f00, N_INV), \
11265 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
11266 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
11267 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
11268 X(vadd, 0x0000800, 0x0000d00, N_INV), \
11269 X(vsub, 0x1000800, 0x0200d00, N_INV), \
11270 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
11271 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
11272 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
11273 /* Register variants of the following two instructions are encoded as
11274 vcge / vcgt with the operands reversed. */ \
11275 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
11276 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
11277 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
11278 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
11279 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
11280 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
11281 X(vmlal, 0x0800800, N_INV, 0x0800240), \
11282 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
11283 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
11284 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
11285 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
11286 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
11287 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
11288 X(vshl, 0x0000400, N_INV, 0x0800510), \
11289 X(vqshl, 0x0000410, N_INV, 0x0800710), \
11290 X(vand, 0x0000110, N_INV, 0x0800030), \
11291 X(vbic, 0x0100110, N_INV, 0x0800030), \
11292 X(veor, 0x1000110, N_INV, N_INV), \
11293 X(vorn, 0x0300110, N_INV, 0x0800010), \
11294 X(vorr, 0x0200110, N_INV, 0x0800010), \
11295 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
11296 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
11297 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
11298 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
11299 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
11300 X(vst1, 0x0000000, 0x0800000, N_INV), \
11301 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
11302 X(vst2, 0x0000100, 0x0800100, N_INV), \
11303 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
11304 X(vst3, 0x0000200, 0x0800200, N_INV), \
11305 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
11306 X(vst4, 0x0000300, 0x0800300, N_INV), \
11307 X(vmovn, 0x1b20200, N_INV, N_INV), \
11308 X(vtrn, 0x1b20080, N_INV, N_INV), \
11309 X(vqmovn, 0x1b20200, N_INV, N_INV), \
11310 X(vqmovun, 0x1b20240, N_INV, N_INV), \
11311 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
11312 X(vnmla, 0xe000a40, 0xe000b40, N_INV), \
11313 X(vnmls, 0xe100a40, 0xe100b40, N_INV), \
11314 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
11315 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
11316 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
11317 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV)
11319 enum neon_opc
11321 #define X(OPC,I,F,S) N_MNEM_##OPC
11322 NEON_ENC_TAB
11323 #undef X
11326 static const struct neon_tab_entry neon_enc_tab[] =
11328 #define X(OPC,I,F,S) { (I), (F), (S) }
11329 NEON_ENC_TAB
11330 #undef X
11333 #define NEON_ENC_INTEGER(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
11334 #define NEON_ENC_ARMREG(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
11335 #define NEON_ENC_POLY(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
11336 #define NEON_ENC_FLOAT(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
11337 #define NEON_ENC_SCALAR(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
11338 #define NEON_ENC_IMMED(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
11339 #define NEON_ENC_INTERLV(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
11340 #define NEON_ENC_LANE(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
11341 #define NEON_ENC_DUP(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
11342 #define NEON_ENC_SINGLE(X) \
11343 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
11344 #define NEON_ENC_DOUBLE(X) \
11345 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
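/* For example, when inst.instruction holds N_MNEM_vadd, NEON_ENC_INTEGER
   (inst.instruction) picks the integer pattern 0x0000800 and NEON_ENC_FLOAT
   (inst.instruction) the float pattern 0x0000d00, straight from the vadd row
   of NEON_ENC_TAB above.  */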
11347 /* Define shapes for instruction operands. The following mnemonic characters
11348 are used in this table:
11350 F - VFP S<n> register
11351 D - Neon D<n> register
11352 Q - Neon Q<n> register
11353 I - Immediate
11354 S - Scalar
11355 R - ARM register
11356 L - D<n> register list
11358 This table is used to generate various data:
11359 - enumerations of the form NS_DDR to be used as arguments to
11360 neon_select_shape.
11361 - a table classifying shapes into single, double, quad, mixed.
11362 - a table used to drive neon_select_shape. */
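/* For example, the entry X(3, (D, D, D), DOUBLE) below expands to the
   enumerator NS_DDD, the classification SC_DOUBLE, and the neon_shape_tab
   row { 3, { SE_D, SE_D, SE_D } }.  */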
11364 #define NEON_SHAPE_DEF \
11365 X(3, (D, D, D), DOUBLE), \
11366 X(3, (Q, Q, Q), QUAD), \
11367 X(3, (D, D, I), DOUBLE), \
11368 X(3, (Q, Q, I), QUAD), \
11369 X(3, (D, D, S), DOUBLE), \
11370 X(3, (Q, Q, S), QUAD), \
11371 X(2, (D, D), DOUBLE), \
11372 X(2, (Q, Q), QUAD), \
11373 X(2, (D, S), DOUBLE), \
11374 X(2, (Q, S), QUAD), \
11375 X(2, (D, R), DOUBLE), \
11376 X(2, (Q, R), QUAD), \
11377 X(2, (D, I), DOUBLE), \
11378 X(2, (Q, I), QUAD), \
11379 X(3, (D, L, D), DOUBLE), \
11380 X(2, (D, Q), MIXED), \
11381 X(2, (Q, D), MIXED), \
11382 X(3, (D, Q, I), MIXED), \
11383 X(3, (Q, D, I), MIXED), \
11384 X(3, (Q, D, D), MIXED), \
11385 X(3, (D, Q, Q), MIXED), \
11386 X(3, (Q, Q, D), MIXED), \
11387 X(3, (Q, D, S), MIXED), \
11388 X(3, (D, Q, S), MIXED), \
11389 X(4, (D, D, D, I), DOUBLE), \
11390 X(4, (Q, Q, Q, I), QUAD), \
11391 X(2, (F, F), SINGLE), \
11392 X(3, (F, F, F), SINGLE), \
11393 X(2, (F, I), SINGLE), \
11394 X(2, (F, D), MIXED), \
11395 X(2, (D, F), MIXED), \
11396 X(3, (F, F, I), MIXED), \
11397 X(4, (R, R, F, F), SINGLE), \
11398 X(4, (F, F, R, R), SINGLE), \
11399 X(3, (D, R, R), DOUBLE), \
11400 X(3, (R, R, D), DOUBLE), \
11401 X(2, (S, R), SINGLE), \
11402 X(2, (R, S), SINGLE), \
11403 X(2, (F, R), SINGLE), \
11404 X(2, (R, F), SINGLE)
11406 #define S2(A,B) NS_##A##B
11407 #define S3(A,B,C) NS_##A##B##C
11408 #define S4(A,B,C,D) NS_##A##B##C##D
11410 #define X(N, L, C) S##N L
11412 enum neon_shape
11414 NEON_SHAPE_DEF,
11415 NS_NULL
11418 #undef X
11419 #undef S2
11420 #undef S3
11421 #undef S4
11423 enum neon_shape_class
11425 SC_SINGLE,
11426 SC_DOUBLE,
11427 SC_QUAD,
11428 SC_MIXED
11431 #define X(N, L, C) SC_##C
11433 static enum neon_shape_class neon_shape_class[] =
11435 NEON_SHAPE_DEF
11438 #undef X
11440 enum neon_shape_el
11442 SE_F,
11443 SE_D,
11444 SE_Q,
11445 SE_I,
11446 SE_S,
11447 SE_R,
11448 SE_L
11451 /* Register widths of above. */
11452 static unsigned neon_shape_el_size[] =
11454 32,
11455 64,
11456 128,
11457 0,
11458 32,
11459 32,
11460 0
11463 struct neon_shape_info
11465 unsigned els;
11466 enum neon_shape_el el[NEON_MAX_TYPE_ELS];
11469 #define S2(A,B) { SE_##A, SE_##B }
11470 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
11471 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
11473 #define X(N, L, C) { N, S##N L }
11475 static struct neon_shape_info neon_shape_tab[] =
11477 NEON_SHAPE_DEF
11480 #undef X
11481 #undef S2
11482 #undef S3
11483 #undef S4
11485 /* Bit masks used in type checking given instructions.
11486 'N_EQK' means the type must be the same as (or based on in some way) the key
11487 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
11488 set, various other bits can be set as well in order to modify the meaning of
11489 the type constraint. */
11491 enum neon_type_mask
11493 N_S8 = 0x0000001,
11494 N_S16 = 0x0000002,
11495 N_S32 = 0x0000004,
11496 N_S64 = 0x0000008,
11497 N_U8 = 0x0000010,
11498 N_U16 = 0x0000020,
11499 N_U32 = 0x0000040,
11500 N_U64 = 0x0000080,
11501 N_I8 = 0x0000100,
11502 N_I16 = 0x0000200,
11503 N_I32 = 0x0000400,
11504 N_I64 = 0x0000800,
11505 N_8 = 0x0001000,
11506 N_16 = 0x0002000,
11507 N_32 = 0x0004000,
11508 N_64 = 0x0008000,
11509 N_P8 = 0x0010000,
11510 N_P16 = 0x0020000,
11511 N_F16 = 0x0040000,
11512 N_F32 = 0x0080000,
11513 N_F64 = 0x0100000,
11514 N_KEY = 0x1000000, /* Key element (main type specifier). */
11515 N_EQK = 0x2000000, /* Given operand has the same type & size as the key. */
11516 N_VFP = 0x4000000, /* VFP mode: operand size must match register width. */
11517 N_DBL = 0x0000001, /* If N_EQK, this operand is twice the size. */
11518 N_HLF = 0x0000002, /* If N_EQK, this operand is half the size. */
11519 N_SGN = 0x0000004, /* If N_EQK, this operand is forced to be signed. */
11520 N_UNS = 0x0000008, /* If N_EQK, this operand is forced to be unsigned. */
11521 N_INT = 0x0000010, /* If N_EQK, this operand is forced to be integer. */
11522 N_FLT = 0x0000020, /* If N_EQK, this operand is forced to be float. */
11523 N_SIZ = 0x0000040, /* If N_EQK, this operand is forced to be size-only. */
11524 N_UTYP = 0,
11525 N_MAX_NONSPECIAL = N_F64
11528 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
11530 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
11531 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
11532 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
11533 #define N_SUF_32 (N_SU_32 | N_F32)
11534 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
11535 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F32)
11537 /* Pass this as the first type argument to neon_check_type to ignore types
11538 altogether. */
11539 #define N_IGNORE_TYPE (N_KEY | N_EQK)
11541 /* Select a "shape" for the current instruction (describing register types or
11542 sizes) from a list of alternatives. Return NS_NULL if the current instruction
11543 doesn't fit. For non-polymorphic shapes, checking is usually done as a
11544 function of operand parsing, so this function doesn't need to be called.
11545 Shapes should be listed in order of decreasing length. */
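/* For instance, do_vfp_nsyn_sqrt below calls
   neon_select_shape (NS_FF, NS_DD, NS_NULL): two single-precision registers
   match NS_FF, two double-precision registers match NS_DD, and anything else
   returns NS_NULL after reporting "invalid instruction shape".  */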
11547 static enum neon_shape
11548 neon_select_shape (enum neon_shape shape, ...)
11550 va_list ap;
11551 enum neon_shape first_shape = shape;
11553 /* Fix missing optional operands. FIXME: we don't know at this point how
11554 many arguments we should have, so this makes the assumption that we have
11555 > 1. This is true of all current Neon opcodes, I think, but may not be
11556 true in the future. */
11557 if (!inst.operands[1].present)
11558 inst.operands[1] = inst.operands[0];
11560 va_start (ap, shape);
11562 for (; shape != NS_NULL; shape = va_arg (ap, int))
11564 unsigned j;
11565 int matches = 1;
11567 for (j = 0; j < neon_shape_tab[shape].els; j++)
11569 if (!inst.operands[j].present)
11571 matches = 0;
11572 break;
11575 switch (neon_shape_tab[shape].el[j])
11577 case SE_F:
11578 if (!(inst.operands[j].isreg
11579 && inst.operands[j].isvec
11580 && inst.operands[j].issingle
11581 && !inst.operands[j].isquad))
11582 matches = 0;
11583 break;
11585 case SE_D:
11586 if (!(inst.operands[j].isreg
11587 && inst.operands[j].isvec
11588 && !inst.operands[j].isquad
11589 && !inst.operands[j].issingle))
11590 matches = 0;
11591 break;
11593 case SE_R:
11594 if (!(inst.operands[j].isreg
11595 && !inst.operands[j].isvec))
11596 matches = 0;
11597 break;
11599 case SE_Q:
11600 if (!(inst.operands[j].isreg
11601 && inst.operands[j].isvec
11602 && inst.operands[j].isquad
11603 && !inst.operands[j].issingle))
11604 matches = 0;
11605 break;
11607 case SE_I:
11608 if (!(!inst.operands[j].isreg
11609 && !inst.operands[j].isscalar))
11610 matches = 0;
11611 break;
11613 case SE_S:
11614 if (!(!inst.operands[j].isreg
11615 && inst.operands[j].isscalar))
11616 matches = 0;
11617 break;
11619 case SE_L:
11620 break;
11623 if (matches)
11624 break;
11627 va_end (ap);
11629 if (shape == NS_NULL && first_shape != NS_NULL)
11630 first_error (_("invalid instruction shape"));
11632 return shape;
11635 /* True if SHAPE is predominantly a quadword operation (most of the time, this
11636 means the Q bit should be set). */
11638 static int
11639 neon_quad (enum neon_shape shape)
11641 return neon_shape_class[shape] == SC_QUAD;
11644 static void
11645 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
11646 unsigned *g_size)
11648 /* Allow modifications to a type which is constrained to be based on the
11649 key element, according to the bits set alongside N_EQK. */
11650 if ((typebits & N_EQK) != 0)
11652 if ((typebits & N_HLF) != 0)
11653 *g_size /= 2;
11654 else if ((typebits & N_DBL) != 0)
11655 *g_size *= 2;
11656 if ((typebits & N_SGN) != 0)
11657 *g_type = NT_signed;
11658 else if ((typebits & N_UNS) != 0)
11659 *g_type = NT_unsigned;
11660 else if ((typebits & N_INT) != 0)
11661 *g_type = NT_integer;
11662 else if ((typebits & N_FLT) != 0)
11663 *g_type = NT_float;
11664 else if ((typebits & N_SIZ) != 0)
11665 *g_type = NT_untyped;
11669 /* Return a copy of KEY promoted by the bits set in THISARG. KEY should be the "key"
11670 operand type, i.e. the single type specified in a Neon instruction when it
11671 is the only one given. */
11673 static struct neon_type_el
11674 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
11676 struct neon_type_el dest = *key;
11678 gas_assert ((thisarg & N_EQK) != 0);
11680 neon_modify_type_size (thisarg, &dest.type, &dest.size);
11682 return dest;
11685 /* Convert Neon type and size into compact bitmask representation. */
11687 static enum neon_type_mask
11688 type_chk_of_el_type (enum neon_el_type type, unsigned size)
11690 switch (type)
11692 case NT_untyped:
11693 switch (size)
11695 case 8: return N_8;
11696 case 16: return N_16;
11697 case 32: return N_32;
11698 case 64: return N_64;
11699 default: ;
11701 break;
11703 case NT_integer:
11704 switch (size)
11706 case 8: return N_I8;
11707 case 16: return N_I16;
11708 case 32: return N_I32;
11709 case 64: return N_I64;
11710 default: ;
11712 break;
11714 case NT_float:
11715 switch (size)
11717 case 16: return N_F16;
11718 case 32: return N_F32;
11719 case 64: return N_F64;
11720 default: ;
11722 break;
11724 case NT_poly:
11725 switch (size)
11727 case 8: return N_P8;
11728 case 16: return N_P16;
11729 default: ;
11731 break;
11733 case NT_signed:
11734 switch (size)
11736 case 8: return N_S8;
11737 case 16: return N_S16;
11738 case 32: return N_S32;
11739 case 64: return N_S64;
11740 default: ;
11742 break;
11744 case NT_unsigned:
11745 switch (size)
11747 case 8: return N_U8;
11748 case 16: return N_U16;
11749 case 32: return N_U32;
11750 case 64: return N_U64;
11751 default: ;
11753 break;
11755 default: ;
11758 return N_UTYP;
11761 /* Convert compact Neon bitmask type representation to a type and size. Only
11762 handles the case where a single bit is set in the mask. */
11764 static int
11765 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
11766 enum neon_type_mask mask)
11768 if ((mask & N_EQK) != 0)
11769 return FAIL;
11771 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
11772 *size = 8;
11773 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_P16)) != 0)
11774 *size = 16;
11775 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
11776 *size = 32;
11777 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64)) != 0)
11778 *size = 64;
11779 else
11780 return FAIL;
11782 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
11783 *type = NT_signed;
11784 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
11785 *type = NT_unsigned;
11786 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
11787 *type = NT_integer;
11788 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
11789 *type = NT_untyped;
11790 else if ((mask & (N_P8 | N_P16)) != 0)
11791 *type = NT_poly;
11792 else if ((mask & (N_F32 | N_F64)) != 0)
11793 *type = NT_float;
11794 else
11795 return FAIL;
11797 return SUCCESS;
11800 /* Modify a bitmask of allowed types. This is only needed for type
11801 relaxation. */
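/* For example, relaxing ALLOWED == N_S16 with MODS == (N_EQK | N_DBL)
   doubles the element size, so the returned mask is N_S32.  */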
11803 static unsigned
11804 modify_types_allowed (unsigned allowed, unsigned mods)
11806 unsigned size;
11807 enum neon_el_type type;
11808 unsigned destmask;
11809 int i;
11811 destmask = 0;
11813 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
11815 if (el_type_of_type_chk (&type, &size, allowed & i) == SUCCESS)
11817 neon_modify_type_size (mods, &type, &size);
11818 destmask |= type_chk_of_el_type (type, size);
11822 return destmask;
11825 /* Check type and return type classification.
11826 The manual states (paraphrase): If one datatype is given, it indicates the
11827 type given in:
11828 - the second operand, if there is one
11829 - the operand, if there is no second operand
11830 - the result, if there are no operands.
11831 This isn't quite good enough though, so we use a concept of a "key" datatype
11832 which is set on a per-instruction basis, which is the one which matters when
11833 only one data type is written.
11834 Note: this function has side-effects (e.g. filling in missing operands). All
11835 Neon instructions should call it before performing bit encoding. */
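/* As an illustration, the call
   neon_check_type (3, rs, N_EQK, N_EQK, N_SU_32 | N_KEY)
   (used by do_neon_dyadic_i_su below) makes operand 2 the key: it must be
   one of the S8/S16/S32/U8/U16/U32 types, and operands 0 and 1 must have
   the same type and size as that key.  */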
11837 static struct neon_type_el
11838 neon_check_type (unsigned els, enum neon_shape ns, ...)
11840 va_list ap;
11841 unsigned i, pass, key_el = 0;
11842 unsigned types[NEON_MAX_TYPE_ELS];
11843 enum neon_el_type k_type = NT_invtype;
11844 unsigned k_size = -1u;
11845 struct neon_type_el badtype = {NT_invtype, -1};
11846 unsigned key_allowed = 0;
11848 /* The optional register in a Neon instruction is always operand 1, and it may be absent.
11849 Fill in the missing operand here, if it was omitted. */
11850 if (els > 1 && !inst.operands[1].present)
11851 inst.operands[1] = inst.operands[0];
11853 /* Suck up all the varargs. */
11854 va_start (ap, ns);
11855 for (i = 0; i < els; i++)
11857 unsigned thisarg = va_arg (ap, unsigned);
11858 if (thisarg == N_IGNORE_TYPE)
11860 va_end (ap);
11861 return badtype;
11863 types[i] = thisarg;
11864 if ((thisarg & N_KEY) != 0)
11865 key_el = i;
11867 va_end (ap);
11869 if (inst.vectype.elems > 0)
11870 for (i = 0; i < els; i++)
11871 if (inst.operands[i].vectype.type != NT_invtype)
11873 first_error (_("types specified in both the mnemonic and operands"));
11874 return badtype;
11877 /* Duplicate inst.vectype elements here as necessary.
11878 FIXME: No idea if this is exactly the same as the ARM assembler,
11879 particularly when an insn takes one register and one non-register
11880 operand. */
11881 if (inst.vectype.elems == 1 && els > 1)
11883 unsigned j;
11884 inst.vectype.elems = els;
11885 inst.vectype.el[key_el] = inst.vectype.el[0];
11886 for (j = 0; j < els; j++)
11887 if (j != key_el)
11888 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
11889 types[j]);
11891 else if (inst.vectype.elems == 0 && els > 0)
11893 unsigned j;
11894 /* No types were given after the mnemonic, so look for types specified
11895 after each operand. We allow some flexibility here; as long as the
11896 "key" operand has a type, we can infer the others. */
11897 for (j = 0; j < els; j++)
11898 if (inst.operands[j].vectype.type != NT_invtype)
11899 inst.vectype.el[j] = inst.operands[j].vectype;
11901 if (inst.operands[key_el].vectype.type != NT_invtype)
11903 for (j = 0; j < els; j++)
11904 if (inst.operands[j].vectype.type == NT_invtype)
11905 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
11906 types[j]);
11908 else
11910 first_error (_("operand types can't be inferred"));
11911 return badtype;
11914 else if (inst.vectype.elems != els)
11916 first_error (_("type specifier has the wrong number of parts"));
11917 return badtype;
11920 for (pass = 0; pass < 2; pass++)
11922 for (i = 0; i < els; i++)
11924 unsigned thisarg = types[i];
11925 unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
11926 ? modify_types_allowed (key_allowed, thisarg) : thisarg;
11927 enum neon_el_type g_type = inst.vectype.el[i].type;
11928 unsigned g_size = inst.vectype.el[i].size;
11930 /* Decay more-specific signed & unsigned types to sign-insensitive
11931 integer types if sign-specific variants are unavailable. */
11932 if ((g_type == NT_signed || g_type == NT_unsigned)
11933 && (types_allowed & N_SU_ALL) == 0)
11934 g_type = NT_integer;
11936 /* If only untyped args are allowed, decay any more specific types to
11937 them. Some instructions only care about signs for some element
11938 sizes, so handle that properly. */
11939 if ((g_size == 8 && (types_allowed & N_8) != 0)
11940 || (g_size == 16 && (types_allowed & N_16) != 0)
11941 || (g_size == 32 && (types_allowed & N_32) != 0)
11942 || (g_size == 64 && (types_allowed & N_64) != 0))
11943 g_type = NT_untyped;
11945 if (pass == 0)
11947 if ((thisarg & N_KEY) != 0)
11949 k_type = g_type;
11950 k_size = g_size;
11951 key_allowed = thisarg & ~N_KEY;
11954 else
11956 if ((thisarg & N_VFP) != 0)
11958 enum neon_shape_el regshape = neon_shape_tab[ns].el[i];
11959 unsigned regwidth = neon_shape_el_size[regshape], match;
11961 /* In VFP mode, operands must match register widths. If we
11962 have a key operand, use its width, else use the width of
11963 the current operand. */
11964 if (k_size != -1u)
11965 match = k_size;
11966 else
11967 match = g_size;
11969 if (regwidth != match)
11971 first_error (_("operand size must match register width"));
11972 return badtype;
11976 if ((thisarg & N_EQK) == 0)
11978 unsigned given_type = type_chk_of_el_type (g_type, g_size);
11980 if ((given_type & types_allowed) == 0)
11982 first_error (_("bad type in Neon instruction"));
11983 return badtype;
11986 else
11988 enum neon_el_type mod_k_type = k_type;
11989 unsigned mod_k_size = k_size;
11990 neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
11991 if (g_type != mod_k_type || g_size != mod_k_size)
11993 first_error (_("inconsistent types in Neon instruction"));
11994 return badtype;
12001 return inst.vectype.el[key_el];
12004 /* Neon-style VFP instruction forwarding. */
12006 /* Thumb VFP instructions have 0xE in the condition field. */
12008 static void
12009 do_vfp_cond_or_thumb (void)
12011 if (thumb_mode)
12012 inst.instruction |= 0xe0000000;
12013 else
12014 inst.instruction |= inst.cond << 28;
12017 /* Look up and encode a simple mnemonic, for use as a helper function for the
12018 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
12019 etc. It is assumed that operand parsing has already been done, and that the
12020 operands are in the form expected by the given opcode (this isn't necessarily
12021 the same as the form in which they were parsed, hence some massaging must
12022 take place before this function is called).
12023 Checks current arch version against that in the looked-up opcode. */
12025 static void
12026 do_vfp_nsyn_opcode (const char *opname)
12028 const struct asm_opcode *opcode;
12030 opcode = hash_find (arm_ops_hsh, opname);
12032 if (!opcode)
12033 abort ();
12035 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
12036 thumb_mode ? *opcode->tvariant : *opcode->avariant),
12037 _(BAD_FPU));
12039 if (thumb_mode)
12041 inst.instruction = opcode->tvalue;
12042 opcode->tencode ();
12044 else
12046 inst.instruction = (inst.cond << 28) | opcode->avalue;
12047 opcode->aencode ();
12051 static void
12052 do_vfp_nsyn_add_sub (enum neon_shape rs)
12054 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
12056 if (rs == NS_FFF)
12058 if (is_add)
12059 do_vfp_nsyn_opcode ("fadds");
12060 else
12061 do_vfp_nsyn_opcode ("fsubs");
12063 else
12065 if (is_add)
12066 do_vfp_nsyn_opcode ("faddd");
12067 else
12068 do_vfp_nsyn_opcode ("fsubd");
12072 /* Check operand types to see if this is a VFP instruction, and if so call
12073 PFN (). */
12075 static int
12076 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
12078 enum neon_shape rs;
12079 struct neon_type_el et;
12081 switch (args)
12083 case 2:
12084 rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
12085 et = neon_check_type (2, rs,
12086 N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
12087 break;
12089 case 3:
12090 rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
12091 et = neon_check_type (3, rs,
12092 N_EQK | N_VFP, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
12093 break;
12095 default:
12096 abort ();
12099 if (et.type != NT_invtype)
12101 pfn (rs);
12102 return SUCCESS;
12104 else
12105 inst.error = NULL;
12107 return FAIL;
12110 static void
12111 do_vfp_nsyn_mla_mls (enum neon_shape rs)
12113 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
12115 if (rs == NS_FFF)
12117 if (is_mla)
12118 do_vfp_nsyn_opcode ("fmacs");
12119 else
12120 do_vfp_nsyn_opcode ("fmscs");
12122 else
12124 if (is_mla)
12125 do_vfp_nsyn_opcode ("fmacd");
12126 else
12127 do_vfp_nsyn_opcode ("fmscd");
12131 static void
12132 do_vfp_nsyn_mul (enum neon_shape rs)
12134 if (rs == NS_FFF)
12135 do_vfp_nsyn_opcode ("fmuls");
12136 else
12137 do_vfp_nsyn_opcode ("fmuld");
12140 static void
12141 do_vfp_nsyn_abs_neg (enum neon_shape rs)
12143 int is_neg = (inst.instruction & 0x80) != 0;
12144 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_VFP | N_KEY);
12146 if (rs == NS_FF)
12148 if (is_neg)
12149 do_vfp_nsyn_opcode ("fnegs");
12150 else
12151 do_vfp_nsyn_opcode ("fabss");
12153 else
12155 if (is_neg)
12156 do_vfp_nsyn_opcode ("fnegd");
12157 else
12158 do_vfp_nsyn_opcode ("fabsd");
12162 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
12163 insns belong to Neon, and are handled elsewhere. */
12165 static void
12166 do_vfp_nsyn_ldm_stm (int is_dbmode)
12168 int is_ldm = (inst.instruction & (1 << 20)) != 0;
12169 if (is_ldm)
12171 if (is_dbmode)
12172 do_vfp_nsyn_opcode ("fldmdbs");
12173 else
12174 do_vfp_nsyn_opcode ("fldmias");
12176 else
12178 if (is_dbmode)
12179 do_vfp_nsyn_opcode ("fstmdbs");
12180 else
12181 do_vfp_nsyn_opcode ("fstmias");
12185 static void
12186 do_vfp_nsyn_sqrt (void)
12188 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
12189 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
12191 if (rs == NS_FF)
12192 do_vfp_nsyn_opcode ("fsqrts");
12193 else
12194 do_vfp_nsyn_opcode ("fsqrtd");
12197 static void
12198 do_vfp_nsyn_div (void)
12200 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
12201 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
12202 N_F32 | N_F64 | N_KEY | N_VFP);
12204 if (rs == NS_FFF)
12205 do_vfp_nsyn_opcode ("fdivs");
12206 else
12207 do_vfp_nsyn_opcode ("fdivd");
12210 static void
12211 do_vfp_nsyn_nmul (void)
12213 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
12214 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
12215 N_F32 | N_F64 | N_KEY | N_VFP);
12217 if (rs == NS_FFF)
12219 inst.instruction = NEON_ENC_SINGLE (inst.instruction);
12220 do_vfp_sp_dyadic ();
12222 else
12224 inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
12225 do_vfp_dp_rd_rn_rm ();
12227 do_vfp_cond_or_thumb ();
12230 static void
12231 do_vfp_nsyn_cmp (void)
12233 if (inst.operands[1].isreg)
12235 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
12236 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
12238 if (rs == NS_FF)
12240 inst.instruction = NEON_ENC_SINGLE (inst.instruction);
12241 do_vfp_sp_monadic ();
12243 else
12245 inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
12246 do_vfp_dp_rd_rm ();
12249 else
12251 enum neon_shape rs = neon_select_shape (NS_FI, NS_DI, NS_NULL);
12252 neon_check_type (2, rs, N_F32 | N_F64 | N_KEY | N_VFP, N_EQK);
12254 switch (inst.instruction & 0x0fffffff)
12256 case N_MNEM_vcmp:
12257 inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
12258 break;
12259 case N_MNEM_vcmpe:
12260 inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
12261 break;
12262 default:
12263 abort ();
12266 if (rs == NS_FI)
12268 inst.instruction = NEON_ENC_SINGLE (inst.instruction);
12269 do_vfp_sp_compare_z ();
12271 else
12273 inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
12274 do_vfp_dp_rd ();
12277 do_vfp_cond_or_thumb ();
12280 static void
12281 nsyn_insert_sp (void)
12283 inst.operands[1] = inst.operands[0];
12284 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
12285 inst.operands[0].reg = REG_SP;
12286 inst.operands[0].isreg = 1;
12287 inst.operands[0].writeback = 1;
12288 inst.operands[0].present = 1;
12291 static void
12292 do_vfp_nsyn_push (void)
12294 nsyn_insert_sp ();
12295 if (inst.operands[1].issingle)
12296 do_vfp_nsyn_opcode ("fstmdbs");
12297 else
12298 do_vfp_nsyn_opcode ("fstmdbd");
12301 static void
12302 do_vfp_nsyn_pop (void)
12304 nsyn_insert_sp ();
12305 if (inst.operands[1].issingle)
12306 do_vfp_nsyn_opcode ("fldmias");
12307 else
12308 do_vfp_nsyn_opcode ("fldmiad");
12311 /* Fix up Neon data-processing instructions, ORing in the correct bits for
12312 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
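/* For example, the integer VSUB pattern 0x1000800 has bit 24 (the U bit)
   set: in ARM mode it simply gains the 0xf2000000 prefix, giving 0xf3000800,
   while in Thumb mode the bit moves to bit 28 and the 0xef000000 prefix is
   applied, giving 0xff000800.  */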
12314 static unsigned
12315 neon_dp_fixup (unsigned i)
12317 if (thumb_mode)
12319 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
12320 if (i & (1 << 24))
12321 i |= 1 << 28;
12323 i &= ~(1 << 24);
12325 i |= 0xef000000;
12327 else
12328 i |= 0xf2000000;
12330 return i;
12333 /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
12334 (0, 1, 2, 3). */
12336 static unsigned
12337 neon_logbits (unsigned x)
12339 return ffs (x) - 4;
12342 #define LOW4(R) ((R) & 0xf)
12343 #define HI1(R) (((R) >> 4) & 1)
12345 /* Encode insns with bit pattern:
12347 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
12348 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
12350 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
12351 different meaning for some instruction. */
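/* Worked example (illustrative): for vadd.i32 d0, d1, d2 the base integer
   pattern is 0x0000800; the code below ORs in Rd == 0 (bits 15:12, D == 0),
   Rn == 1 (bits 19:16, N == 0), Rm == 2 (bits 3:0, M == 0), Q == 0, U == 0
   and neon_logbits (32) == 2 into bits 21:20, and neon_dp_fixup should then
   produce 0xf2210802 in ARM mode.  */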
12353 static void
12354 neon_three_same (int isquad, int ubit, int size)
12356 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12357 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12358 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
12359 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
12360 inst.instruction |= LOW4 (inst.operands[2].reg);
12361 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
12362 inst.instruction |= (isquad != 0) << 6;
12363 inst.instruction |= (ubit != 0) << 24;
12364 if (size != -1)
12365 inst.instruction |= neon_logbits (size) << 20;
12367 inst.instruction = neon_dp_fixup (inst.instruction);
12370 /* Encode instructions of the form:
12372 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
12373 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
12375 Don't write size if SIZE == -1. */
12377 static void
12378 neon_two_same (int qbit, int ubit, int size)
12380 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12381 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12382 inst.instruction |= LOW4 (inst.operands[1].reg);
12383 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12384 inst.instruction |= (qbit != 0) << 6;
12385 inst.instruction |= (ubit != 0) << 24;
12387 if (size != -1)
12388 inst.instruction |= neon_logbits (size) << 18;
12390 inst.instruction = neon_dp_fixup (inst.instruction);
12393 /* Neon instruction encoders, in approximate order of appearance. */
12395 static void
12396 do_neon_dyadic_i_su (void)
12398 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12399 struct neon_type_el et = neon_check_type (3, rs,
12400 N_EQK, N_EQK, N_SU_32 | N_KEY);
12401 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
12404 static void
12405 do_neon_dyadic_i64_su (void)
12407 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12408 struct neon_type_el et = neon_check_type (3, rs,
12409 N_EQK, N_EQK, N_SU_ALL | N_KEY);
12410 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
12413 static void
12414 neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
12415 unsigned immbits)
12417 unsigned size = et.size >> 3;
12418 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12419 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12420 inst.instruction |= LOW4 (inst.operands[1].reg);
12421 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12422 inst.instruction |= (isquad != 0) << 6;
12423 inst.instruction |= immbits << 16;
12424 inst.instruction |= (size >> 3) << 7;
12425 inst.instruction |= (size & 0x7) << 19;
12426 if (write_ubit)
12427 inst.instruction |= (uval != 0) << 24;
12429 inst.instruction = neon_dp_fixup (inst.instruction);
12432 static void
12433 do_neon_shl_imm (void)
12435 if (!inst.operands[2].isreg)
12437 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12438 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
12439 inst.instruction = NEON_ENC_IMMED (inst.instruction);
12440 neon_imm_shift (FALSE, 0, neon_quad (rs), et, inst.operands[2].imm);
12442 else
12444 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12445 struct neon_type_el et = neon_check_type (3, rs,
12446 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
12447 unsigned int tmp;
12449 /* VSHL/VQSHL 3-register variants have syntax such as:
12450 vshl.xx Dd, Dm, Dn
12451 whereas other 3-register operations encoded by neon_three_same have
12452 syntax like:
12453 vadd.xx Dd, Dn, Dm
12454 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
12455 here. */
12456 tmp = inst.operands[2].reg;
12457 inst.operands[2].reg = inst.operands[1].reg;
12458 inst.operands[1].reg = tmp;
12459 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12460 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
12464 static void
12465 do_neon_qshl_imm (void)
12467 if (!inst.operands[2].isreg)
12469 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12470 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
12472 inst.instruction = NEON_ENC_IMMED (inst.instruction);
12473 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
12474 inst.operands[2].imm);
12476 else
12478 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12479 struct neon_type_el et = neon_check_type (3, rs,
12480 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
12481 unsigned int tmp;
12483 /* See note in do_neon_shl_imm. */
12484 tmp = inst.operands[2].reg;
12485 inst.operands[2].reg = inst.operands[1].reg;
12486 inst.operands[1].reg = tmp;
12487 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12488 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
12492 static void
12493 do_neon_rshl (void)
12495 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12496 struct neon_type_el et = neon_check_type (3, rs,
12497 N_EQK, N_EQK, N_SU_ALL | N_KEY);
12498 unsigned int tmp;
12500 tmp = inst.operands[2].reg;
12501 inst.operands[2].reg = inst.operands[1].reg;
12502 inst.operands[1].reg = tmp;
12503 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
12506 static int
12507 neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
12509 /* Handle .I8 pseudo-instructions. */
12510 if (size == 8)
12512 /* Unfortunately, this will make everything apart from zero out-of-range.
12513 FIXME: is this the intended semantics? There doesn't seem much point in
12514 accepting .I8 if so. */
12515 immediate |= immediate << 8;
12516 size = 16;
12519 if (size >= 32)
12521 if (immediate == (immediate & 0x000000ff))
12523 *immbits = immediate;
12524 return 0x1;
12526 else if (immediate == (immediate & 0x0000ff00))
12528 *immbits = immediate >> 8;
12529 return 0x3;
12531 else if (immediate == (immediate & 0x00ff0000))
12533 *immbits = immediate >> 16;
12534 return 0x5;
12536 else if (immediate == (immediate & 0xff000000))
12538 *immbits = immediate >> 24;
12539 return 0x7;
12541 if ((immediate & 0xffff) != (immediate >> 16))
12542 goto bad_immediate;
12543 immediate &= 0xffff;
12546 if (immediate == (immediate & 0x000000ff))
12548 *immbits = immediate;
12549 return 0x9;
12551 else if (immediate == (immediate & 0x0000ff00))
12553 *immbits = immediate >> 8;
12554 return 0xb;
12557 bad_immediate:
12558 first_error (_("immediate value out of range"));
12559 return FAIL;
12562 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
12563 A, B, C, D. */
12565 static int
12566 neon_bits_same_in_bytes (unsigned imm)
12568 return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
12569 && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
12570 && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
12571 && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
12574 /* For immediate of above form, return 0bABCD. */
12576 static unsigned
12577 neon_squash_bits (unsigned imm)
12579 return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
12580 | ((imm & 0x01000000) >> 21);
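/* For example, 0x00ff00ff satisfies neon_bits_same_in_bytes and squashes
   to 0b0101.  */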
12583 /* Compress quarter-float representation to 0b...000 abcdefgh. */
12585 static unsigned
12586 neon_qfloat_bits (unsigned imm)
12588 return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
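/* For example, the single-precision constant 1.0 (0x3f800000) should
   compress to the 8-bit immediate 0x70.  */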
12591 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
12592 the instruction. *OP is passed as the initial value of the op field, and
12593 may be set to a different value depending on the constant (i.e.
12594 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
12595 MVN). If the immediate looks like a repeated pattern then also
12596 try smaller element sizes. */
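/* For instance, a 32-bit immediate of 0x00ff0000 with OP == 0 should select
   cmode 0x4 with *IMMBITS == 0xff.  */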
12598 static int
12599 neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
12600 unsigned *immbits, int *op, int size,
12601 enum neon_el_type type)
12603 /* Only permit float immediates (including 0.0/-0.0) if the operand type is
12604 float. */
12605 if (type == NT_float && !float_p)
12606 return FAIL;
12608 if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
12610 if (size != 32 || *op == 1)
12611 return FAIL;
12612 *immbits = neon_qfloat_bits (immlo);
12613 return 0xf;
12616 if (size == 64)
12618 if (neon_bits_same_in_bytes (immhi)
12619 && neon_bits_same_in_bytes (immlo))
12621 if (*op == 1)
12622 return FAIL;
12623 *immbits = (neon_squash_bits (immhi) << 4)
12624 | neon_squash_bits (immlo);
12625 *op = 1;
12626 return 0xe;
12629 if (immhi != immlo)
12630 return FAIL;
12633 if (size >= 32)
12635 if (immlo == (immlo & 0x000000ff))
12637 *immbits = immlo;
12638 return 0x0;
12640 else if (immlo == (immlo & 0x0000ff00))
12642 *immbits = immlo >> 8;
12643 return 0x2;
12645 else if (immlo == (immlo & 0x00ff0000))
12647 *immbits = immlo >> 16;
12648 return 0x4;
12650 else if (immlo == (immlo & 0xff000000))
12652 *immbits = immlo >> 24;
12653 return 0x6;
12655 else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
12657 *immbits = (immlo >> 8) & 0xff;
12658 return 0xc;
12660 else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
12662 *immbits = (immlo >> 16) & 0xff;
12663 return 0xd;
12666 if ((immlo & 0xffff) != (immlo >> 16))
12667 return FAIL;
12668 immlo &= 0xffff;
12671 if (size >= 16)
12673 if (immlo == (immlo & 0x000000ff))
12675 *immbits = immlo;
12676 return 0x8;
12678 else if (immlo == (immlo & 0x0000ff00))
12680 *immbits = immlo >> 8;
12681 return 0xa;
12684 if ((immlo & 0xff) != (immlo >> 8))
12685 return FAIL;
12686 immlo &= 0xff;
12689 if (immlo == (immlo & 0x000000ff))
12691 /* Don't allow MVN with 8-bit immediate. */
12692 if (*op == 1)
12693 return FAIL;
12694 *immbits = immlo;
12695 return 0xe;
12698 return FAIL;
12701 /* Write immediate bits [7:0] to the following locations:
12703 |28/24|23 19|18 16|15 4|3 0|
12704 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
12706 This function is used by VMOV/VMVN/VORR/VBIC. */
12708 static void
12709 neon_write_immbits (unsigned immbits)
12711 inst.instruction |= immbits & 0xf;
12712 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
12713 inst.instruction |= ((immbits >> 7) & 0x1) << 24;
12716 /* Invert low-order SIZE bits of XHI:XLO. */
12718 static void
12719 neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
12721 unsigned immlo = xlo ? *xlo : 0;
12722 unsigned immhi = xhi ? *xhi : 0;
12724 switch (size)
12726 case 8:
12727 immlo = (~immlo) & 0xff;
12728 break;
12730 case 16:
12731 immlo = (~immlo) & 0xffff;
12732 break;
12734 case 64:
12735 immhi = (~immhi) & 0xffffffff;
12736 /* fall through. */
12738 case 32:
12739 immlo = (~immlo) & 0xffffffff;
12740 break;
12742 default:
12743 abort ();
12746 if (xlo)
12747 *xlo = immlo;
12749 if (xhi)
12750 *xhi = immhi;
12753 static void
12754 do_neon_logic (void)
12756 if (inst.operands[2].present && inst.operands[2].isreg)
12758 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12759 neon_check_type (3, rs, N_IGNORE_TYPE);
12760 /* U bit and size field were set as part of the bitmask. */
12761 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12762 neon_three_same (neon_quad (rs), 0, -1);
12764 else
12766 enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
12767 struct neon_type_el et = neon_check_type (2, rs,
12768 N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
12769 enum neon_opc opcode = inst.instruction & 0x0fffffff;
12770 unsigned immbits;
12771 int cmode;
12773 if (et.type == NT_invtype)
12774 return;
12776 inst.instruction = NEON_ENC_IMMED (inst.instruction);
12778 immbits = inst.operands[1].imm;
12779 if (et.size == 64)
12781 /* .i64 is a pseudo-op, so the immediate must be a repeating
12782 pattern. */
12783 if (immbits != (inst.operands[1].regisimm ?
12784 inst.operands[1].reg : 0))
12786 /* Set immbits to an invalid constant. */
12787 immbits = 0xdeadbeef;
12791 switch (opcode)
12793 case N_MNEM_vbic:
12794 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
12795 break;
12797 case N_MNEM_vorr:
12798 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
12799 break;
12801 case N_MNEM_vand:
12802 /* Pseudo-instruction for VBIC. */
12803 neon_invert_size (&immbits, 0, et.size);
12804 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
12805 break;
12807 case N_MNEM_vorn:
12808 /* Pseudo-instruction for VORR. */
12809 neon_invert_size (&immbits, 0, et.size);
12810 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
12811 break;
12813 default:
12814 abort ();
12817 if (cmode == FAIL)
12818 return;
12820 inst.instruction |= neon_quad (rs) << 6;
12821 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12822 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12823 inst.instruction |= cmode << 8;
12824 neon_write_immbits (immbits);
12826 inst.instruction = neon_dp_fixup (inst.instruction);
12830 static void
12831 do_neon_bitfield (void)
12833 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12834 neon_check_type (3, rs, N_IGNORE_TYPE);
12835 neon_three_same (neon_quad (rs), 0, -1);
12838 static void
12839 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
12840 unsigned destbits)
12842 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12843 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
12844 types | N_KEY);
12845 if (et.type == NT_float)
12847 inst.instruction = NEON_ENC_FLOAT (inst.instruction);
12848 neon_three_same (neon_quad (rs), 0, -1);
12850 else
12852 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12853 neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
12857 static void
12858 do_neon_dyadic_if_su (void)
12860 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
12863 static void
12864 do_neon_dyadic_if_su_d (void)
12866 /* This version only allows D registers, but that constraint is enforced during
12867 operand parsing so we don't need to do anything extra here. */
12868 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
12871 static void
12872 do_neon_dyadic_if_i_d (void)
12874 /* The "untyped" case can't happen. Do this to stop the "U" bit being
12875 affected if we specify unsigned args. */
12876 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
12879 enum vfp_or_neon_is_neon_bits
12881 NEON_CHECK_CC = 1,
12882 NEON_CHECK_ARCH = 2
12885 /* Call this function if an instruction which may have belonged to the VFP or
12886 Neon instruction sets, but turned out to be a Neon instruction (due to the
12887 operand types involved, etc.). We have to check and/or fix-up a couple of
12888 things:
12890 - Make sure the user hasn't attempted to make a Neon instruction
12891 conditional.
12892 - Alter the value in the condition code field if necessary.
12893 - Make sure that the arch supports Neon instructions.
12895 Which of these operations take place depends on bits from enum
12896 vfp_or_neon_is_neon_bits.
12898 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
12899 current instruction's condition is COND_ALWAYS, the condition field is
12900 changed to inst.uncond_value. This is necessary because instructions shared
12901 between VFP and Neon may be conditional for the VFP variants only, and the
12902 unconditional Neon version must have, e.g., 0xF in the condition field. */
12904 static int
12905 vfp_or_neon_is_neon (unsigned check)
12907 /* Conditions are always legal in Thumb mode (IT blocks). */
12908 if (!thumb_mode && (check & NEON_CHECK_CC))
12910 if (inst.cond != COND_ALWAYS)
12912 first_error (_(BAD_COND));
12913 return FAIL;
12915 if (inst.uncond_value != -1)
12916 inst.instruction |= inst.uncond_value << 28;
12919 if ((check & NEON_CHECK_ARCH)
12920 && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
12922 first_error (_(BAD_FPU));
12923 return FAIL;
12926 return SUCCESS;
12929 static void
12930 do_neon_addsub_if_i (void)
12932 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
12933 return;
12935 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12936 return;
12938 /* The "untyped" case can't happen. Do this to stop the "U" bit being
12939 affected if we specify unsigned args. */
12940 neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
12943 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
12944 result to be:
12945 V<op> A,B (A is operand 0, B is operand 2)
12946 to mean:
12947 V<op> A,B,A
12948 not:
12949 V<op> A,B,B
12950 so handle that case specially. */
12952 static void
12953 neon_exchange_operands (void)
12955 void *scratch = alloca (sizeof (inst.operands[0]));
12956 if (inst.operands[1].present)
12958 /* Swap operands[1] and operands[2]. */
12959 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
12960 inst.operands[1] = inst.operands[2];
12961 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
12963 else
12965 inst.operands[1] = inst.operands[2];
12966 inst.operands[2] = inst.operands[0];
12970 static void
12971 neon_compare (unsigned regtypes, unsigned immtypes, int invert)
12973 if (inst.operands[2].isreg)
12975 if (invert)
12976 neon_exchange_operands ();
12977 neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
12979 else
12981 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12982 struct neon_type_el et = neon_check_type (2, rs,
12983 N_EQK | N_SIZ, immtypes | N_KEY);
12985 inst.instruction = NEON_ENC_IMMED (inst.instruction);
12986 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12987 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12988 inst.instruction |= LOW4 (inst.operands[1].reg);
12989 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12990 inst.instruction |= neon_quad (rs) << 6;
12991 inst.instruction |= (et.type == NT_float) << 10;
12992 inst.instruction |= neon_logbits (et.size) << 18;
12994 inst.instruction = neon_dp_fixup (inst.instruction);
12998 static void
12999 do_neon_cmp (void)
13001 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
13004 static void
13005 do_neon_cmp_inv (void)
13007 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
13010 static void
13011 do_neon_ceq (void)
13013 neon_compare (N_IF_32, N_IF_32, FALSE);
13016 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
13017 scalars, which are encoded in 5 bits, M : Rm.
13018 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
13019 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
13020 index in M. */
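/* For example, a 16-bit scalar D3[2] is returned as the 5-bit value 0b10011:
   Rm[2:0] == 0b011 holds the register number and M:Rm[3] == 0b10 the index.  */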
13022 static unsigned
13023 neon_scalar_for_mul (unsigned scalar, unsigned elsize)
13025 unsigned regno = NEON_SCALAR_REG (scalar);
13026 unsigned elno = NEON_SCALAR_INDEX (scalar);
13028 switch (elsize)
13030 case 16:
13031 if (regno > 7 || elno > 3)
13032 goto bad_scalar;
13033 return regno | (elno << 3);
13035 case 32:
13036 if (regno > 15 || elno > 1)
13037 goto bad_scalar;
13038 return regno | (elno << 4);
13040 default:
13041 bad_scalar:
13042 first_error (_("scalar out of range for multiply instruction"));
13045 return 0;
13048 /* Encode multiply / multiply-accumulate scalar instructions. */
13050 static void
13051 neon_mul_mac (struct neon_type_el et, int ubit)
13053 unsigned scalar;
13055 /* Give a more helpful error message if we have an invalid type. */
13056 if (et.type == NT_invtype)
13057 return;
13059 scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
13060 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13061 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13062 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
13063 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
13064 inst.instruction |= LOW4 (scalar);
13065 inst.instruction |= HI1 (scalar) << 5;
13066 inst.instruction |= (et.type == NT_float) << 8;
13067 inst.instruction |= neon_logbits (et.size) << 20;
13068 inst.instruction |= (ubit != 0) << 24;
13070 inst.instruction = neon_dp_fixup (inst.instruction);
13073 static void
13074 do_neon_mac_maybe_scalar (void)
13076 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
13077 return;
13079 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
13080 return;
13082 if (inst.operands[2].isscalar)
13084 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
13085 struct neon_type_el et = neon_check_type (3, rs,
13086 N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
13087 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
13088 neon_mul_mac (et, neon_quad (rs));
13090 else
13092 /* The "untyped" case can't happen. Do this to stop the "U" bit being
13093 affected if we specify unsigned args. */
13094 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
13098 static void
13099 do_neon_tst (void)
13101 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13102 struct neon_type_el et = neon_check_type (3, rs,
13103 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
13104 neon_three_same (neon_quad (rs), 0, et.size);
13107 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
13108 same types as the MAC equivalents. The polynomial type for this instruction
13109 is encoded the same as the integer type. */
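/* Illustrative examples (not from the original source): `vmul.p8 d0, d1, d2'
   is accepted by the three-register path below and encoded like the integer
   forms, whereas a scalar operand such as `vmul.p8 d0, d1, d2[0]' is
   rejected, because the scalar path (do_neon_mac_maybe_scalar) only allows
   I16, I32 and F32 element types.  */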
13111 static void
13112 do_neon_mul (void)
13114 if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
13115 return;
13117 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
13118 return;
13120 if (inst.operands[2].isscalar)
13121 do_neon_mac_maybe_scalar ();
13122 else
13123 neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
13126 static void
13127 do_neon_qdmulh (void)
13129 if (inst.operands[2].isscalar)
13131 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
13132 struct neon_type_el et = neon_check_type (3, rs,
13133 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
13134 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
13135 neon_mul_mac (et, neon_quad (rs));
13137 else
13139 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13140 struct neon_type_el et = neon_check_type (3, rs,
13141 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
13142 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
13143 /* The U bit (rounding) comes from bit mask. */
13144 neon_three_same (neon_quad (rs), 0, et.size);
13148 static void
13149 do_neon_fcmp_absolute (void)
13151 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13152 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
13153 /* Size field comes from bit mask. */
13154 neon_three_same (neon_quad (rs), 1, -1);
13157 static void
13158 do_neon_fcmp_absolute_inv (void)
13160 neon_exchange_operands ();
13161 do_neon_fcmp_absolute ();
13164 static void
13165 do_neon_step (void)
13167 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13168 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
13169 neon_three_same (neon_quad (rs), 0, -1);
13172 static void
13173 do_neon_abs_neg (void)
13175 enum neon_shape rs;
13176 struct neon_type_el et;
13178 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
13179 return;
13181 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
13182 return;
13184 rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13185 et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);
13187 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13188 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13189 inst.instruction |= LOW4 (inst.operands[1].reg);
13190 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
13191 inst.instruction |= neon_quad (rs) << 6;
13192 inst.instruction |= (et.type == NT_float) << 10;
13193 inst.instruction |= neon_logbits (et.size) << 18;
13195 inst.instruction = neon_dp_fixup (inst.instruction);
13198 static void
13199 do_neon_sli (void)
13201 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
13202 struct neon_type_el et = neon_check_type (2, rs,
13203 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
13204 int imm = inst.operands[2].imm;
13205 constraint (imm < 0 || (unsigned)imm >= et.size,
13206 _("immediate out of range for insert"));
13207 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
13210 static void
13211 do_neon_sri (void)
13213 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
13214 struct neon_type_el et = neon_check_type (2, rs,
13215 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
13216 int imm = inst.operands[2].imm;
13217 constraint (imm < 1 || (unsigned)imm > et.size,
13218 _("immediate out of range for insert"));
13219 neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
13222 static void
13223 do_neon_qshlu_imm (void)
13225 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
13226 struct neon_type_el et = neon_check_type (2, rs,
13227 N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
13228 int imm = inst.operands[2].imm;
13229 constraint (imm < 0 || (unsigned)imm >= et.size,
13230 _("immediate out of range for shift"));
13231 /* Only encodes the 'U present' variant of the instruction.
13232 In this case, signed types have OP (bit 8) set to 0.
13233 Unsigned types have OP set to 1. */
13234 inst.instruction |= (et.type == NT_unsigned) << 8;
13235 /* The rest of the bits are the same as other immediate shifts. */
13236 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
13239 static void
13240 do_neon_qmovn (void)
13242 struct neon_type_el et = neon_check_type (2, NS_DQ,
13243 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
13244 /* Saturating move where operands can be signed or unsigned, and the
13245 destination has the same signedness. */
13246 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
13247 if (et.type == NT_unsigned)
13248 inst.instruction |= 0xc0;
13249 else
13250 inst.instruction |= 0x80;
13251 neon_two_same (0, 1, et.size / 2);
13254 static void
13255 do_neon_qmovun (void)
13257 struct neon_type_el et = neon_check_type (2, NS_DQ,
13258 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
13259 /* Saturating move with unsigned results. Operands must be signed. */
13260 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
13261 neon_two_same (0, 1, et.size / 2);
13264 static void
13265 do_neon_rshift_sat_narrow (void)
13267 /* FIXME: Types for narrowing. If operands are signed, results can be signed
13268 or unsigned. If operands are unsigned, results must also be unsigned. */
13269 struct neon_type_el et = neon_check_type (2, NS_DQI,
13270 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
13271 int imm = inst.operands[2].imm;
13272 /* This gets the bounds check, size encoding and immediate bits calculation
13273 right. */
13274 et.size /= 2;
13276 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
13277 VQMOVN.I<size> <Dd>, <Qm>. */
13278 if (imm == 0)
13280 inst.operands[2].present = 0;
13281 inst.instruction = N_MNEM_vqmovn;
13282 do_neon_qmovn ();
13283 return;
13286 constraint (imm < 1 || (unsigned)imm > et.size,
13287 _("immediate out of range"));
13288 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
13291 static void
13292 do_neon_rshift_sat_narrow_u (void)
13294 /* FIXME: Types for narrowing. If operands are signed, results can be signed
13295 or unsigned. If operands are unsigned, results must also be unsigned. */
13296 struct neon_type_el et = neon_check_type (2, NS_DQI,
13297 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
13298 int imm = inst.operands[2].imm;
13299 /* This gets the bounds check, size encoding and immediate bits calculation
13300 right. */
13301 et.size /= 2;
13303 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
13304 VQMOVUN.I<size> <Dd>, <Qm>. */
13305 if (imm == 0)
13307 inst.operands[2].present = 0;
13308 inst.instruction = N_MNEM_vqmovun;
13309 do_neon_qmovun ();
13310 return;
13313 constraint (imm < 1 || (unsigned)imm > et.size,
13314 _("immediate out of range"));
13315 /* FIXME: The manual is kind of unclear about what value U should have in
13316 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
13317 must be 1. */
13318 neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
13321 static void
13322 do_neon_movn (void)
13324 struct neon_type_el et = neon_check_type (2, NS_DQ,
13325 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
13326 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
13327 neon_two_same (0, 1, et.size / 2);
13330 static void
13331 do_neon_rshift_narrow (void)
13333 struct neon_type_el et = neon_check_type (2, NS_DQI,
13334 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
13335 int imm = inst.operands[2].imm;
13336 /* This gets the bounds check, size encoding and immediate bits calculation
13337 right. */
13338 et.size /= 2;
13340 /* If immediate is zero then we are a pseudo-instruction for
13341 VMOVN.I<size> <Dd>, <Qm> */
13342 if (imm == 0)
13344 inst.operands[2].present = 0;
13345 inst.instruction = N_MNEM_vmovn;
13346 do_neon_movn ();
13347 return;
13350 constraint (imm < 1 || (unsigned)imm > et.size,
13351 _("immediate out of range for narrowing operation"));
13352 neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
13355 static void
13356 do_neon_shll (void)
13358 /* FIXME: Type checking when lengthening. */
13359 struct neon_type_el et = neon_check_type (2, NS_QDI,
13360 N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
13361 unsigned imm = inst.operands[2].imm;
13363 if (imm == et.size)
13365 /* Maximum shift variant. */
13366 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
13367 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13368 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13369 inst.instruction |= LOW4 (inst.operands[1].reg);
13370 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
13371 inst.instruction |= neon_logbits (et.size) << 18;
13373 inst.instruction = neon_dp_fixup (inst.instruction);
13375 else
13377 /* A more-specific type check for non-max versions. */
13378 et = neon_check_type (2, NS_QDI,
13379 N_EQK | N_DBL, N_SU_32 | N_KEY);
13380 inst.instruction = NEON_ENC_IMMED (inst.instruction);
13381 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
13385 /* Check the various types for the VCVT instruction, and return which version
13386 the current instruction is. */
13388 static int
13389 neon_cvt_flavour (enum neon_shape rs)
13391 #define CVT_VAR(C,X,Y) \
13392 et = neon_check_type (2, rs, whole_reg | (X), whole_reg | (Y)); \
13393 if (et.type != NT_invtype) \
13395 inst.error = NULL; \
13396 return (C); \
13398 struct neon_type_el et;
13399 unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
13400 || rs == NS_FF) ? N_VFP : 0;
13401 /* The instruction versions which take an immediate take one register
13402 argument, which is extended to the width of the full register. Thus the
13403 "source" and "destination" registers must have the same width. Hack that
13404 here by making the size equal to the key (wider, in this case) operand. */
13405 unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;
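/* For orientation (derived from the table below): flavours 0-3 are the
   integer<->single-precision conversions, 4-5 the single<->half-precision
   pairs, 6-11 the double-precision VFP conversions, and 12-19 the VFP
   fixed-point (bitshift) forms.  do_neon_cvt treats flavour 6 and above as
   VFP-only.  */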
13407 CVT_VAR (0, N_S32, N_F32);
13408 CVT_VAR (1, N_U32, N_F32);
13409 CVT_VAR (2, N_F32, N_S32);
13410 CVT_VAR (3, N_F32, N_U32);
13411 /* Half-precision conversions. */
13412 CVT_VAR (4, N_F32, N_F16);
13413 CVT_VAR (5, N_F16, N_F32);
13415 whole_reg = N_VFP;
13417 /* VFP instructions. */
13418 CVT_VAR (6, N_F32, N_F64);
13419 CVT_VAR (7, N_F64, N_F32);
13420 CVT_VAR (8, N_S32, N_F64 | key);
13421 CVT_VAR (9, N_U32, N_F64 | key);
13422 CVT_VAR (10, N_F64 | key, N_S32);
13423 CVT_VAR (11, N_F64 | key, N_U32);
13424 /* VFP instructions with bitshift. */
13425 CVT_VAR (12, N_F32 | key, N_S16);
13426 CVT_VAR (13, N_F32 | key, N_U16);
13427 CVT_VAR (14, N_F64 | key, N_S16);
13428 CVT_VAR (15, N_F64 | key, N_U16);
13429 CVT_VAR (16, N_S16, N_F32 | key);
13430 CVT_VAR (17, N_U16, N_F32 | key);
13431 CVT_VAR (18, N_S16, N_F64 | key);
13432 CVT_VAR (19, N_U16, N_F64 | key);
13434 return -1;
13435 #undef CVT_VAR
13438 /* Neon-syntax VFP conversions. */
13440 static void
13441 do_vfp_nsyn_cvt (enum neon_shape rs, int flavour)
13443 const char *opname = 0;
13445 if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI)
13447 /* Conversions with immediate bitshift. */
13448 const char *enc[] =
13450 "ftosls",
13451 "ftouls",
13452 "fsltos",
13453 "fultos",
13454 NULL,
13455 NULL,
13456 NULL,
13457 NULL,
13458 "ftosld",
13459 "ftould",
13460 "fsltod",
13461 "fultod",
13462 "fshtos",
13463 "fuhtos",
13464 "fshtod",
13465 "fuhtod",
13466 "ftoshs",
13467 "ftouhs",
13468 "ftoshd",
13469 "ftouhd"
13472 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
13474 opname = enc[flavour];
13475 constraint (inst.operands[0].reg != inst.operands[1].reg,
13476 _("operands 0 and 1 must be the same register"));
13477 inst.operands[1] = inst.operands[2];
13478 memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
13481 else
13483 /* Conversions without bitshift. */
13484 const char *enc[] =
13486 "ftosis",
13487 "ftouis",
13488 "fsitos",
13489 "fuitos",
13490 NULL,
13491 NULL,
13492 "fcvtsd",
13493 "fcvtds",
13494 "ftosid",
13495 "ftouid",
13496 "fsitod",
13497 "fuitod"
13500 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
13501 opname = enc[flavour];
13504 if (opname)
13505 do_vfp_nsyn_opcode (opname);
13508 static void
13509 do_vfp_nsyn_cvtz (void)
13511 enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_NULL);
13512 int flavour = neon_cvt_flavour (rs);
13513 const char *enc[] =
13515 "ftosizs",
13516 "ftouizs",
13517 NULL,
13518 NULL,
13519 NULL,
13520 NULL,
13521 NULL,
13522 NULL,
13523 "ftosizd",
13524 "ftouizd"
13527 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
13528 do_vfp_nsyn_opcode (enc[flavour]);
13531 static void
13532 do_neon_cvt (void)
13534 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
13535 NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ, NS_NULL);
13536 int flavour = neon_cvt_flavour (rs);
13538 /* VFP rather than Neon conversions. */
13539 if (flavour >= 6)
13541 do_vfp_nsyn_cvt (rs, flavour);
13542 return;
13545 switch (rs)
13547 case NS_DDI:
13548 case NS_QQI:
13550 unsigned immbits;
13551 unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
13553 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
13554 return;
13556 /* Fixed-point conversion with #0 immediate is encoded as an
13557 integer conversion. */
13558 if (inst.operands[2].present && inst.operands[2].imm == 0)
13559 goto int_encode;
13560 immbits = 32 - inst.operands[2].imm;
13561 inst.instruction = NEON_ENC_IMMED (inst.instruction);
13562 if (flavour != -1)
13563 inst.instruction |= enctab[flavour];
13564 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13565 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13566 inst.instruction |= LOW4 (inst.operands[1].reg);
13567 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
13568 inst.instruction |= neon_quad (rs) << 6;
13569 inst.instruction |= 1 << 21;
13570 inst.instruction |= immbits << 16;
13572 inst.instruction = neon_dp_fixup (inst.instruction);
13574 break;
13576 case NS_DD:
13577 case NS_QQ:
13578 int_encode:
13580 unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };
13582 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
13584 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
13585 return;
13587 if (flavour != -1)
13588 inst.instruction |= enctab[flavour];
13590 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13591 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13592 inst.instruction |= LOW4 (inst.operands[1].reg);
13593 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
13594 inst.instruction |= neon_quad (rs) << 6;
13595 inst.instruction |= 2 << 18;
13597 inst.instruction = neon_dp_fixup (inst.instruction);
13599 break;
13601 /* Half-precision conversions for Advanced SIMD -- neon. */
13602 case NS_QD:
13603 case NS_DQ:
13605 if ((rs == NS_DQ)
13606 && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
13608 as_bad (_("operand size must match register width"));
13609 break;
13612 if ((rs == NS_QD)
13613 && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
13615 as_bad (_("operand size must match register width"));
13616 break;
13619 if (rs == NS_DQ)
13620 inst.instruction = 0x3b60600;
13621 else
13622 inst.instruction = 0x3b60700;
13624 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13625 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13626 inst.instruction |= LOW4 (inst.operands[1].reg);
13627 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
13628 inst.instruction = neon_dp_fixup (inst.instruction);
13629 break;
13631 default:
13632 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
13633 do_vfp_nsyn_cvt (rs, flavour);
13637 static void
13638 do_neon_cvtb (void)
13640 inst.instruction = 0xeb20a40;
13642 /* The sizes are attached to the mnemonic. */
13643 if (inst.vectype.el[0].type != NT_invtype
13644 && inst.vectype.el[0].size == 16)
13645 inst.instruction |= 0x00010000;
13647 /* Programmer's syntax: the sizes are attached to the operands. */
13648 else if (inst.operands[0].vectype.type != NT_invtype
13649 && inst.operands[0].vectype.size == 16)
13650 inst.instruction |= 0x00010000;
13652 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
13653 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
13654 do_vfp_cond_or_thumb ();
13658 static void
13659 do_neon_cvtt (void)
13661 do_neon_cvtb ();
13662 inst.instruction |= 0x80;
13665 static void
13666 neon_move_immediate (void)
13668 enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
13669 struct neon_type_el et = neon_check_type (2, rs,
13670 N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
13671 unsigned immlo, immhi = 0, immbits;
13672 int op, cmode, float_p;
13674 constraint (et.type == NT_invtype,
13675 _("operand size must be specified for immediate VMOV"));
13677 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
13678 op = (inst.instruction & (1 << 5)) != 0;
13680 immlo = inst.operands[1].imm;
13681 if (inst.operands[1].regisimm)
13682 immhi = inst.operands[1].reg;
13684 constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
13685 _("immediate has bits set outside the operand size"));
13687 float_p = inst.operands[1].immisfloat;
13689 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
13690 et.size, et.type)) == FAIL)
13692 /* Invert relevant bits only. */
13693 neon_invert_size (&immlo, &immhi, et.size);
13694 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
13695 with one or the other; those cases are caught by
13696 neon_cmode_for_move_imm. */
13697 op = !op;
13698 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
13699 &op, et.size, et.type)) == FAIL)
13701 first_error (_("immediate out of range"));
13702 return;
13706 inst.instruction &= ~(1 << 5);
13707 inst.instruction |= op << 5;
13709 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13710 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13711 inst.instruction |= neon_quad (rs) << 6;
13712 inst.instruction |= cmode << 8;
13714 neon_write_immbits (immbits);
13717 static void
13718 do_neon_mvn (void)
13720 if (inst.operands[1].isreg)
13722 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13724 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
13725 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13726 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13727 inst.instruction |= LOW4 (inst.operands[1].reg);
13728 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
13729 inst.instruction |= neon_quad (rs) << 6;
13731 else
13733 inst.instruction = NEON_ENC_IMMED (inst.instruction);
13734 neon_move_immediate ();
13737 inst.instruction = neon_dp_fixup (inst.instruction);
13740 /* Encode instructions of form:
13742 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
13743 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
13745 static void
13746 neon_mixed_length (struct neon_type_el et, unsigned size)
13748 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13749 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13750 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
13751 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
13752 inst.instruction |= LOW4 (inst.operands[2].reg);
13753 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
13754 inst.instruction |= (et.type == NT_unsigned) << 24;
13755 inst.instruction |= neon_logbits (size) << 20;
13757 inst.instruction = neon_dp_fixup (inst.instruction);
13760 static void
13761 do_neon_dyadic_long (void)
13763 /* FIXME: Type checking for lengthening op. */
13764 struct neon_type_el et = neon_check_type (3, NS_QDD,
13765 N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
13766 neon_mixed_length (et, et.size);
13769 static void
13770 do_neon_abal (void)
13772 struct neon_type_el et = neon_check_type (3, NS_QDD,
13773 N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
13774 neon_mixed_length (et, et.size);
13777 static void
13778 neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
13780 if (inst.operands[2].isscalar)
13782 struct neon_type_el et = neon_check_type (3, NS_QDS,
13783 N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
13784 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
13785 neon_mul_mac (et, et.type == NT_unsigned);
13787 else
13789 struct neon_type_el et = neon_check_type (3, NS_QDD,
13790 N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
13791 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
13792 neon_mixed_length (et, et.size);
13796 static void
13797 do_neon_mac_maybe_scalar_long (void)
13799 neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
13802 static void
13803 do_neon_dyadic_wide (void)
13805 struct neon_type_el et = neon_check_type (3, NS_QQD,
13806 N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
13807 neon_mixed_length (et, et.size);
13810 static void
13811 do_neon_dyadic_narrow (void)
13813 struct neon_type_el et = neon_check_type (3, NS_QDD,
13814 N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
13815 /* Operand sign is unimportant, and the U bit is part of the opcode,
13816 so force the operand type to integer. */
13817 et.type = NT_integer;
13818 neon_mixed_length (et, et.size / 2);
13821 static void
13822 do_neon_mul_sat_scalar_long (void)
13824 neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
13827 static void
13828 do_neon_vmull (void)
13830 if (inst.operands[2].isscalar)
13831 do_neon_mac_maybe_scalar_long ();
13832 else
13834 struct neon_type_el et = neon_check_type (3, NS_QDD,
13835 N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_KEY);
13836 if (et.type == NT_poly)
13837 inst.instruction = NEON_ENC_POLY (inst.instruction);
13838 else
13839 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
13840 /* For polynomial encoding, size field must be 0b00 and the U bit must be
13841 zero. Should be OK as-is. */
13842 neon_mixed_length (et, et.size);
13846 static void
13847 do_neon_ext (void)
13849 enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
13850 struct neon_type_el et = neon_check_type (3, rs,
13851 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
13852 unsigned imm = (inst.operands[3].imm * et.size) / 8;
13854 constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
13855 _("shift out of range"));
13856 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13857 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13858 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
13859 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
13860 inst.instruction |= LOW4 (inst.operands[2].reg);
13861 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
13862 inst.instruction |= neon_quad (rs) << 6;
13863 inst.instruction |= imm << 8;
13865 inst.instruction = neon_dp_fixup (inst.instruction);
13868 static void
13869 do_neon_rev (void)
13871 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13872 struct neon_type_el et = neon_check_type (2, rs,
13873 N_EQK, N_8 | N_16 | N_32 | N_KEY);
13874 unsigned op = (inst.instruction >> 7) & 3;
13875 /* N (width of reversed regions) is encoded as part of the bitmask. We
13876 extract it here to check the elements to be reversed are smaller.
13877 Otherwise we'd get a reserved instruction. */
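/* For example (illustrative): VREV16 (op == 2) reverses 16-bit regions, so
   only 8-bit element types pass the constraint below; VREV32 (op == 1)
   accepts .8 and .16, and VREV64 (op == 0) accepts .8, .16 and .32.  */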
13878 unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
13879 gas_assert (elsize != 0);
13880 constraint (et.size >= elsize,
13881 _("elements must be smaller than reversal region"));
13882 neon_two_same (neon_quad (rs), 1, et.size);
13885 static void
13886 do_neon_dup (void)
13888 if (inst.operands[1].isscalar)
13890 enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
13891 struct neon_type_el et = neon_check_type (2, rs,
13892 N_EQK, N_8 | N_16 | N_32 | N_KEY);
13893 unsigned sizebits = et.size >> 3;
13894 unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
13895 int logsize = neon_logbits (et.size);
13896 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;
13898 if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
13899 return;
13901 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
13902 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13903 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13904 inst.instruction |= LOW4 (dm);
13905 inst.instruction |= HI1 (dm) << 5;
13906 inst.instruction |= neon_quad (rs) << 6;
13907 inst.instruction |= x << 17;
13908 inst.instruction |= sizebits << 16;
13910 inst.instruction = neon_dp_fixup (inst.instruction);
13912 else
13914 enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
13915 struct neon_type_el et = neon_check_type (2, rs,
13916 N_8 | N_16 | N_32 | N_KEY, N_EQK);
13917 /* Duplicate ARM register to lanes of vector. */
13918 inst.instruction = NEON_ENC_ARMREG (inst.instruction);
13919 switch (et.size)
13921 case 8: inst.instruction |= 0x400000; break;
13922 case 16: inst.instruction |= 0x000020; break;
13923 case 32: inst.instruction |= 0x000000; break;
13924 default: break;
13926 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
13927 inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
13928 inst.instruction |= HI1 (inst.operands[0].reg) << 7;
13929 inst.instruction |= neon_quad (rs) << 21;
13930 /* The encoding for this instruction is identical for the ARM and Thumb
13931 variants, except for the condition field. */
13932 do_vfp_cond_or_thumb ();
13936 /* VMOV has particularly many variations. It can be one of:
13937 0. VMOV<c><q> <Qd>, <Qm>
13938 1. VMOV<c><q> <Dd>, <Dm>
13939 (Register operations, which are VORR with Rm = Rn.)
13940 2. VMOV<c><q>.<dt> <Qd>, #<imm>
13941 3. VMOV<c><q>.<dt> <Dd>, #<imm>
13942 (Immediate loads.)
13943 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
13944 (ARM register to scalar.)
13945 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
13946 (Two ARM registers to vector.)
13947 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
13948 (Scalar to ARM register.)
13949 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
13950 (Vector to two ARM registers.)
13951 8. VMOV.F32 <Sd>, <Sm>
13952 9. VMOV.F64 <Dd>, <Dm>
13953 (VFP register moves.)
13954 10. VMOV.F32 <Sd>, #imm
13955 11. VMOV.F64 <Dd>, #imm
13956 (VFP float immediate load.)
13957 12. VMOV <Rd>, <Sm>
13958 (VFP single to ARM reg.)
13959 13. VMOV <Sd>, <Rm>
13960 (ARM reg to VFP single.)
13961 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
13962 (Two ARM regs to two VFP singles.)
13963 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
13964 (Two VFP singles to two ARM regs.)
13966 These cases can be disambiguated using neon_select_shape, except cases 1/9
13967 and 3/11 which depend on the operand type too.
13969 All the encoded bits are hardcoded by this function.
13971 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
13972 Cases 5, 7 may be used with VFPv2 and above.
13974 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
13975 can specify a type where it doesn't make sense to, and is ignored). */
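/* For instance (illustrative): `vmov d0, d1' selects NS_DD and, with no type
   or a non-F64 type, falls through to the VORR-style register move of cases
   0/1; `vmov.f64 d0, d1' becomes the VFP fcpyd form (case 9); and
   `vmov.f64 d0, #1.0' is case 11 (fconstd).  */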
13977 static void
13978 do_neon_mov (void)
13980 enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
13981 NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR, NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
13982 NS_NULL);
13983 struct neon_type_el et;
13984 const char *ldconst = 0;
13986 switch (rs)
13988 case NS_DD: /* case 1/9. */
13989 et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
13990 /* It is not an error here if no type is given. */
13991 inst.error = NULL;
13992 if (et.type == NT_float && et.size == 64)
13994 do_vfp_nsyn_opcode ("fcpyd");
13995 break;
13997 /* fall through. */
13999 case NS_QQ: /* case 0/1. */
14001 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14002 return;
14003 /* The architecture manual I have doesn't explicitly state which
14004 value the U bit should have for register->register moves, but
14005 the equivalent VORR instruction has U = 0, so do that. */
14006 inst.instruction = 0x0200110;
14007 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14008 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14009 inst.instruction |= LOW4 (inst.operands[1].reg);
14010 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14011 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
14012 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
14013 inst.instruction |= neon_quad (rs) << 6;
14015 inst.instruction = neon_dp_fixup (inst.instruction);
14017 break;
14019 case NS_DI: /* case 3/11. */
14020 et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
14021 inst.error = NULL;
14022 if (et.type == NT_float && et.size == 64)
14024 /* case 11 (fconstd). */
14025 ldconst = "fconstd";
14026 goto encode_fconstd;
14028 /* fall through. */
14030 case NS_QI: /* case 2/3. */
14031 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14032 return;
14033 inst.instruction = 0x0800010;
14034 neon_move_immediate ();
14035 inst.instruction = neon_dp_fixup (inst.instruction);
14036 break;
14038 case NS_SR: /* case 4. */
14040 unsigned bcdebits = 0;
14041 struct neon_type_el et = neon_check_type (2, NS_NULL,
14042 N_8 | N_16 | N_32 | N_KEY, N_EQK);
14043 int logsize = neon_logbits (et.size);
14044 unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
14045 unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);
14047 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
14048 _(BAD_FPU));
14049 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
14050 && et.size != 32, _(BAD_FPU));
14051 constraint (et.type == NT_invtype, _("bad type for scalar"));
14052 constraint (x >= 64 / et.size, _("scalar index out of range"));
14054 switch (et.size)
14056 case 8: bcdebits = 0x8; break;
14057 case 16: bcdebits = 0x1; break;
14058 case 32: bcdebits = 0x0; break;
14059 default: ;
14062 bcdebits |= x << logsize;
14064 inst.instruction = 0xe000b10;
14065 do_vfp_cond_or_thumb ();
14066 inst.instruction |= LOW4 (dn) << 16;
14067 inst.instruction |= HI1 (dn) << 7;
14068 inst.instruction |= inst.operands[1].reg << 12;
14069 inst.instruction |= (bcdebits & 3) << 5;
14070 inst.instruction |= (bcdebits >> 2) << 21;
14072 break;
14074 case NS_DRR: /* case 5 (fmdrr). */
14075 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
14076 _(BAD_FPU));
14078 inst.instruction = 0xc400b10;
14079 do_vfp_cond_or_thumb ();
14080 inst.instruction |= LOW4 (inst.operands[0].reg);
14081 inst.instruction |= HI1 (inst.operands[0].reg) << 5;
14082 inst.instruction |= inst.operands[1].reg << 12;
14083 inst.instruction |= inst.operands[2].reg << 16;
14084 break;
14086 case NS_RS: /* case 6. */
14088 struct neon_type_el et = neon_check_type (2, NS_NULL,
14089 N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
14090 unsigned logsize = neon_logbits (et.size);
14091 unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
14092 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
14093 unsigned abcdebits = 0;
14095 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
14096 _(BAD_FPU));
14097 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
14098 && et.size != 32, _(BAD_FPU));
14099 constraint (et.type == NT_invtype, _("bad type for scalar"));
14100 constraint (x >= 64 / et.size, _("scalar index out of range"));
14102 switch (et.size)
14104 case 8: abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
14105 case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
14106 case 32: abcdebits = 0x00; break;
14107 default: ;
14110 abcdebits |= x << logsize;
14111 inst.instruction = 0xe100b10;
14112 do_vfp_cond_or_thumb ();
14113 inst.instruction |= LOW4 (dn) << 16;
14114 inst.instruction |= HI1 (dn) << 7;
14115 inst.instruction |= inst.operands[0].reg << 12;
14116 inst.instruction |= (abcdebits & 3) << 5;
14117 inst.instruction |= (abcdebits >> 2) << 21;
14119 break;
14121 case NS_RRD: /* case 7 (fmrrd). */
14122 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
14123 _(BAD_FPU));
14125 inst.instruction = 0xc500b10;
14126 do_vfp_cond_or_thumb ();
14127 inst.instruction |= inst.operands[0].reg << 12;
14128 inst.instruction |= inst.operands[1].reg << 16;
14129 inst.instruction |= LOW4 (inst.operands[2].reg);
14130 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
14131 break;
14133 case NS_FF: /* case 8 (fcpys). */
14134 do_vfp_nsyn_opcode ("fcpys");
14135 break;
14137 case NS_FI: /* case 10 (fconsts). */
14138 ldconst = "fconsts";
14139 encode_fconstd:
14140 if (is_quarter_float (inst.operands[1].imm))
14142 inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
14143 do_vfp_nsyn_opcode (ldconst);
14145 else
14146 first_error (_("immediate out of range"));
14147 break;
14149 case NS_RF: /* case 12 (fmrs). */
14150 do_vfp_nsyn_opcode ("fmrs");
14151 break;
14153 case NS_FR: /* case 13 (fmsr). */
14154 do_vfp_nsyn_opcode ("fmsr");
14155 break;
14157 /* The encoders for the fmrrs and fmsrr instructions expect three operands
14158 (one of which is a list), but we have parsed four. Do some fiddling to
14159 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
14160 expect. */
14161 case NS_RRFF: /* case 14 (fmrrs). */
14162 constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
14163 _("VFP registers must be adjacent"));
14164 inst.operands[2].imm = 2;
14165 memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
14166 do_vfp_nsyn_opcode ("fmrrs");
14167 break;
14169 case NS_FFRR: /* case 15 (fmsrr). */
14170 constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
14171 _("VFP registers must be adjacent"));
14172 inst.operands[1] = inst.operands[2];
14173 inst.operands[2] = inst.operands[3];
14174 inst.operands[0].imm = 2;
14175 memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
14176 do_vfp_nsyn_opcode ("fmsrr");
14177 break;
14179 default:
14180 abort ();
14184 static void
14185 do_neon_rshift_round_imm (void)
14187 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14188 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
14189 int imm = inst.operands[2].imm;
14191 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
14192 if (imm == 0)
14194 inst.operands[2].present = 0;
14195 do_neon_mov ();
14196 return;
14199 constraint (imm < 1 || (unsigned)imm > et.size,
14200 _("immediate out of range for shift"));
14201 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
14202 et.size - imm);
14205 static void
14206 do_neon_movl (void)
14208 struct neon_type_el et = neon_check_type (2, NS_QD,
14209 N_EQK | N_DBL, N_SU_32 | N_KEY);
14210 unsigned sizebits = et.size >> 3;
14211 inst.instruction |= sizebits << 19;
14212 neon_two_same (0, et.type == NT_unsigned, -1);
14215 static void
14216 do_neon_trn (void)
14218 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14219 struct neon_type_el et = neon_check_type (2, rs,
14220 N_EQK, N_8 | N_16 | N_32 | N_KEY);
14221 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
14222 neon_two_same (neon_quad (rs), 1, et.size);
14225 static void
14226 do_neon_zip_uzp (void)
14228 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14229 struct neon_type_el et = neon_check_type (2, rs,
14230 N_EQK, N_8 | N_16 | N_32 | N_KEY);
14231 if (rs == NS_DD && et.size == 32)
14233 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
14234 inst.instruction = N_MNEM_vtrn;
14235 do_neon_trn ();
14236 return;
14238 neon_two_same (neon_quad (rs), 1, et.size);
14241 static void
14242 do_neon_sat_abs_neg (void)
14244 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14245 struct neon_type_el et = neon_check_type (2, rs,
14246 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
14247 neon_two_same (neon_quad (rs), 1, et.size);
14250 static void
14251 do_neon_pair_long (void)
14253 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14254 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
14255 /* Unsigned is encoded in the OP field (bit 7) for these instructions. */
14256 inst.instruction |= (et.type == NT_unsigned) << 7;
14257 neon_two_same (neon_quad (rs), 1, et.size);
14260 static void
14261 do_neon_recip_est (void)
14263 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14264 struct neon_type_el et = neon_check_type (2, rs,
14265 N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
14266 inst.instruction |= (et.type == NT_float) << 8;
14267 neon_two_same (neon_quad (rs), 1, et.size);
14270 static void
14271 do_neon_cls (void)
14273 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14274 struct neon_type_el et = neon_check_type (2, rs,
14275 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
14276 neon_two_same (neon_quad (rs), 1, et.size);
14279 static void
14280 do_neon_clz (void)
14282 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14283 struct neon_type_el et = neon_check_type (2, rs,
14284 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
14285 neon_two_same (neon_quad (rs), 1, et.size);
14288 static void
14289 do_neon_cnt (void)
14291 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14292 struct neon_type_el et = neon_check_type (2, rs,
14293 N_EQK | N_INT, N_8 | N_KEY);
14294 neon_two_same (neon_quad (rs), 1, et.size);
14297 static void
14298 do_neon_swp (void)
14300 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14301 neon_two_same (neon_quad (rs), 1, -1);
14304 static void
14305 do_neon_tbl_tbx (void)
14307 unsigned listlenbits;
14308 neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
14310 if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
14312 first_error (_("bad list length for table lookup"));
14313 return;
14316 listlenbits = inst.operands[1].imm - 1;
14317 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14318 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14319 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
14320 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
14321 inst.instruction |= LOW4 (inst.operands[2].reg);
14322 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
14323 inst.instruction |= listlenbits << 8;
14325 inst.instruction = neon_dp_fixup (inst.instruction);
14328 static void
14329 do_neon_ldm_stm (void)
14331 /* P, U and L bits are part of bitmask. */
14332 int is_dbmode = (inst.instruction & (1 << 24)) != 0;
14333 unsigned offsetbits = inst.operands[1].imm * 2;
14335 if (inst.operands[1].issingle)
14337 do_vfp_nsyn_ldm_stm (is_dbmode);
14338 return;
14341 constraint (is_dbmode && !inst.operands[0].writeback,
14342 _("writeback (!) must be used for VLDMDB and VSTMDB"));
14344 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
14345 _("register list must contain at least 1 and at most 16 "
14346 "registers"));
14348 inst.instruction |= inst.operands[0].reg << 16;
14349 inst.instruction |= inst.operands[0].writeback << 21;
14350 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
14351 inst.instruction |= HI1 (inst.operands[1].reg) << 22;
14353 inst.instruction |= offsetbits;
14355 do_vfp_cond_or_thumb ();
14358 static void
14359 do_neon_ldr_str (void)
14361 int is_ldr = (inst.instruction & (1 << 20)) != 0;
14363 if (inst.operands[0].issingle)
14365 if (is_ldr)
14366 do_vfp_nsyn_opcode ("flds");
14367 else
14368 do_vfp_nsyn_opcode ("fsts");
14370 else
14372 if (is_ldr)
14373 do_vfp_nsyn_opcode ("fldd");
14374 else
14375 do_vfp_nsyn_opcode ("fstd");
14379 /* "interleave" version also handles non-interleaving register VLD1/VST1
14380 instructions. */
14382 static void
14383 do_neon_ld_st_interleave (void)
14385 struct neon_type_el et = neon_check_type (1, NS_NULL,
14386 N_8 | N_16 | N_32 | N_64);
14387 unsigned alignbits = 0;
14388 unsigned idx;
14389 /* The bits in this table go:
14390 0: register stride of one (0) or two (1)
14391 1,2: register list length, minus one (1, 2, 3, 4).
14392 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
14393 We use -1 for invalid entries. */
14394 const int typetable[] =
14396 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
14397 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
14398 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
14399 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
14401 int typebits;
14403 if (et.type == NT_invtype)
14404 return;
14406 if (inst.operands[1].immisalign)
14407 switch (inst.operands[1].imm >> 8)
14409 case 64: alignbits = 1; break;
14410 case 128:
14411 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
14412 goto bad_alignment;
14413 alignbits = 2;
14414 break;
14415 case 256:
14416 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
14417 goto bad_alignment;
14418 alignbits = 3;
14419 break;
14420 default:
14421 bad_alignment:
14422 first_error (_("bad alignment"));
14423 return;
14426 inst.instruction |= alignbits << 4;
14427 inst.instruction |= neon_logbits (et.size) << 6;
14429 /* Bits [4:6] of the immediate in a list specifier encode register stride
14430 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
14431 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
14432 up the right value for "type" in a table based on this value and the given
14433 list style, then stick it back. */
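/* Worked example (illustrative): for VLD2 with the list {d0,d1} (two
   registers, stride one) the low three bits of idx are 0b010, and bits [9:8]
   of the opcode hold 1, so idx = 2 | (1 << 3) = 10 and typetable[10] = 0x8,
   the "two registers, single spacing" type encoding.  */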
14434 idx = ((inst.operands[0].imm >> 4) & 7)
14435 | (((inst.instruction >> 8) & 3) << 3);
14437 typebits = typetable[idx];
14439 constraint (typebits == -1, _("bad list type for instruction"));
14441 inst.instruction &= ~0xf00;
14442 inst.instruction |= typebits << 8;
14445 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
14446 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
14447 otherwise. The variable arguments are a list of pairs of legal (size, align)
14448 values, terminated with -1. */
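/* Typical use (illustrative, mirroring the VLD1/VST1 lane case below):
   neon_alignment_bit (et.size, align, &do_align, 16, 16, 32, 32, -1)
   succeeds for a 16-bit element with 16-bit alignment or a 32-bit element
   with 32-bit alignment, and reports "unsupported alignment" for any other
   combination (unless no alignment was given at all).  */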
14450 static int
14451 neon_alignment_bit (int size, int align, int *do_align, ...)
14453 va_list ap;
14454 int result = FAIL, thissize, thisalign;
14456 if (!inst.operands[1].immisalign)
14458 *do_align = 0;
14459 return SUCCESS;
14462 va_start (ap, do_align);
14466 thissize = va_arg (ap, int);
14467 if (thissize == -1)
14468 break;
14469 thisalign = va_arg (ap, int);
14471 if (size == thissize && align == thisalign)
14472 result = SUCCESS;
14474 while (result != SUCCESS);
14476 va_end (ap);
14478 if (result == SUCCESS)
14479 *do_align = 1;
14480 else
14481 first_error (_("unsupported alignment for instruction"));
14483 return result;
14486 static void
14487 do_neon_ld_st_lane (void)
14489 struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
14490 int align_good, do_align = 0;
14491 int logsize = neon_logbits (et.size);
14492 int align = inst.operands[1].imm >> 8;
14493 int n = (inst.instruction >> 8) & 3;
14494 int max_el = 64 / et.size;
14496 if (et.type == NT_invtype)
14497 return;
14499 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
14500 _("bad list length"));
14501 constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
14502 _("scalar index out of range"));
14503 constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
14504 && et.size == 8,
14505 _("stride of 2 unavailable when element size is 8"));
14507 switch (n)
14509 case 0: /* VLD1 / VST1. */
14510 align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
14511 32, 32, -1);
14512 if (align_good == FAIL)
14513 return;
14514 if (do_align)
14516 unsigned alignbits = 0;
14517 switch (et.size)
14519 case 16: alignbits = 0x1; break;
14520 case 32: alignbits = 0x3; break;
14521 default: ;
14523 inst.instruction |= alignbits << 4;
14525 break;
14527 case 1: /* VLD2 / VST2. */
14528 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
14529 32, 64, -1);
14530 if (align_good == FAIL)
14531 return;
14532 if (do_align)
14533 inst.instruction |= 1 << 4;
14534 break;
14536 case 2: /* VLD3 / VST3. */
14537 constraint (inst.operands[1].immisalign,
14538 _("can't use alignment with this instruction"));
14539 break;
14541 case 3: /* VLD4 / VST4. */
14542 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
14543 16, 64, 32, 64, 32, 128, -1);
14544 if (align_good == FAIL)
14545 return;
14546 if (do_align)
14548 unsigned alignbits = 0;
14549 switch (et.size)
14551 case 8: alignbits = 0x1; break;
14552 case 16: alignbits = 0x1; break;
14553 case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
14554 default: ;
14556 inst.instruction |= alignbits << 4;
14558 break;
14560 default: ;
14563 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
14564 if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
14565 inst.instruction |= 1 << (4 + logsize);
14567 inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
14568 inst.instruction |= logsize << 10;
14571 /* Encode single n-element structure to all lanes VLD<n> instructions. */
14573 static void
14574 do_neon_ld_dup (void)
14576 struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
14577 int align_good, do_align = 0;
14579 if (et.type == NT_invtype)
14580 return;
14582 switch ((inst.instruction >> 8) & 3)
14584 case 0: /* VLD1. */
14585 gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
14586 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
14587 &do_align, 16, 16, 32, 32, -1);
14588 if (align_good == FAIL)
14589 return;
14590 switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
14592 case 1: break;
14593 case 2: inst.instruction |= 1 << 5; break;
14594 default: first_error (_("bad list length")); return;
14596 inst.instruction |= neon_logbits (et.size) << 6;
14597 break;
14599 case 1: /* VLD2. */
14600 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
14601 &do_align, 8, 16, 16, 32, 32, 64, -1);
14602 if (align_good == FAIL)
14603 return;
14604 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
14605 _("bad list length"));
14606 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
14607 inst.instruction |= 1 << 5;
14608 inst.instruction |= neon_logbits (et.size) << 6;
14609 break;
14611 case 2: /* VLD3. */
14612 constraint (inst.operands[1].immisalign,
14613 _("can't use alignment with this instruction"));
14614 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
14615 _("bad list length"));
14616 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
14617 inst.instruction |= 1 << 5;
14618 inst.instruction |= neon_logbits (et.size) << 6;
14619 break;
14621 case 3: /* VLD4. */
14623 int align = inst.operands[1].imm >> 8;
14624 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
14625 16, 64, 32, 64, 32, 128, -1);
14626 if (align_good == FAIL)
14627 return;
14628 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
14629 _("bad list length"));
14630 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
14631 inst.instruction |= 1 << 5;
14632 if (et.size == 32 && align == 128)
14633 inst.instruction |= 0x3 << 6;
14634 else
14635 inst.instruction |= neon_logbits (et.size) << 6;
14637 break;
14639 default: ;
14642 inst.instruction |= do_align << 4;
14645 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
14646 apart from bits [11:4]). */
14648 static void
14649 do_neon_ldx_stx (void)
14651 switch (NEON_LANE (inst.operands[0].imm))
14653 case NEON_INTERLEAVE_LANES:
14654 inst.instruction = NEON_ENC_INTERLV (inst.instruction);
14655 do_neon_ld_st_interleave ();
14656 break;
14658 case NEON_ALL_LANES:
14659 inst.instruction = NEON_ENC_DUP (inst.instruction);
14660 do_neon_ld_dup ();
14661 break;
14663 default:
14664 inst.instruction = NEON_ENC_LANE (inst.instruction);
14665 do_neon_ld_st_lane ();
14668 /* L bit comes from bit mask. */
14669 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14670 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14671 inst.instruction |= inst.operands[1].reg << 16;
14673 if (inst.operands[1].postind)
14675 int postreg = inst.operands[1].imm & 0xf;
14676 constraint (!inst.operands[1].immisreg,
14677 _("post-index must be a register"));
14678 constraint (postreg == 0xd || postreg == 0xf,
14679 _("bad register for post-index"));
14680 inst.instruction |= postreg;
14682 else if (inst.operands[1].writeback)
14684 inst.instruction |= 0xd;
14686 else
14687 inst.instruction |= 0xf;
14689 if (thumb_mode)
14690 inst.instruction |= 0xf9000000;
14691 else
14692 inst.instruction |= 0xf4000000;
14695 /* Overall per-instruction processing. */
14697 /* We need to be able to fix up arbitrary expressions in some statements.
14698 This is so that we can handle symbols that are an arbitrary distance from
14699 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
14700 which returns part of an address in a form which will be valid for
14701 a data instruction. We do this by pushing the expression into a symbol
14702 in the expr_section, and creating a fix for that. */
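/* For example (illustrative): a fix against a plain symbol or `sym + 4'
   can usually be emitted directly (the O_symbol case below), while a
   composite expression such as (sym - . - 8) & 0xff matches none of the
   simple cases and is first wrapped in an expr_section symbol via
   make_expr_symbol.  */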
14704 static void
14705 fix_new_arm (fragS * frag,
14706 int where,
14707 short int size,
14708 expressionS * exp,
14709 int pc_rel,
14710 int reloc)
14712 fixS * new_fix;
14714 switch (exp->X_op)
14716 case O_constant:
14717 case O_symbol:
14718 case O_add:
14719 case O_subtract:
14720 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
14721 break;
14723 default:
14724 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
14725 pc_rel, reloc);
14726 break;
14729 /* Mark whether the fix is to a THUMB instruction, or an ARM
14730 instruction. */
14731 new_fix->tc_fix_data = thumb_mode;
14734 /* Create a frag for an instruction requiring relaxation. */
14735 static void
14736 output_relax_insn (void)
14738 char * to;
14739 symbolS *sym;
14740 int offset;
14742 /* The size of the instruction is unknown, so tie the debug info to the
14743 start of the instruction. */
14744 dwarf2_emit_insn (0);
14746 switch (inst.reloc.exp.X_op)
14748 case O_symbol:
14749 sym = inst.reloc.exp.X_add_symbol;
14750 offset = inst.reloc.exp.X_add_number;
14751 break;
14752 case O_constant:
14753 sym = NULL;
14754 offset = inst.reloc.exp.X_add_number;
14755 break;
14756 default:
14757 sym = make_expr_symbol (&inst.reloc.exp);
14758 offset = 0;
14759 break;
14761 to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
14762 inst.relax, sym, offset, NULL/*offset, opcode*/);
14763 md_number_to_chars (to, inst.instruction, THUMB_SIZE);
14766 /* Write a 32-bit thumb instruction to buf. */
14767 static void
14768 put_thumb32_insn (char * buf, unsigned long insn)
14770 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
14771 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
14774 static void
14775 output_inst (const char * str)
14777 char * to = NULL;
14779 if (inst.error)
14781 as_bad ("%s -- `%s'", inst.error, str);
14782 return;
14784 if (inst.relax)
14786 output_relax_insn ();
14787 return;
14789 if (inst.size == 0)
14790 return;
14792 to = frag_more (inst.size);
14793 /* PR 9814: Record the thumb mode into the current frag so that we know
14794 what type of NOP padding to use, if necessary. We override any previous
14795 setting so that if the mode has changed then the NOPS that we use will
14796 match the encoding of the last instruction in the frag. */
14797 frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
14799 if (thumb_mode && (inst.size > THUMB_SIZE))
14801 gas_assert (inst.size == (2 * THUMB_SIZE));
14802 put_thumb32_insn (to, inst.instruction);
14804 else if (inst.size > INSN_SIZE)
14806 gas_assert (inst.size == (2 * INSN_SIZE));
14807 md_number_to_chars (to, inst.instruction, INSN_SIZE);
14808 md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
14810 else
14811 md_number_to_chars (to, inst.instruction, inst.size);
14813 if (inst.reloc.type != BFD_RELOC_UNUSED)
14814 fix_new_arm (frag_now, to - frag_now->fr_literal,
14815 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
14816 inst.reloc.type);
14818 dwarf2_emit_insn (inst.size);
14821 static char *
14822 output_it_inst (int cond, int mask, char * to)
14824 unsigned long instruction = 0xbf00;
14826 mask &= 0xf;
14827 instruction |= mask;
14828 instruction |= cond << 4;
14830 if (to == NULL)
14832 to = frag_more (2);
14833 #ifdef OBJ_ELF
14834 dwarf2_emit_insn (2);
14835 #endif
14838 md_number_to_chars (to, instruction, 2);
14840 return to;
14843 /* Tag values used in struct asm_opcode's tag field. */
14844 enum opcode_tag
14846 OT_unconditional, /* Instruction cannot be conditionalized.
14847 The ARM condition field is still 0xE. */
14848 OT_unconditionalF, /* Instruction cannot be conditionalized
14849 and carries 0xF in its ARM condition field. */
14850 OT_csuffix, /* Instruction takes a conditional suffix. */
14851 OT_csuffixF, /* Some forms of the instruction take a conditional
14852 suffix, others place 0xF where the condition field
14853 would be. */
14854 OT_cinfix3, /* Instruction takes a conditional infix,
14855 beginning at character index 3. (In
14856 unified mode, it becomes a suffix.) */
14857 OT_cinfix3_deprecated, /* The same as OT_cinfix3. This is used for
14858 tsts, cmps, cmns, and teqs. */
14859 OT_cinfix3_legacy, /* Legacy instruction takes a conditional infix at
14860 character index 3, even in unified mode. Used for
14861 legacy instructions where suffix and infix forms
14862 may be ambiguous. */
14863 OT_csuf_or_in3, /* Instruction takes either a conditional
14864 suffix or an infix at character index 3. */
14865 OT_odd_infix_unc, /* This is the unconditional variant of an
14866 instruction that takes a conditional infix
14867 at an unusual position. In unified mode,
14868 this variant will accept a suffix. */
14869 OT_odd_infix_0 /* Values greater than or equal to OT_odd_infix_0
14870 are the conditional variants of instructions that
14871 take conditional infixes in unusual positions.
14872 The infix appears at character index
14873 (tag - OT_odd_infix_0). These are not accepted
14874 in unified mode. */
14877 /* Subroutine of md_assemble, responsible for looking up the primary
14878 opcode from the mnemonic the user wrote. STR points to the
14879 beginning of the mnemonic.
14881 This is not simply a hash table lookup, because of conditional
14882 variants. Most instructions have conditional variants, which are
14883 expressed with a _conditional affix_ to the mnemonic. If we were
14884 to encode each conditional variant as a literal string in the opcode
14885 table, it would have approximately 20,000 entries.
14887 Most mnemonics take this affix as a suffix, and in unified syntax,
14888 'most' is upgraded to 'all'. However, in the divided syntax, some
14889 instructions take the affix as an infix, notably the s-variants of
14890 the arithmetic instructions. Of those instructions, all but six
14891 have the infix appear after the third character of the mnemonic.
14893 Accordingly, the algorithm for looking up primary opcodes given
14894 an identifier is:
14896 1. Look up the identifier in the opcode table.
14897 If we find a match, go to step U.
14899 2. Look up the last two characters of the identifier in the
14900 conditions table. If we find a match, look up the first N-2
14901 characters of the identifier in the opcode table. If we
14902 find a match, go to step CE.
14904 3. Look up the fourth and fifth characters of the identifier in
14905 the conditions table. If we find a match, extract those
14906 characters from the identifier, and look up the remaining
14907 characters in the opcode table. If we find a match, go
14908 to step CM.
14910 4. Fail.
14912 U. Examine the tag field of the opcode structure, in case this is
14913 one of the six instructions with its conditional infix in an
14914 unusual place. If it is, the tag tells us where to find the
14915 infix; look it up in the conditions table and set inst.cond
14916 accordingly. Otherwise, this is an unconditional instruction.
14917 Again set inst.cond accordingly. Return the opcode structure.
14919 CE. Examine the tag field to make sure this is an instruction that
14920 should receive a conditional suffix. If it is not, fail.
14921 Otherwise, set inst.cond from the suffix we already looked up,
14922 and return the opcode structure.
14924 CM. Examine the tag field to make sure this is an instruction that
14925 should receive a conditional infix after the third character.
14926 If it is not, fail. Otherwise, undo the edits to the current
14927 line of input and proceed as for case CE. */
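/* Worked example (illustrative only): in unified syntax "addseq" is
   resolved by step CE: the trailing "eq" is found in the conditions
   table, the remaining "adds" is found in the opcode table, and
   inst.cond is set to the EQ condition.  In divided syntax the same
   instruction is written "addeqs" and is resolved by step CM: the
   characters at index 3-4 ("eq") are found in the conditions table,
   they are removed, and the remaining "adds" is looked up in the
   opcode table.  */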
14929 static const struct asm_opcode *
14930 opcode_lookup (char **str)
14932 char *end, *base;
14933 char *affix;
14934 const struct asm_opcode *opcode;
14935 const struct asm_cond *cond;
14936 char save[2];
14937 bfd_boolean neon_supported;
14939 neon_supported = ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1);
14941 /* Scan up to the end of the mnemonic, which must end in white space,
14942 '.' (in unified mode, or for Neon instructions), or end of string. */
14943 for (base = end = *str; *end != '\0'; end++)
14944 if (*end == ' ' || ((unified_syntax || neon_supported) && *end == '.'))
14945 break;
14947 if (end == base)
14948 return NULL;
14950 /* Handle a possible width suffix and/or Neon type suffix. */
14951 if (end[0] == '.')
14953 int offset = 2;
14955 /* The .w and .n suffixes are only valid if the unified syntax is in
14956 use. */
14957 if (unified_syntax && end[1] == 'w')
14958 inst.size_req = 4;
14959 else if (unified_syntax && end[1] == 'n')
14960 inst.size_req = 2;
14961 else
14962 offset = 0;
14964 inst.vectype.elems = 0;
14966 *str = end + offset;
14968 if (end[offset] == '.')
14970 /* See if we have a Neon type suffix (possible in either unified or
14971 non-unified ARM syntax mode). */
14972 if (parse_neon_type (&inst.vectype, str) == FAIL)
14973 return NULL;
14975 else if (end[offset] != '\0' && end[offset] != ' ')
14976 return NULL;
14978 else
14979 *str = end;
14981 /* Look for unaffixed or special-case affixed mnemonic. */
14982 opcode = hash_find_n (arm_ops_hsh, base, end - base);
14983 if (opcode)
14985 /* step U */
14986 if (opcode->tag < OT_odd_infix_0)
14988 inst.cond = COND_ALWAYS;
14989 return opcode;
14992 if (warn_on_deprecated && unified_syntax)
14993 as_warn (_("conditional infixes are deprecated in unified syntax"));
14994 affix = base + (opcode->tag - OT_odd_infix_0);
14995 cond = hash_find_n (arm_cond_hsh, affix, 2);
14996 gas_assert (cond);
14998 inst.cond = cond->value;
14999 return opcode;
15002 /* Cannot have a conditional suffix on a mnemonic of less than three
15003 characters (two of which would be the suffix itself). */
15004 if (end - base < 3)
15005 return NULL;
15007 /* Look for suffixed mnemonic. */
15008 affix = end - 2;
15009 cond = hash_find_n (arm_cond_hsh, affix, 2);
15010 opcode = hash_find_n (arm_ops_hsh, base, affix - base);
15011 if (opcode && cond)
15013 /* step CE */
15014 switch (opcode->tag)
15016 case OT_cinfix3_legacy:
15017 /* Ignore conditional suffixes matched on infix-only mnemonics. */
15018 break;
15020 case OT_cinfix3:
15021 case OT_cinfix3_deprecated:
15022 case OT_odd_infix_unc:
15023 if (!unified_syntax)
15024 return 0;
15025 /* else fall through */
15027 case OT_csuffix:
15028 case OT_csuffixF:
15029 case OT_csuf_or_in3:
15030 inst.cond = cond->value;
15031 return opcode;
15033 case OT_unconditional:
15034 case OT_unconditionalF:
15035 if (thumb_mode)
15036 inst.cond = cond->value;
15037 else
15039 /* Delayed diagnostic. */
15040 inst.error = BAD_COND;
15041 inst.cond = COND_ALWAYS;
15043 return opcode;
15045 default:
15046 return NULL;
15050 /* Cannot have a usual-position infix on a mnemonic of less than
15051 six characters (five would be a suffix). */
15052 if (end - base < 6)
15053 return NULL;
15055 /* Look for infixed mnemonic in the usual position. */
15056 affix = base + 3;
15057 cond = hash_find_n (arm_cond_hsh, affix, 2);
15058 if (!cond)
15059 return NULL;
15061 memcpy (save, affix, 2);
15062 memmove (affix, affix + 2, (end - affix) - 2);
15063 opcode = hash_find_n (arm_ops_hsh, base, (end - base) - 2);
15064 memmove (affix + 2, affix, (end - affix) - 2);
15065 memcpy (affix, save, 2);
15067 if (opcode
15068 && (opcode->tag == OT_cinfix3
15069 || opcode->tag == OT_cinfix3_deprecated
15070 || opcode->tag == OT_csuf_or_in3
15071 || opcode->tag == OT_cinfix3_legacy))
15073 /* Step CM. */
15074 if (warn_on_deprecated && unified_syntax
15075 && (opcode->tag == OT_cinfix3
15076 || opcode->tag == OT_cinfix3_deprecated))
15077 as_warn (_("conditional infixes are deprecated in unified syntax"));
15079 inst.cond = cond->value;
15080 return opcode;
15083 return NULL;
15086 /* This function generates an initial IT instruction, leaving its block
15087 virtually open for the new instructions. Eventually,
15088 the mask will be updated by now_it_add_mask () each time
15089 a new instruction needs to be included in the IT block.
15090 Finally, the block is closed with close_automatic_it_block ().
15091 The block closure can be requested either from md_assemble (),
15092 a tencode (), or due to a label hook. */
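/* For example (a sketch of the common case): with implicit IT blocks
   enabled for Thumb-2, assembling

       addeq r0, r1
       addeq r2, r3

   causes the first conditional instruction to open an automatic block
   here, and the second one to extend it via now_it_add_mask (), so the
   IT instruction emitted up front ends up encoding the equivalent of a
   hand-written "itt eq".  */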
15094 static void
15095 new_automatic_it_block (int cond)
15097 now_it.state = AUTOMATIC_IT_BLOCK;
15098 now_it.mask = 0x18;
15099 now_it.cc = cond;
15100 now_it.block_length = 1;
15101 mapping_state (MAP_THUMB);
15102 now_it.insn = output_it_inst (cond, now_it.mask, NULL);
15105 /* Close an automatic IT block.
15106 See comments in new_automatic_it_block (). */
15108 static void
15109 close_automatic_it_block (void)
15111 now_it.mask = 0x10;
15112 now_it.block_length = 0;
15115 /* Update the mask of the current automatically-generated IT
15116 instruction. See comments in new_automatic_it_block (). */
15118 static void
15119 now_it_add_mask (int cond)
15121 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
15122 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
15123 | ((bitvalue) << (nbit)))
15124 const int resulting_bit = (cond & 1);
15126 now_it.mask &= 0xf;
15127 now_it.mask = SET_BIT_VALUE (now_it.mask,
15128 resulting_bit,
15129 (5 - now_it.block_length));
15130 now_it.mask = SET_BIT_VALUE (now_it.mask,
15131 1,
15132 ((5 - now_it.block_length) - 1) );
15133 output_it_inst (now_it.cc, now_it.mask, now_it.insn);
15135 #undef CLEAR_BIT
15136 #undef SET_BIT_VALUE
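/* Worked example (a sketch, using the 5-bit working value kept in
   now_it.mask): a fresh automatic block for EQ starts with mask 0x18,
   i.e. 1000b in the architectural 4-bit mask field (a one-instruction
   block).  Adding a second EQ instruction (block_length == 2) stores
   the low bit of EQ (0) in bit 3 and sets the new terminator in bit 2,
   giving 0100b, the mask of a hand-written "itt eq".  */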
15139 /* The IT blocks handling machinery is accessed through these functions:
15140 it_fsm_pre_encode () from md_assemble ()
15141 set_it_insn_type () optional, from the tencode functions
15142 set_it_insn_type_last () ditto
15143 in_it_block () ditto
15144 it_fsm_post_encode () from md_assemble ()
15145 force_automatic_it_block_close () from label handling functions
15147 Rationale:
15148 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
15149 initializing the IT insn type with a generic initial value depending
15150 on the inst.condition.
15151 2) During the tencode function, two things may happen:
15152 a) The tencode function overrides the IT insn type by
15153 calling either set_it_insn_type (type) or set_it_insn_type_last ().
15154 b) The tencode function queries the IT block state by
15155 calling in_it_block () (i.e. to determine narrow/not narrow mode).
15157 Both set_it_insn_type and in_it_block run the internal FSM state
15158 handling function (handle_it_state), because: a) setting the IT insn
15159 type may result in an invalid state (exiting the function),
15160 and b) querying the state requires the FSM to be updated.
15161 Specifically we want to avoid creating an IT block for conditional
15162 branches, so it_fsm_pre_encode is actually a guess and we can't
15163 determine whether an IT block is required until the tencode () routine
15164 has decided what type of instruction this actually is.
15165 Because of this, if set_it_insn_type and in_it_block have to be used,
15166 set_it_insn_type has to be called first.
15168 set_it_insn_type_last () is a wrapper around set_it_insn_type (type) that
15169 determines the insn IT type depending on the inst.cond code.
15170 When a tencode () routine encodes an instruction that can be
15171 either outside an IT block or, if inside one, must be
15172 the last instruction in it, set_it_insn_type_last () will determine the
15173 proper IT instruction type based on the inst.cond code. Otherwise,
15174 set_it_insn_type can be called for overriding that logic or
15175 for covering other cases.
15177 Calling handle_it_state () may not transition the IT block state to
15178 OUTSIDE_IT_BLOCK immediately, since the (current) state could still
15179 be queried. Instead, if the FSM determines that the state should
15180 be transitioned to OUTSIDE_IT_BLOCK, a flag is set so that the block
15181 is closed after the tencode () function: that's what it_fsm_post_encode () does.
15183 Since in_it_block () calls the state handling function to get an
15184 updated state, an error may occur (due to an invalid combination of insns).
15185 In that case, inst.error is set.
15186 Therefore, inst.error has to be checked after the execution of
15187 the tencode () routine.
15189 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
15190 any pending state change (if any) that didn't take place in
15191 handle_it_state () as explained above. */
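/* In outline, the protocol described above is (a sketch, not the
   literal code below):

       it_fsm_pre_encode ();     called from md_assemble ()
       opcode->tencode ();       may call set_it_insn_type (),
                                 set_it_insn_type_last () or in_it_block ()
       check inst.error          errors may have been raised by the FSM
       it_fsm_post_encode ();    commit any pending state change  */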
15193 static void
15194 it_fsm_pre_encode (void)
15196 if (inst.cond != COND_ALWAYS)
15197 inst.it_insn_type = INSIDE_IT_INSN;
15198 else
15199 inst.it_insn_type = OUTSIDE_IT_INSN;
15201 now_it.state_handled = 0;
15204 /* IT state FSM handling function. */
15206 static int
15207 handle_it_state (void)
15209 now_it.state_handled = 1;
15211 switch (now_it.state)
15213 case OUTSIDE_IT_BLOCK:
15214 switch (inst.it_insn_type)
15216 case OUTSIDE_IT_INSN:
15217 break;
15219 case INSIDE_IT_INSN:
15220 case INSIDE_IT_LAST_INSN:
15221 if (thumb_mode == 0)
15223 if (unified_syntax
15224 && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
15225 as_tsktsk (_("Warning: conditional outside an IT block"\
15226 " for Thumb."));
15228 else
15230 if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
15231 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2))
15233 /* Automatically generate the IT instruction. */
15234 new_automatic_it_block (inst.cond);
15235 if (inst.it_insn_type == INSIDE_IT_LAST_INSN)
15236 close_automatic_it_block ();
15238 else
15240 inst.error = BAD_OUT_IT;
15241 return FAIL;
15244 break;
15246 case IF_INSIDE_IT_LAST_INSN:
15247 case NEUTRAL_IT_INSN:
15248 break;
15250 case IT_INSN:
15251 now_it.state = MANUAL_IT_BLOCK;
15252 now_it.block_length = 0;
15253 break;
15255 break;
15257 case AUTOMATIC_IT_BLOCK:
15258 /* Three things may happen now:
15259 a) We should increment the current IT block size;
15260 b) We should close the current IT block (closing insn, or 4-insn limit reached);
15261 c) We should close the current IT block and start a new one (due
15262 to incompatible conditions, or because the
15263 4-insn block length has been reached). */
15265 switch (inst.it_insn_type)
15267 case OUTSIDE_IT_INSN:
15268 /* The closure of the block shall happen immediately,
15269 so any in_it_block () call reports the block as closed. */
15270 force_automatic_it_block_close ();
15271 break;
15273 case INSIDE_IT_INSN:
15274 case INSIDE_IT_LAST_INSN:
15275 case IF_INSIDE_IT_LAST_INSN:
15276 now_it.block_length++;
15278 if (now_it.block_length > 4
15279 || !now_it_compatible (inst.cond))
15281 force_automatic_it_block_close ();
15282 if (inst.it_insn_type != IF_INSIDE_IT_LAST_INSN)
15283 new_automatic_it_block (inst.cond);
15285 else
15287 now_it_add_mask (inst.cond);
15290 if (now_it.state == AUTOMATIC_IT_BLOCK
15291 && (inst.it_insn_type == INSIDE_IT_LAST_INSN
15292 || inst.it_insn_type == IF_INSIDE_IT_LAST_INSN))
15293 close_automatic_it_block ();
15294 break;
15296 case NEUTRAL_IT_INSN:
15297 now_it.block_length++;
15299 if (now_it.block_length > 4)
15300 force_automatic_it_block_close ();
15301 else
15302 now_it_add_mask (now_it.cc & 1);
15303 break;
15305 case IT_INSN:
15306 close_automatic_it_block ();
15307 now_it.state = MANUAL_IT_BLOCK;
15308 break;
15310 break;
15312 case MANUAL_IT_BLOCK:
15314 /* Check conditional suffixes. */
15315 const int cond = now_it.cc ^ ((now_it.mask >> 4) & 1) ^ 1;
15316 int is_last;
15317 now_it.mask <<= 1;
15318 now_it.mask &= 0x1f;
15319 is_last = (now_it.mask == 0x10);
15321 switch (inst.it_insn_type)
15323 case OUTSIDE_IT_INSN:
15324 inst.error = BAD_NOT_IT;
15325 return FAIL;
15327 case INSIDE_IT_INSN:
15328 if (cond != inst.cond)
15330 inst.error = BAD_IT_COND;
15331 return FAIL;
15333 break;
15335 case INSIDE_IT_LAST_INSN:
15336 case IF_INSIDE_IT_LAST_INSN:
15337 if (cond != inst.cond)
15339 inst.error = BAD_IT_COND;
15340 return FAIL;
15342 if (!is_last)
15344 inst.error = BAD_BRANCH;
15345 return FAIL;
15347 break;
15349 case NEUTRAL_IT_INSN:
15350 /* The BKPT instruction is unconditional even in an IT block. */
15351 break;
15353 case IT_INSN:
15354 inst.error = BAD_IT_IT;
15355 return FAIL;
15358 break;
15361 return SUCCESS;
15364 static void
15365 it_fsm_post_encode (void)
15367 int is_last;
15369 if (!now_it.state_handled)
15370 handle_it_state ();
15372 is_last = (now_it.mask == 0x10);
15373 if (is_last)
15375 now_it.state = OUTSIDE_IT_BLOCK;
15376 now_it.mask = 0;
15380 static void
15381 force_automatic_it_block_close (void)
15383 if (now_it.state == AUTOMATIC_IT_BLOCK)
15385 close_automatic_it_block ();
15386 now_it.state = OUTSIDE_IT_BLOCK;
15387 now_it.mask = 0;
15391 static int
15392 in_it_block (void)
15394 if (!now_it.state_handled)
15395 handle_it_state ();
15397 return now_it.state != OUTSIDE_IT_BLOCK;
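/* As an example of the in_it_block () query described above: several
   Thumb encoders consult it because the 16-bit data-processing
   encodings set the flags only when used outside an IT block, so the
   narrow form is chosen only when that is what the programmer asked
   for.  */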
15400 void
15401 md_assemble (char *str)
15403 char *p = str;
15404 const struct asm_opcode * opcode;
15406 /* Align the previous label if needed. */
15407 if (last_label_seen != NULL)
15409 symbol_set_frag (last_label_seen, frag_now);
15410 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
15411 S_SET_SEGMENT (last_label_seen, now_seg);
15414 memset (&inst, '\0', sizeof (inst));
15415 inst.reloc.type = BFD_RELOC_UNUSED;
15417 opcode = opcode_lookup (&p);
15418 if (!opcode)
15420 /* It wasn't an instruction, but it might be a register alias of
15421 the form alias .req reg, or a Neon .dn/.qn directive. */
15422 if (! create_register_alias (str, p)
15423 && ! create_neon_reg_alias (str, p))
15424 as_bad (_("bad instruction `%s'"), str);
15426 return;
15429 if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
15430 as_warn (_("s suffix on comparison instruction is deprecated"));
15432 /* The value which unconditional instructions should have in place of the
15433 condition field. */
15434 inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;
15436 if (thumb_mode)
15438 arm_feature_set variant;
15440 variant = cpu_variant;
15441 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
15442 if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
15443 ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
15444 /* Check that this instruction is supported for this CPU. */
15445 if (!opcode->tvariant
15446 || (thumb_mode == 1
15447 && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
15449 as_bad (_("selected processor does not support `%s'"), str);
15450 return;
15452 if (inst.cond != COND_ALWAYS && !unified_syntax
15453 && opcode->tencode != do_t_branch)
15455 as_bad (_("Thumb does not support conditional execution"));
15456 return;
15459 if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2))
15461 if (opcode->tencode != do_t_blx && opcode->tencode != do_t_branch23
15462 && !(ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_msr)
15463 || ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_barrier)))
15465 /* Two things are addressed here.
15466 1) Implicitly require narrow instructions on Thumb-1.
15467 This avoids relaxation accidentally introducing Thumb-2
15468 instructions.
15469 2) Reject wide instructions on non-Thumb-2 cores. */
15470 if (inst.size_req == 0)
15471 inst.size_req = 2;
15472 else if (inst.size_req == 4)
15474 as_bad (_("selected processor does not support `%s'"), str);
15475 return;
15480 inst.instruction = opcode->tvalue;
15482 if (!parse_operands (p, opcode->operands))
15484 /* Prepare the it_insn_type for those encodings that don't set
15485 it. */
15486 it_fsm_pre_encode ();
15488 opcode->tencode ();
15490 it_fsm_post_encode ();
15493 if (!(inst.error || inst.relax))
15495 gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
15496 inst.size = (inst.instruction > 0xffff ? 4 : 2);
15497 if (inst.size_req && inst.size_req != inst.size)
15499 as_bad (_("cannot honor width suffix -- `%s'"), str);
15500 return;
15504 /* Something has gone badly wrong if we try to relax a fixed size
15505 instruction. */
15506 gas_assert (inst.size_req == 0 || !inst.relax);
15508 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
15509 *opcode->tvariant);
15510 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
15511 set those bits when Thumb-2 32-bit instructions are seen, i.e.
15512 anything other than bl/blx and v6-M instructions.
15513 This is overly pessimistic for relaxable instructions. */
15514 if (((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
15515 || inst.relax)
15516 && !(ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
15517 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier)))
15518 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
15519 arm_ext_v6t2);
15521 if (!inst.error)
15522 mapping_state (MAP_THUMB);
15524 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
15526 bfd_boolean is_bx;
15528 /* bx is allowed on v5 cores, and sometimes on v4 cores. */
15529 is_bx = (opcode->aencode == do_bx);
15531 /* Check that this instruction is supported for this CPU. */
15532 if (!(is_bx && fix_v4bx)
15533 && !(opcode->avariant &&
15534 ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
15536 as_bad (_("selected processor does not support `%s'"), str);
15537 return;
15539 if (inst.size_req)
15541 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
15542 return;
15545 inst.instruction = opcode->avalue;
15546 if (opcode->tag == OT_unconditionalF)
15547 inst.instruction |= 0xF << 28;
15548 else
15549 inst.instruction |= inst.cond << 28;
15550 inst.size = INSN_SIZE;
15551 if (!parse_operands (p, opcode->operands))
15553 it_fsm_pre_encode ();
15554 opcode->aencode ();
15555 it_fsm_post_encode ();
15557 /* Arm mode bx is marked as both v4T and v5 because it's still required
15558 on a hypothetical non-thumb v5 core. */
15559 if (is_bx)
15560 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
15561 else
15562 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
15563 *opcode->avariant);
15564 if (!inst.error)
15565 mapping_state (MAP_ARM);
15567 else
15569 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
15570 "-- `%s'"), str);
15571 return;
15573 output_inst (str);
15576 static void
15577 check_it_blocks_finished (void)
15579 #ifdef OBJ_ELF
15580 asection *sect;
15582 for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
15583 if (seg_info (sect)->tc_segment_info_data.current_it.state
15584 == MANUAL_IT_BLOCK)
15586 as_warn (_("section '%s' finished with an open IT block."),
15587 sect->name);
15589 #else
15590 if (now_it.state == MANUAL_IT_BLOCK)
15591 as_warn (_("file finished with an open IT block."));
15592 #endif
15595 /* Various frobbings of labels and their addresses. */
15597 void
15598 arm_start_line_hook (void)
15600 last_label_seen = NULL;
15603 void
15604 arm_frob_label (symbolS * sym)
15606 last_label_seen = sym;
15608 ARM_SET_THUMB (sym, thumb_mode);
15610 #if defined OBJ_COFF || defined OBJ_ELF
15611 ARM_SET_INTERWORK (sym, support_interwork);
15612 #endif
15614 force_automatic_it_block_close ();
15616 /* Note - do not allow local symbols (.Lxxx) to be labelled
15617 as Thumb functions. This is because these labels, whilst
15618 they exist inside Thumb code, are not the entry points for
15619 possible ARM->Thumb calls. Also, these labels can be used
15620 as part of a computed goto or switch statement, e.g. gcc
15621 can generate code that looks like this:
15623 ldr r2, [pc, .Laaa]
15624 lsl r3, r3, #2
15625 ldr r2, [r3, r2]
15626 mov pc, r2
15628 .Lbbb: .word .Lxxx
15629 .Lccc: .word .Lyyy
15630 ..etc...
15631 .Laaa: .word Lbbb
15633 The first instruction loads the address of the jump table.
15634 The second instruction converts a table index into a byte offset.
15635 The third instruction gets the jump address out of the table.
15636 The fourth instruction performs the jump.
15638 If the address stored at .Laaa is that of a symbol which has the
15639 Thumb_Func bit set, then the linker will arrange for this address
15640 to have the bottom bit set, which in turn would mean that the
15641 address computation performed by the third instruction would end
15642 up with the bottom bit set. Since the ARM is capable of unaligned
15643 word loads, the instruction would then load the incorrect address
15644 out of the jump table, and chaos would ensue. */
15645 if (label_is_thumb_function_name
15646 && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
15647 && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
15649 /* When the address of a Thumb function is taken the bottom
15650 bit of that address should be set. This will allow
15651 interworking between Arm and Thumb functions to work
15652 correctly. */
15654 THUMB_SET_FUNC (sym, 1);
15656 label_is_thumb_function_name = FALSE;
15659 dwarf2_emit_label (sym);
15662 bfd_boolean
15663 arm_data_in_code (void)
15665 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
15667 *input_line_pointer = '/';
15668 input_line_pointer += 5;
15669 *input_line_pointer = 0;
15670 return TRUE;
15673 return FALSE;
15676 char *
15677 arm_canonicalize_symbol_name (char * name)
15679 int len;
15681 if (thumb_mode && (len = strlen (name)) > 5
15682 && streq (name + len - 5, "/data"))
15683 *(name + len - 5) = 0;
15685 return name;
15688 /* Table of all register names defined by default. The user can
15689 define additional names with .req. Note that all register names
15690 should appear in both upper and lowercase variants. Some registers
15691 also have mixed-case names. */
15693 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
15694 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
15695 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
15696 #define REGSET(p,t) \
15697 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
15698 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
15699 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
15700 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
15701 #define REGSETH(p,t) \
15702 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
15703 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
15704 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
15705 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
15706 #define REGSET2(p,t) \
15707 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
15708 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
15709 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
15710 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
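/* For instance, REGSET(r, RN) above expands to the sixteen entries
   { "r0", 0, REG_TYPE_RN, TRUE, 0 } ... { "r15", 15, REG_TYPE_RN, TRUE, 0 },
   while REGSET2(q, NQ) maps q0-q15 onto the doubled register numbers
   0, 2, ..., 30.  */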
15712 static const struct reg_entry reg_names[] =
15714 /* ARM integer registers. */
15715 REGSET(r, RN), REGSET(R, RN),
15717 /* ATPCS synonyms. */
15718 REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
15719 REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
15720 REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),
15722 REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
15723 REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
15724 REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),
15726 /* Well-known aliases. */
15727 REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
15728 REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),
15730 REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
15731 REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),
15733 /* Coprocessor numbers. */
15734 REGSET(p, CP), REGSET(P, CP),
15736 /* Coprocessor register numbers. The "cr" variants are for backward
15737 compatibility. */
15738 REGSET(c, CN), REGSET(C, CN),
15739 REGSET(cr, CN), REGSET(CR, CN),
15741 /* FPA registers. */
15742 REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
15743 REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),
15745 REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
15746 REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),
15748 /* VFP SP registers. */
15749 REGSET(s,VFS), REGSET(S,VFS),
15750 REGSETH(s,VFS), REGSETH(S,VFS),
15752 /* VFP DP Registers. */
15753 REGSET(d,VFD), REGSET(D,VFD),
15754 /* Extra Neon DP registers. */
15755 REGSETH(d,VFD), REGSETH(D,VFD),
15757 /* Neon QP registers. */
15758 REGSET2(q,NQ), REGSET2(Q,NQ),
15760 /* VFP control registers. */
15761 REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
15762 REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
15763 REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
15764 REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
15765 REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
15766 REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),
15768 /* Maverick DSP coprocessor registers. */
15769 REGSET(mvf,MVF), REGSET(mvd,MVD), REGSET(mvfx,MVFX), REGSET(mvdx,MVDX),
15770 REGSET(MVF,MVF), REGSET(MVD,MVD), REGSET(MVFX,MVFX), REGSET(MVDX,MVDX),
15772 REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
15773 REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
15774 REGDEF(dspsc,0,DSPSC),
15776 REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
15777 REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
15778 REGDEF(DSPSC,0,DSPSC),
15780 /* iWMMXt data registers - p0, c0-15. */
15781 REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),
15783 /* iWMMXt control registers - p1, c0-3. */
15784 REGDEF(wcid, 0,MMXWC), REGDEF(wCID, 0,MMXWC), REGDEF(WCID, 0,MMXWC),
15785 REGDEF(wcon, 1,MMXWC), REGDEF(wCon, 1,MMXWC), REGDEF(WCON, 1,MMXWC),
15786 REGDEF(wcssf, 2,MMXWC), REGDEF(wCSSF, 2,MMXWC), REGDEF(WCSSF, 2,MMXWC),
15787 REGDEF(wcasf, 3,MMXWC), REGDEF(wCASF, 3,MMXWC), REGDEF(WCASF, 3,MMXWC),
15789 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
15790 REGDEF(wcgr0, 8,MMXWCG), REGDEF(wCGR0, 8,MMXWCG), REGDEF(WCGR0, 8,MMXWCG),
15791 REGDEF(wcgr1, 9,MMXWCG), REGDEF(wCGR1, 9,MMXWCG), REGDEF(WCGR1, 9,MMXWCG),
15792 REGDEF(wcgr2,10,MMXWCG), REGDEF(wCGR2,10,MMXWCG), REGDEF(WCGR2,10,MMXWCG),
15793 REGDEF(wcgr3,11,MMXWCG), REGDEF(wCGR3,11,MMXWCG), REGDEF(WCGR3,11,MMXWCG),
15795 /* XScale accumulator registers. */
15796 REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
15798 #undef REGDEF
15799 #undef REGNUM
15800 #undef REGSET
15802 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
15803 within psr_required_here. */
15804 static const struct asm_psr psrs[] =
15806 /* Backward compatibility notation. Note that "all" is no longer
15807 truly all possible PSR bits. */
15808 {"all", PSR_c | PSR_f},
15809 {"flg", PSR_f},
15810 {"ctl", PSR_c},
15812 /* Individual flags. */
15813 {"f", PSR_f},
15814 {"c", PSR_c},
15815 {"x", PSR_x},
15816 {"s", PSR_s},
15817 /* Combinations of flags. */
15818 {"fs", PSR_f | PSR_s},
15819 {"fx", PSR_f | PSR_x},
15820 {"fc", PSR_f | PSR_c},
15821 {"sf", PSR_s | PSR_f},
15822 {"sx", PSR_s | PSR_x},
15823 {"sc", PSR_s | PSR_c},
15824 {"xf", PSR_x | PSR_f},
15825 {"xs", PSR_x | PSR_s},
15826 {"xc", PSR_x | PSR_c},
15827 {"cf", PSR_c | PSR_f},
15828 {"cs", PSR_c | PSR_s},
15829 {"cx", PSR_c | PSR_x},
15830 {"fsx", PSR_f | PSR_s | PSR_x},
15831 {"fsc", PSR_f | PSR_s | PSR_c},
15832 {"fxs", PSR_f | PSR_x | PSR_s},
15833 {"fxc", PSR_f | PSR_x | PSR_c},
15834 {"fcs", PSR_f | PSR_c | PSR_s},
15835 {"fcx", PSR_f | PSR_c | PSR_x},
15836 {"sfx", PSR_s | PSR_f | PSR_x},
15837 {"sfc", PSR_s | PSR_f | PSR_c},
15838 {"sxf", PSR_s | PSR_x | PSR_f},
15839 {"sxc", PSR_s | PSR_x | PSR_c},
15840 {"scf", PSR_s | PSR_c | PSR_f},
15841 {"scx", PSR_s | PSR_c | PSR_x},
15842 {"xfs", PSR_x | PSR_f | PSR_s},
15843 {"xfc", PSR_x | PSR_f | PSR_c},
15844 {"xsf", PSR_x | PSR_s | PSR_f},
15845 {"xsc", PSR_x | PSR_s | PSR_c},
15846 {"xcf", PSR_x | PSR_c | PSR_f},
15847 {"xcs", PSR_x | PSR_c | PSR_s},
15848 {"cfs", PSR_c | PSR_f | PSR_s},
15849 {"cfx", PSR_c | PSR_f | PSR_x},
15850 {"csf", PSR_c | PSR_s | PSR_f},
15851 {"csx", PSR_c | PSR_s | PSR_x},
15852 {"cxf", PSR_c | PSR_x | PSR_f},
15853 {"cxs", PSR_c | PSR_x | PSR_s},
15854 {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
15855 {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
15856 {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
15857 {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
15858 {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
15859 {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
15860 {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
15861 {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
15862 {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
15863 {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
15864 {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
15865 {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
15866 {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
15867 {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
15868 {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
15869 {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
15870 {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
15871 {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
15872 {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
15873 {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
15874 {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
15875 {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
15876 {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
15877 {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
15880 /* Table of V7M psr names. */
15881 static const struct asm_psr v7m_psrs[] =
15883 {"apsr", 0 }, {"APSR", 0 },
15884 {"iapsr", 1 }, {"IAPSR", 1 },
15885 {"eapsr", 2 }, {"EAPSR", 2 },
15886 {"psr", 3 }, {"PSR", 3 },
15887 {"xpsr", 3 }, {"XPSR", 3 }, {"xPSR", 3 },
15888 {"ipsr", 5 }, {"IPSR", 5 },
15889 {"epsr", 6 }, {"EPSR", 6 },
15890 {"iepsr", 7 }, {"IEPSR", 7 },
15891 {"msp", 8 }, {"MSP", 8 },
15892 {"psp", 9 }, {"PSP", 9 },
15893 {"primask", 16}, {"PRIMASK", 16},
15894 {"basepri", 17}, {"BASEPRI", 17},
15895 {"basepri_max", 18}, {"BASEPRI_MAX", 18},
15896 {"faultmask", 19}, {"FAULTMASK", 19},
15897 {"control", 20}, {"CONTROL", 20}
15900 /* Table of all shift-in-operand names. */
15901 static const struct asm_shift_name shift_names [] =
15903 { "asl", SHIFT_LSL }, { "ASL", SHIFT_LSL },
15904 { "lsl", SHIFT_LSL }, { "LSL", SHIFT_LSL },
15905 { "lsr", SHIFT_LSR }, { "LSR", SHIFT_LSR },
15906 { "asr", SHIFT_ASR }, { "ASR", SHIFT_ASR },
15907 { "ror", SHIFT_ROR }, { "ROR", SHIFT_ROR },
15908 { "rrx", SHIFT_RRX }, { "RRX", SHIFT_RRX }
15911 /* Table of all explicit relocation names. */
15912 #ifdef OBJ_ELF
15913 static struct reloc_entry reloc_names[] =
15915 { "got", BFD_RELOC_ARM_GOT32 }, { "GOT", BFD_RELOC_ARM_GOT32 },
15916 { "gotoff", BFD_RELOC_ARM_GOTOFF }, { "GOTOFF", BFD_RELOC_ARM_GOTOFF },
15917 { "plt", BFD_RELOC_ARM_PLT32 }, { "PLT", BFD_RELOC_ARM_PLT32 },
15918 { "target1", BFD_RELOC_ARM_TARGET1 }, { "TARGET1", BFD_RELOC_ARM_TARGET1 },
15919 { "target2", BFD_RELOC_ARM_TARGET2 }, { "TARGET2", BFD_RELOC_ARM_TARGET2 },
15920 { "sbrel", BFD_RELOC_ARM_SBREL32 }, { "SBREL", BFD_RELOC_ARM_SBREL32 },
15921 { "tlsgd", BFD_RELOC_ARM_TLS_GD32}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32},
15922 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32},
15923 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32},
15924 { "gottpoff",BFD_RELOC_ARM_TLS_IE32}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
15925 { "tpoff", BFD_RELOC_ARM_TLS_LE32}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32}
15927 #endif
15929 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
15930 static const struct asm_cond conds[] =
15932 {"eq", 0x0},
15933 {"ne", 0x1},
15934 {"cs", 0x2}, {"hs", 0x2},
15935 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
15936 {"mi", 0x4},
15937 {"pl", 0x5},
15938 {"vs", 0x6},
15939 {"vc", 0x7},
15940 {"hi", 0x8},
15941 {"ls", 0x9},
15942 {"ge", 0xa},
15943 {"lt", 0xb},
15944 {"gt", 0xc},
15945 {"le", 0xd},
15946 {"al", 0xe}
15949 static struct asm_barrier_opt barrier_opt_names[] =
15951 { "sy", 0xf },
15952 { "un", 0x7 },
15953 { "st", 0xe },
15954 { "unst", 0x6 }
15957 /* Table of ARM-format instructions. */
15959 /* Macros for gluing together operand strings. N.B. In all cases
15960 other than OPS0, the trailing OP_stop comes from default
15961 zero-initialization of the unspecified elements of the array. */
15962 #define OPS0() { OP_stop, }
15963 #define OPS1(a) { OP_##a, }
15964 #define OPS2(a,b) { OP_##a,OP_##b, }
15965 #define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
15966 #define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
15967 #define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
15968 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
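/* So, for example, OPS3 (RR, oRR, SH) expands to { OP_RR, OP_oRR, OP_SH, },
   with the remaining array elements defaulting to OP_stop as noted above.  */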
15970 /* These macros abstract out the exact format of the mnemonic table and
15971 save some repeated characters. */
15973 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
15974 #define TxCE(mnem, op, top, nops, ops, ae, te) \
15975 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
15976 THUMB_VARIANT, do_##ae, do_##te }
15978 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
15979 a T_MNEM_xyz enumerator. */
15980 #define TCE(mnem, aop, top, nops, ops, ae, te) \
15981 TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
15982 #define tCE(mnem, aop, top, nops, ops, ae, te) \
15983 TxCE (mnem, aop, T_MNEM_##top, nops, ops, ae, te)
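/* As an illustration, with ARM_VARIANT and THUMB_VARIANT defined as they
   are at the start of the core-instruction block below, the table entry
   tCE(and, 0000000, and, 3, (RR, oRR, SH), arit, t_arit3c) expands to
   { "and", { OP_RR, OP_oRR, OP_SH, }, OT_csuffix, 0x0000000, T_MNEM_and,
     &arm_ext_v1, &arm_ext_v4t, do_arit, do_t_arit3c }.  */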
15985 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
15986 infix after the third character. */
15987 #define TxC3(mnem, op, top, nops, ops, ae, te) \
15988 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
15989 THUMB_VARIANT, do_##ae, do_##te }
15990 #define TxC3w(mnem, op, top, nops, ops, ae, te) \
15991 { #mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
15992 THUMB_VARIANT, do_##ae, do_##te }
15993 #define TC3(mnem, aop, top, nops, ops, ae, te) \
15994 TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
15995 #define TC3w(mnem, aop, top, nops, ops, ae, te) \
15996 TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
15997 #define tC3(mnem, aop, top, nops, ops, ae, te) \
15998 TxC3 (mnem, aop, T_MNEM_##top, nops, ops, ae, te)
15999 #define tC3w(mnem, aop, top, nops, ops, ae, te) \
16000 TxC3w (mnem, aop, T_MNEM_##top, nops, ops, ae, te)
16002 /* Mnemonic with a conditional infix in an unusual place. Each and every variant has to
16003 appear in the condition table. */
16004 #define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te) \
16005 { #m1 #m2 #m3, OPS##nops ops, sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (#m1) - 1, \
16006 0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }
16008 #define TxCM(m1, m2, op, top, nops, ops, ae, te) \
16009 TxCM_ (m1, , m2, op, top, nops, ops, ae, te), \
16010 TxCM_ (m1, eq, m2, op, top, nops, ops, ae, te), \
16011 TxCM_ (m1, ne, m2, op, top, nops, ops, ae, te), \
16012 TxCM_ (m1, cs, m2, op, top, nops, ops, ae, te), \
16013 TxCM_ (m1, hs, m2, op, top, nops, ops, ae, te), \
16014 TxCM_ (m1, cc, m2, op, top, nops, ops, ae, te), \
16015 TxCM_ (m1, ul, m2, op, top, nops, ops, ae, te), \
16016 TxCM_ (m1, lo, m2, op, top, nops, ops, ae, te), \
16017 TxCM_ (m1, mi, m2, op, top, nops, ops, ae, te), \
16018 TxCM_ (m1, pl, m2, op, top, nops, ops, ae, te), \
16019 TxCM_ (m1, vs, m2, op, top, nops, ops, ae, te), \
16020 TxCM_ (m1, vc, m2, op, top, nops, ops, ae, te), \
16021 TxCM_ (m1, hi, m2, op, top, nops, ops, ae, te), \
16022 TxCM_ (m1, ls, m2, op, top, nops, ops, ae, te), \
16023 TxCM_ (m1, ge, m2, op, top, nops, ops, ae, te), \
16024 TxCM_ (m1, lt, m2, op, top, nops, ops, ae, te), \
16025 TxCM_ (m1, gt, m2, op, top, nops, ops, ae, te), \
16026 TxCM_ (m1, le, m2, op, top, nops, ops, ae, te), \
16027 TxCM_ (m1, al, m2, op, top, nops, ops, ae, te)
16029 #define TCM(m1,m2, aop, top, nops, ops, ae, te) \
16030 TxCM (m1,m2, aop, 0x##top, nops, ops, ae, te)
16031 #define tCM(m1,m2, aop, top, nops, ops, ae, te) \
16032 TxCM (m1,m2, aop, T_MNEM_##top, nops, ops, ae, te)
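/* TxCM therefore emits the bare mnemonic plus one entry per condition,
   e.g. tCM(ld,sh, ...) further down yields "ldsh", "ldeqsh", "ldnesh",
   ... "ldalsh"; the conditional variants are tagged OT_odd_infix_0 + 2,
   telling opcode_lookup () that the condition sits at character index 2,
   while the bare form is tagged OT_odd_infix_unc.  */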
16034 /* Mnemonic that cannot be conditionalized. The ARM condition-code
16035 field is still 0xE. Many of the Thumb variants can be executed
16036 conditionally, so this is checked separately. */
16037 #define TUE(mnem, op, top, nops, ops, ae, te) \
16038 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
16039 THUMB_VARIANT, do_##ae, do_##te }
16041 /* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
16042 condition code field. */
16043 #define TUF(mnem, op, top, nops, ops, ae, te) \
16044 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
16045 THUMB_VARIANT, do_##ae, do_##te }
16047 /* ARM-only variants of all the above. */
16048 #define CE(mnem, op, nops, ops, ae) \
16049 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
16051 #define C3(mnem, op, nops, ops, ae) \
16052 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
16054 /* Legacy mnemonics that always have conditional infix after the third
16055 character. */
16056 #define CL(mnem, op, nops, ops, ae) \
16057 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
16058 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
16060 /* Coprocessor instructions. Isomorphic between Arm and Thumb-2. */
16061 #define cCE(mnem, op, nops, ops, ae) \
16062 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
16064 /* Legacy coprocessor instructions where conditional infix and conditional
16065 suffix are ambiguous. For consistency this includes all FPA instructions,
16066 not just the potentially ambiguous ones. */
16067 #define cCL(mnem, op, nops, ops, ae) \
16068 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
16069 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
16071 /* Coprocessor, takes either a suffix or a position-3 infix
16072 (for an FPA corner case). */
16073 #define C3E(mnem, op, nops, ops, ae) \
16074 { #mnem, OPS##nops ops, OT_csuf_or_in3, \
16075 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
16077 #define xCM_(m1, m2, m3, op, nops, ops, ae) \
16078 { #m1 #m2 #m3, OPS##nops ops, \
16079 sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (#m1) - 1, \
16080 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
16082 #define CM(m1, m2, op, nops, ops, ae) \
16083 xCM_ (m1, , m2, op, nops, ops, ae), \
16084 xCM_ (m1, eq, m2, op, nops, ops, ae), \
16085 xCM_ (m1, ne, m2, op, nops, ops, ae), \
16086 xCM_ (m1, cs, m2, op, nops, ops, ae), \
16087 xCM_ (m1, hs, m2, op, nops, ops, ae), \
16088 xCM_ (m1, cc, m2, op, nops, ops, ae), \
16089 xCM_ (m1, ul, m2, op, nops, ops, ae), \
16090 xCM_ (m1, lo, m2, op, nops, ops, ae), \
16091 xCM_ (m1, mi, m2, op, nops, ops, ae), \
16092 xCM_ (m1, pl, m2, op, nops, ops, ae), \
16093 xCM_ (m1, vs, m2, op, nops, ops, ae), \
16094 xCM_ (m1, vc, m2, op, nops, ops, ae), \
16095 xCM_ (m1, hi, m2, op, nops, ops, ae), \
16096 xCM_ (m1, ls, m2, op, nops, ops, ae), \
16097 xCM_ (m1, ge, m2, op, nops, ops, ae), \
16098 xCM_ (m1, lt, m2, op, nops, ops, ae), \
16099 xCM_ (m1, gt, m2, op, nops, ops, ae), \
16100 xCM_ (m1, le, m2, op, nops, ops, ae), \
16101 xCM_ (m1, al, m2, op, nops, ops, ae)
16103 #define UE(mnem, op, nops, ops, ae) \
16104 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
16106 #define UF(mnem, op, nops, ops, ae) \
16107 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
16109 /* Neon data-processing. ARM versions are unconditional with cond=0xf.
16110 The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
16111 use the same encoding function for each. */
16112 #define NUF(mnem, op, nops, ops, enc) \
16113 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
16114 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
16116 /* Neon data processing, version which indirects through neon_enc_tab for
16117 the various overloaded versions of opcodes. */
16118 #define nUF(mnem, op, nops, ops, enc) \
16119 { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM_##op, N_MNEM_##op, \
16120 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
16122 /* Neon insn with conditional suffix for the ARM version, non-overloaded
16123 version. */
16124 #define NCE_tag(mnem, op, nops, ops, enc, tag) \
16125 { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
16126 THUMB_VARIANT, do_##enc, do_##enc }
16128 #define NCE(mnem, op, nops, ops, enc) \
16129 NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
16131 #define NCEF(mnem, op, nops, ops, enc) \
16132 NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
16134 /* Neon insn with conditional suffix for the ARM version, overloaded types. */
16135 #define nCE_tag(mnem, op, nops, ops, enc, tag) \
16136 { #mnem, OPS##nops ops, tag, N_MNEM_##op, N_MNEM_##op, \
16137 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
16139 #define nCE(mnem, op, nops, ops, enc) \
16140 nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
16142 #define nCEF(mnem, op, nops, ops, enc) \
16143 nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
16145 #define do_0 0
16147 /* Thumb-only, unconditional. */
16148 #define UT(mnem, op, nops, ops, te) TUE (mnem, 0, op, nops, ops, 0, te)
16150 static const struct asm_opcode insns[] =
16152 #define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions. */
16153 #define THUMB_VARIANT &arm_ext_v4t
16154 tCE(and, 0000000, and, 3, (RR, oRR, SH), arit, t_arit3c),
16155 tC3(ands, 0100000, ands, 3, (RR, oRR, SH), arit, t_arit3c),
16156 tCE(eor, 0200000, eor, 3, (RR, oRR, SH), arit, t_arit3c),
16157 tC3(eors, 0300000, eors, 3, (RR, oRR, SH), arit, t_arit3c),
16158 tCE(sub, 0400000, sub, 3, (RR, oRR, SH), arit, t_add_sub),
16159 tC3(subs, 0500000, subs, 3, (RR, oRR, SH), arit, t_add_sub),
16160 tCE(add, 0800000, add, 3, (RR, oRR, SHG), arit, t_add_sub),
16161 tC3(adds, 0900000, adds, 3, (RR, oRR, SHG), arit, t_add_sub),
16162 tCE(adc, 0a00000, adc, 3, (RR, oRR, SH), arit, t_arit3c),
16163 tC3(adcs, 0b00000, adcs, 3, (RR, oRR, SH), arit, t_arit3c),
16164 tCE(sbc, 0c00000, sbc, 3, (RR, oRR, SH), arit, t_arit3),
16165 tC3(sbcs, 0d00000, sbcs, 3, (RR, oRR, SH), arit, t_arit3),
16166 tCE(orr, 1800000, orr, 3, (RR, oRR, SH), arit, t_arit3c),
16167 tC3(orrs, 1900000, orrs, 3, (RR, oRR, SH), arit, t_arit3c),
16168 tCE(bic, 1c00000, bic, 3, (RR, oRR, SH), arit, t_arit3),
16169 tC3(bics, 1d00000, bics, 3, (RR, oRR, SH), arit, t_arit3),
16171 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
16172 for setting PSR flag bits. They are obsolete in V6 and do not
16173 have Thumb equivalents. */
16174 tCE(tst, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst),
16175 tC3w(tsts, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst),
16176 CL(tstp, 110f000, 2, (RR, SH), cmp),
16177 tCE(cmp, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp),
16178 tC3w(cmps, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp),
16179 CL(cmpp, 150f000, 2, (RR, SH), cmp),
16180 tCE(cmn, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst),
16181 tC3w(cmns, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst),
16182 CL(cmnp, 170f000, 2, (RR, SH), cmp),
16184 tCE(mov, 1a00000, mov, 2, (RR, SH), mov, t_mov_cmp),
16185 tC3(movs, 1b00000, movs, 2, (RR, SH), mov, t_mov_cmp),
16186 tCE(mvn, 1e00000, mvn, 2, (RR, SH), mov, t_mvn_tst),
16187 tC3(mvns, 1f00000, mvns, 2, (RR, SH), mov, t_mvn_tst),
16189 tCE(ldr, 4100000, ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
16190 tC3(ldrb, 4500000, ldrb, 2, (RR, ADDRGLDR),ldst, t_ldst),
16191 tCE(str, 4000000, str, 2, (RR, ADDRGLDR),ldst, t_ldst),
16192 tC3(strb, 4400000, strb, 2, (RR, ADDRGLDR),ldst, t_ldst),
16194 tCE(stm, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16195 tC3(stmia, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16196 tC3(stmea, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16197 tCE(ldm, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16198 tC3(ldmia, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16199 tC3(ldmfd, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16201 TCE(swi, f000000, df00, 1, (EXPi), swi, t_swi),
16202 TCE(svc, f000000, df00, 1, (EXPi), swi, t_swi),
16203 tCE(b, a000000, b, 1, (EXPr), branch, t_branch),
16204 TCE(bl, b000000, f000f800, 1, (EXPr), bl, t_branch23),
16206 /* Pseudo ops. */
16207 tCE(adr, 28f0000, adr, 2, (RR, EXP), adr, t_adr),
16208 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
16209 tCE(nop, 1a00000, nop, 1, (oI255c), nop, t_nop),
16211 /* Thumb-compatibility pseudo ops. */
16212 tCE(lsl, 1a00000, lsl, 3, (RR, oRR, SH), shift, t_shift),
16213 tC3(lsls, 1b00000, lsls, 3, (RR, oRR, SH), shift, t_shift),
16214 tCE(lsr, 1a00020, lsr, 3, (RR, oRR, SH), shift, t_shift),
16215 tC3(lsrs, 1b00020, lsrs, 3, (RR, oRR, SH), shift, t_shift),
16216 tCE(asr, 1a00040, asr, 3, (RR, oRR, SH), shift, t_shift),
16217 tC3(asrs, 1b00040, asrs, 3, (RR, oRR, SH), shift, t_shift),
16218 tCE(ror, 1a00060, ror, 3, (RR, oRR, SH), shift, t_shift),
16219 tC3(rors, 1b00060, rors, 3, (RR, oRR, SH), shift, t_shift),
16220 tCE(neg, 2600000, neg, 2, (RR, RR), rd_rn, t_neg),
16221 tC3(negs, 2700000, negs, 2, (RR, RR), rd_rn, t_neg),
16222 tCE(push, 92d0000, push, 1, (REGLST), push_pop, t_push_pop),
16223 tCE(pop, 8bd0000, pop, 1, (REGLST), push_pop, t_push_pop),
16225 /* These may simplify to neg. */
16226 TCE(rsb, 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
16227 TC3(rsbs, 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
16229 #undef THUMB_VARIANT
16230 #define THUMB_VARIANT & arm_ext_v6
16232 TCE(cpy, 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
16234 /* V1 instructions with no Thumb analogue prior to V6T2. */
16235 #undef THUMB_VARIANT
16236 #define THUMB_VARIANT & arm_ext_v6t2
16238 TCE(teq, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
16239 TC3w(teqs, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
16240 CL(teqp, 130f000, 2, (RR, SH), cmp),
16242 TC3(ldrt, 4300000, f8500e00, 2, (RR, ADDR), ldstt, t_ldstt),
16243 TC3(ldrbt, 4700000, f8100e00, 2, (RR, ADDR), ldstt, t_ldstt),
16244 TC3(strt, 4200000, f8400e00, 2, (RR, ADDR), ldstt, t_ldstt),
16245 TC3(strbt, 4600000, f8000e00, 2, (RR, ADDR), ldstt, t_ldstt),
16247 TC3(stmdb, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16248 TC3(stmfd, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16250 TC3(ldmdb, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16251 TC3(ldmea, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16253 /* V1 instructions with no Thumb analogue at all. */
16254 CE(rsc, 0e00000, 3, (RR, oRR, SH), arit),
16255 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
16257 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
16258 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
16259 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
16260 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
16261 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
16262 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
16263 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
16264 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
16266 #undef ARM_VARIANT
16267 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
16268 #undef THUMB_VARIANT
16269 #define THUMB_VARIANT & arm_ext_v4t
16271 tCE(mul, 0000090, mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
16272 tC3(muls, 0100090, muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
16274 #undef THUMB_VARIANT
16275 #define THUMB_VARIANT & arm_ext_v6t2
16277 TCE(mla, 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
16278 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
16280 /* Generic coprocessor instructions. */
16281 TCE(cdp, e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
16282 TCE(ldc, c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
16283 TC3(ldcl, c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
16284 TCE(stc, c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
16285 TC3(stcl, c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
16286 TCE(mcr, e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
16287 TCE(mrc, e100010, ee100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
16289 #undef ARM_VARIANT
16290 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
16292 CE(swp, 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
16293 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
16295 #undef ARM_VARIANT
16296 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
16297 #undef THUMB_VARIANT
16298 #define THUMB_VARIANT & arm_ext_msr
16300 TCE(mrs, 10f0000, f3ef8000, 2, (APSR_RR, RVC_PSR), mrs, t_mrs),
16301 TCE(msr, 120f000, f3808000, 2, (RVC_PSR, RR_EXi), msr, t_msr),
16303 #undef ARM_VARIANT
16304 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
16305 #undef THUMB_VARIANT
16306 #define THUMB_VARIANT & arm_ext_v6t2
16308 TCE(smull, 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
16309 CM(smull,s, 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
16310 TCE(umull, 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
16311 CM(umull,s, 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
16312 TCE(smlal, 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
16313 CM(smlal,s, 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
16314 TCE(umlal, 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
16315 CM(umlal,s, 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
16317 #undef ARM_VARIANT
16318 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
16319 #undef THUMB_VARIANT
16320 #define THUMB_VARIANT & arm_ext_v4t
16322 tC3(ldrh, 01000b0, ldrh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
16323 tC3(strh, 00000b0, strh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
16324 tC3(ldrsh, 01000f0, ldrsh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
16325 tC3(ldrsb, 01000d0, ldrsb, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
16326 tCM(ld,sh, 01000f0, ldrsh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
16327 tCM(ld,sb, 01000d0, ldrsb, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
16329 #undef ARM_VARIANT
16330 #define ARM_VARIANT & arm_ext_v4t_5
16332 /* ARM Architecture 4T. */
16333 /* Note: bx (and blx) are required on V5, even if the processor does
16334 not support Thumb. */
16335 TCE(bx, 12fff10, 4700, 1, (RR), bx, t_bx),
16337 #undef ARM_VARIANT
16338 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
16339 #undef THUMB_VARIANT
16340 #define THUMB_VARIANT & arm_ext_v5t
16342 /* Note: blx has 2 variants; the .value coded here is for
16343 BLX(2). Only this variant has conditional execution. */
16344 TCE(blx, 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
16345 TUE(bkpt, 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
16347 #undef THUMB_VARIANT
16348 #define THUMB_VARIANT & arm_ext_v6t2
16350 TCE(clz, 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
16351 TUF(ldc2, c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
16352 TUF(ldc2l, c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
16353 TUF(stc2, c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
16354 TUF(stc2l, c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
16355 TUF(cdp2, e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
16356 TUF(mcr2, e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
16357 TUF(mrc2, e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
16359 #undef ARM_VARIANT
16360 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
16362 TCE(smlabb, 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
16363 TCE(smlatb, 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
16364 TCE(smlabt, 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
16365 TCE(smlatt, 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
16367 TCE(smlawb, 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
16368 TCE(smlawt, 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
16370 TCE(smlalbb, 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
16371 TCE(smlaltb, 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
16372 TCE(smlalbt, 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
16373 TCE(smlaltt, 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
16375 TCE(smulbb, 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16376 TCE(smultb, 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16377 TCE(smulbt, 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16378 TCE(smultt, 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16380 TCE(smulwb, 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16381 TCE(smulwt, 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16383 TCE(qadd, 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd),
16384 TCE(qdadd, 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd),
16385 TCE(qsub, 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd),
16386 TCE(qdsub, 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd),
16388 #undef ARM_VARIANT
16389 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
16391 TUF(pld, 450f000, f810f000, 1, (ADDR), pld, t_pld),
16392 TC3(ldrd, 00000d0, e8500000, 3, (RRnpc, oRRnpc, ADDRGLDRS), ldrd, t_ldstd),
16393 TC3(strd, 00000f0, e8400000, 3, (RRnpc, oRRnpc, ADDRGLDRS), ldrd, t_ldstd),
16395 TCE(mcrr, c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
16396 TCE(mrrc, c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
16398 #undef ARM_VARIANT
16399 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
16401 TCE(bxj, 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
16403 #undef ARM_VARIANT
16404 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
16405 #undef THUMB_VARIANT
16406 #define THUMB_VARIANT & arm_ext_v6
16408 TUF(cpsie, 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
16409 TUF(cpsid, 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
16410 tCE(rev, 6bf0f30, rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
16411 tCE(rev16, 6bf0fb0, rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
16412 tCE(revsh, 6ff0fb0, revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
16413 tCE(sxth, 6bf0070, sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
16414 tCE(uxth, 6ff0070, uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
16415 tCE(sxtb, 6af0070, sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
16416 tCE(uxtb, 6ef0070, uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
16417 TUF(setend, 1010000, b650, 1, (ENDI), setend, t_setend),
16419 #undef THUMB_VARIANT
16420 #define THUMB_VARIANT & arm_ext_v6t2
16422 TCE(ldrex, 1900f9f, e8500f00, 2, (RRnpc, ADDR), ldrex, t_ldrex),
16423 TCE(strex, 1800f90, e8400000, 3, (RRnpc, RRnpc, ADDR), strex, t_strex),
16424 TUF(mcrr2, c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
16425 TUF(mrrc2, c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
16427 TCE(ssat, 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
16428 TCE(usat, 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
16430 /* ARM V6 not included in V7M (e.g. integer SIMD). */
16431 #undef THUMB_VARIANT
16432 #define THUMB_VARIANT & arm_ext_v6_notm
16434 TUF(cps, 1020000, f3af8100, 1, (I31b), imm0, t_cps),
16435 TCE(pkhbt, 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
16436 TCE(pkhtb, 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
16437 TCE(qadd16, 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16438 TCE(qadd8, 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16439 TCE(qasx, 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16440 /* Old name for QASX. */
16441 TCE(qaddsubx, 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16442 TCE(qsax, 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16443 /* Old name for QSAX. */
16444 TCE(qsubaddx, 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16445 TCE(qsub16, 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16446 TCE(qsub8, 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16447 TCE(sadd16, 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16448 TCE(sadd8, 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16449 TCE(sasx, 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16450 /* Old name for SASX. */
16451 TCE(saddsubx, 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16452 TCE(shadd16, 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16453 TCE(shadd8, 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16454 TCE(shasx, 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16455 /* Old name for SHASX. */
16456 TCE(shaddsubx, 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16457 TCE(shsax, 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16458 /* Old name for SHSAX. */
16459 TCE(shsubaddx, 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16460 TCE(shsub16, 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16461 TCE(shsub8, 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16462 TCE(ssax, 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16463 /* Old name for SSAX. */
16464 TCE(ssubaddx, 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16465 TCE(ssub16, 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16466 TCE(ssub8, 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16467 TCE(uadd16, 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16468 TCE(uadd8, 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16469 TCE(uasx, 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16470 /* Old name for UASX. */
16471 TCE(uaddsubx, 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16472 TCE(uhadd16, 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16473 TCE(uhadd8, 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16474 TCE(uhasx, 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16475 /* Old name for UHASX. */
16476 TCE(uhaddsubx, 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16477 TCE(uhsax, 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16478 /* Old name for UHSAX. */
16479 TCE(uhsubaddx, 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16480 TCE(uhsub16, 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16481 TCE(uhsub8, 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16482 TCE(uqadd16, 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16483 TCE(uqadd8, 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16484 TCE(uqasx, 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16485 /* Old name for UQASX. */
16486 TCE(uqaddsubx, 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16487 TCE(uqsax, 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16488 /* Old name for UQSAX. */
16489 TCE(uqsubaddx, 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16490 TCE(uqsub16, 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16491 TCE(uqsub8, 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16492 TCE(usub16, 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16493 TCE(usax, 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16494 /* Old name for USAX. */
16495 TCE(usubaddx, 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16496 TCE(usub8, 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16497 TUF(rfeia, 8900a00, e990c000, 1, (RRw), rfe, rfe),
16498 UF(rfeib, 9900a00, 1, (RRw), rfe),
16499 UF(rfeda, 8100a00, 1, (RRw), rfe),
16500 TUF(rfedb, 9100a00, e810c000, 1, (RRw), rfe, rfe),
16501 TUF(rfefd, 8900a00, e990c000, 1, (RRw), rfe, rfe),
16502 UF(rfefa, 9900a00, 1, (RRw), rfe),
16503 UF(rfeea, 8100a00, 1, (RRw), rfe),
16504 TUF(rfeed, 9100a00, e810c000, 1, (RRw), rfe, rfe),
16505 TCE(sxtah, 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
16506 TCE(sxtab16, 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
16507 TCE(sxtab, 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
16508 TCE(sxtb16, 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
16509 TCE(uxtah, 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
16510 TCE(uxtab16, 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
16511 TCE(uxtab, 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
16512 TCE(uxtb16, 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
16513 TCE(sel, 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16514 TCE(smlad, 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
16515 TCE(smladx, 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
16516 TCE(smlald, 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
16517 TCE(smlaldx, 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
16518 TCE(smlsd, 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
16519 TCE(smlsdx, 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
16520 TCE(smlsld, 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
16521 TCE(smlsldx, 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
16522 TCE(smmla, 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
16523 TCE(smmlar, 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
16524 TCE(smmls, 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
16525 TCE(smmlsr, 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
16526 TCE(smmul, 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16527 TCE(smmulr, 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16528 TCE(smuad, 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16529 TCE(smuadx, 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16530 TCE(smusd, 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16531 TCE(smusdx, 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16532 TUF(srsia, 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
16533 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
16534 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
16535 TUF(srsdb, 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
16536 TCE(ssat16, 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
16537 TCE(umaal, 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
16538 TCE(usad8, 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16539 TCE(usada8, 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
16540 TCE(usat16, 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
16542 #undef ARM_VARIANT
16543 #define ARM_VARIANT & arm_ext_v6k
16544 #undef THUMB_VARIANT
16545 #define THUMB_VARIANT & arm_ext_v6k
16547 tCE(yield, 320f001, yield, 0, (), noargs, t_hint),
16548 tCE(wfe, 320f002, wfe, 0, (), noargs, t_hint),
16549 tCE(wfi, 320f003, wfi, 0, (), noargs, t_hint),
16550 tCE(sev, 320f004, sev, 0, (), noargs, t_hint),
16552 #undef THUMB_VARIANT
16553 #define THUMB_VARIANT & arm_ext_v6_notm
16555 TCE(ldrexd, 1b00f9f, e8d0007f, 3, (RRnpc, oRRnpc, RRnpcb), ldrexd, t_ldrexd),
16556 TCE(strexd, 1a00f90, e8c00070, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb), strexd, t_strexd),
16558 #undef THUMB_VARIANT
16559 #define THUMB_VARIANT & arm_ext_v6t2
16561 TCE(ldrexb, 1d00f9f, e8d00f4f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
16562 TCE(ldrexh, 1f00f9f, e8d00f5f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
16563 TCE(strexb, 1c00f90, e8c00f40, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
16564 TCE(strexh, 1e00f90, e8c00f50, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
16565 TUF(clrex, 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
16567 #undef ARM_VARIANT
16568 #define ARM_VARIANT & arm_ext_v6z
16570 TCE(smc, 1600070, f7f08000, 1, (EXPi), smc, t_smc),
16572 #undef ARM_VARIANT
16573 #define ARM_VARIANT & arm_ext_v6t2
16575 TCE(bfc, 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
16576 TCE(bfi, 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
16577 TCE(sbfx, 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
16578 TCE(ubfx, 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
16580 TCE(mls, 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
16581 TCE(movw, 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
16582 TCE(movt, 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
16583 TCE(rbit, 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
16585 TC3(ldrht, 03000b0, f8300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
16586 TC3(ldrsht, 03000f0, f9300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
16587 TC3(ldrsbt, 03000d0, f9100e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
16588 TC3(strht, 02000b0, f8200e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
16590 UT(cbnz, b900, 2, (RR, EXP), t_cbz),
16591 UT(cbz, b100, 2, (RR, EXP), t_cbz),
16593 /* ARM does not really have an IT instruction, so always allow it.
16594 The opcode is copied from Thumb in order to allow warnings in
16595 -mimplicit-it=[never | arm] modes. */
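 /* For illustration, a typical Thumb-2 IT block looks like:

	ite	eq
	moveq	r0, #1
	movne	r0, #0

    In ARM state the same "ite eq" line is still accepted, purely so that
    the -mimplicit-it checks described above can be applied to the
    conditional instructions that follow.  */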
16596 #undef ARM_VARIANT
16597 #define ARM_VARIANT & arm_ext_v1
16599 TUE(it, bf08, bf08, 1, (COND), it, t_it),
16600 TUE(itt, bf0c, bf0c, 1, (COND), it, t_it),
16601 TUE(ite, bf04, bf04, 1, (COND), it, t_it),
16602 TUE(ittt, bf0e, bf0e, 1, (COND), it, t_it),
16603 TUE(itet, bf06, bf06, 1, (COND), it, t_it),
16604 TUE(itte, bf0a, bf0a, 1, (COND), it, t_it),
16605 TUE(itee, bf02, bf02, 1, (COND), it, t_it),
16606 TUE(itttt, bf0f, bf0f, 1, (COND), it, t_it),
16607 TUE(itett, bf07, bf07, 1, (COND), it, t_it),
16608 TUE(ittet, bf0b, bf0b, 1, (COND), it, t_it),
16609 TUE(iteet, bf03, bf03, 1, (COND), it, t_it),
16610 TUE(ittte, bf0d, bf0d, 1, (COND), it, t_it),
16611 TUE(itete, bf05, bf05, 1, (COND), it, t_it),
16612 TUE(ittee, bf09, bf09, 1, (COND), it, t_it),
16613 TUE(iteee, bf01, bf01, 1, (COND), it, t_it),
16614 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
16615 TC3(rrx, 01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
16616 TC3(rrxs, 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
16618 /* Thumb-2 only instructions. */
16619 #undef ARM_VARIANT
16620 #define ARM_VARIANT NULL
16622 TCE(addw, 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
16623 TCE(subw, 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
16624 TCE(orn, 0, ea600000, 3, (RR, oRR, SH), 0, t_orn),
16625 TCE(orns, 0, ea700000, 3, (RR, oRR, SH), 0, t_orn),
16626 TCE(tbb, 0, e8d0f000, 1, (TB), 0, t_tb),
16627 TCE(tbh, 0, e8d0f010, 1, (TB), 0, t_tb),
16629 /* Thumb-2 hardware division instructions (R and M profiles only). */
16630 #undef THUMB_VARIANT
16631 #define THUMB_VARIANT & arm_ext_div
16633 TCE(sdiv, 0, fb90f0f0, 3, (RR, oRR, RR), 0, t_div),
16634 TCE(udiv, 0, fbb0f0f0, 3, (RR, oRR, RR), 0, t_div),
16636 /* ARM V6M/V7 instructions. */
16637 #undef ARM_VARIANT
16638 #define ARM_VARIANT & arm_ext_barrier
16639 #undef THUMB_VARIANT
16640 #define THUMB_VARIANT & arm_ext_barrier
16642 TUF(dmb, 57ff050, f3bf8f50, 1, (oBARRIER), barrier, t_barrier),
16643 TUF(dsb, 57ff040, f3bf8f40, 1, (oBARRIER), barrier, t_barrier),
16644 TUF(isb, 57ff060, f3bf8f60, 1, (oBARRIER), barrier, t_barrier),
16646 /* ARM V7 instructions. */
16647 #undef ARM_VARIANT
16648 #define ARM_VARIANT & arm_ext_v7
16649 #undef THUMB_VARIANT
16650 #define THUMB_VARIANT & arm_ext_v7
16652 TUF(pli, 450f000, f910f000, 1, (ADDR), pli, t_pld),
16653 TCE(dbg, 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
16655 #undef ARM_VARIANT
16656 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
16658 cCE(wfs, e200110, 1, (RR), rd),
16659 cCE(rfs, e300110, 1, (RR), rd),
16660 cCE(wfc, e400110, 1, (RR), rd),
16661 cCE(rfc, e500110, 1, (RR), rd),
16663 cCL(ldfs, c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
16664 cCL(ldfd, c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
16665 cCL(ldfe, c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
16666 cCL(ldfp, c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
16668 cCL(stfs, c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
16669 cCL(stfd, c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
16670 cCL(stfe, c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
16671 cCL(stfp, c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
16673 cCL(mvfs, e008100, 2, (RF, RF_IF), rd_rm),
16674 cCL(mvfsp, e008120, 2, (RF, RF_IF), rd_rm),
16675 cCL(mvfsm, e008140, 2, (RF, RF_IF), rd_rm),
16676 cCL(mvfsz, e008160, 2, (RF, RF_IF), rd_rm),
16677 cCL(mvfd, e008180, 2, (RF, RF_IF), rd_rm),
16678 cCL(mvfdp, e0081a0, 2, (RF, RF_IF), rd_rm),
16679 cCL(mvfdm, e0081c0, 2, (RF, RF_IF), rd_rm),
16680 cCL(mvfdz, e0081e0, 2, (RF, RF_IF), rd_rm),
16681 cCL(mvfe, e088100, 2, (RF, RF_IF), rd_rm),
16682 cCL(mvfep, e088120, 2, (RF, RF_IF), rd_rm),
16683 cCL(mvfem, e088140, 2, (RF, RF_IF), rd_rm),
16684 cCL(mvfez, e088160, 2, (RF, RF_IF), rd_rm),
16686 cCL(mnfs, e108100, 2, (RF, RF_IF), rd_rm),
16687 cCL(mnfsp, e108120, 2, (RF, RF_IF), rd_rm),
16688 cCL(mnfsm, e108140, 2, (RF, RF_IF), rd_rm),
16689 cCL(mnfsz, e108160, 2, (RF, RF_IF), rd_rm),
16690 cCL(mnfd, e108180, 2, (RF, RF_IF), rd_rm),
16691 cCL(mnfdp, e1081a0, 2, (RF, RF_IF), rd_rm),
16692 cCL(mnfdm, e1081c0, 2, (RF, RF_IF), rd_rm),
16693 cCL(mnfdz, e1081e0, 2, (RF, RF_IF), rd_rm),
16694 cCL(mnfe, e188100, 2, (RF, RF_IF), rd_rm),
16695 cCL(mnfep, e188120, 2, (RF, RF_IF), rd_rm),
16696 cCL(mnfem, e188140, 2, (RF, RF_IF), rd_rm),
16697 cCL(mnfez, e188160, 2, (RF, RF_IF), rd_rm),
16699 cCL(abss, e208100, 2, (RF, RF_IF), rd_rm),
16700 cCL(abssp, e208120, 2, (RF, RF_IF), rd_rm),
16701 cCL(abssm, e208140, 2, (RF, RF_IF), rd_rm),
16702 cCL(abssz, e208160, 2, (RF, RF_IF), rd_rm),
16703 cCL(absd, e208180, 2, (RF, RF_IF), rd_rm),
16704 cCL(absdp, e2081a0, 2, (RF, RF_IF), rd_rm),
16705 cCL(absdm, e2081c0, 2, (RF, RF_IF), rd_rm),
16706 cCL(absdz, e2081e0, 2, (RF, RF_IF), rd_rm),
16707 cCL(abse, e288100, 2, (RF, RF_IF), rd_rm),
16708 cCL(absep, e288120, 2, (RF, RF_IF), rd_rm),
16709 cCL(absem, e288140, 2, (RF, RF_IF), rd_rm),
16710 cCL(absez, e288160, 2, (RF, RF_IF), rd_rm),
16712 cCL(rnds, e308100, 2, (RF, RF_IF), rd_rm),
16713 cCL(rndsp, e308120, 2, (RF, RF_IF), rd_rm),
16714 cCL(rndsm, e308140, 2, (RF, RF_IF), rd_rm),
16715 cCL(rndsz, e308160, 2, (RF, RF_IF), rd_rm),
16716 cCL(rndd, e308180, 2, (RF, RF_IF), rd_rm),
16717 cCL(rnddp, e3081a0, 2, (RF, RF_IF), rd_rm),
16718 cCL(rnddm, e3081c0, 2, (RF, RF_IF), rd_rm),
16719 cCL(rnddz, e3081e0, 2, (RF, RF_IF), rd_rm),
16720 cCL(rnde, e388100, 2, (RF, RF_IF), rd_rm),
16721 cCL(rndep, e388120, 2, (RF, RF_IF), rd_rm),
16722 cCL(rndem, e388140, 2, (RF, RF_IF), rd_rm),
16723 cCL(rndez, e388160, 2, (RF, RF_IF), rd_rm),
16725 cCL(sqts, e408100, 2, (RF, RF_IF), rd_rm),
16726 cCL(sqtsp, e408120, 2, (RF, RF_IF), rd_rm),
16727 cCL(sqtsm, e408140, 2, (RF, RF_IF), rd_rm),
16728 cCL(sqtsz, e408160, 2, (RF, RF_IF), rd_rm),
16729 cCL(sqtd, e408180, 2, (RF, RF_IF), rd_rm),
16730 cCL(sqtdp, e4081a0, 2, (RF, RF_IF), rd_rm),
16731 cCL(sqtdm, e4081c0, 2, (RF, RF_IF), rd_rm),
16732 cCL(sqtdz, e4081e0, 2, (RF, RF_IF), rd_rm),
16733 cCL(sqte, e488100, 2, (RF, RF_IF), rd_rm),
16734 cCL(sqtep, e488120, 2, (RF, RF_IF), rd_rm),
16735 cCL(sqtem, e488140, 2, (RF, RF_IF), rd_rm),
16736 cCL(sqtez, e488160, 2, (RF, RF_IF), rd_rm),
16738 cCL(logs, e508100, 2, (RF, RF_IF), rd_rm),
16739 cCL(logsp, e508120, 2, (RF, RF_IF), rd_rm),
16740 cCL(logsm, e508140, 2, (RF, RF_IF), rd_rm),
16741 cCL(logsz, e508160, 2, (RF, RF_IF), rd_rm),
16742 cCL(logd, e508180, 2, (RF, RF_IF), rd_rm),
16743 cCL(logdp, e5081a0, 2, (RF, RF_IF), rd_rm),
16744 cCL(logdm, e5081c0, 2, (RF, RF_IF), rd_rm),
16745 cCL(logdz, e5081e0, 2, (RF, RF_IF), rd_rm),
16746 cCL(loge, e588100, 2, (RF, RF_IF), rd_rm),
16747 cCL(logep, e588120, 2, (RF, RF_IF), rd_rm),
16748 cCL(logem, e588140, 2, (RF, RF_IF), rd_rm),
16749 cCL(logez, e588160, 2, (RF, RF_IF), rd_rm),
16751 cCL(lgns, e608100, 2, (RF, RF_IF), rd_rm),
16752 cCL(lgnsp, e608120, 2, (RF, RF_IF), rd_rm),
16753 cCL(lgnsm, e608140, 2, (RF, RF_IF), rd_rm),
16754 cCL(lgnsz, e608160, 2, (RF, RF_IF), rd_rm),
16755 cCL(lgnd, e608180, 2, (RF, RF_IF), rd_rm),
16756 cCL(lgndp, e6081a0, 2, (RF, RF_IF), rd_rm),
16757 cCL(lgndm, e6081c0, 2, (RF, RF_IF), rd_rm),
16758 cCL(lgndz, e6081e0, 2, (RF, RF_IF), rd_rm),
16759 cCL(lgne, e688100, 2, (RF, RF_IF), rd_rm),
16760 cCL(lgnep, e688120, 2, (RF, RF_IF), rd_rm),
16761 cCL(lgnem, e688140, 2, (RF, RF_IF), rd_rm),
16762 cCL(lgnez, e688160, 2, (RF, RF_IF), rd_rm),
16764 cCL(exps, e708100, 2, (RF, RF_IF), rd_rm),
16765 cCL(expsp, e708120, 2, (RF, RF_IF), rd_rm),
16766 cCL(expsm, e708140, 2, (RF, RF_IF), rd_rm),
16767 cCL(expsz, e708160, 2, (RF, RF_IF), rd_rm),
16768 cCL(expd, e708180, 2, (RF, RF_IF), rd_rm),
16769 cCL(expdp, e7081a0, 2, (RF, RF_IF), rd_rm),
16770 cCL(expdm, e7081c0, 2, (RF, RF_IF), rd_rm),
16771 cCL(expdz, e7081e0, 2, (RF, RF_IF), rd_rm),
16772 cCL(expe, e788100, 2, (RF, RF_IF), rd_rm),
16773 cCL(expep, e788120, 2, (RF, RF_IF), rd_rm),
16774 cCL(expem, e788140, 2, (RF, RF_IF), rd_rm),
16775 cCL(expez, e788160, 2, (RF, RF_IF), rd_rm),
16777 cCL(sins, e808100, 2, (RF, RF_IF), rd_rm),
16778 cCL(sinsp, e808120, 2, (RF, RF_IF), rd_rm),
16779 cCL(sinsm, e808140, 2, (RF, RF_IF), rd_rm),
16780 cCL(sinsz, e808160, 2, (RF, RF_IF), rd_rm),
16781 cCL(sind, e808180, 2, (RF, RF_IF), rd_rm),
16782 cCL(sindp, e8081a0, 2, (RF, RF_IF), rd_rm),
16783 cCL(sindm, e8081c0, 2, (RF, RF_IF), rd_rm),
16784 cCL(sindz, e8081e0, 2, (RF, RF_IF), rd_rm),
16785 cCL(sine, e888100, 2, (RF, RF_IF), rd_rm),
16786 cCL(sinep, e888120, 2, (RF, RF_IF), rd_rm),
16787 cCL(sinem, e888140, 2, (RF, RF_IF), rd_rm),
16788 cCL(sinez, e888160, 2, (RF, RF_IF), rd_rm),
16790 cCL(coss, e908100, 2, (RF, RF_IF), rd_rm),
16791 cCL(cossp, e908120, 2, (RF, RF_IF), rd_rm),
16792 cCL(cossm, e908140, 2, (RF, RF_IF), rd_rm),
16793 cCL(cossz, e908160, 2, (RF, RF_IF), rd_rm),
16794 cCL(cosd, e908180, 2, (RF, RF_IF), rd_rm),
16795 cCL(cosdp, e9081a0, 2, (RF, RF_IF), rd_rm),
16796 cCL(cosdm, e9081c0, 2, (RF, RF_IF), rd_rm),
16797 cCL(cosdz, e9081e0, 2, (RF, RF_IF), rd_rm),
16798 cCL(cose, e988100, 2, (RF, RF_IF), rd_rm),
16799 cCL(cosep, e988120, 2, (RF, RF_IF), rd_rm),
16800 cCL(cosem, e988140, 2, (RF, RF_IF), rd_rm),
16801 cCL(cosez, e988160, 2, (RF, RF_IF), rd_rm),
16803 cCL(tans, ea08100, 2, (RF, RF_IF), rd_rm),
16804 cCL(tansp, ea08120, 2, (RF, RF_IF), rd_rm),
16805 cCL(tansm, ea08140, 2, (RF, RF_IF), rd_rm),
16806 cCL(tansz, ea08160, 2, (RF, RF_IF), rd_rm),
16807 cCL(tand, ea08180, 2, (RF, RF_IF), rd_rm),
16808 cCL(tandp, ea081a0, 2, (RF, RF_IF), rd_rm),
16809 cCL(tandm, ea081c0, 2, (RF, RF_IF), rd_rm),
16810 cCL(tandz, ea081e0, 2, (RF, RF_IF), rd_rm),
16811 cCL(tane, ea88100, 2, (RF, RF_IF), rd_rm),
16812 cCL(tanep, ea88120, 2, (RF, RF_IF), rd_rm),
16813 cCL(tanem, ea88140, 2, (RF, RF_IF), rd_rm),
16814 cCL(tanez, ea88160, 2, (RF, RF_IF), rd_rm),
16816 cCL(asns, eb08100, 2, (RF, RF_IF), rd_rm),
16817 cCL(asnsp, eb08120, 2, (RF, RF_IF), rd_rm),
16818 cCL(asnsm, eb08140, 2, (RF, RF_IF), rd_rm),
16819 cCL(asnsz, eb08160, 2, (RF, RF_IF), rd_rm),
16820 cCL(asnd, eb08180, 2, (RF, RF_IF), rd_rm),
16821 cCL(asndp, eb081a0, 2, (RF, RF_IF), rd_rm),
16822 cCL(asndm, eb081c0, 2, (RF, RF_IF), rd_rm),
16823 cCL(asndz, eb081e0, 2, (RF, RF_IF), rd_rm),
16824 cCL(asne, eb88100, 2, (RF, RF_IF), rd_rm),
16825 cCL(asnep, eb88120, 2, (RF, RF_IF), rd_rm),
16826 cCL(asnem, eb88140, 2, (RF, RF_IF), rd_rm),
16827 cCL(asnez, eb88160, 2, (RF, RF_IF), rd_rm),
16829 cCL(acss, ec08100, 2, (RF, RF_IF), rd_rm),
16830 cCL(acssp, ec08120, 2, (RF, RF_IF), rd_rm),
16831 cCL(acssm, ec08140, 2, (RF, RF_IF), rd_rm),
16832 cCL(acssz, ec08160, 2, (RF, RF_IF), rd_rm),
16833 cCL(acsd, ec08180, 2, (RF, RF_IF), rd_rm),
16834 cCL(acsdp, ec081a0, 2, (RF, RF_IF), rd_rm),
16835 cCL(acsdm, ec081c0, 2, (RF, RF_IF), rd_rm),
16836 cCL(acsdz, ec081e0, 2, (RF, RF_IF), rd_rm),
16837 cCL(acse, ec88100, 2, (RF, RF_IF), rd_rm),
16838 cCL(acsep, ec88120, 2, (RF, RF_IF), rd_rm),
16839 cCL(acsem, ec88140, 2, (RF, RF_IF), rd_rm),
16840 cCL(acsez, ec88160, 2, (RF, RF_IF), rd_rm),
16842 cCL(atns, ed08100, 2, (RF, RF_IF), rd_rm),
16843 cCL(atnsp, ed08120, 2, (RF, RF_IF), rd_rm),
16844 cCL(atnsm, ed08140, 2, (RF, RF_IF), rd_rm),
16845 cCL(atnsz, ed08160, 2, (RF, RF_IF), rd_rm),
16846 cCL(atnd, ed08180, 2, (RF, RF_IF), rd_rm),
16847 cCL(atndp, ed081a0, 2, (RF, RF_IF), rd_rm),
16848 cCL(atndm, ed081c0, 2, (RF, RF_IF), rd_rm),
16849 cCL(atndz, ed081e0, 2, (RF, RF_IF), rd_rm),
16850 cCL(atne, ed88100, 2, (RF, RF_IF), rd_rm),
16851 cCL(atnep, ed88120, 2, (RF, RF_IF), rd_rm),
16852 cCL(atnem, ed88140, 2, (RF, RF_IF), rd_rm),
16853 cCL(atnez, ed88160, 2, (RF, RF_IF), rd_rm),
16855 cCL(urds, ee08100, 2, (RF, RF_IF), rd_rm),
16856 cCL(urdsp, ee08120, 2, (RF, RF_IF), rd_rm),
16857 cCL(urdsm, ee08140, 2, (RF, RF_IF), rd_rm),
16858 cCL(urdsz, ee08160, 2, (RF, RF_IF), rd_rm),
16859 cCL(urdd, ee08180, 2, (RF, RF_IF), rd_rm),
16860 cCL(urddp, ee081a0, 2, (RF, RF_IF), rd_rm),
16861 cCL(urddm, ee081c0, 2, (RF, RF_IF), rd_rm),
16862 cCL(urddz, ee081e0, 2, (RF, RF_IF), rd_rm),
16863 cCL(urde, ee88100, 2, (RF, RF_IF), rd_rm),
16864 cCL(urdep, ee88120, 2, (RF, RF_IF), rd_rm),
16865 cCL(urdem, ee88140, 2, (RF, RF_IF), rd_rm),
16866 cCL(urdez, ee88160, 2, (RF, RF_IF), rd_rm),
16868 cCL(nrms, ef08100, 2, (RF, RF_IF), rd_rm),
16869 cCL(nrmsp, ef08120, 2, (RF, RF_IF), rd_rm),
16870 cCL(nrmsm, ef08140, 2, (RF, RF_IF), rd_rm),
16871 cCL(nrmsz, ef08160, 2, (RF, RF_IF), rd_rm),
16872 cCL(nrmd, ef08180, 2, (RF, RF_IF), rd_rm),
16873 cCL(nrmdp, ef081a0, 2, (RF, RF_IF), rd_rm),
16874 cCL(nrmdm, ef081c0, 2, (RF, RF_IF), rd_rm),
16875 cCL(nrmdz, ef081e0, 2, (RF, RF_IF), rd_rm),
16876 cCL(nrme, ef88100, 2, (RF, RF_IF), rd_rm),
16877 cCL(nrmep, ef88120, 2, (RF, RF_IF), rd_rm),
16878 cCL(nrmem, ef88140, 2, (RF, RF_IF), rd_rm),
16879 cCL(nrmez, ef88160, 2, (RF, RF_IF), rd_rm),
16881 cCL(adfs, e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
16882 cCL(adfsp, e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
16883 cCL(adfsm, e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
16884 cCL(adfsz, e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
16885 cCL(adfd, e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
16886 cCL(adfdp, e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
16887 cCL(adfdm, e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
16888 cCL(adfdz, e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
16889 cCL(adfe, e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
16890 cCL(adfep, e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
16891 cCL(adfem, e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
16892 cCL(adfez, e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
16894 cCL(sufs, e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
16895 cCL(sufsp, e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
16896 cCL(sufsm, e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
16897 cCL(sufsz, e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
16898 cCL(sufd, e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
16899 cCL(sufdp, e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
16900 cCL(sufdm, e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
16901 cCL(sufdz, e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
16902 cCL(sufe, e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
16903 cCL(sufep, e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
16904 cCL(sufem, e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
16905 cCL(sufez, e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
16907 cCL(rsfs, e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
16908 cCL(rsfsp, e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
16909 cCL(rsfsm, e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
16910 cCL(rsfsz, e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
16911 cCL(rsfd, e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
16912 cCL(rsfdp, e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
16913 cCL(rsfdm, e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
16914 cCL(rsfdz, e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
16915 cCL(rsfe, e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
16916 cCL(rsfep, e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
16917 cCL(rsfem, e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
16918 cCL(rsfez, e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
16920 cCL(mufs, e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
16921 cCL(mufsp, e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
16922 cCL(mufsm, e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
16923 cCL(mufsz, e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
16924 cCL(mufd, e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
16925 cCL(mufdp, e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
16926 cCL(mufdm, e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
16927 cCL(mufdz, e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
16928 cCL(mufe, e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
16929 cCL(mufep, e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
16930 cCL(mufem, e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
16931 cCL(mufez, e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
16933 cCL(dvfs, e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
16934 cCL(dvfsp, e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
16935 cCL(dvfsm, e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
16936 cCL(dvfsz, e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
16937 cCL(dvfd, e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
16938 cCL(dvfdp, e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
16939 cCL(dvfdm, e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
16940 cCL(dvfdz, e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
16941 cCL(dvfe, e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
16942 cCL(dvfep, e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
16943 cCL(dvfem, e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
16944 cCL(dvfez, e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
16946 cCL(rdfs, e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
16947 cCL(rdfsp, e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
16948 cCL(rdfsm, e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
16949 cCL(rdfsz, e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
16950 cCL(rdfd, e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
16951 cCL(rdfdp, e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
16952 cCL(rdfdm, e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
16953 cCL(rdfdz, e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
16954 cCL(rdfe, e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
16955 cCL(rdfep, e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
16956 cCL(rdfem, e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
16957 cCL(rdfez, e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
16959 cCL(pows, e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
16960 cCL(powsp, e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
16961 cCL(powsm, e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
16962 cCL(powsz, e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
16963 cCL(powd, e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
16964 cCL(powdp, e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
16965 cCL(powdm, e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
16966 cCL(powdz, e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
16967 cCL(powe, e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
16968 cCL(powep, e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
16969 cCL(powem, e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
16970 cCL(powez, e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
16972 cCL(rpws, e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
16973 cCL(rpwsp, e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
16974 cCL(rpwsm, e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
16975 cCL(rpwsz, e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
16976 cCL(rpwd, e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
16977 cCL(rpwdp, e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
16978 cCL(rpwdm, e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
16979 cCL(rpwdz, e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
16980 cCL(rpwe, e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
16981 cCL(rpwep, e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
16982 cCL(rpwem, e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
16983 cCL(rpwez, e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
16985 cCL(rmfs, e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
16986 cCL(rmfsp, e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
16987 cCL(rmfsm, e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
16988 cCL(rmfsz, e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
16989 cCL(rmfd, e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
16990 cCL(rmfdp, e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
16991 cCL(rmfdm, e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
16992 cCL(rmfdz, e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
16993 cCL(rmfe, e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
16994 cCL(rmfep, e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
16995 cCL(rmfem, e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
16996 cCL(rmfez, e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
16998 cCL(fmls, e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
16999 cCL(fmlsp, e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
17000 cCL(fmlsm, e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
17001 cCL(fmlsz, e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
17002 cCL(fmld, e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
17003 cCL(fmldp, e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17004 cCL(fmldm, e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17005 cCL(fmldz, e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17006 cCL(fmle, e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
17007 cCL(fmlep, e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
17008 cCL(fmlem, e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
17009 cCL(fmlez, e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
17011 cCL(fdvs, ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
17012 cCL(fdvsp, ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
17013 cCL(fdvsm, ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
17014 cCL(fdvsz, ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
17015 cCL(fdvd, ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
17016 cCL(fdvdp, ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17017 cCL(fdvdm, ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17018 cCL(fdvdz, ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17019 cCL(fdve, ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
17020 cCL(fdvep, ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
17021 cCL(fdvem, ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
17022 cCL(fdvez, ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
17024 cCL(frds, eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
17025 cCL(frdsp, eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
17026 cCL(frdsm, eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
17027 cCL(frdsz, eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
17028 cCL(frdd, eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
17029 cCL(frddp, eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17030 cCL(frddm, eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17031 cCL(frddz, eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17032 cCL(frde, eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
17033 cCL(frdep, eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
17034 cCL(frdem, eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
17035 cCL(frdez, eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
17037 cCL(pols, ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
17038 cCL(polsp, ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
17039 cCL(polsm, ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
17040 cCL(polsz, ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
17041 cCL(pold, ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
17042 cCL(poldp, ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17043 cCL(poldm, ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17044 cCL(poldz, ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17045 cCL(pole, ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
17046 cCL(polep, ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
17047 cCL(polem, ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
17048 cCL(polez, ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
17050 cCE(cmf, e90f110, 2, (RF, RF_IF), fpa_cmp),
17051 C3E(cmfe, ed0f110, 2, (RF, RF_IF), fpa_cmp),
17052 cCE(cnf, eb0f110, 2, (RF, RF_IF), fpa_cmp),
17053 C3E(cnfe, ef0f110, 2, (RF, RF_IF), fpa_cmp),
17055 cCL(flts, e000110, 2, (RF, RR), rn_rd),
17056 cCL(fltsp, e000130, 2, (RF, RR), rn_rd),
17057 cCL(fltsm, e000150, 2, (RF, RR), rn_rd),
17058 cCL(fltsz, e000170, 2, (RF, RR), rn_rd),
17059 cCL(fltd, e000190, 2, (RF, RR), rn_rd),
17060 cCL(fltdp, e0001b0, 2, (RF, RR), rn_rd),
17061 cCL(fltdm, e0001d0, 2, (RF, RR), rn_rd),
17062 cCL(fltdz, e0001f0, 2, (RF, RR), rn_rd),
17063 cCL(flte, e080110, 2, (RF, RR), rn_rd),
17064 cCL(fltep, e080130, 2, (RF, RR), rn_rd),
17065 cCL(fltem, e080150, 2, (RF, RR), rn_rd),
17066 cCL(fltez, e080170, 2, (RF, RR), rn_rd),
17068 /* The implementation of the FIX instruction is broken on some
17069 assemblers, in that it accepts a precision specifier as well as a
17070 rounding specifier, despite the fact that this is meaningless.
17071 To be more compatible, we accept it as well, though of course it
17072 does not set any bits. */
17073 cCE(fix, e100110, 2, (RR, RF), rd_rm),
17074 cCL(fixp, e100130, 2, (RR, RF), rd_rm),
17075 cCL(fixm, e100150, 2, (RR, RF), rd_rm),
17076 cCL(fixz, e100170, 2, (RR, RF), rd_rm),
17077 cCL(fixsp, e100130, 2, (RR, RF), rd_rm),
17078 cCL(fixsm, e100150, 2, (RR, RF), rd_rm),
17079 cCL(fixsz, e100170, 2, (RR, RF), rd_rm),
17080 cCL(fixdp, e100130, 2, (RR, RF), rd_rm),
17081 cCL(fixdm, e100150, 2, (RR, RF), rd_rm),
17082 cCL(fixdz, e100170, 2, (RR, RF), rd_rm),
17083 cCL(fixep, e100130, 2, (RR, RF), rd_rm),
17084 cCL(fixem, e100150, 2, (RR, RF), rd_rm),
17085 cCL(fixez, e100170, 2, (RR, RF), rd_rm),
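 /* For illustration: because the opcode values above are identical for
    each rounding mode regardless of the precision letter, something like

	fixsz	r0, f1

    should assemble to exactly the same encoding as "fixz r0, f1".  */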
17087 /* Instructions that were new with the real FPA; call them V2. */
17088 #undef ARM_VARIANT
17089 #define ARM_VARIANT & fpu_fpa_ext_v2
17091 cCE(lfm, c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
17092 cCL(lfmfd, c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
17093 cCL(lfmea, d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
17094 cCE(sfm, c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
17095 cCL(sfmfd, d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
17096 cCL(sfmea, c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
17098 #undef ARM_VARIANT
17099 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
17101 /* Moves and type conversions. */
17102 cCE(fcpys, eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
17103 cCE(fmrs, e100a10, 2, (RR, RVS), vfp_reg_from_sp),
17104 cCE(fmsr, e000a10, 2, (RVS, RR), vfp_sp_from_reg),
17105 cCE(fmstat, ef1fa10, 0, (), noargs),
17106 cCE(fsitos, eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
17107 cCE(fuitos, eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
17108 cCE(ftosis, ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
17109 cCE(ftosizs, ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
17110 cCE(ftouis, ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
17111 cCE(ftouizs, ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
17112 cCE(fmrx, ef00a10, 2, (RR, RVC), rd_rn),
17113 cCE(fmxr, ee00a10, 2, (RVC, RR), rn_rd),
17115 /* Memory operations. */
17116 cCE(flds, d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
17117 cCE(fsts, d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
17118 cCE(fldmias, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
17119 cCE(fldmfds, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
17120 cCE(fldmdbs, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
17121 cCE(fldmeas, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
17122 cCE(fldmiax, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
17123 cCE(fldmfdx, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
17124 cCE(fldmdbx, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
17125 cCE(fldmeax, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
17126 cCE(fstmias, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
17127 cCE(fstmeas, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
17128 cCE(fstmdbs, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
17129 cCE(fstmfds, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
17130 cCE(fstmiax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
17131 cCE(fstmeax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
17132 cCE(fstmdbx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
17133 cCE(fstmfdx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
17135 /* Monadic operations. */
17136 cCE(fabss, eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
17137 cCE(fnegs, eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
17138 cCE(fsqrts, eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
17140 /* Dyadic operations. */
17141 cCE(fadds, e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17142 cCE(fsubs, e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17143 cCE(fmuls, e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17144 cCE(fdivs, e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17145 cCE(fmacs, e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17146 cCE(fmscs, e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17147 cCE(fnmuls, e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17148 cCE(fnmacs, e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17149 cCE(fnmscs, e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17151 /* Comparisons. */
17152 cCE(fcmps, eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
17153 cCE(fcmpzs, eb50a40, 1, (RVS), vfp_sp_compare_z),
17154 cCE(fcmpes, eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
17155 cCE(fcmpezs, eb50ac0, 1, (RVS), vfp_sp_compare_z),
17157 #undef ARM_VARIANT
17158 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
17160 /* Moves and type conversions. */
17161 cCE(fcpyd, eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
17162 cCE(fcvtds, eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
17163 cCE(fcvtsd, eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
17164 cCE(fmdhr, e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
17165 cCE(fmdlr, e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
17166 cCE(fmrdh, e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
17167 cCE(fmrdl, e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
17168 cCE(fsitod, eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
17169 cCE(fuitod, eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
17170 cCE(ftosid, ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
17171 cCE(ftosizd, ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
17172 cCE(ftouid, ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
17173 cCE(ftouizd, ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
17175 /* Memory operations. */
17176 cCE(fldd, d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
17177 cCE(fstd, d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
17178 cCE(fldmiad, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
17179 cCE(fldmfdd, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
17180 cCE(fldmdbd, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
17181 cCE(fldmead, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
17182 cCE(fstmiad, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
17183 cCE(fstmead, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
17184 cCE(fstmdbd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
17185 cCE(fstmfdd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
17187 /* Monadic operations. */
17188 cCE(fabsd, eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
17189 cCE(fnegd, eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
17190 cCE(fsqrtd, eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
17192 /* Dyadic operations. */
17193 cCE(faddd, e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17194 cCE(fsubd, e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17195 cCE(fmuld, e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17196 cCE(fdivd, e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17197 cCE(fmacd, e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17198 cCE(fmscd, e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17199 cCE(fnmuld, e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17200 cCE(fnmacd, e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17201 cCE(fnmscd, e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17203 /* Comparisons. */
17204 cCE(fcmpd, eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
17205 cCE(fcmpzd, eb50b40, 1, (RVD), vfp_dp_rd),
17206 cCE(fcmped, eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
17207 cCE(fcmpezd, eb50bc0, 1, (RVD), vfp_dp_rd),
17209 #undef ARM_VARIANT
17210 #define ARM_VARIANT & fpu_vfp_ext_v2
17212 cCE(fmsrr, c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
17213 cCE(fmrrs, c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
17214 cCE(fmdrr, c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
17215 cCE(fmrrd, c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
17217 /* Instructions which may belong to either the Neon or VFP instruction sets.
17218 Individual encoder functions perform additional architecture checks. */
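 /* For instance, "vadd.f32 s0, s1, s2" should resolve to the VFP encoding
    while "vadd.i32 d0, d1, d2" should resolve to the Neon one; the shared
    entries below only route the mnemonic, and the encoder functions reject
    forms that the selected FPU does not provide.  */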
17219 #undef ARM_VARIANT
17220 #define ARM_VARIANT & fpu_vfp_ext_v1xd
17221 #undef THUMB_VARIANT
17222 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
17224 /* These mnemonics are unique to VFP. */
17225 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
17226 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
17227 nCE(vnmul, vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
17228 nCE(vnmla, vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
17229 nCE(vnmls, vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
17230 nCE(vcmp, vcmp, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
17231 nCE(vcmpe, vcmpe, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
17232 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
17233 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
17234 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
17236 /* Mnemonics shared by Neon and VFP. */
17237 nCEF(vmul, vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
17238 nCEF(vmla, vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
17239 nCEF(vmls, vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
17241 nCEF(vadd, vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
17242 nCEF(vsub, vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
17244 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
17245 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
17247 NCE(vldm, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm),
17248 NCE(vldmia, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm),
17249 NCE(vldmdb, d100b00, 2, (RRw, VRSDLST), neon_ldm_stm),
17250 NCE(vstm, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm),
17251 NCE(vstmia, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm),
17252 NCE(vstmdb, d000b00, 2, (RRw, VRSDLST), neon_ldm_stm),
17253 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
17254 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
17256 nCEF(vcvt, vcvt, 3, (RNSDQ, RNSDQ, oI32b), neon_cvt),
17257 nCEF(vcvtb, vcvt, 2, (RVS, RVS), neon_cvtb),
17258 nCEF(vcvtt, vcvt, 2, (RVS, RVS), neon_cvtt),
17261 /* NOTE: All VMOV encoding is special-cased! */
17262 NCE(vmov, 0, 1, (VMOV), neon_mov),
17263 NCE(vmovq, 0, 1, (VMOV), neon_mov),
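 /* A few of the distinct forms the special-cased VMOV handling has to
    recognise, for illustration:

	vmov	r0, s1		@ core register <-> single VFP register
	vmov	d0, r2, r3	@ core register pair <-> double register
	vmov.i32 q0, #0		@ Neon vector immediate
	vmov.f32 s0, #1.0	@ VFP immediate (VFPv3)

    All of them share the catch-all (VMOV) operand form used above.  */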
17265 #undef THUMB_VARIANT
17266 #define THUMB_VARIANT & fpu_neon_ext_v1
17267 #undef ARM_VARIANT
17268 #define ARM_VARIANT & fpu_neon_ext_v1
17270 /* Data processing with three registers of the same length. */
17271 /* Integer ops, valid types S8 S16 S32 U8 U16 U32. */
17272 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
17273 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
17274 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
17275 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
17276 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
17277 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
17278 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
17279 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
17280 /* Integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
17281 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
17282 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
17283 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
17284 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
17285 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
17286 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
17287 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
17288 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
17289 /* If not immediate, fall back to neon_dyadic_i64_su.
17290 shl_imm should accept I8 I16 I32 I64,
17291 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
17292 nUF(vshl, vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
17293 nUF(vshlq, vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
17294 nUF(vqshl, vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
17295 nUF(vqshlq, vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
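 /* For example, "vshl.i32 d0, d1, #3" is an immediate form handled here,
    while "vshl.s32 d0, d1, d2" has a register shift count and so falls
    back to the neon_dyadic_i64_su handling described above.  */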
17296 /* Logic ops, types optional & ignored. */
17297 nUF(vand, vand, 2, (RNDQ, NILO), neon_logic),
17298 nUF(vandq, vand, 2, (RNQ, NILO), neon_logic),
17299 nUF(vbic, vbic, 2, (RNDQ, NILO), neon_logic),
17300 nUF(vbicq, vbic, 2, (RNQ, NILO), neon_logic),
17301 nUF(vorr, vorr, 2, (RNDQ, NILO), neon_logic),
17302 nUF(vorrq, vorr, 2, (RNQ, NILO), neon_logic),
17303 nUF(vorn, vorn, 2, (RNDQ, NILO), neon_logic),
17304 nUF(vornq, vorn, 2, (RNQ, NILO), neon_logic),
17305 nUF(veor, veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
17306 nUF(veorq, veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
17307 /* Bitfield ops, untyped. */
17308 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
17309 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
17310 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
17311 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
17312 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
17313 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
17314 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
17315 nUF(vabd, vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
17316 nUF(vabdq, vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
17317 nUF(vmax, vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
17318 nUF(vmaxq, vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
17319 nUF(vmin, vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
17320 nUF(vminq, vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
17321 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
17322 back to neon_dyadic_if_su. */
17323 nUF(vcge, vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
17324 nUF(vcgeq, vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
17325 nUF(vcgt, vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
17326 nUF(vcgtq, vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
17327 nUF(vclt, vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
17328 nUF(vcltq, vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
17329 nUF(vcle, vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
17330 nUF(vcleq, vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
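 /* For example, "vcge.s32 d0, d1, #0" uses the compare-with-zero form,
    while "vcge.s32 d0, d1, d2" has no immediate and falls back to
    neon_dyadic_if_su as described above.  */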
17331 /* Comparison. Type I8 I16 I32 F32. */
17332 nUF(vceq, vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
17333 nUF(vceqq, vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
17334 /* As above, D registers only. */
17335 nUF(vpmax, vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
17336 nUF(vpmin, vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
17337 /* Int and float variants, signedness unimportant. */
17338 nUF(vmlaq, vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
17339 nUF(vmlsq, vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
17340 nUF(vpadd, vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
17341 /* Add/sub take types I8 I16 I32 I64 F32. */
17342 nUF(vaddq, vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
17343 nUF(vsubq, vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
17344 /* vtst takes sizes 8, 16, 32. */
17345 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
17346 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
17347 /* VMUL takes I8 I16 I32 F32 P8. */
17348 nUF(vmulq, vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
17349 /* VQD{R}MULH takes S16 S32. */
17350 nUF(vqdmulh, vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
17351 nUF(vqdmulhq, vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
17352 nUF(vqrdmulh, vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
17353 nUF(vqrdmulhq, vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
17354 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
17355 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
17356 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
17357 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
17358 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
17359 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
17360 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
17361 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
17362 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
17363 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
17364 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
17365 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
17367 /* Two address, int/float. Types S8 S16 S32 F32. */
17368 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
17369 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
17371 /* Data processing with two registers and a shift amount. */
17372 /* Right shifts, and variants with rounding.
17373 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
17374 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
17375 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
17376 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
17377 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
17378 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
17379 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
17380 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
17381 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
17382 /* Shift and insert. Sizes accepted 8 16 32 64. */
17383 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
17384 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
17385 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
17386 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
17387 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
17388 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
17389 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
17390 /* Right shift immediate, saturating & narrowing, with rounding variants.
17391 Types accepted S16 S32 S64 U16 U32 U64. */
17392 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
17393 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
17394 /* As above, but saturating to an unsigned result from signed input. Types accepted S16 S32 S64. */
17395 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
17396 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
17397 /* Right shift narrowing. Types accepted I16 I32 I64. */
17398 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
17399 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
17400 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
17401 nUF(vshll, vshll, 3, (RNQ, RND, I32), neon_shll),
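 /* For example, "vshll.u16 q0, d1, #4" is an ordinary widening shift,
    while "vshll.u16 q0, d1, #16" (shift equal to the element size) needs
    the separate maximum-shift encoding handled by the special case.  */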
17402 /* CVT with optional immediate for fixed-point variant. */
17403 nUF(vcvtq, vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
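 /* For example, "vcvt.s32.f32 q0, q1" is the plain conversion, while
    "vcvt.s32.f32 q0, q1, #16" selects the fixed-point variant with 16
    fraction bits.  */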
17405 nUF(vmvn, vmvn, 2, (RNDQ, RNDQ_IMVNb), neon_mvn),
17406 nUF(vmvnq, vmvn, 2, (RNQ, RNDQ_IMVNb), neon_mvn),
17408 /* Data processing, three registers of different lengths. */
17409 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
17410 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
17411 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
17412 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
17413 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
17414 /* If not scalar, fall back to neon_dyadic_long.
17415 Vector types as above, scalar types S16 S32 U16 U32. */
17416 nUF(vmlal, vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
17417 nUF(vmlsl, vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
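 /* For example, "vmlal.s16 q0, d1, d2[3]" is a scalar form handled here,
    while "vmlal.s16 q0, d1, d2" has no scalar operand and falls back to
    neon_dyadic_long as described above.  */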
17418 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
17419 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
17420 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
17421 /* Dyadic, narrowing insns. Types I16 I32 I64. */
17422 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
17423 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
17424 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
17425 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
17426 /* Saturating doubling multiplies. Types S16 S32. */
17427 nUF(vqdmlal, vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
17428 nUF(vqdmlsl, vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
17429 nUF(vqdmull, vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
17430 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
17431 S16 S32 U16 U32. */
17432 nUF(vmull, vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
17434 /* Extract. Size 8. */
17435 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
17436 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
17438 /* Two registers, miscellaneous. */
17439 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
17440 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
17441 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
17442 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
17443 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
17444 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
17445 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
17446 /* Vector replicate. Sizes 8 16 32. */
17447 nCE(vdup, vdup, 2, (RNDQ, RR_RNSC), neon_dup),
17448 nCE(vdupq, vdup, 2, (RNQ, RR_RNSC), neon_dup),
17449 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
17450 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
17451 /* VMOVN. Types I16 I32 I64. */
17452 nUF(vmovn, vmovn, 2, (RND, RNQ), neon_movn),
17453 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
17454 nUF(vqmovn, vqmovn, 2, (RND, RNQ), neon_qmovn),
17455 /* VQMOVUN. Types S16 S32 S64. */
17456 nUF(vqmovun, vqmovun, 2, (RND, RNQ), neon_qmovun),
17457 /* VZIP / VUZP. Sizes 8 16 32. */
17458 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
17459 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
17460 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
17461 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
17462 /* VQABS / VQNEG. Types S8 S16 S32. */
17463 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
17464 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
17465 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
17466 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
17467 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
17468 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
17469 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
17470 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
17471 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
17472 /* Reciprocal estimates. Types U32 F32. */
17473 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
17474 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
17475 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
17476 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
17477 /* VCLS. Types S8 S16 S32. */
17478 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
17479 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
17480 /* VCLZ. Types I8 I16 I32. */
17481 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
17482 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
17483 /* VCNT. Size 8. */
17484 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
17485 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
17486 /* Two address, untyped. */
17487 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
17488 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
17489 /* VTRN. Sizes 8 16 32. */
17490 nUF(vtrn, vtrn, 2, (RNDQ, RNDQ), neon_trn),
17491 nUF(vtrnq, vtrn, 2, (RNQ, RNQ), neon_trn),
17493 /* Table lookup. Size 8. */
17494 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
17495 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
17497 #undef THUMB_VARIANT
17498 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
17499 #undef ARM_VARIANT
17500 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
17502 /* Neon element/structure load/store. */
17503 nUF(vld1, vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
17504 nUF(vst1, vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
17505 nUF(vld2, vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
17506 nUF(vst2, vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
17507 nUF(vld3, vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
17508 nUF(vst3, vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
17509 nUF(vld4, vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
17510 nUF(vst4, vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
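 /* For example, "vld1.32 {d0, d1}, [r0]!" loads two D registers of 32-bit
    elements with post-increment, and "vst2.16 {d2, d3}, [r1]" stores a
    two-way interleaved structure; the register-list syntax is parsed via
    the NSTRLST operand and encoded by the neon_ldx_stx handler.  */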
17512 #undef THUMB_VARIANT
17513 #define THUMB_VARIANT & fpu_vfp_ext_v3
17514 #undef ARM_VARIANT
17515 #define ARM_VARIANT & fpu_vfp_ext_v3
17517 cCE(fconsts, eb00a00, 2, (RVS, I255), vfp_sp_const),
17518 cCE(fconstd, eb00b00, 2, (RVD, I255), vfp_dp_const),
17519 cCE(fshtos, eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
17520 cCE(fshtod, eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
17521 cCE(fsltos, eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
17522 cCE(fsltod, eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
17523 cCE(fuhtos, ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
17524 cCE(fuhtod, ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
17525 cCE(fultos, ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
17526 cCE(fultod, ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
17527 cCE(ftoshs, ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
17528 cCE(ftoshd, ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
17529 cCE(ftosls, ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
17530 cCE(ftosld, ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
17531 cCE(ftouhs, ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
17532 cCE(ftouhd, ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
17533 cCE(ftouls, ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
17534 cCE(ftould, ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
17536 #undef THUMB_VARIANT
17537 #undef ARM_VARIANT
17538 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
17540 cCE(mia, e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
17541 cCE(miaph, e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
17542 cCE(miabb, e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
17543 cCE(miabt, e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
17544 cCE(miatb, e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
17545 cCE(miatt, e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
17546 cCE(mar, c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
17547 cCE(mra, c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
17549 #undef ARM_VARIANT
17550 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
17552 cCE(tandcb, e13f130, 1, (RR), iwmmxt_tandorc),
17553 cCE(tandch, e53f130, 1, (RR), iwmmxt_tandorc),
17554 cCE(tandcw, e93f130, 1, (RR), iwmmxt_tandorc),
17555 cCE(tbcstb, e400010, 2, (RIWR, RR), rn_rd),
17556 cCE(tbcsth, e400050, 2, (RIWR, RR), rn_rd),
17557 cCE(tbcstw, e400090, 2, (RIWR, RR), rn_rd),
17558 cCE(textrcb, e130170, 2, (RR, I7), iwmmxt_textrc),
17559 cCE(textrch, e530170, 2, (RR, I7), iwmmxt_textrc),
17560 cCE(textrcw, e930170, 2, (RR, I7), iwmmxt_textrc),
17561 cCE(textrmub, e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
17562 cCE(textrmuh, e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
17563 cCE(textrmuw, e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
17564 cCE(textrmsb, e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
17565 cCE(textrmsh, e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
17566 cCE(textrmsw, e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
17567 cCE(tinsrb, e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
17568 cCE(tinsrh, e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
17569 cCE(tinsrw, e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
17570 cCE(tmcr, e000110, 2, (RIWC_RIWG, RR), rn_rd),
17571 cCE(tmcrr, c400000, 3, (RIWR, RR, RR), rm_rd_rn),
17572 cCE(tmia, e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
17573 cCE(tmiaph, e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
17574 cCE(tmiabb, e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
17575 cCE(tmiabt, e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
17576 cCE(tmiatb, e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
17577 cCE(tmiatt, e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
17578 cCE(tmovmskb, e100030, 2, (RR, RIWR), rd_rn),
17579 cCE(tmovmskh, e500030, 2, (RR, RIWR), rd_rn),
17580 cCE(tmovmskw, e900030, 2, (RR, RIWR), rd_rn),
17581 cCE(tmrc, e100110, 2, (RR, RIWC_RIWG), rd_rn),
17582 cCE(tmrrc, c500000, 3, (RR, RR, RIWR), rd_rn_rm),
17583 cCE(torcb, e13f150, 1, (RR), iwmmxt_tandorc),
17584 cCE(torch, e53f150, 1, (RR), iwmmxt_tandorc),
17585 cCE(torcw, e93f150, 1, (RR), iwmmxt_tandorc),
17586 cCE(waccb, e0001c0, 2, (RIWR, RIWR), rd_rn),
17587 cCE(wacch, e4001c0, 2, (RIWR, RIWR), rd_rn),
17588 cCE(waccw, e8001c0, 2, (RIWR, RIWR), rd_rn),
17589 cCE(waddbss, e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17590 cCE(waddb, e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17591 cCE(waddbus, e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17592 cCE(waddhss, e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17593 cCE(waddh, e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17594 cCE(waddhus, e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17595 cCE(waddwss, eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17596 cCE(waddw, e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17597 cCE(waddwus, e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17598 cCE(waligni, e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
17599 cCE(walignr0, e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17600 cCE(walignr1, e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17601 cCE(walignr2, ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17602 cCE(walignr3, eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17603 cCE(wand, e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17604 cCE(wandn, e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17605 cCE(wavg2b, e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17606 cCE(wavg2br, e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17607 cCE(wavg2h, ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17608 cCE(wavg2hr, ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17609 cCE(wcmpeqb, e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17610 cCE(wcmpeqh, e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17611 cCE(wcmpeqw, e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17612 cCE(wcmpgtub, e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17613 cCE(wcmpgtuh, e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17614 cCE(wcmpgtuw, e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17615 cCE(wcmpgtsb, e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17616 cCE(wcmpgtsh, e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17617 cCE(wcmpgtsw, eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17618 cCE(wldrb, c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
17619 cCE(wldrh, c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
17620 cCE(wldrw, c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
17621 cCE(wldrd, c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
17622 cCE(wmacs, e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17623 cCE(wmacsz, e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17624 cCE(wmacu, e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17625 cCE(wmacuz, e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17626 cCE(wmadds, ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17627 cCE(wmaddu, e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17628 cCE(wmaxsb, e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17629 cCE(wmaxsh, e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17630 cCE(wmaxsw, ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17631 cCE(wmaxub, e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17632 cCE(wmaxuh, e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17633 cCE(wmaxuw, e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17634 cCE(wminsb, e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17635 cCE(wminsh, e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17636 cCE(wminsw, eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17637 cCE(wminub, e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17638 cCE(wminuh, e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17639 cCE(wminuw, e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17640 cCE(wmov, e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
17641 cCE(wmulsm, e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17642 cCE(wmulsl, e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17643 cCE(wmulum, e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17644 cCE(wmulul, e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17645 cCE(wor, e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17646 cCE(wpackhss, e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17647 cCE(wpackhus, e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17648 cCE(wpackwss, eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17649 cCE(wpackwus, e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17650 cCE(wpackdss, ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17651 cCE(wpackdus, ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17652 cCE(wrorh, e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
17653 cCE(wrorhg, e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
17654 cCE(wrorw, eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
17655 cCE(wrorwg, eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
17656 cCE(wrord, ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
17657 cCE(wrordg, ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
17658 cCE(wsadb, e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17659 cCE(wsadbz, e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17660 cCE(wsadh, e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17661 cCE(wsadhz, e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17662 cCE(wshufh, e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
17663 cCE(wsllh, e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
17664 cCE(wsllhg, e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
17665 cCE(wsllw, e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
17666 cCE(wsllwg, e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
17667 cCE(wslld, ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
17668 cCE(wslldg, ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
17669 cCE(wsrah, e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
17670 cCE(wsrahg, e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
17671 cCE(wsraw, e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
17672 cCE(wsrawg, e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
17673 cCE(wsrad, ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
17674 cCE(wsradg, ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
17675 cCE(wsrlh, e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
17676 cCE(wsrlhg, e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
17677 cCE(wsrlw, ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
17678 cCE(wsrlwg, ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
17679 cCE(wsrld, ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
17680 cCE(wsrldg, ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
17681 cCE(wstrb, c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
17682 cCE(wstrh, c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
17683 cCE(wstrw, c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
17684 cCE(wstrd, c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
17685 cCE(wsubbss, e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17686 cCE(wsubb, e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17687 cCE(wsubbus, e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17688 cCE(wsubhss, e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17689 cCE(wsubh, e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17690 cCE(wsubhus, e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17691 cCE(wsubwss, eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17692 cCE(wsubw, e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17693 cCE(wsubwus, e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17694 cCE(wunpckehub,e0000c0, 2, (RIWR, RIWR), rd_rn),
17695 cCE(wunpckehuh,e4000c0, 2, (RIWR, RIWR), rd_rn),
17696 cCE(wunpckehuw,e8000c0, 2, (RIWR, RIWR), rd_rn),
17697 cCE(wunpckehsb,e2000c0, 2, (RIWR, RIWR), rd_rn),
17698 cCE(wunpckehsh,e6000c0, 2, (RIWR, RIWR), rd_rn),
17699 cCE(wunpckehsw,ea000c0, 2, (RIWR, RIWR), rd_rn),
17700 cCE(wunpckihb, e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17701 cCE(wunpckihh, e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17702 cCE(wunpckihw, e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17703 cCE(wunpckelub,e0000e0, 2, (RIWR, RIWR), rd_rn),
17704 cCE(wunpckeluh,e4000e0, 2, (RIWR, RIWR), rd_rn),
17705 cCE(wunpckeluw,e8000e0, 2, (RIWR, RIWR), rd_rn),
17706 cCE(wunpckelsb,e2000e0, 2, (RIWR, RIWR), rd_rn),
17707 cCE(wunpckelsh,e6000e0, 2, (RIWR, RIWR), rd_rn),
17708 cCE(wunpckelsw,ea000e0, 2, (RIWR, RIWR), rd_rn),
17709 cCE(wunpckilb, e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17710 cCE(wunpckilh, e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17711 cCE(wunpckilw, e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17712 cCE(wxor, e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17713 cCE(wzero, e300000, 1, (RIWR), iwmmxt_wzero),
17715 #undef ARM_VARIANT
17716 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
17718 cCE(torvscb, e12f190, 1, (RR), iwmmxt_tandorc),
17719 cCE(torvsch, e52f190, 1, (RR), iwmmxt_tandorc),
17720 cCE(torvscw, e92f190, 1, (RR), iwmmxt_tandorc),
17721 cCE(wabsb, e2001c0, 2, (RIWR, RIWR), rd_rn),
17722 cCE(wabsh, e6001c0, 2, (RIWR, RIWR), rd_rn),
17723 cCE(wabsw, ea001c0, 2, (RIWR, RIWR), rd_rn),
17724 cCE(wabsdiffb, e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17725 cCE(wabsdiffh, e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17726 cCE(wabsdiffw, e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17727 cCE(waddbhusl, e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17728 cCE(waddbhusm, e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17729 cCE(waddhc, e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17730 cCE(waddwc, ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17731 cCE(waddsubhx, ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17732 cCE(wavg4, e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17733 cCE(wavg4r, e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17734 cCE(wmaddsn, ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17735 cCE(wmaddsx, eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17736 cCE(wmaddun, ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17737 cCE(wmaddux, e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17738 cCE(wmerge, e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
17739 cCE(wmiabb, e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17740 cCE(wmiabt, e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17741 cCE(wmiatb, e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17742 cCE(wmiatt, e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17743 cCE(wmiabbn, e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17744 cCE(wmiabtn, e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17745 cCE(wmiatbn, e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17746 cCE(wmiattn, e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17747 cCE(wmiawbb, e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17748 cCE(wmiawbt, e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17749 cCE(wmiawtb, ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17750 cCE(wmiawtt, eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17751 cCE(wmiawbbn, ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17752 cCE(wmiawbtn, ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17753 cCE(wmiawtbn, ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17754 cCE(wmiawttn, ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17755 cCE(wmulsmr, ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17756 cCE(wmulumr, ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17757 cCE(wmulwumr, ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17758 cCE(wmulwsmr, ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17759 cCE(wmulwum, ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17760 cCE(wmulwsm, ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17761 cCE(wmulwl, eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17762 cCE(wqmiabb, e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17763 cCE(wqmiabt, e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17764 cCE(wqmiatb, ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17765 cCE(wqmiatt, eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17766 cCE(wqmiabbn, ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17767 cCE(wqmiabtn, ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17768 cCE(wqmiatbn, ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17769 cCE(wqmiattn, ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17770 cCE(wqmulm, e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17771 cCE(wqmulmr, e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17772 cCE(wqmulwm, ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17773 cCE(wqmulwmr, ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17774 cCE(wsubaddhx, ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17776 #undef ARM_VARIANT
17777 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
17779 cCE(cfldrs, c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
17780 cCE(cfldrd, c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
17781 cCE(cfldr32, c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
17782 cCE(cfldr64, c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
17783 cCE(cfstrs, c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
17784 cCE(cfstrd, c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
17785 cCE(cfstr32, c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
17786 cCE(cfstr64, c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
17787 cCE(cfmvsr, e000450, 2, (RMF, RR), rn_rd),
17788 cCE(cfmvrs, e100450, 2, (RR, RMF), rd_rn),
17789 cCE(cfmvdlr, e000410, 2, (RMD, RR), rn_rd),
17790 cCE(cfmvrdl, e100410, 2, (RR, RMD), rd_rn),
17791 cCE(cfmvdhr, e000430, 2, (RMD, RR), rn_rd),
17792 cCE(cfmvrdh, e100430, 2, (RR, RMD), rd_rn),
17793 cCE(cfmv64lr, e000510, 2, (RMDX, RR), rn_rd),
17794 cCE(cfmvr64l, e100510, 2, (RR, RMDX), rd_rn),
17795 cCE(cfmv64hr, e000530, 2, (RMDX, RR), rn_rd),
17796 cCE(cfmvr64h, e100530, 2, (RR, RMDX), rd_rn),
17797 cCE(cfmval32, e200440, 2, (RMAX, RMFX), rd_rn),
17798 cCE(cfmv32al, e100440, 2, (RMFX, RMAX), rd_rn),
17799 cCE(cfmvam32, e200460, 2, (RMAX, RMFX), rd_rn),
17800 cCE(cfmv32am, e100460, 2, (RMFX, RMAX), rd_rn),
17801 cCE(cfmvah32, e200480, 2, (RMAX, RMFX), rd_rn),
17802 cCE(cfmv32ah, e100480, 2, (RMFX, RMAX), rd_rn),
17803 cCE(cfmva32, e2004a0, 2, (RMAX, RMFX), rd_rn),
17804 cCE(cfmv32a, e1004a0, 2, (RMFX, RMAX), rd_rn),
17805 cCE(cfmva64, e2004c0, 2, (RMAX, RMDX), rd_rn),
17806 cCE(cfmv64a, e1004c0, 2, (RMDX, RMAX), rd_rn),
17807 cCE(cfmvsc32, e2004e0, 2, (RMDS, RMDX), mav_dspsc),
17808 cCE(cfmv32sc, e1004e0, 2, (RMDX, RMDS), rd),
17809 cCE(cfcpys, e000400, 2, (RMF, RMF), rd_rn),
17810 cCE(cfcpyd, e000420, 2, (RMD, RMD), rd_rn),
17811 cCE(cfcvtsd, e000460, 2, (RMD, RMF), rd_rn),
17812 cCE(cfcvtds, e000440, 2, (RMF, RMD), rd_rn),
17813 cCE(cfcvt32s, e000480, 2, (RMF, RMFX), rd_rn),
17814 cCE(cfcvt32d, e0004a0, 2, (RMD, RMFX), rd_rn),
17815 cCE(cfcvt64s, e0004c0, 2, (RMF, RMDX), rd_rn),
17816 cCE(cfcvt64d, e0004e0, 2, (RMD, RMDX), rd_rn),
17817 cCE(cfcvts32, e100580, 2, (RMFX, RMF), rd_rn),
17818 cCE(cfcvtd32, e1005a0, 2, (RMFX, RMD), rd_rn),
17819 cCE(cftruncs32,e1005c0, 2, (RMFX, RMF), rd_rn),
17820 cCE(cftruncd32,e1005e0, 2, (RMFX, RMD), rd_rn),
17821 cCE(cfrshl32, e000550, 3, (RMFX, RMFX, RR), mav_triple),
17822 cCE(cfrshl64, e000570, 3, (RMDX, RMDX, RR), mav_triple),
17823 cCE(cfsh32, e000500, 3, (RMFX, RMFX, I63s), mav_shift),
17824 cCE(cfsh64, e200500, 3, (RMDX, RMDX, I63s), mav_shift),
17825 cCE(cfcmps, e100490, 3, (RR, RMF, RMF), rd_rn_rm),
17826 cCE(cfcmpd, e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
17827 cCE(cfcmp32, e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
17828 cCE(cfcmp64, e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
17829 cCE(cfabss, e300400, 2, (RMF, RMF), rd_rn),
17830 cCE(cfabsd, e300420, 2, (RMD, RMD), rd_rn),
17831 cCE(cfnegs, e300440, 2, (RMF, RMF), rd_rn),
17832 cCE(cfnegd, e300460, 2, (RMD, RMD), rd_rn),
17833 cCE(cfadds, e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
17834 cCE(cfaddd, e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
17835 cCE(cfsubs, e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
17836 cCE(cfsubd, e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
17837 cCE(cfmuls, e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
17838 cCE(cfmuld, e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
17839 cCE(cfabs32, e300500, 2, (RMFX, RMFX), rd_rn),
17840 cCE(cfabs64, e300520, 2, (RMDX, RMDX), rd_rn),
17841 cCE(cfneg32, e300540, 2, (RMFX, RMFX), rd_rn),
17842 cCE(cfneg64, e300560, 2, (RMDX, RMDX), rd_rn),
17843 cCE(cfadd32, e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
17844 cCE(cfadd64, e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
17845 cCE(cfsub32, e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
17846 cCE(cfsub64, e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
17847 cCE(cfmul32, e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
17848 cCE(cfmul64, e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
17849 cCE(cfmac32, e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
17850 cCE(cfmsc32, e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
17851 cCE(cfmadd32, e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
17852 cCE(cfmsub32, e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
17853 cCE(cfmadda32, e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
17854 cCE(cfmsuba32, e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
17856 #undef ARM_VARIANT
17857 #undef THUMB_VARIANT
17858 #undef TCE
17859 #undef TCM
17860 #undef TUE
17861 #undef TUF
17862 #undef TCC
17863 #undef cCE
17864 #undef cCL
17865 #undef C3E
17866 #undef CE
17867 #undef CM
17868 #undef UE
17869 #undef UF
17870 #undef UT
17871 #undef NUF
17872 #undef nUF
17873 #undef NCE
17874 #undef nCE
17875 #undef OPS0
17876 #undef OPS1
17877 #undef OPS2
17878 #undef OPS3
17879 #undef OPS4
17880 #undef OPS5
17881 #undef OPS6
17882 #undef do_0
17884 /* MD interface: bits in the object file. */
17886 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
17887 for use in the a.out file, and store them in the array pointed to by buf.
17888 This knows about the endian-ness of the target machine and does
17889 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte),
17890 2 (short) and 4 (long). Floating point numbers are put out as a series of
17891 LITTLENUMS (shorts, here at least). */
17893 void
17894 md_number_to_chars (char * buf, valueT val, int n)
17896 if (target_big_endian)
17897 number_to_chars_bigendian (buf, val, n);
17898 else
17899 number_to_chars_littleendian (buf, val, n);
17902 static valueT
17903 md_chars_to_number (char * buf, int n)
17905 valueT result = 0;
17906 unsigned char * where = (unsigned char *) buf;
17908 if (target_big_endian)
17910 while (n--)
17912 result <<= 8;
17913 result |= (*where++ & 255);
17916 else
17918 while (n--)
17920 result <<= 8;
17921 result |= (where[n] & 255);
17925 return result;
17928 /* MD interface: Sections. */
17930 /* Estimate the size of a frag before relaxing. Assume everything fits in
17931 2 bytes. */
17934 md_estimate_size_before_relax (fragS * fragp,
17935 segT segtype ATTRIBUTE_UNUSED)
17937 fragp->fr_var = 2;
17938 return 2;
17941 /* Convert a machine dependent frag. */
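/* For example, a narrow Thumb branch (T_MNEM_b) whose frag has been
   widened to 4 bytes is rewritten below as the 32-bit B.W encoding and
   given a BFD_RELOC_THUMB_PCREL_BRANCH25 fixup instead of the 12-bit
   one.  */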
17943 void
17944 md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
17946 unsigned long insn;
17947 unsigned long old_op;
17948 char *buf;
17949 expressionS exp;
17950 fixS *fixp;
17951 int reloc_type;
17952 int pc_rel;
17953 int opcode;
17955 buf = fragp->fr_literal + fragp->fr_fix;
17957 old_op = bfd_get_16(abfd, buf);
17958 if (fragp->fr_symbol)
17960 exp.X_op = O_symbol;
17961 exp.X_add_symbol = fragp->fr_symbol;
17963 else
17965 exp.X_op = O_constant;
17967 exp.X_add_number = fragp->fr_offset;
17968 opcode = fragp->fr_subtype;
17969 switch (opcode)
17971 case T_MNEM_ldr_pc:
17972 case T_MNEM_ldr_pc2:
17973 case T_MNEM_ldr_sp:
17974 case T_MNEM_str_sp:
17975 case T_MNEM_ldr:
17976 case T_MNEM_ldrb:
17977 case T_MNEM_ldrh:
17978 case T_MNEM_str:
17979 case T_MNEM_strb:
17980 case T_MNEM_strh:
17981 if (fragp->fr_var == 4)
17983 insn = THUMB_OP32 (opcode);
17984 if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
17986 insn |= (old_op & 0x700) << 4;
17988 else
17990 insn |= (old_op & 7) << 12;
17991 insn |= (old_op & 0x38) << 13;
17993 insn |= 0x00000c00;
17994 put_thumb32_insn (buf, insn);
17995 reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
17997 else
17999 reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
18001 pc_rel = (opcode == T_MNEM_ldr_pc2);
18002 break;
18003 case T_MNEM_adr:
18004 if (fragp->fr_var == 4)
18006 insn = THUMB_OP32 (opcode);
18007 insn |= (old_op & 0xf0) << 4;
18008 put_thumb32_insn (buf, insn);
18009 reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
18011 else
18013 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
18014 exp.X_add_number -= 4;
18016 pc_rel = 1;
18017 break;
18018 case T_MNEM_mov:
18019 case T_MNEM_movs:
18020 case T_MNEM_cmp:
18021 case T_MNEM_cmn:
18022 if (fragp->fr_var == 4)
18024 int r0off = (opcode == T_MNEM_mov
18025 || opcode == T_MNEM_movs) ? 0 : 8;
18026 insn = THUMB_OP32 (opcode);
18027 insn = (insn & 0xe1ffffff) | 0x10000000;
18028 insn |= (old_op & 0x700) << r0off;
18029 put_thumb32_insn (buf, insn);
18030 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
18032 else
18034 reloc_type = BFD_RELOC_ARM_THUMB_IMM;
18036 pc_rel = 0;
18037 break;
18038 case T_MNEM_b:
18039 if (fragp->fr_var == 4)
18041 insn = THUMB_OP32(opcode);
18042 put_thumb32_insn (buf, insn);
18043 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
18045 else
18046 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
18047 pc_rel = 1;
18048 break;
18049 case T_MNEM_bcond:
18050 if (fragp->fr_var == 4)
18052 insn = THUMB_OP32(opcode);
18053 insn |= (old_op & 0xf00) << 14;
18054 put_thumb32_insn (buf, insn);
18055 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
18057 else
18058 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
18059 pc_rel = 1;
18060 break;
18061 case T_MNEM_add_sp:
18062 case T_MNEM_add_pc:
18063 case T_MNEM_inc_sp:
18064 case T_MNEM_dec_sp:
18065 if (fragp->fr_var == 4)
18067 /* ??? Choose between add and addw. */
18068 insn = THUMB_OP32 (opcode);
18069 insn |= (old_op & 0xf0) << 4;
18070 put_thumb32_insn (buf, insn);
18071 if (opcode == T_MNEM_add_pc)
18072 reloc_type = BFD_RELOC_ARM_T32_IMM12;
18073 else
18074 reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
18076 else
18077 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
18078 pc_rel = 0;
18079 break;
18081 case T_MNEM_addi:
18082 case T_MNEM_addis:
18083 case T_MNEM_subi:
18084 case T_MNEM_subis:
18085 if (fragp->fr_var == 4)
18087 insn = THUMB_OP32 (opcode);
18088 insn |= (old_op & 0xf0) << 4;
18089 insn |= (old_op & 0xf) << 16;
18090 put_thumb32_insn (buf, insn);
18091 if (insn & (1 << 20))
18092 reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
18093 else
18094 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
18096 else
18097 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
18098 pc_rel = 0;
18099 break;
18100 default:
18101 abort ();
18103 fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
18104 reloc_type);
18105 fixp->fx_file = fragp->fr_file;
18106 fixp->fx_line = fragp->fr_line;
18107 fragp->fr_fix += fragp->fr_var;
18110 /* Return the size of a relaxable immediate operand instruction.
18111 SHIFT and SIZE specify the form of the allowable immediate. */
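/* Illustrative example: for Thumb LDR/STR with a word offset the
   callers below pass SIZE = 5, SHIFT = 2, so the narrow form is kept
   only for offsets that are multiples of 4 in the range 0..124;
   anything else (or any symbolic offset) falls back to the 4-byte
   encoding.  */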
18112 static int
18113 relax_immediate (fragS *fragp, int size, int shift)
18115 offsetT offset;
18116 offsetT mask;
18117 offsetT low;
18119 /* ??? Should be able to do better than this. */
18120 if (fragp->fr_symbol)
18121 return 4;
18123 low = (1 << shift) - 1;
18124 mask = (1 << (shift + size)) - (1 << shift);
18125 offset = fragp->fr_offset;
18126 /* Force misaligned offsets to 32-bit variant. */
18127 if (offset & low)
18128 return 4;
18129 if (offset & ~mask)
18130 return 4;
18131 return 2;
18134 /* Get the address of a symbol during relaxation. */
18135 static addressT
18136 relaxed_symbol_addr (fragS *fragp, long stretch)
18138 fragS *sym_frag;
18139 addressT addr;
18140 symbolS *sym;
18142 sym = fragp->fr_symbol;
18143 sym_frag = symbol_get_frag (sym);
18144 know (S_GET_SEGMENT (sym) != absolute_section
18145 || sym_frag == &zero_address_frag);
18146 addr = S_GET_VALUE (sym) + fragp->fr_offset;
18148 /* If frag has yet to be reached on this pass, assume it will
18149 move by STRETCH just as we did. If this is not so, it will
18150 be because some frag in between grows, and that will force
18151 another pass. */
18153 if (stretch != 0
18154 && sym_frag->relax_marker != fragp->relax_marker)
18156 fragS *f;
18158 /* Adjust stretch for any alignment frag. Note that if we have
18159 been expanding the earlier code, the symbol may be
18160 defined in what appears to be an earlier frag. FIXME:
18161 This doesn't handle the fr_subtype field, which specifies
18162 a maximum number of bytes to skip when doing an
18163 alignment. */
18164 for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
18166 if (f->fr_type == rs_align || f->fr_type == rs_align_code)
18168 if (stretch < 0)
18169 stretch = - ((- stretch)
18170 & ~ ((1 << (int) f->fr_offset) - 1));
18171 else
18172 stretch &= ~ ((1 << (int) f->fr_offset) - 1);
18173 if (stretch == 0)
18174 break;
18177 if (f != NULL)
18178 addr += stretch;
18181 return addr;
18184 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
18185 load. */
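/* As coded below, the 2-byte form is used only when the target is
   known, word aligned, and no more than 1020 bytes beyond the
   word-aligned PC; otherwise the 4-byte variant is assumed.  */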
18186 static int
18187 relax_adr (fragS *fragp, asection *sec, long stretch)
18189 addressT addr;
18190 offsetT val;
18192 /* Assume worst case for symbols not known to be in the same section. */
18193 if (!S_IS_DEFINED (fragp->fr_symbol)
18194 || sec != S_GET_SEGMENT (fragp->fr_symbol))
18195 return 4;
18197 val = relaxed_symbol_addr (fragp, stretch);
18198 addr = fragp->fr_address + fragp->fr_fix;
18199 addr = (addr + 4) & ~3;
18200 /* Force misaligned targets to 32-bit variant. */
18201 if (val & 3)
18202 return 4;
18203 val -= addr;
18204 if (val < 0 || val > 1020)
18205 return 4;
18206 return 2;
18209 /* Return the size of a relaxable add/sub immediate instruction. */
18210 static int
18211 relax_addsub (fragS *fragp, asection *sec)
18213 char *buf;
18214 int op;
18216 buf = fragp->fr_literal + fragp->fr_fix;
18217 op = bfd_get_16(sec->owner, buf);
18218 if ((op & 0xf) == ((op >> 4) & 0xf))
18219 return relax_immediate (fragp, 8, 0);
18220 else
18221 return relax_immediate (fragp, 3, 0);
18225 /* Return the size of a relaxable branch instruction. BITS is the
18226 size of the offset field in the narrow instruction. */
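/* For instance, an unconditional Thumb branch (BITS = 11) stays narrow
   only while the displacement from PC+4 fits in a signed 11-bit
   half-word count, i.e. roughly within +/-2KB of the branch.  */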
18228 static int
18229 relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
18231 addressT addr;
18232 offsetT val;
18233 offsetT limit;
18235 /* Assume worst case for symbols not known to be in the same section. */
18236 if (!S_IS_DEFINED (fragp->fr_symbol)
18237 || sec != S_GET_SEGMENT (fragp->fr_symbol))
18238 return 4;
18240 #ifdef OBJ_ELF
18241 if (S_IS_DEFINED (fragp->fr_symbol)
18242 && ARM_IS_FUNC (fragp->fr_symbol))
18243 return 4;
18244 #endif
18246 val = relaxed_symbol_addr (fragp, stretch);
18247 addr = fragp->fr_address + fragp->fr_fix + 4;
18248 val -= addr;
18250 /* The offset is a signed value, scaled by 2. */
18251 limit = 1 << bits;
18252 if (val >= limit || val < -limit)
18253 return 4;
18254 return 2;
18258 /* Relax a machine dependent frag. This returns the amount by which
18259 the current size of the frag should change. */
18262 arm_relax_frag (asection *sec, fragS *fragp, long stretch)
18264 int oldsize;
18265 int newsize;
18267 oldsize = fragp->fr_var;
18268 switch (fragp->fr_subtype)
18270 case T_MNEM_ldr_pc2:
18271 newsize = relax_adr (fragp, sec, stretch);
18272 break;
18273 case T_MNEM_ldr_pc:
18274 case T_MNEM_ldr_sp:
18275 case T_MNEM_str_sp:
18276 newsize = relax_immediate (fragp, 8, 2);
18277 break;
18278 case T_MNEM_ldr:
18279 case T_MNEM_str:
18280 newsize = relax_immediate (fragp, 5, 2);
18281 break;
18282 case T_MNEM_ldrh:
18283 case T_MNEM_strh:
18284 newsize = relax_immediate (fragp, 5, 1);
18285 break;
18286 case T_MNEM_ldrb:
18287 case T_MNEM_strb:
18288 newsize = relax_immediate (fragp, 5, 0);
18289 break;
18290 case T_MNEM_adr:
18291 newsize = relax_adr (fragp, sec, stretch);
18292 break;
18293 case T_MNEM_mov:
18294 case T_MNEM_movs:
18295 case T_MNEM_cmp:
18296 case T_MNEM_cmn:
18297 newsize = relax_immediate (fragp, 8, 0);
18298 break;
18299 case T_MNEM_b:
18300 newsize = relax_branch (fragp, sec, 11, stretch);
18301 break;
18302 case T_MNEM_bcond:
18303 newsize = relax_branch (fragp, sec, 8, stretch);
18304 break;
18305 case T_MNEM_add_sp:
18306 case T_MNEM_add_pc:
18307 newsize = relax_immediate (fragp, 8, 2);
18308 break;
18309 case T_MNEM_inc_sp:
18310 case T_MNEM_dec_sp:
18311 newsize = relax_immediate (fragp, 7, 2);
18312 break;
18313 case T_MNEM_addi:
18314 case T_MNEM_addis:
18315 case T_MNEM_subi:
18316 case T_MNEM_subis:
18317 newsize = relax_addsub (fragp, sec);
18318 break;
18319 default:
18320 abort ();
18323 fragp->fr_var = newsize;
18324 /* Freeze wide instructions that are at or before the same location as
18325 in the previous pass. This avoids infinite loops.
18326 Don't freeze them unconditionally because targets may be artificially
18327 misaligned by the expansion of preceding frags. */
18328 if (stretch <= 0 && newsize > 2)
18330 md_convert_frag (sec->owner, sec, fragp);
18331 frag_wane (fragp);
18334 return newsize - oldsize;
18337 /* Round up a section size to the appropriate boundary. */
18339 valueT
18340 md_section_align (segT segment ATTRIBUTE_UNUSED,
18341 valueT size)
18343 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
18344 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
18346 /* For a.out, force the section size to be aligned. If we don't do
18347 this, BFD will align it for us, but it will not write out the
18348 final bytes of the section. This may be a bug in BFD, but it is
18349 easier to fix it here since that is how the other a.out targets
18350 work. */
18351 int align;
18353 align = bfd_get_section_alignment (stdoutput, segment);
18354 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
18356 #endif
18358 return size;
18361 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
18362 of an rs_align_code fragment. */
18364 void
18365 arm_handle_align (fragS * fragP)
18367 static char const arm_noop[2][2][4] =
18369 { /* ARMv1 */
18370 {0x00, 0x00, 0xa0, 0xe1}, /* LE */
18371 {0xe1, 0xa0, 0x00, 0x00}, /* BE */
18373 { /* ARMv6k */
18374 {0x00, 0xf0, 0x20, 0xe3}, /* LE */
18375 {0xe3, 0x20, 0xf0, 0x00}, /* BE */
18378 static char const thumb_noop[2][2][2] =
18380 { /* Thumb-1 */
18381 {0xc0, 0x46}, /* LE */
18382 {0x46, 0xc0}, /* BE */
18384 { /* Thumb-2 */
18385 {0x00, 0xbf}, /* LE */
18386 {0xbf, 0x00} /* BE */
18389 static char const wide_thumb_noop[2][4] =
18390 { /* Wide Thumb-2 */
18391 {0xaf, 0xf3, 0x00, 0x80}, /* LE */
18392 {0xf3, 0xaf, 0x80, 0x00}, /* BE */
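/* The little-endian byte sequences above correspond to "mov r0, r0"
   (0xe1a00000) and the ARMv6K "nop" hint (0xe320f000) in ARM state,
   and to "mov r8, r8" (0x46c0), "nop" (0xbf00) and "nop.w"
   (0xf3af8000) in Thumb state.  */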
18395 unsigned bytes, fix, noop_size;
18396 char * p;
18397 const char * noop;
18398 const char *narrow_noop = NULL;
18399 #ifdef OBJ_ELF
18400 enum mstate state;
18401 #endif
18403 if (fragP->fr_type != rs_align_code)
18404 return;
18406 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
18407 p = fragP->fr_literal + fragP->fr_fix;
18408 fix = 0;
18410 if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
18411 bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
18413 gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);
18415 if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
18417 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
18419 narrow_noop = thumb_noop[1][target_big_endian];
18420 noop = wide_thumb_noop[target_big_endian];
18422 else
18423 noop = thumb_noop[0][target_big_endian];
18424 noop_size = 2;
18425 #ifdef OBJ_ELF
18426 state = MAP_THUMB;
18427 #endif
18429 else
18431 noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k) != 0]
18432 [target_big_endian];
18433 noop_size = 4;
18434 #ifdef OBJ_ELF
18435 state = MAP_ARM;
18436 #endif
18439 fragP->fr_var = noop_size;
18441 if (bytes & (noop_size - 1))
18443 fix = bytes & (noop_size - 1);
18444 #ifdef OBJ_ELF
18445 insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
18446 #endif
18447 memset (p, 0, fix);
18448 p += fix;
18449 bytes -= fix;
18452 if (narrow_noop)
18454 if (bytes & noop_size)
18456 /* Insert a narrow noop. */
18457 memcpy (p, narrow_noop, noop_size);
18458 p += noop_size;
18459 bytes -= noop_size;
18460 fix += noop_size;
18463 /* Use wide noops for the remainder. */
18464 noop_size = 4;
18467 while (bytes >= noop_size)
18469 memcpy (p, noop, noop_size);
18470 p += noop_size;
18471 bytes -= noop_size;
18472 fix += noop_size;
18475 fragP->fr_fix += fix;
18478 /* Called from md_do_align. Used to create an alignment
18479 frag in a code section. */
18481 void
18482 arm_frag_align_code (int n, int max)
18484 char * p;
18486 /* We assume that there will never be a requirement
18487 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
18488 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
18490 char err_msg[128];
18492 sprintf (err_msg,
18493 _("alignments greater than %d bytes not supported in .text sections."),
18494 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
18495 as_fatal ("%s", err_msg);
18498 p = frag_var (rs_align_code,
18499 MAX_MEM_FOR_RS_ALIGN_CODE,
18501 (relax_substateT) max,
18502 (symbolS *) NULL,
18503 (offsetT) n,
18504 (char *) NULL);
18505 *p = 0;
18508 /* Perform target specific initialisation of a frag.
18509 Note - despite the name this initialisation is not done when the frag
18510 is created, but only when its type is assigned. A frag can be created
18511 and used a long time before its type is set, so beware of assuming that
18512 this initialisation is performed first. */
18514 #ifndef OBJ_ELF
18515 void
18516 arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
18518 /* Record whether this frag is in an ARM or a THUMB area. */
18519 fragP->tc_frag_data.thumb_mode = thumb_mode;
18522 #else /* OBJ_ELF is defined. */
18523 void
18524 arm_init_frag (fragS * fragP, int max_chars)
18526 /* If the current ARM vs THUMB mode has not already
18527 been recorded into this frag then do so now. */
18528 if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
18530 fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
18532 /* Record a mapping symbol for alignment frags. We will delete this
18533 later if the alignment ends up empty. */
18534 switch (fragP->fr_type)
18536 case rs_align:
18537 case rs_align_test:
18538 case rs_fill:
18539 mapping_state_2 (MAP_DATA, max_chars);
18540 break;
18541 case rs_align_code:
18542 mapping_state_2 (thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
18543 break;
18544 default:
18545 break;
18550 /* When we change sections we need to issue a new mapping symbol. */
18552 void
18553 arm_elf_change_section (void)
18555 segment_info_type *seginfo;
18557 /* Link an unlinked unwind index table section to the .text section. */
18558 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
18559 && elf_linked_to_section (now_seg) == NULL)
18560 elf_linked_to_section (now_seg) = text_section;
18562 if (!SEG_NORMAL (now_seg))
18563 return;
18565 seginfo = seg_info (now_seg);
18566 marked_pr_dependency = seginfo->tc_segment_info_data.marked_pr_dependency;
18567 mapstate = seginfo->tc_segment_info_data.mapstate;
18571 arm_elf_section_type (const char * str, size_t len)
18573 if (len == 5 && strncmp (str, "exidx", 5) == 0)
18574 return SHT_ARM_EXIDX;
18576 return -1;
18579 /* Code to deal with unwinding tables. */
18581 static void add_unwind_adjustsp (offsetT);
18583 /* Generate any deferred unwind frame offset. */
18585 static void
18586 flush_pending_unwind (void)
18588 offsetT offset;
18590 offset = unwind.pending_offset;
18591 unwind.pending_offset = 0;
18592 if (offset != 0)
18593 add_unwind_adjustsp (offset);
18596 /* Add an opcode to this list for this function. Two-byte opcodes should
18597 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
18598 order. */
18600 static void
18601 add_unwind_opcode (valueT op, int length)
18603 /* Add any deferred stack adjustment. */
18604 if (unwind.pending_offset)
18605 flush_pending_unwind ();
18607 unwind.sp_restored = 0;
18609 if (unwind.opcode_count + length > unwind.opcode_alloc)
18611 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
18612 if (unwind.opcodes)
18613 unwind.opcodes = xrealloc (unwind.opcodes,
18614 unwind.opcode_alloc);
18615 else
18616 unwind.opcodes = xmalloc (unwind.opcode_alloc);
18618 while (length > 0)
18620 length--;
18621 unwind.opcodes[unwind.opcode_count] = op & 0xff;
18622 op >>= 8;
18623 unwind.opcode_count++;
18627 /* Add unwind opcodes to adjust the stack pointer. */
18629 static void
18630 add_unwind_adjustsp (offsetT offset)
18632 valueT op;
18634 if (offset > 0x200)
18636 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
18637 char bytes[5];
18638 int n;
18639 valueT o;
18641 /* Long form: 0xb2, uleb128. */
18642 /* This might not fit in a word so add the individual bytes,
18643 remembering the list is built in reverse order. */
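/* Worked example (illustrative): for offset == 0x400 the quantity
   encoded is (0x400 - 0x204) >> 2 == 0x7f, a single uleb128 byte.
   Pushing 0x7f and then 0xb2 onto the reversed list yields the
   forward byte stream 0xb2 0x7f, which the unwinder decodes as
   vsp += 0x204 + (0x7f << 2) == 0x400.  */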
18644 o = (valueT) ((offset - 0x204) >> 2);
18645 if (o == 0)
18646 add_unwind_opcode (0, 1);
18648 /* Calculate the uleb128 encoding of the offset. */
18649 n = 0;
18650 while (o)
18652 bytes[n] = o & 0x7f;
18653 o >>= 7;
18654 if (o)
18655 bytes[n] |= 0x80;
18656 n++;
18658 /* Add the insn. */
18659 for (; n; n--)
18660 add_unwind_opcode (bytes[n - 1], 1);
18661 add_unwind_opcode (0xb2, 1);
18663 else if (offset > 0x100)
18665 /* Two short opcodes. */
18666 add_unwind_opcode (0x3f, 1);
18667 op = (offset - 0x104) >> 2;
18668 add_unwind_opcode (op, 1);
18670 else if (offset > 0)
18672 /* Short opcode. */
18673 op = (offset - 4) >> 2;
18674 add_unwind_opcode (op, 1);
18676 else if (offset < 0)
18678 offset = -offset;
18679 while (offset > 0x100)
18681 add_unwind_opcode (0x7f, 1);
18682 offset -= 0x100;
18684 op = ((offset - 4) >> 2) | 0x40;
18685 add_unwind_opcode (op, 1);
18689 /* Finish the list of unwind opcodes for this function. */
18690 static void
18691 finish_unwind_opcodes (void)
18693 valueT op;
18695 if (unwind.fp_used)
18697 /* Adjust sp as necessary. */
18698 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
18699 flush_pending_unwind ();
18701 /* After restoring sp from the frame pointer. */
18702 op = 0x90 | unwind.fp_reg;
18703 add_unwind_opcode (op, 1);
18705 else
18706 flush_pending_unwind ();
18710 /* Start an exception table entry. If idx is nonzero this is an index table
18711 entry. */
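/* For functions in the plain .text section this typically creates
   .ARM.extab (the unwind descriptors, idx == 0) or .ARM.exidx (the
   index table, idx != 0); other text sections get their name appended
   to that prefix.  */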
18713 static void
18714 start_unwind_section (const segT text_seg, int idx)
18716 const char * text_name;
18717 const char * prefix;
18718 const char * prefix_once;
18719 const char * group_name;
18720 size_t prefix_len;
18721 size_t text_len;
18722 char * sec_name;
18723 size_t sec_name_len;
18724 int type;
18725 int flags;
18726 int linkonce;
18728 if (idx)
18730 prefix = ELF_STRING_ARM_unwind;
18731 prefix_once = ELF_STRING_ARM_unwind_once;
18732 type = SHT_ARM_EXIDX;
18734 else
18736 prefix = ELF_STRING_ARM_unwind_info;
18737 prefix_once = ELF_STRING_ARM_unwind_info_once;
18738 type = SHT_PROGBITS;
18741 text_name = segment_name (text_seg);
18742 if (streq (text_name, ".text"))
18743 text_name = "";
18745 if (strncmp (text_name, ".gnu.linkonce.t.",
18746 strlen (".gnu.linkonce.t.")) == 0)
18748 prefix = prefix_once;
18749 text_name += strlen (".gnu.linkonce.t.");
18752 prefix_len = strlen (prefix);
18753 text_len = strlen (text_name);
18754 sec_name_len = prefix_len + text_len;
18755 sec_name = xmalloc (sec_name_len + 1);
18756 memcpy (sec_name, prefix, prefix_len);
18757 memcpy (sec_name + prefix_len, text_name, text_len);
18758 sec_name[prefix_len + text_len] = '\0';
18760 flags = SHF_ALLOC;
18761 linkonce = 0;
18762 group_name = 0;
18764 /* Handle COMDAT group. */
18765 if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
18767 group_name = elf_group_name (text_seg);
18768 if (group_name == NULL)
18770 as_bad (_("Group section `%s' has no group signature"),
18771 segment_name (text_seg));
18772 ignore_rest_of_line ();
18773 return;
18775 flags |= SHF_GROUP;
18776 linkonce = 1;
18779 obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);
18781 /* Set the section link for index tables. */
18782 if (idx)
18783 elf_linked_to_section (now_seg) = text_seg;
18787 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
18788 personality routine data. Returns zero, or the index table value for
18789 an inline entry. */
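/* Worked example (illustrative): a frame needing only "vsp += 16"
   (the single short opcode 0x03) and personality routine 0 is packed
   inline as 0x80 0x03 padded with two "finish" opcodes, giving the
   index table word 0x8003b0b0.  */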
18791 static valueT
18792 create_unwind_entry (int have_data)
18794 int size;
18795 addressT where;
18796 char *ptr;
18797 /* The current word of data. */
18798 valueT data;
18799 /* The number of bytes left in this word. */
18800 int n;
18802 finish_unwind_opcodes ();
18804 /* Remember the current text section. */
18805 unwind.saved_seg = now_seg;
18806 unwind.saved_subseg = now_subseg;
18808 start_unwind_section (now_seg, 0);
18810 if (unwind.personality_routine == NULL)
18812 if (unwind.personality_index == -2)
18814 if (have_data)
18815 as_bad (_("handlerdata in cantunwind frame"));
18816 return 1; /* EXIDX_CANTUNWIND. */
18819 /* Use a default personality routine if none is specified. */
18820 if (unwind.personality_index == -1)
18822 if (unwind.opcode_count > 3)
18823 unwind.personality_index = 1;
18824 else
18825 unwind.personality_index = 0;
18828 /* Space for the personality routine entry. */
18829 if (unwind.personality_index == 0)
18831 if (unwind.opcode_count > 3)
18832 as_bad (_("too many unwind opcodes for personality routine 0"));
18834 if (!have_data)
18836 /* All the data is inline in the index table. */
18837 data = 0x80;
18838 n = 3;
18839 while (unwind.opcode_count > 0)
18841 unwind.opcode_count--;
18842 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
18843 n--;
18846 /* Pad with "finish" opcodes. */
18847 while (n--)
18848 data = (data << 8) | 0xb0;
18850 return data;
18852 size = 0;
18854 else
18855 /* We get two opcodes "free" in the first word. */
18856 size = unwind.opcode_count - 2;
18858 else
18859 /* An extra byte is required for the opcode count. */
18860 size = unwind.opcode_count + 1;
18862 size = (size + 3) >> 2;
18863 if (size > 0xff)
18864 as_bad (_("too many unwind opcodes"));
18866 frag_align (2, 0, 0);
18867 record_alignment (now_seg, 2);
18868 unwind.table_entry = expr_build_dot ();
18870 /* Allocate the table entry. */
18871 ptr = frag_more ((size << 2) + 4);
18872 where = frag_now_fix () - ((size << 2) + 4);
18874 switch (unwind.personality_index)
18876 case -1:
18877 /* ??? Should this be a PLT generating relocation? */
18878 /* Custom personality routine. */
18879 fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
18880 BFD_RELOC_ARM_PREL31);
18882 where += 4;
18883 ptr += 4;
18885 /* Set the first byte to the number of additional words. */
18886 data = size - 1;
18887 n = 3;
18888 break;
18890 /* ABI defined personality routines. */
18891 case 0:
18892 /* Three opcodes bytes are packed into the first word. */
18893 data = 0x80;
18894 n = 3;
18895 break;
18897 case 1:
18898 case 2:
18899 /* The size and first two opcode bytes go in the first word. */
18900 data = ((0x80 + unwind.personality_index) << 8) | size;
18901 n = 2;
18902 break;
18904 default:
18905 /* Should never happen. */
18906 abort ();
18909 /* Pack the opcodes into words (MSB first), reversing the list at the same
18910 time. */
18911 while (unwind.opcode_count > 0)
18913 if (n == 0)
18915 md_number_to_chars (ptr, data, 4);
18916 ptr += 4;
18917 n = 4;
18918 data = 0;
18920 unwind.opcode_count--;
18921 n--;
18922 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
18925 /* Finish off the last word. */
18926 if (n < 4)
18928 /* Pad with "finish" opcodes. */
18929 while (n--)
18930 data = (data << 8) | 0xb0;
18932 md_number_to_chars (ptr, data, 4);
18935 if (!have_data)
18937 /* Add an empty descriptor if there is no user-specified data. */
18938 ptr = frag_more (4);
18939 md_number_to_chars (ptr, 0, 4);
18942 return 0;
18946 /* Initialize the DWARF-2 unwind information for this procedure. */
18948 void
18949 tc_arm_frame_initial_instructions (void)
18951 cfi_add_CFA_def_cfa (REG_SP, 0);
18953 #endif /* OBJ_ELF */
18955 /* Convert REGNAME to a DWARF-2 register number. */
18958 tc_arm_regname_to_dw2regnum (char *regname)
18960 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
18962 if (reg == FAIL)
18963 return -1;
18965 return reg;
18968 #ifdef TE_PE
18969 void
18970 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
18972 expressionS expr;
18974 expr.X_op = O_secrel;
18975 expr.X_add_symbol = symbol;
18976 expr.X_add_number = 0;
18977 emit_expr (&expr, size);
18979 #endif
18981 /* MD interface: Symbol and relocation handling. */
18983 /* Return the address within the segment that a PC-relative fixup is
18984 relative to. For ARM, PC-relative fixups applied to instructions
18985 are generally relative to the location of the fixup plus 8 bytes.
18986 Thumb branches are offset by 4, and Thumb loads relative to PC
18987 require special handling. */
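/* For example, an ARM-state branch assembled at address 0x1000 is
   encoded relative to 0x1008, while a Thumb branch at the same
   address is encoded relative to 0x1004.  */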
18989 long
18990 md_pcrel_from_section (fixS * fixP, segT seg)
18992 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
18994 /* If this is pc-relative and we are going to emit a relocation
18995 then we just want to put out any pipeline compensation that the linker
18996 will need. Otherwise we want to use the calculated base.
18997 For WinCE we skip the bias for externals as well, since this
18998 is how the MS ARM-CE assembler behaves and we want to be compatible. */
18999 if (fixP->fx_pcrel
19000 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
19001 || (arm_force_relocation (fixP)
19002 #ifdef TE_WINCE
19003 && !S_IS_EXTERNAL (fixP->fx_addsy)
19004 #endif
19006 base = 0;
19009 switch (fixP->fx_r_type)
19011 /* PC relative addressing on the Thumb is slightly odd as the
19012 bottom two bits of the PC are forced to zero for the
19013 calculation. This happens *after* application of the
19014 pipeline offset. However, Thumb adrl already adjusts for
19015 this, so we need not do it again. */
19016 case BFD_RELOC_ARM_THUMB_ADD:
19017 return base & ~3;
19019 case BFD_RELOC_ARM_THUMB_OFFSET:
19020 case BFD_RELOC_ARM_T32_OFFSET_IMM:
19021 case BFD_RELOC_ARM_T32_ADD_PC12:
19022 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
19023 return (base + 4) & ~3;
19025 /* Thumb branches are simply offset by +4. */
19026 case BFD_RELOC_THUMB_PCREL_BRANCH7:
19027 case BFD_RELOC_THUMB_PCREL_BRANCH9:
19028 case BFD_RELOC_THUMB_PCREL_BRANCH12:
19029 case BFD_RELOC_THUMB_PCREL_BRANCH20:
19030 case BFD_RELOC_THUMB_PCREL_BRANCH25:
19031 return base + 4;
19033 case BFD_RELOC_THUMB_PCREL_BRANCH23:
19034 if (fixP->fx_addsy
19035 && ARM_IS_FUNC (fixP->fx_addsy)
19036 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
19037 base = fixP->fx_where + fixP->fx_frag->fr_address;
19038 return base + 4;
19040 /* BLX is like branches above, but forces the low two bits of PC to
19041 zero. */
19042 case BFD_RELOC_THUMB_PCREL_BLX:
19043 if (fixP->fx_addsy
19044 && THUMB_IS_FUNC (fixP->fx_addsy)
19045 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
19046 base = fixP->fx_where + fixP->fx_frag->fr_address;
19047 return (base + 4) & ~3;
19049 /* ARM mode branches are offset by +8. However, the Windows CE
19050 loader expects the relocation not to take this into account. */
19051 case BFD_RELOC_ARM_PCREL_BLX:
19052 if (fixP->fx_addsy
19053 && ARM_IS_FUNC (fixP->fx_addsy)
19054 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
19055 base = fixP->fx_where + fixP->fx_frag->fr_address;
19056 return base + 8;
19058 case BFD_RELOC_ARM_PCREL_CALL:
19059 if (fixP->fx_addsy
19060 && THUMB_IS_FUNC (fixP->fx_addsy)
19061 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
19062 base = fixP->fx_where + fixP->fx_frag->fr_address;
19063 return base + 8;
19065 case BFD_RELOC_ARM_PCREL_BRANCH:
19066 case BFD_RELOC_ARM_PCREL_JUMP:
19067 case BFD_RELOC_ARM_PLT32:
19068 #ifdef TE_WINCE
19069 /* When handling fixups immediately, because we have already
19070 discovered the value of a symbol or the address of the frag involved,
19071 we must account for the +8 offset, as the OS loader will never see the reloc.
19072 See fixup_segment() in write.c.
19073 The S_IS_EXTERNAL test handles the case of global symbols.
19074 Those need the calculated base, not just the pipeline compensation the linker will need. */
19075 if (fixP->fx_pcrel
19076 && fixP->fx_addsy != NULL
19077 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
19078 && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
19079 return base + 8;
19080 return base;
19081 #else
19082 return base + 8;
19083 #endif
19086 /* ARM mode loads relative to PC are also offset by +8. Unlike
19087 branches, the Windows CE loader *does* expect the relocation
19088 to take this into account. */
19089 case BFD_RELOC_ARM_OFFSET_IMM:
19090 case BFD_RELOC_ARM_OFFSET_IMM8:
19091 case BFD_RELOC_ARM_HWLITERAL:
19092 case BFD_RELOC_ARM_LITERAL:
19093 case BFD_RELOC_ARM_CP_OFF_IMM:
19094 return base + 8;
19097 /* Other PC-relative relocations are un-offset. */
19098 default:
19099 return base;
19103 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
19104 Otherwise we have no need to default values of symbols. */
19106 symbolS *
19107 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
19109 #ifdef OBJ_ELF
19110 if (name[0] == '_' && name[1] == 'G'
19111 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
19113 if (!GOT_symbol)
19115 if (symbol_find (name))
19116 as_bad (_("GOT already in the symbol table"));
19118 GOT_symbol = symbol_new (name, undefined_section,
19119 (valueT) 0, & zero_address_frag);
19122 return GOT_symbol;
19124 #endif
19126 return NULL;
19129 /* Subroutine of md_apply_fix. Check to see if an immediate can be
19130 computed as two separate immediate values, added together. We
19131 already know that this value cannot be computed by just one ARM
19132 instruction. */
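/* Worked example (illustrative): 0x1001 is not a valid single ARM
   immediate, but this routine splits it into 0x001 (returned) and
   0x1000 (stored in *highpart as the rotated immediate 0xc10), so
   the value can be built with two ADDs.  */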
19134 static unsigned int
19135 validate_immediate_twopart (unsigned int val,
19136 unsigned int * highpart)
19138 unsigned int a;
19139 unsigned int i;
19141 for (i = 0; i < 32; i += 2)
19142 if (((a = rotate_left (val, i)) & 0xff) != 0)
19144 if (a & 0xff00)
19146 if (a & ~ 0xffff)
19147 continue;
19148 * highpart = (a >> 8) | ((i + 24) << 7);
19150 else if (a & 0xff0000)
19152 if (a & 0xff000000)
19153 continue;
19154 * highpart = (a >> 16) | ((i + 16) << 7);
19156 else
19158 gas_assert (a & 0xff000000);
19159 * highpart = (a >> 24) | ((i + 8) << 7);
19162 return (a & 0xff) | (i << 7);
19165 return FAIL;
19168 static int
19169 validate_offset_imm (unsigned int val, int hwse)
19171 if ((hwse && val > 255) || val > 4095)
19172 return FAIL;
19173 return val;
19176 /* Subroutine of md_apply_fix. Do those data_ops which can take a
19177 negative immediate constant by altering the instruction. A bit of
19178 a hack really.
19179 MOV <-> MVN
19180 AND <-> BIC
19181 ADC <-> SBC
19182 by inverting the second operand, and
19183 ADD <-> SUB
19184 CMP <-> CMN
19185 by negating the second operand. */
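/* Worked example (illustrative): if a fixup leaves "mov rX, #-1",
   the value 0xffffffff has no ARM immediate encoding, but its
   bitwise inverse (0) does, so the instruction is rewritten as
   "mvn rX, #0" with the same effect.  */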
19187 static int
19188 negate_data_op (unsigned long * instruction,
19189 unsigned long value)
19191 int op, new_inst;
19192 unsigned long negated, inverted;
19194 negated = encode_arm_immediate (-value);
19195 inverted = encode_arm_immediate (~value);
19197 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
19198 switch (op)
19200 /* First negates. */
19201 case OPCODE_SUB: /* ADD <-> SUB */
19202 new_inst = OPCODE_ADD;
19203 value = negated;
19204 break;
19206 case OPCODE_ADD:
19207 new_inst = OPCODE_SUB;
19208 value = negated;
19209 break;
19211 case OPCODE_CMP: /* CMP <-> CMN */
19212 new_inst = OPCODE_CMN;
19213 value = negated;
19214 break;
19216 case OPCODE_CMN:
19217 new_inst = OPCODE_CMP;
19218 value = negated;
19219 break;
19221 /* Now Inverted ops. */
19222 case OPCODE_MOV: /* MOV <-> MVN */
19223 new_inst = OPCODE_MVN;
19224 value = inverted;
19225 break;
19227 case OPCODE_MVN:
19228 new_inst = OPCODE_MOV;
19229 value = inverted;
19230 break;
19232 case OPCODE_AND: /* AND <-> BIC */
19233 new_inst = OPCODE_BIC;
19234 value = inverted;
19235 break;
19237 case OPCODE_BIC:
19238 new_inst = OPCODE_AND;
19239 value = inverted;
19240 break;
19242 case OPCODE_ADC: /* ADC <-> SBC */
19243 new_inst = OPCODE_SBC;
19244 value = inverted;
19245 break;
19247 case OPCODE_SBC:
19248 new_inst = OPCODE_ADC;
19249 value = inverted;
19250 break;
19252 /* We cannot do anything. */
19253 default:
19254 return FAIL;
19257 if (value == (unsigned) FAIL)
19258 return FAIL;
19260 *instruction &= OPCODE_MASK;
19261 *instruction |= new_inst << DATA_OP_SHIFT;
19262 return value;
19265 /* Like negate_data_op, but for Thumb-2. */
19267 static unsigned int
19268 thumb32_negate_data_op (offsetT *instruction, unsigned int value)
19270 int op, new_inst;
19271 int rd;
19272 unsigned int negated, inverted;
19274 negated = encode_thumb32_immediate (-value);
19275 inverted = encode_thumb32_immediate (~value);
19277 rd = (*instruction >> 8) & 0xf;
19278 op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
19279 switch (op)
19281 /* ADD <-> SUB. Includes CMP <-> CMN. */
19282 case T2_OPCODE_SUB:
19283 new_inst = T2_OPCODE_ADD;
19284 value = negated;
19285 break;
19287 case T2_OPCODE_ADD:
19288 new_inst = T2_OPCODE_SUB;
19289 value = negated;
19290 break;
19292 /* ORR <-> ORN. Includes MOV <-> MVN. */
19293 case T2_OPCODE_ORR:
19294 new_inst = T2_OPCODE_ORN;
19295 value = inverted;
19296 break;
19298 case T2_OPCODE_ORN:
19299 new_inst = T2_OPCODE_ORR;
19300 value = inverted;
19301 break;
19303 /* AND <-> BIC. TST has no inverted equivalent. */
19304 case T2_OPCODE_AND:
19305 new_inst = T2_OPCODE_BIC;
19306 if (rd == 15)
19307 value = FAIL;
19308 else
19309 value = inverted;
19310 break;
19312 case T2_OPCODE_BIC:
19313 new_inst = T2_OPCODE_AND;
19314 value = inverted;
19315 break;
19317 /* ADC <-> SBC */
19318 case T2_OPCODE_ADC:
19319 new_inst = T2_OPCODE_SBC;
19320 value = inverted;
19321 break;
19323 case T2_OPCODE_SBC:
19324 new_inst = T2_OPCODE_ADC;
19325 value = inverted;
19326 break;
19328 /* We cannot do anything. */
19329 default:
19330 return FAIL;
19333 if (value == (unsigned int)FAIL)
19334 return FAIL;
19336 *instruction &= T2_OPCODE_MASK;
19337 *instruction |= new_inst << T2_DATA_OP_SHIFT;
19338 return value;
19341 /* Read a 32-bit thumb instruction from buf. */
19342 static unsigned long
19343 get_thumb32_insn (char * buf)
19345 unsigned long insn;
19346 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
19347 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
19349 return insn;
19353 /* We usually want to set the low bit on the address of thumb function
19354 symbols. In particular .word foo - . should have the low bit set.
19355 Generic code tries to fold the difference of two symbols to
19356 a constant. Prevent this and force a relocation when the first symbol
19357 is a thumb function. */
19359 bfd_boolean
19360 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
19362 if (op == O_subtract
19363 && l->X_op == O_symbol
19364 && r->X_op == O_symbol
19365 && THUMB_IS_FUNC (l->X_add_symbol))
19367 l->X_op = O_subtract;
19368 l->X_op_symbol = r->X_add_symbol;
19369 l->X_add_number -= r->X_add_number;
19370 return TRUE;
19373 /* Process as normal. */
19374 return FALSE;
19377 void
19378 md_apply_fix (fixS * fixP,
19379 valueT * valP,
19380 segT seg)
19382 offsetT value = * valP;
19383 offsetT newval;
19384 unsigned int newimm;
19385 unsigned long temp;
19386 int sign;
19387 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
19389 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
19391 /* Note whether this will delete the relocation. */
19393 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
19394 fixP->fx_done = 1;
19396 /* On a 64-bit host, silently truncate 'value' to 32 bits for
19397 consistency with the behaviour on 32-bit hosts. Remember value
19398 for emit_reloc. */
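/* The xor/subtract pair below sign-extends bit 31: e.g. a truncated
   value of 0xfffffffe becomes the 64-bit offsetT -2 again.  */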
19399 value &= 0xffffffff;
19400 value ^= 0x80000000;
19401 value -= 0x80000000;
19403 *valP = value;
19404 fixP->fx_addnumber = value;
19406 /* Same treatment for fixP->fx_offset. */
19407 fixP->fx_offset &= 0xffffffff;
19408 fixP->fx_offset ^= 0x80000000;
19409 fixP->fx_offset -= 0x80000000;
19411 switch (fixP->fx_r_type)
19413 case BFD_RELOC_NONE:
19414 /* This will need to go in the object file. */
19415 fixP->fx_done = 0;
19416 break;
19418 case BFD_RELOC_ARM_IMMEDIATE:
19419 /* We claim that this fixup has been processed here,
19420 even if in fact we generate an error because we do
19421 not have a reloc for it, so tc_gen_reloc will reject it. */
19422 fixP->fx_done = 1;
19424 if (fixP->fx_addsy
19425 && ! S_IS_DEFINED (fixP->fx_addsy))
19427 as_bad_where (fixP->fx_file, fixP->fx_line,
19428 _("undefined symbol %s used as an immediate value"),
19429 S_GET_NAME (fixP->fx_addsy));
19430 break;
19433 if (fixP->fx_addsy
19434 && S_GET_SEGMENT (fixP->fx_addsy) != seg)
19436 as_bad_where (fixP->fx_file, fixP->fx_line,
19437 _("symbol %s is in a different section"),
19438 S_GET_NAME (fixP->fx_addsy));
19439 break;
19442 newimm = encode_arm_immediate (value);
19443 temp = md_chars_to_number (buf, INSN_SIZE);
19445 /* If the instruction will fail, see if we can fix things up by
19446 changing the opcode. */
19447 if (newimm == (unsigned int) FAIL
19448 && (newimm = negate_data_op (&temp, value)) == (unsigned int) FAIL)
19450 as_bad_where (fixP->fx_file, fixP->fx_line,
19451 _("invalid constant (%lx) after fixup"),
19452 (unsigned long) value);
19453 break;
19456 newimm |= (temp & 0xfffff000);
19457 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
19458 break;
19460 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
19462 unsigned int highpart = 0;
19463 unsigned int newinsn = 0xe1a00000; /* nop. */
19465 if (fixP->fx_addsy
19466 && ! S_IS_DEFINED (fixP->fx_addsy))
19468 as_bad_where (fixP->fx_file, fixP->fx_line,
19469 _("undefined symbol %s used as an immediate value"),
19470 S_GET_NAME (fixP->fx_addsy));
19471 break;
19474 if (fixP->fx_addsy
19475 && S_GET_SEGMENT (fixP->fx_addsy) != seg)
19477 as_bad_where (fixP->fx_file, fixP->fx_line,
19478 _("symbol %s is in a different section"),
19479 S_GET_NAME (fixP->fx_addsy));
19480 break;
19483 newimm = encode_arm_immediate (value);
19484 temp = md_chars_to_number (buf, INSN_SIZE);
19486 /* If the instruction will fail, see if we can fix things up by
19487 changing the opcode. */
19488 if (newimm == (unsigned int) FAIL
19489 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
19491 /* No ? OK - try using two ADD instructions to generate
19492 the value. */
19493 newimm = validate_immediate_twopart (value, & highpart);
19495 /* Yes - then make sure that the second instruction is
19496 also an add. */
19497 if (newimm != (unsigned int) FAIL)
19498 newinsn = temp;
19499 /* Still No ? Try using a negated value. */
19500 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
19501 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
19502 /* Otherwise - give up. */
19503 else
19505 as_bad_where (fixP->fx_file, fixP->fx_line,
19506 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
19507 (long) value);
19508 break;
19511 /* Replace the first operand in the 2nd instruction (which
19512 is the PC) with the destination register. We have
19513 already added in the PC in the first instruction and we
19514 do not want to do it again. */
19515 newinsn &= ~ 0xf0000;
19516 newinsn |= ((newinsn & 0x0f000) << 4);
19519 newimm |= (temp & 0xfffff000);
19520 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
19522 highpart |= (newinsn & 0xfffff000);
19523 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
19525 break;
19527 case BFD_RELOC_ARM_OFFSET_IMM:
19528 if (!fixP->fx_done && seg->use_rela_p)
19529 value = 0;
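/* Fall through.  */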
19531 case BFD_RELOC_ARM_LITERAL:
19532 sign = value >= 0;
19534 if (value < 0)
19535 value = - value;
19537 if (validate_offset_imm (value, 0) == FAIL)
19539 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
19540 as_bad_where (fixP->fx_file, fixP->fx_line,
19541 _("invalid literal constant: pool needs to be closer"));
19542 else
19543 as_bad_where (fixP->fx_file, fixP->fx_line,
19544 _("bad immediate value for offset (%ld)"),
19545 (long) value);
19546 break;
19549 newval = md_chars_to_number (buf, INSN_SIZE);
19550 newval &= 0xff7ff000;
19551 newval |= value | (sign ? INDEX_UP : 0);
19552 md_number_to_chars (buf, newval, INSN_SIZE);
19553 break;
19555 case BFD_RELOC_ARM_OFFSET_IMM8:
19556 case BFD_RELOC_ARM_HWLITERAL:
19557 sign = value >= 0;
19559 if (value < 0)
19560 value = - value;
19562 if (validate_offset_imm (value, 1) == FAIL)
19564 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
19565 as_bad_where (fixP->fx_file, fixP->fx_line,
19566 _("invalid literal constant: pool needs to be closer"));
19567 else
19568 as_bad (_("bad immediate value for 8-bit offset (%ld)"),
19569 (long) value);
19570 break;
19573 newval = md_chars_to_number (buf, INSN_SIZE);
19574 newval &= 0xff7ff0f0;
19575 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
19576 md_number_to_chars (buf, newval, INSN_SIZE);
19577 break;
19579 case BFD_RELOC_ARM_T32_OFFSET_U8:
19580 if (value < 0 || value > 1020 || value % 4 != 0)
19581 as_bad_where (fixP->fx_file, fixP->fx_line,
19582 _("bad immediate value for offset (%ld)"), (long) value);
19583 value /= 4;
19585 newval = md_chars_to_number (buf+2, THUMB_SIZE);
19586 newval |= value;
19587 md_number_to_chars (buf+2, newval, THUMB_SIZE);
19588 break;
19590 case BFD_RELOC_ARM_T32_OFFSET_IMM:
19591 /* This is a complicated relocation used for all varieties of Thumb32
19592 load/store instruction with immediate offset:
19594 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
19595 *4, optional writeback(W)
19596 (doubleword load/store)
19598 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
19599 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
19600 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
19601 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
19602 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
19604 Uppercase letters indicate bits that are already encoded at
19605 this point. Lowercase letters are our problem. For the
19606 second block of instructions, the secondary opcode nybble
19607 (bits 8..11) is present, and bit 23 is zero, even if this is
19608 a PC-relative operation. */
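/* An illustrative sketch of the first form: for a doubleword load with
   an offset of +24, bit 23 (U) is set below, the offset is checked for
   word alignment, and 24 / 4 == 6 ends up in the low eight bits of the
   instruction.  */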
19609 newval = md_chars_to_number (buf, THUMB_SIZE);
19610 newval <<= 16;
19611 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
19613 if ((newval & 0xf0000000) == 0xe0000000)
19615 /* Doubleword load/store: 8-bit offset, scaled by 4. */
19616 if (value >= 0)
19617 newval |= (1 << 23);
19618 else
19619 value = -value;
19620 if (value % 4 != 0)
19622 as_bad_where (fixP->fx_file, fixP->fx_line,
19623 _("offset not a multiple of 4"));
19624 break;
19626 value /= 4;
19627 if (value > 0xff)
19629 as_bad_where (fixP->fx_file, fixP->fx_line,
19630 _("offset out of range"));
19631 break;
19633 newval &= ~0xff;
19635 else if ((newval & 0x000f0000) == 0x000f0000)
19637 /* PC-relative, 12-bit offset. */
19638 if (value >= 0)
19639 newval |= (1 << 23);
19640 else
19641 value = -value;
19642 if (value > 0xfff)
19644 as_bad_where (fixP->fx_file, fixP->fx_line,
19645 _("offset out of range"));
19646 break;
19648 newval &= ~0xfff;
19650 else if ((newval & 0x00000100) == 0x00000100)
19652 /* Writeback: 8-bit, +/- offset. */
19653 if (value >= 0)
19654 newval |= (1 << 9);
19655 else
19656 value = -value;
19657 if (value > 0xff)
19659 as_bad_where (fixP->fx_file, fixP->fx_line,
19660 _("offset out of range"));
19661 break;
19663 newval &= ~0xff;
19665 else if ((newval & 0x00000f00) == 0x00000e00)
19667 /* T-instruction: positive 8-bit offset. */
19668 if (value < 0 || value > 0xff)
19670 as_bad_where (fixP->fx_file, fixP->fx_line,
19671 _("offset out of range"));
19672 break;
19674 newval &= ~0xff;
19675 newval |= value;
19677 else
19679 /* Positive 12-bit or negative 8-bit offset. */
19680 int limit;
19681 if (value >= 0)
19683 newval |= (1 << 23);
19684 limit = 0xfff;
19686 else
19688 value = -value;
19689 limit = 0xff;
19691 if (value > limit)
19693 as_bad_where (fixP->fx_file, fixP->fx_line,
19694 _("offset out of range"));
19695 break;
19697 newval &= ~limit;
19700 newval |= value;
19701 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
19702 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
19703 break;
19705 case BFD_RELOC_ARM_SHIFT_IMM:
19706 newval = md_chars_to_number (buf, INSN_SIZE);
19707 if (((unsigned long) value) > 32
19708 || (value == 32
19709 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
19711 as_bad_where (fixP->fx_file, fixP->fx_line,
19712 _("shift expression is too large"));
19713 break;
19716 if (value == 0)
19717 /* Shifts of zero must be done as lsl. */
19718 newval &= ~0x60;
19719 else if (value == 32)
19720 value = 0;
19721 newval &= 0xfffff07f;
19722 newval |= (value & 0x1f) << 7;
19723 md_number_to_chars (buf, newval, INSN_SIZE);
19724 break;
19726 case BFD_RELOC_ARM_T32_IMMEDIATE:
19727 case BFD_RELOC_ARM_T32_ADD_IMM:
19728 case BFD_RELOC_ARM_T32_IMM12:
19729 case BFD_RELOC_ARM_T32_ADD_PC12:
19730 /* We claim that this fixup has been processed here,
19731 even if in fact we generate an error because we do
19732 not have a reloc for it, so tc_gen_reloc will reject it. */
19733 fixP->fx_done = 1;
19735 if (fixP->fx_addsy
19736 && ! S_IS_DEFINED (fixP->fx_addsy))
19738 as_bad_where (fixP->fx_file, fixP->fx_line,
19739 _("undefined symbol %s used as an immediate value"),
19740 S_GET_NAME (fixP->fx_addsy));
19741 break;
19744 newval = md_chars_to_number (buf, THUMB_SIZE);
19745 newval <<= 16;
19746 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
19748 newimm = FAIL;
19749 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
19750 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
19752 newimm = encode_thumb32_immediate (value);
19753 if (newimm == (unsigned int) FAIL)
19754 newimm = thumb32_negate_data_op (&newval, value);
19756 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE
19757 && newimm == (unsigned int) FAIL)
19759 /* Turn add/sub into addw/subw. */
19760 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
19761 newval = (newval & 0xfeffffff) | 0x02000000;
19763 /* 12 bit immediate for addw/subw. */
19764 if (value < 0)
19766 value = -value;
19767 newval ^= 0x00a00000;
19769 if (value > 0xfff)
19770 newimm = (unsigned int) FAIL;
19771 else
19772 newimm = value;
19775 if (newimm == (unsigned int)FAIL)
19777 as_bad_where (fixP->fx_file, fixP->fx_line,
19778 _("invalid constant (%lx) after fixup"),
19779 (unsigned long) value);
19780 break;
19783 newval |= (newimm & 0x800) << 15;
19784 newval |= (newimm & 0x700) << 4;
19785 newval |= (newimm & 0x0ff);
19787 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
19788 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
19789 break;
19791 case BFD_RELOC_ARM_SMC:
19792 if (((unsigned long) value) > 0xffff)
19793 as_bad_where (fixP->fx_file, fixP->fx_line,
19794 _("invalid smc expression"));
19795 newval = md_chars_to_number (buf, INSN_SIZE);
19796 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
19797 md_number_to_chars (buf, newval, INSN_SIZE);
19798 break;
19800 case BFD_RELOC_ARM_SWI:
19801 if (fixP->tc_fix_data != 0)
19803 if (((unsigned long) value) > 0xff)
19804 as_bad_where (fixP->fx_file, fixP->fx_line,
19805 _("invalid swi expression"));
19806 newval = md_chars_to_number (buf, THUMB_SIZE);
19807 newval |= value;
19808 md_number_to_chars (buf, newval, THUMB_SIZE);
19810 else
19812 if (((unsigned long) value) > 0x00ffffff)
19813 as_bad_where (fixP->fx_file, fixP->fx_line,
19814 _("invalid swi expression"));
19815 newval = md_chars_to_number (buf, INSN_SIZE);
19816 newval |= value;
19817 md_number_to_chars (buf, newval, INSN_SIZE);
19819 break;
19821 case BFD_RELOC_ARM_MULTI:
19822 if (((unsigned long) value) > 0xffff)
19823 as_bad_where (fixP->fx_file, fixP->fx_line,
19824 _("invalid expression in load/store multiple"));
19825 newval = value | md_chars_to_number (buf, INSN_SIZE);
19826 md_number_to_chars (buf, newval, INSN_SIZE);
19827 break;
19829 #ifdef OBJ_ELF
19830 case BFD_RELOC_ARM_PCREL_CALL:
19832 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
19833 && fixP->fx_addsy
19834 && !S_IS_EXTERNAL (fixP->fx_addsy)
19835 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
19836 && THUMB_IS_FUNC (fixP->fx_addsy))
19837 /* Flip the bl to blx. This is a simple bit flip
19838 here because we generate PCREL_CALL for
19839 unconditional bls. */
19841 newval = md_chars_to_number (buf, INSN_SIZE);
19842 newval = newval | 0x10000000;
19843 md_number_to_chars (buf, newval, INSN_SIZE);
19844 temp = 1;
19845 fixP->fx_done = 1;
19847 else
19848 temp = 3;
19849 goto arm_branch_common;
19851 case BFD_RELOC_ARM_PCREL_JUMP:
19852 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
19853 && fixP->fx_addsy
19854 && !S_IS_EXTERNAL (fixP->fx_addsy)
19855 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
19856 && THUMB_IS_FUNC (fixP->fx_addsy))
19858 /* This would map to a bl<cond>, b<cond>, or
19859 b<always> to a Thumb function. We
19860 need to force a relocation for this particular
19861 case. */
19862 newval = md_chars_to_number (buf, INSN_SIZE);
19863 fixP->fx_done = 0;
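/* Fall through.  */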
19866 case BFD_RELOC_ARM_PLT32:
19867 #endif
19868 case BFD_RELOC_ARM_PCREL_BRANCH:
19869 temp = 3;
19870 goto arm_branch_common;
19872 case BFD_RELOC_ARM_PCREL_BLX:
19874 temp = 1;
19875 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
19876 && fixP->fx_addsy
19877 && !S_IS_EXTERNAL (fixP->fx_addsy)
19878 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
19879 && ARM_IS_FUNC (fixP->fx_addsy))
19881 /* Flip the blx to a bl and warn. */
19882 const char *name = S_GET_NAME (fixP->fx_addsy);
19883 newval = 0xeb000000;
19884 as_warn_where (fixP->fx_file, fixP->fx_line,
19885 _("blx to '%s' an ARM ISA state function changed to bl"),
19886 name);
19887 md_number_to_chars (buf, newval, INSN_SIZE);
19888 temp = 3;
19889 fixP->fx_done = 1;
19892 #ifdef OBJ_ELF
19893 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
19894 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
19895 #endif
19897 arm_branch_common:
19898 /* We are going to store value (shifted right by two) in the
19899 instruction, in a 24 bit, signed field. Bits 26 through 32 must be either
19900 all clear or all set, and bit 0 must be clear. For B/BL bit 1 must
19901 also be clear. */
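/* A worked example: a forward branch of 0x1000 bytes stores
   0x1000 >> 2 == 0x400 in the 24-bit field below; the 0xfe000000 test
   rejects displacements that do not fit the field once sign-extended.  */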
19902 if (value & temp)
19903 as_bad_where (fixP->fx_file, fixP->fx_line,
19904 _("misaligned branch destination"));
19905 if ((value & (offsetT)0xfe000000) != (offsetT)0
19906 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
19907 as_bad_where (fixP->fx_file, fixP->fx_line,
19908 _("branch out of range"));
19910 if (fixP->fx_done || !seg->use_rela_p)
19912 newval = md_chars_to_number (buf, INSN_SIZE);
19913 newval |= (value >> 2) & 0x00ffffff;
19914 /* Set the H bit on BLX instructions. */
19915 if (temp == 1)
19917 if (value & 2)
19918 newval |= 0x01000000;
19919 else
19920 newval &= ~0x01000000;
19922 md_number_to_chars (buf, newval, INSN_SIZE);
19924 break;
19926 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
19927 /* CBZ can only branch forward. */
19929 /* Attempts to use CBZ to branch to the next instruction
19930 (which, strictly speaking, are prohibited) will be turned into
19931 no-ops.
19933 FIXME: It may be better to remove the instruction completely and
19934 perform relaxation. */
19935 if (value == -2)
19937 newval = md_chars_to_number (buf, THUMB_SIZE);
19938 newval = 0xbf00; /* NOP encoding T1 */
19939 md_number_to_chars (buf, newval, THUMB_SIZE);
19941 else
19943 if (value & ~0x7e)
19944 as_bad_where (fixP->fx_file, fixP->fx_line,
19945 _("branch out of range"));
19947 if (fixP->fx_done || !seg->use_rela_p)
19949 newval = md_chars_to_number (buf, THUMB_SIZE);
19950 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
19951 md_number_to_chars (buf, newval, THUMB_SIZE);
19954 break;
19956 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
19957 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
19958 as_bad_where (fixP->fx_file, fixP->fx_line,
19959 _("branch out of range"));
19961 if (fixP->fx_done || !seg->use_rela_p)
19963 newval = md_chars_to_number (buf, THUMB_SIZE);
19964 newval |= (value & 0x1ff) >> 1;
19965 md_number_to_chars (buf, newval, THUMB_SIZE);
19967 break;
19969 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
19970 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
19971 as_bad_where (fixP->fx_file, fixP->fx_line,
19972 _("branch out of range"));
19974 if (fixP->fx_done || !seg->use_rela_p)
19976 newval = md_chars_to_number (buf, THUMB_SIZE);
19977 newval |= (value & 0xfff) >> 1;
19978 md_number_to_chars (buf, newval, THUMB_SIZE);
19980 break;
19982 case BFD_RELOC_THUMB_PCREL_BRANCH20:
19983 if (fixP->fx_addsy
19984 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
19985 && !S_IS_EXTERNAL (fixP->fx_addsy)
19986 && S_IS_DEFINED (fixP->fx_addsy)
19987 && ARM_IS_FUNC (fixP->fx_addsy)
19988 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
19990 /* Force a relocation for a branch 20 bits wide. */
19991 fixP->fx_done = 0;
19993 if ((value & ~0x1fffff) && ((value & ~0x1fffff) != ~0x1fffff))
19994 as_bad_where (fixP->fx_file, fixP->fx_line,
19995 _("conditional branch out of range"));
19997 if (fixP->fx_done || !seg->use_rela_p)
19999 offsetT newval2;
20000 addressT S, J1, J2, lo, hi;
20002 S = (value & 0x00100000) >> 20;
20003 J2 = (value & 0x00080000) >> 19;
20004 J1 = (value & 0x00040000) >> 18;
20005 hi = (value & 0x0003f000) >> 12;
20006 lo = (value & 0x00000ffe) >> 1;
20008 newval = md_chars_to_number (buf, THUMB_SIZE);
20009 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
20010 newval |= (S << 10) | hi;
20011 newval2 |= (J1 << 13) | (J2 << 11) | lo;
20012 md_number_to_chars (buf, newval, THUMB_SIZE);
20013 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
20015 break;
20017 case BFD_RELOC_THUMB_PCREL_BLX:
20019 /* If there is a blx from a thumb state function to
19920 another thumb function, flip this to a bl and warn
20021 about it. */
20023 if (fixP->fx_addsy
20024 && S_IS_DEFINED (fixP->fx_addsy)
20025 && !S_IS_EXTERNAL (fixP->fx_addsy)
20026 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20027 && THUMB_IS_FUNC (fixP->fx_addsy))
20029 const char *name = S_GET_NAME (fixP->fx_addsy);
20030 as_warn_where (fixP->fx_file, fixP->fx_line,
20031 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
20032 name);
20033 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
20034 newval = newval | 0x1000;
20035 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
20036 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
20037 fixP->fx_done = 1;
20041 goto thumb_bl_common;
20043 case BFD_RELOC_THUMB_PCREL_BRANCH23:
20045 /* A bl from Thumb ISA state to an internal ARM state function
20046 is converted to a blx. */
20047 if (fixP->fx_addsy
20048 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20049 && !S_IS_EXTERNAL (fixP->fx_addsy)
20050 && S_IS_DEFINED (fixP->fx_addsy)
20051 && ARM_IS_FUNC (fixP->fx_addsy)
20052 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
20054 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
20055 newval = newval & ~0x1000;
20056 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
20057 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
20058 fixP->fx_done = 1;
20061 thumb_bl_common:
20063 #ifdef OBJ_ELF
20064 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4 &&
20065 fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
20066 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
20067 #endif
20069 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
20070 as_bad_where (fixP->fx_file, fixP->fx_line,
20071 _("branch out of range"));
20073 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
20074 /* For a BLX instruction, make sure that the relocation is rounded up
20075 to a word boundary. This follows the semantics of the instruction
20076 which specifies that bit 1 of the target address will come from bit
20077 1 of the base address. */
20078 value = (value + 1) & ~ 1;
20080 if (fixP->fx_done || !seg->use_rela_p)
20082 offsetT newval2;
20084 newval = md_chars_to_number (buf, THUMB_SIZE);
20085 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
20086 newval |= (value & 0x7fffff) >> 12;
20087 newval2 |= (value & 0xfff) >> 1;
20088 md_number_to_chars (buf, newval, THUMB_SIZE);
20089 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
20091 break;
20093 case BFD_RELOC_THUMB_PCREL_BRANCH25:
20094 if ((value & ~0x1ffffff) && ((value & ~0x1ffffff) != ~0x1ffffff))
20095 as_bad_where (fixP->fx_file, fixP->fx_line,
20096 _("branch out of range"));
20098 if (fixP->fx_done || !seg->use_rela_p)
20100 offsetT newval2;
20101 addressT S, I1, I2, lo, hi;
20103 S = (value & 0x01000000) >> 24;
20104 I1 = (value & 0x00800000) >> 23;
20105 I2 = (value & 0x00400000) >> 22;
20106 hi = (value & 0x003ff000) >> 12;
20107 lo = (value & 0x00000ffe) >> 1;
20109 I1 = !(I1 ^ S);
20110 I2 = !(I2 ^ S);
20112 newval = md_chars_to_number (buf, THUMB_SIZE);
20113 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
20114 newval |= (S << 10) | hi;
20115 newval2 |= (I1 << 13) | (I2 << 11) | lo;
20116 md_number_to_chars (buf, newval, THUMB_SIZE);
20117 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
20119 break;
20121 case BFD_RELOC_8:
20122 if (fixP->fx_done || !seg->use_rela_p)
20123 md_number_to_chars (buf, value, 1);
20124 break;
20126 case BFD_RELOC_16:
20127 if (fixP->fx_done || !seg->use_rela_p)
20128 md_number_to_chars (buf, value, 2);
20129 break;
20131 #ifdef OBJ_ELF
20132 case BFD_RELOC_ARM_TLS_GD32:
20133 case BFD_RELOC_ARM_TLS_LE32:
20134 case BFD_RELOC_ARM_TLS_IE32:
20135 case BFD_RELOC_ARM_TLS_LDM32:
20136 case BFD_RELOC_ARM_TLS_LDO32:
20137 S_SET_THREAD_LOCAL (fixP->fx_addsy);
20138 /* fall through */
20140 case BFD_RELOC_ARM_GOT32:
20141 case BFD_RELOC_ARM_GOTOFF:
20142 if (fixP->fx_done || !seg->use_rela_p)
20143 md_number_to_chars (buf, 0, 4);
20144 break;
20146 case BFD_RELOC_ARM_TARGET2:
20147 /* TARGET2 is not partial-inplace, so we need to write the
20148 addend here for REL targets, because it won't be written out
20149 during reloc processing later. */
20150 if (fixP->fx_done || !seg->use_rela_p)
20151 md_number_to_chars (buf, fixP->fx_offset, 4);
20152 break;
20153 #endif
20155 case BFD_RELOC_RVA:
20156 case BFD_RELOC_32:
20157 case BFD_RELOC_ARM_TARGET1:
20158 case BFD_RELOC_ARM_ROSEGREL32:
20159 case BFD_RELOC_ARM_SBREL32:
20160 case BFD_RELOC_32_PCREL:
20161 #ifdef TE_PE
20162 case BFD_RELOC_32_SECREL:
20163 #endif
20164 if (fixP->fx_done || !seg->use_rela_p)
20165 #ifdef TE_WINCE
20166 /* For WinCE we only do this for pcrel fixups. */
20167 if (fixP->fx_done || fixP->fx_pcrel)
20168 #endif
20169 md_number_to_chars (buf, value, 4);
20170 break;
20172 #ifdef OBJ_ELF
20173 case BFD_RELOC_ARM_PREL31:
20174 if (fixP->fx_done || !seg->use_rela_p)
20176 newval = md_chars_to_number (buf, 4) & 0x80000000;
20177 if ((value ^ (value >> 1)) & 0x40000000)
20179 as_bad_where (fixP->fx_file, fixP->fx_line,
20180 _("rel31 relocation overflow"));
20182 newval |= value & 0x7fffffff;
20183 md_number_to_chars (buf, newval, 4);
20185 break;
20186 #endif
20188 case BFD_RELOC_ARM_CP_OFF_IMM:
20189 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
20190 if (value < -1023 || value > 1023 || (value & 3))
20191 as_bad_where (fixP->fx_file, fixP->fx_line,
20192 _("co-processor offset out of range"));
20193 cp_off_common:
20194 sign = value >= 0;
20195 if (value < 0)
20196 value = -value;
20197 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
20198 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
20199 newval = md_chars_to_number (buf, INSN_SIZE);
20200 else
20201 newval = get_thumb32_insn (buf);
20202 newval &= 0xff7fff00;
20203 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
20204 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
20205 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
20206 md_number_to_chars (buf, newval, INSN_SIZE);
20207 else
20208 put_thumb32_insn (buf, newval);
20209 break;
20211 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
20212 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
20213 if (value < -255 || value > 255)
20214 as_bad_where (fixP->fx_file, fixP->fx_line,
20215 _("co-processor offset out of range"));
20216 value *= 4;
20217 goto cp_off_common;
20219 case BFD_RELOC_ARM_THUMB_OFFSET:
20220 newval = md_chars_to_number (buf, THUMB_SIZE);
20221 /* Exactly what ranges, and where the offset is inserted, depends
20222 on the type of instruction; we can establish this from the
20223 top 4 bits. */
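/* For example (an illustrative sketch): a Thumb "ldr r0, [pc, #64]" has
   4 in its top nibble, so the PC-load case below applies; 64 is
   word-aligned and within 0x3fc, and 64 >> 2 == 16 is ORed into the low
   eight bits.  */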
20224 switch (newval >> 12)
20226 case 4: /* PC load. */
20227 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
20228 forced to zero for these loads; md_pcrel_from has already
20229 compensated for this. */
20230 if (value & 3)
20231 as_bad_where (fixP->fx_file, fixP->fx_line,
20232 _("invalid offset, target not word aligned (0x%08lX)"),
20233 (((unsigned long) fixP->fx_frag->fr_address
20234 + (unsigned long) fixP->fx_where) & ~3)
20235 + (unsigned long) value);
20237 if (value & ~0x3fc)
20238 as_bad_where (fixP->fx_file, fixP->fx_line,
20239 _("invalid offset, value too big (0x%08lX)"),
20240 (long) value);
20242 newval |= value >> 2;
20243 break;
20245 case 9: /* SP load/store. */
20246 if (value & ~0x3fc)
20247 as_bad_where (fixP->fx_file, fixP->fx_line,
20248 _("invalid offset, value too big (0x%08lX)"),
20249 (long) value);
20250 newval |= value >> 2;
20251 break;
20253 case 6: /* Word load/store. */
20254 if (value & ~0x7c)
20255 as_bad_where (fixP->fx_file, fixP->fx_line,
20256 _("invalid offset, value too big (0x%08lX)"),
20257 (long) value);
20258 newval |= value << 4; /* 6 - 2. */
20259 break;
20261 case 7: /* Byte load/store. */
20262 if (value & ~0x1f)
20263 as_bad_where (fixP->fx_file, fixP->fx_line,
20264 _("invalid offset, value too big (0x%08lX)"),
20265 (long) value);
20266 newval |= value << 6;
20267 break;
20269 case 8: /* Halfword load/store. */
20270 if (value & ~0x3e)
20271 as_bad_where (fixP->fx_file, fixP->fx_line,
20272 _("invalid offset, value too big (0x%08lX)"),
20273 (long) value);
20274 newval |= value << 5; /* 6 - 1. */
20275 break;
20277 default:
20278 as_bad_where (fixP->fx_file, fixP->fx_line,
20279 "Unable to process relocation for thumb opcode: %lx",
20280 (unsigned long) newval);
20281 break;
20283 md_number_to_chars (buf, newval, THUMB_SIZE);
20284 break;
20286 case BFD_RELOC_ARM_THUMB_ADD:
20287 /* This is a complicated relocation, since we use it for all of
20288 the following immediate relocations:
20290 3bit ADD/SUB
20291 8bit ADD/SUB
20292 9bit ADD/SUB SP word-aligned
20293 10bit ADD PC/SP word-aligned
20295 The type of instruction being processed is encoded in the
20296 instruction field:
20298 0x8000 SUB
20299 0x00F0 Rd
20300 0x000F Rs
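/* A worked sketch (assuming T_OPCODE_ADD_I3 is the Thumb ADD
   (immediate, encoding T1) base opcode 0x1c00): for "add r1, r2, #5"
   the final low-register case below applies and the instruction
   becomes 0x1c00 | 1 | (2 << 3) | (5 << 6), i.e. 0x1d51.  */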
20302 newval = md_chars_to_number (buf, THUMB_SIZE);
20304 int rd = (newval >> 4) & 0xf;
20305 int rs = newval & 0xf;
20306 int subtract = !!(newval & 0x8000);
20308 /* Check for HI regs, only very restricted cases allowed:
20309 Adjusting SP, and using PC or SP to get an address. */
20310 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
20311 || (rs > 7 && rs != REG_SP && rs != REG_PC))
20312 as_bad_where (fixP->fx_file, fixP->fx_line,
20313 _("invalid Hi register with immediate"));
20315 /* If value is negative, choose the opposite instruction. */
20316 if (value < 0)
20318 value = -value;
20319 subtract = !subtract;
20320 if (value < 0)
20321 as_bad_where (fixP->fx_file, fixP->fx_line,
20322 _("immediate value out of range"));
20325 if (rd == REG_SP)
20327 if (value & ~0x1fc)
20328 as_bad_where (fixP->fx_file, fixP->fx_line,
20329 _("invalid immediate for stack address calculation"));
20330 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
20331 newval |= value >> 2;
20333 else if (rs == REG_PC || rs == REG_SP)
20335 if (subtract || value & ~0x3fc)
20336 as_bad_where (fixP->fx_file, fixP->fx_line,
20337 _("invalid immediate for address calculation (value = 0x%08lX)"),
20338 (unsigned long) value);
20339 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
20340 newval |= rd << 8;
20341 newval |= value >> 2;
20343 else if (rs == rd)
20345 if (value & ~0xff)
20346 as_bad_where (fixP->fx_file, fixP->fx_line,
20347 _("immediate value out of range"));
20348 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
20349 newval |= (rd << 8) | value;
20351 else
20353 if (value & ~0x7)
20354 as_bad_where (fixP->fx_file, fixP->fx_line,
20355 _("immediate value out of range"));
20356 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
20357 newval |= rd | (rs << 3) | (value << 6);
20360 md_number_to_chars (buf, newval, THUMB_SIZE);
20361 break;
20363 case BFD_RELOC_ARM_THUMB_IMM:
20364 newval = md_chars_to_number (buf, THUMB_SIZE);
20365 if (value < 0 || value > 255)
20366 as_bad_where (fixP->fx_file, fixP->fx_line,
20367 _("invalid immediate: %ld is out of range"),
20368 (long) value);
20369 newval |= value;
20370 md_number_to_chars (buf, newval, THUMB_SIZE);
20371 break;
20373 case BFD_RELOC_ARM_THUMB_SHIFT:
20374 /* 5bit shift value (0..32). LSL cannot take 32. */
20375 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
20376 temp = newval & 0xf800;
20377 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
20378 as_bad_where (fixP->fx_file, fixP->fx_line,
20379 _("invalid shift value: %ld"), (long) value);
20380 /* Shifts of zero must be encoded as LSL. */
20381 if (value == 0)
20382 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
20383 /* Shifts of 32 are encoded as zero. */
20384 else if (value == 32)
20385 value = 0;
20386 newval |= value << 6;
20387 md_number_to_chars (buf, newval, THUMB_SIZE);
20388 break;
20390 case BFD_RELOC_VTABLE_INHERIT:
20391 case BFD_RELOC_VTABLE_ENTRY:
20392 fixP->fx_done = 0;
20393 return;
20395 case BFD_RELOC_ARM_MOVW:
20396 case BFD_RELOC_ARM_MOVT:
20397 case BFD_RELOC_ARM_THUMB_MOVW:
20398 case BFD_RELOC_ARM_THUMB_MOVT:
20399 if (fixP->fx_done || !seg->use_rela_p)
20401 /* REL format relocations are limited to a 16-bit addend. */
20402 if (!fixP->fx_done)
20404 if (value < -0x8000 || value > 0x7fff)
20405 as_bad_where (fixP->fx_file, fixP->fx_line,
20406 _("offset out of range"));
20408 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
20409 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
20411 value >>= 16;
20414 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
20415 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
20417 newval = get_thumb32_insn (buf);
20418 newval &= 0xfbf08f00;
20419 newval |= (value & 0xf000) << 4;
20420 newval |= (value & 0x0800) << 15;
20421 newval |= (value & 0x0700) << 4;
20422 newval |= (value & 0x00ff);
20423 put_thumb32_insn (buf, newval);
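/* An illustrative sketch: for value 0x1234 the lines above scatter
   imm4 = 1 into bits 16..19, i = 0 into bit 26, imm3 = 2 into
   bits 12..14 and imm8 = 0x34 into bits 0..7 of the combined 32-bit
   encoding.  */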
20425 else
20427 newval = md_chars_to_number (buf, 4);
20428 newval &= 0xfff0f000;
20429 newval |= value & 0x0fff;
20430 newval |= (value & 0xf000) << 4;
20431 md_number_to_chars (buf, newval, 4);
20434 return;
20436 case BFD_RELOC_ARM_ALU_PC_G0_NC:
20437 case BFD_RELOC_ARM_ALU_PC_G0:
20438 case BFD_RELOC_ARM_ALU_PC_G1_NC:
20439 case BFD_RELOC_ARM_ALU_PC_G1:
20440 case BFD_RELOC_ARM_ALU_PC_G2:
20441 case BFD_RELOC_ARM_ALU_SB_G0_NC:
20442 case BFD_RELOC_ARM_ALU_SB_G0:
20443 case BFD_RELOC_ARM_ALU_SB_G1_NC:
20444 case BFD_RELOC_ARM_ALU_SB_G1:
20445 case BFD_RELOC_ARM_ALU_SB_G2:
20446 gas_assert (!fixP->fx_done);
20447 if (!seg->use_rela_p)
20449 bfd_vma insn;
20450 bfd_vma encoded_addend;
20451 bfd_vma addend_abs = abs (value);
20453 /* Check that the absolute value of the addend can be
20454 expressed as an 8-bit constant plus a rotation. */
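/* For example, an addend magnitude of 0x1400 can be expressed as the
   8-bit constant 0x05 rotated right by 22, so the call below can
   return the encoded form 0xb05; a value such as 0x101 has no such
   encoding and is rejected.  */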
20455 encoded_addend = encode_arm_immediate (addend_abs);
20456 if (encoded_addend == (unsigned int) FAIL)
20457 as_bad_where (fixP->fx_file, fixP->fx_line,
20458 _("the offset 0x%08lX is not representable"),
20459 (unsigned long) addend_abs);
20461 /* Extract the instruction. */
20462 insn = md_chars_to_number (buf, INSN_SIZE);
20464 /* If the addend is positive, use an ADD instruction.
20465 Otherwise use a SUB. Take care not to destroy the S bit. */
20466 insn &= 0xff1fffff;
20467 if (value < 0)
20468 insn |= 1 << 22;
20469 else
20470 insn |= 1 << 23;
20472 /* Place the encoded addend into the first 12 bits of the
20473 instruction. */
20474 insn &= 0xfffff000;
20475 insn |= encoded_addend;
20477 /* Update the instruction. */
20478 md_number_to_chars (buf, insn, INSN_SIZE);
20480 break;
20482 case BFD_RELOC_ARM_LDR_PC_G0:
20483 case BFD_RELOC_ARM_LDR_PC_G1:
20484 case BFD_RELOC_ARM_LDR_PC_G2:
20485 case BFD_RELOC_ARM_LDR_SB_G0:
20486 case BFD_RELOC_ARM_LDR_SB_G1:
20487 case BFD_RELOC_ARM_LDR_SB_G2:
20488 gas_assert (!fixP->fx_done);
20489 if (!seg->use_rela_p)
20491 bfd_vma insn;
20492 bfd_vma addend_abs = abs (value);
20494 /* Check that the absolute value of the addend can be
20495 encoded in 12 bits. */
20496 if (addend_abs >= 0x1000)
20497 as_bad_where (fixP->fx_file, fixP->fx_line,
20498 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
20499 (unsigned long) addend_abs);
20501 /* Extract the instruction. */
20502 insn = md_chars_to_number (buf, INSN_SIZE);
20504 /* If the addend is negative, clear bit 23 of the instruction.
20505 Otherwise set it. */
20506 if (value < 0)
20507 insn &= ~(1 << 23);
20508 else
20509 insn |= 1 << 23;
20511 /* Place the absolute value of the addend into the first 12 bits
20512 of the instruction. */
20513 insn &= 0xfffff000;
20514 insn |= addend_abs;
20516 /* Update the instruction. */
20517 md_number_to_chars (buf, insn, INSN_SIZE);
20519 break;
20521 case BFD_RELOC_ARM_LDRS_PC_G0:
20522 case BFD_RELOC_ARM_LDRS_PC_G1:
20523 case BFD_RELOC_ARM_LDRS_PC_G2:
20524 case BFD_RELOC_ARM_LDRS_SB_G0:
20525 case BFD_RELOC_ARM_LDRS_SB_G1:
20526 case BFD_RELOC_ARM_LDRS_SB_G2:
20527 gas_assert (!fixP->fx_done);
20528 if (!seg->use_rela_p)
20530 bfd_vma insn;
20531 bfd_vma addend_abs = abs (value);
20533 /* Check that the absolute value of the addend can be
20534 encoded in 8 bits. */
20535 if (addend_abs >= 0x100)
20536 as_bad_where (fixP->fx_file, fixP->fx_line,
20537 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
20538 (unsigned long) addend_abs);
20540 /* Extract the instruction. */
20541 insn = md_chars_to_number (buf, INSN_SIZE);
20543 /* If the addend is negative, clear bit 23 of the instruction.
20544 Otherwise set it. */
20545 if (value < 0)
20546 insn &= ~(1 << 23);
20547 else
20548 insn |= 1 << 23;
20550 /* Place the first four bits of the absolute value of the addend
20551 into the first 4 bits of the instruction, and the remaining
20552 four into bits 8 .. 11. */
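/* For instance, an addend magnitude of 0x5a is split below into 0xa in
   bits 0..3 and 0x5 in bits 8..11, giving 0x50a.  */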
20553 insn &= 0xfffff0f0;
20554 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
20556 /* Update the instruction. */
20557 md_number_to_chars (buf, insn, INSN_SIZE);
20559 break;
20561 case BFD_RELOC_ARM_LDC_PC_G0:
20562 case BFD_RELOC_ARM_LDC_PC_G1:
20563 case BFD_RELOC_ARM_LDC_PC_G2:
20564 case BFD_RELOC_ARM_LDC_SB_G0:
20565 case BFD_RELOC_ARM_LDC_SB_G1:
20566 case BFD_RELOC_ARM_LDC_SB_G2:
20567 gas_assert (!fixP->fx_done);
20568 if (!seg->use_rela_p)
20570 bfd_vma insn;
20571 bfd_vma addend_abs = abs (value);
20573 /* Check that the absolute value of the addend is a multiple of
20574 four and, when divided by four, fits in 8 bits. */
20575 if (addend_abs & 0x3)
20576 as_bad_where (fixP->fx_file, fixP->fx_line,
20577 _("bad offset 0x%08lX (must be word-aligned)"),
20578 (unsigned long) addend_abs);
20580 if ((addend_abs >> 2) > 0xff)
20581 as_bad_where (fixP->fx_file, fixP->fx_line,
20582 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
20583 (unsigned long) addend_abs);
20585 /* Extract the instruction. */
20586 insn = md_chars_to_number (buf, INSN_SIZE);
20588 /* If the addend is negative, clear bit 23 of the instruction.
20589 Otherwise set it. */
20590 if (value < 0)
20591 insn &= ~(1 << 23);
20592 else
20593 insn |= 1 << 23;
20595 /* Place the addend (divided by four) into the first eight
20596 bits of the instruction. */
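/* For example, a word-aligned addend magnitude of 40 (0x28) is stored
   below as 40 >> 2 == 10 (0xa) in the low eight bits.  */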
20597 insn &= 0xfffffff0;
20598 insn |= addend_abs >> 2;
20600 /* Update the instruction. */
20601 md_number_to_chars (buf, insn, INSN_SIZE);
20603 break;
20605 case BFD_RELOC_ARM_V4BX:
20606 /* This will need to go in the object file. */
20607 fixP->fx_done = 0;
20608 break;
20610 case BFD_RELOC_UNUSED:
20611 default:
20612 as_bad_where (fixP->fx_file, fixP->fx_line,
20613 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
20617 /* Translate internal representation of relocation info to BFD target
20618 format. */
20620 arelent *
20621 tc_gen_reloc (asection *section, fixS *fixp)
20623 arelent * reloc;
20624 bfd_reloc_code_real_type code;
20626 reloc = xmalloc (sizeof (arelent));
20628 reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
20629 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
20630 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
20632 if (fixp->fx_pcrel)
20634 if (section->use_rela_p)
20635 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
20636 else
20637 fixp->fx_offset = reloc->address;
20639 reloc->addend = fixp->fx_offset;
20641 switch (fixp->fx_r_type)
20643 case BFD_RELOC_8:
20644 if (fixp->fx_pcrel)
20646 code = BFD_RELOC_8_PCREL;
20647 break;
20650 case BFD_RELOC_16:
20651 if (fixp->fx_pcrel)
20653 code = BFD_RELOC_16_PCREL;
20654 break;
20657 case BFD_RELOC_32:
20658 if (fixp->fx_pcrel)
20660 code = BFD_RELOC_32_PCREL;
20661 break;
20664 case BFD_RELOC_ARM_MOVW:
20665 if (fixp->fx_pcrel)
20667 code = BFD_RELOC_ARM_MOVW_PCREL;
20668 break;
20671 case BFD_RELOC_ARM_MOVT:
20672 if (fixp->fx_pcrel)
20674 code = BFD_RELOC_ARM_MOVT_PCREL;
20675 break;
20678 case BFD_RELOC_ARM_THUMB_MOVW:
20679 if (fixp->fx_pcrel)
20681 code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
20682 break;
20685 case BFD_RELOC_ARM_THUMB_MOVT:
20686 if (fixp->fx_pcrel)
20688 code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
20689 break;
20692 case BFD_RELOC_NONE:
20693 case BFD_RELOC_ARM_PCREL_BRANCH:
20694 case BFD_RELOC_ARM_PCREL_BLX:
20695 case BFD_RELOC_RVA:
20696 case BFD_RELOC_THUMB_PCREL_BRANCH7:
20697 case BFD_RELOC_THUMB_PCREL_BRANCH9:
20698 case BFD_RELOC_THUMB_PCREL_BRANCH12:
20699 case BFD_RELOC_THUMB_PCREL_BRANCH20:
20700 case BFD_RELOC_THUMB_PCREL_BRANCH23:
20701 case BFD_RELOC_THUMB_PCREL_BRANCH25:
20702 case BFD_RELOC_VTABLE_ENTRY:
20703 case BFD_RELOC_VTABLE_INHERIT:
20704 #ifdef TE_PE
20705 case BFD_RELOC_32_SECREL:
20706 #endif
20707 code = fixp->fx_r_type;
20708 break;
20710 case BFD_RELOC_THUMB_PCREL_BLX:
20711 #ifdef OBJ_ELF
20712 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
20713 code = BFD_RELOC_THUMB_PCREL_BRANCH23;
20714 else
20715 #endif
20716 code = BFD_RELOC_THUMB_PCREL_BLX;
20717 break;
20719 case BFD_RELOC_ARM_LITERAL:
20720 case BFD_RELOC_ARM_HWLITERAL:
20721 /* If this is called then a literal has
20722 been referenced across a section boundary. */
20723 as_bad_where (fixp->fx_file, fixp->fx_line,
20724 _("literal referenced across section boundary"));
20725 return NULL;
20727 #ifdef OBJ_ELF
20728 case BFD_RELOC_ARM_GOT32:
20729 case BFD_RELOC_ARM_GOTOFF:
20730 case BFD_RELOC_ARM_PLT32:
20731 case BFD_RELOC_ARM_TARGET1:
20732 case BFD_RELOC_ARM_ROSEGREL32:
20733 case BFD_RELOC_ARM_SBREL32:
20734 case BFD_RELOC_ARM_PREL31:
20735 case BFD_RELOC_ARM_TARGET2:
20736 case BFD_RELOC_ARM_TLS_LE32:
20737 case BFD_RELOC_ARM_TLS_LDO32:
20738 case BFD_RELOC_ARM_PCREL_CALL:
20739 case BFD_RELOC_ARM_PCREL_JUMP:
20740 case BFD_RELOC_ARM_ALU_PC_G0_NC:
20741 case BFD_RELOC_ARM_ALU_PC_G0:
20742 case BFD_RELOC_ARM_ALU_PC_G1_NC:
20743 case BFD_RELOC_ARM_ALU_PC_G1:
20744 case BFD_RELOC_ARM_ALU_PC_G2:
20745 case BFD_RELOC_ARM_LDR_PC_G0:
20746 case BFD_RELOC_ARM_LDR_PC_G1:
20747 case BFD_RELOC_ARM_LDR_PC_G2:
20748 case BFD_RELOC_ARM_LDRS_PC_G0:
20749 case BFD_RELOC_ARM_LDRS_PC_G1:
20750 case BFD_RELOC_ARM_LDRS_PC_G2:
20751 case BFD_RELOC_ARM_LDC_PC_G0:
20752 case BFD_RELOC_ARM_LDC_PC_G1:
20753 case BFD_RELOC_ARM_LDC_PC_G2:
20754 case BFD_RELOC_ARM_ALU_SB_G0_NC:
20755 case BFD_RELOC_ARM_ALU_SB_G0:
20756 case BFD_RELOC_ARM_ALU_SB_G1_NC:
20757 case BFD_RELOC_ARM_ALU_SB_G1:
20758 case BFD_RELOC_ARM_ALU_SB_G2:
20759 case BFD_RELOC_ARM_LDR_SB_G0:
20760 case BFD_RELOC_ARM_LDR_SB_G1:
20761 case BFD_RELOC_ARM_LDR_SB_G2:
20762 case BFD_RELOC_ARM_LDRS_SB_G0:
20763 case BFD_RELOC_ARM_LDRS_SB_G1:
20764 case BFD_RELOC_ARM_LDRS_SB_G2:
20765 case BFD_RELOC_ARM_LDC_SB_G0:
20766 case BFD_RELOC_ARM_LDC_SB_G1:
20767 case BFD_RELOC_ARM_LDC_SB_G2:
20768 case BFD_RELOC_ARM_V4BX:
20769 code = fixp->fx_r_type;
20770 break;
20772 case BFD_RELOC_ARM_TLS_GD32:
20773 case BFD_RELOC_ARM_TLS_IE32:
20774 case BFD_RELOC_ARM_TLS_LDM32:
20775 /* BFD will include the symbol's address in the addend.
20776 But we don't want that, so subtract it out again here. */
20777 if (!S_IS_COMMON (fixp->fx_addsy))
20778 reloc->addend -= (*reloc->sym_ptr_ptr)->value;
20779 code = fixp->fx_r_type;
20780 break;
20781 #endif
20783 case BFD_RELOC_ARM_IMMEDIATE:
20784 as_bad_where (fixp->fx_file, fixp->fx_line,
20785 _("internal relocation (type: IMMEDIATE) not fixed up"));
20786 return NULL;
20788 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
20789 as_bad_where (fixp->fx_file, fixp->fx_line,
20790 _("ADRL used for a symbol not defined in the same file"));
20791 return NULL;
20793 case BFD_RELOC_ARM_OFFSET_IMM:
20794 if (section->use_rela_p)
20796 code = fixp->fx_r_type;
20797 break;
20800 if (fixp->fx_addsy != NULL
20801 && !S_IS_DEFINED (fixp->fx_addsy)
20802 && S_IS_LOCAL (fixp->fx_addsy))
20804 as_bad_where (fixp->fx_file, fixp->fx_line,
20805 _("undefined local label `%s'"),
20806 S_GET_NAME (fixp->fx_addsy));
20807 return NULL;
20810 as_bad_where (fixp->fx_file, fixp->fx_line,
20811 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
20812 return NULL;
20814 default:
20816 char * type;
20818 switch (fixp->fx_r_type)
20820 case BFD_RELOC_NONE: type = "NONE"; break;
20821 case BFD_RELOC_ARM_OFFSET_IMM8: type = "OFFSET_IMM8"; break;
20822 case BFD_RELOC_ARM_SHIFT_IMM: type = "SHIFT_IMM"; break;
20823 case BFD_RELOC_ARM_SMC: type = "SMC"; break;
20824 case BFD_RELOC_ARM_SWI: type = "SWI"; break;
20825 case BFD_RELOC_ARM_MULTI: type = "MULTI"; break;
20826 case BFD_RELOC_ARM_CP_OFF_IMM: type = "CP_OFF_IMM"; break;
20827 case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
20828 case BFD_RELOC_ARM_THUMB_ADD: type = "THUMB_ADD"; break;
20829 case BFD_RELOC_ARM_THUMB_SHIFT: type = "THUMB_SHIFT"; break;
20830 case BFD_RELOC_ARM_THUMB_IMM: type = "THUMB_IMM"; break;
20831 case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
20832 default: type = _("<unknown>"); break;
20834 as_bad_where (fixp->fx_file, fixp->fx_line,
20835 _("cannot represent %s relocation in this object file format"),
20836 type);
20837 return NULL;
20841 #ifdef OBJ_ELF
20842 if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
20843 && GOT_symbol
20844 && fixp->fx_addsy == GOT_symbol)
20846 code = BFD_RELOC_ARM_GOTPC;
20847 reloc->addend = fixp->fx_offset = reloc->address;
20849 #endif
20851 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
20853 if (reloc->howto == NULL)
20855 as_bad_where (fixp->fx_file, fixp->fx_line,
20856 _("cannot represent %s relocation in this object file format"),
20857 bfd_get_reloc_code_name (code));
20858 return NULL;
20861 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
20862 vtable entry to be used in the relocation's section offset. */
20863 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
20864 reloc->address = fixp->fx_offset;
20866 return reloc;
20869 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
20871 void
20872 cons_fix_new_arm (fragS * frag,
20873 int where,
20874 int size,
20875 expressionS * exp)
20877 bfd_reloc_code_real_type type;
20878 int pcrel = 0;
20880 /* Pick a reloc.
20881 FIXME: @@ Should look at CPU word size. */
20882 switch (size)
20884 case 1:
20885 type = BFD_RELOC_8;
20886 break;
20887 case 2:
20888 type = BFD_RELOC_16;
20889 break;
20890 case 4:
20891 default:
20892 type = BFD_RELOC_32;
20893 break;
20894 case 8:
20895 type = BFD_RELOC_64;
20896 break;
20899 #ifdef TE_PE
20900 if (exp->X_op == O_secrel)
20902 exp->X_op = O_symbol;
20903 type = BFD_RELOC_32_SECREL;
20905 #endif
20907 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
20910 #if defined (OBJ_COFF)
20911 void
20912 arm_validate_fix (fixS * fixP)
20914 /* If the destination of the branch is a defined symbol which does not have
20915 the THUMB_FUNC attribute, then we must be calling a function which has
20916 the (interfacearm) attribute. We look for the Thumb entry point to that
20917 function and change the branch to refer to that function instead. */
20918 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
20919 && fixP->fx_addsy != NULL
20920 && S_IS_DEFINED (fixP->fx_addsy)
20921 && ! THUMB_IS_FUNC (fixP->fx_addsy))
20923 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
20926 #endif
20930 arm_force_relocation (struct fix * fixp)
20932 #if defined (OBJ_COFF) && defined (TE_PE)
20933 if (fixp->fx_r_type == BFD_RELOC_RVA)
20934 return 1;
20935 #endif
20937 /* If we have a call or a branch to a function in ARM ISA mode from
20938 a thumb function, or vice versa, force the relocation. These relocations
20939 are cleared off for some cores that might have blx and for which simple
20940 transformations are possible. */
20942 #ifdef OBJ_ELF
20943 switch (fixp->fx_r_type)
20945 case BFD_RELOC_ARM_PCREL_JUMP:
20946 case BFD_RELOC_ARM_PCREL_CALL:
20947 case BFD_RELOC_THUMB_PCREL_BLX:
20948 if (THUMB_IS_FUNC (fixp->fx_addsy))
20949 return 1;
20950 break;
20952 case BFD_RELOC_ARM_PCREL_BLX:
20953 case BFD_RELOC_THUMB_PCREL_BRANCH25:
20954 case BFD_RELOC_THUMB_PCREL_BRANCH20:
20955 case BFD_RELOC_THUMB_PCREL_BRANCH23:
20956 if (ARM_IS_FUNC (fixp->fx_addsy))
20957 return 1;
20958 break;
20960 default:
20961 break;
20963 #endif
20965 /* Resolve these relocations even if the symbol is extern or weak. */
20966 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
20967 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
20968 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
20969 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
20970 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
20971 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
20972 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12)
20973 return 0;
20975 /* Always leave these relocations for the linker. */
20976 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
20977 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
20978 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
20979 return 1;
20981 /* Always generate relocations against function symbols. */
20982 if (fixp->fx_r_type == BFD_RELOC_32
20983 && fixp->fx_addsy
20984 && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
20985 return 1;
20987 return generic_force_reloc (fixp);
20990 #if defined (OBJ_ELF) || defined (OBJ_COFF)
20991 /* Relocations against function names must be left unadjusted,
20992 so that the linker can use this information to generate interworking
20993 stubs. The MIPS version of this function
20994 also prevents relocations that are mips-16 specific, but I do not
20995 know why it does this.
20997 FIXME:
20998 There is one other problem that ought to be addressed here, but
20999 which currently is not: Taking the address of a label (rather
21000 than a function) and then later jumping to that address. Such
21001 addresses also ought to have their bottom bit set (assuming that
21002 they reside in Thumb code), but at the moment they will not. */
21004 bfd_boolean
21005 arm_fix_adjustable (fixS * fixP)
21007 if (fixP->fx_addsy == NULL)
21008 return 1;
21010 /* Preserve relocations against symbols with function type. */
21011 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
21012 return FALSE;
21014 if (THUMB_IS_FUNC (fixP->fx_addsy)
21015 && fixP->fx_subsy == NULL)
21016 return FALSE;
21018 /* We need the symbol name for the VTABLE entries. */
21019 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
21020 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
21021 return FALSE;
21023 /* Don't allow symbols to be discarded on GOT related relocs. */
21024 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
21025 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
21026 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
21027 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
21028 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
21029 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
21030 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
21031 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
21032 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
21033 return FALSE;
21035 /* Similarly for group relocations. */
21036 if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
21037 && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
21038 || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
21039 return FALSE;
21041 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
21042 if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
21043 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
21044 || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
21045 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
21046 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
21047 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
21048 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
21049 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
21050 return FALSE;
21052 return TRUE;
21054 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
21056 #ifdef OBJ_ELF
21058 const char *
21059 elf32_arm_target_format (void)
21061 #ifdef TE_SYMBIAN
21062 return (target_big_endian
21063 ? "elf32-bigarm-symbian"
21064 : "elf32-littlearm-symbian");
21065 #elif defined (TE_VXWORKS)
21066 return (target_big_endian
21067 ? "elf32-bigarm-vxworks"
21068 : "elf32-littlearm-vxworks");
21069 #else
21070 if (target_big_endian)
21071 return "elf32-bigarm";
21072 else
21073 return "elf32-littlearm";
21074 #endif
21077 void
21078 armelf_frob_symbol (symbolS * symp,
21079 int * puntp)
21081 elf_frob_symbol (symp, puntp);
21083 #endif
21085 /* MD interface: Finalization. */
21087 void
21088 arm_cleanup (void)
21090 literal_pool * pool;
21092 /* Ensure that all the IT blocks are properly closed. */
21093 check_it_blocks_finished ();
21095 for (pool = list_of_pools; pool; pool = pool->next)
21097 /* Put it at the end of the relevant section. */
21098 subseg_set (pool->section, pool->sub_section);
21099 #ifdef OBJ_ELF
21100 arm_elf_change_section ();
21101 #endif
21102 s_ltorg (0);
21106 #ifdef OBJ_ELF
21107 /* Remove any excess mapping symbols generated for alignment frags in
21108 SEC. We may have created a mapping symbol before a zero byte
21109 alignment; remove it if there's a mapping symbol after the
21110 alignment. */
21111 static void
21112 check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
21113 void *dummy ATTRIBUTE_UNUSED)
21115 segment_info_type *seginfo = seg_info (sec);
21116 fragS *fragp;
21118 if (seginfo == NULL || seginfo->frchainP == NULL)
21119 return;
21121 for (fragp = seginfo->frchainP->frch_root;
21122 fragp != NULL;
21123 fragp = fragp->fr_next)
21125 symbolS *sym = fragp->tc_frag_data.last_map;
21126 fragS *next = fragp->fr_next;
21128 /* Variable-sized frags have been converted to fixed size by
21129 this point. But if this was variable-sized to start with,
21130 there will be a fixed-size frag after it. So don't handle
21131 next == NULL. */
21132 if (sym == NULL || next == NULL)
21133 continue;
21135 if (S_GET_VALUE (sym) < next->fr_address)
21136 /* Not at the end of this frag. */
21137 continue;
21138 know (S_GET_VALUE (sym) == next->fr_address);
21142 if (next->tc_frag_data.first_map != NULL)
21144 /* Next frag starts with a mapping symbol. Discard this
21145 one. */
21146 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
21147 break;
21150 if (next->fr_next == NULL)
21152 /* This mapping symbol is at the end of the section. Discard
21153 it. */
21154 know (next->fr_fix == 0 && next->fr_var == 0);
21155 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
21156 break;
21159 /* As long as we have empty frags without any mapping symbols,
21160 keep looking. */
21161 /* If the next frag is non-empty and does not start with a
21162 mapping symbol, then this mapping symbol is required. */
21163 if (next->fr_address != next->fr_next->fr_address)
21164 break;
21166 next = next->fr_next;
21168 while (next != NULL);
21171 #endif
21173 /* Adjust the symbol table. This marks Thumb symbols as distinct from
21174 ARM ones. */
21176 void
21177 arm_adjust_symtab (void)
21179 #ifdef OBJ_COFF
21180 symbolS * sym;
21182 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
21184 if (ARM_IS_THUMB (sym))
21186 if (THUMB_IS_FUNC (sym))
21188 /* Mark the symbol as a Thumb function. */
21189 if ( S_GET_STORAGE_CLASS (sym) == C_STAT
21190 || S_GET_STORAGE_CLASS (sym) == C_LABEL) /* This can happen! */
21191 S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);
21193 else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
21194 S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
21195 else
21196 as_bad (_("%s: unexpected function type: %d"),
21197 S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
21199 else switch (S_GET_STORAGE_CLASS (sym))
21201 case C_EXT:
21202 S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
21203 break;
21204 case C_STAT:
21205 S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
21206 break;
21207 case C_LABEL:
21208 S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
21209 break;
21210 default:
21211 /* Do nothing. */
21212 break;
21216 if (ARM_IS_INTERWORK (sym))
21217 coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
21219 #endif
21220 #ifdef OBJ_ELF
21221 symbolS * sym;
21222 char bind;
21224 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
21226 if (ARM_IS_THUMB (sym))
21228 elf_symbol_type * elf_sym;
21230 elf_sym = elf_symbol (symbol_get_bfdsym (sym));
21231 bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);
21233 if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
21234 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
21236 /* If it's a .thumb_func, declare it as such;
21237 otherwise tag the label as .code 16. */
21238 if (THUMB_IS_FUNC (sym))
21239 elf_sym->internal_elf_sym.st_info =
21240 ELF_ST_INFO (bind, STT_ARM_TFUNC);
21241 else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
21242 elf_sym->internal_elf_sym.st_info =
21243 ELF_ST_INFO (bind, STT_ARM_16BIT);
21248 /* Remove any overlapping mapping symbols generated by alignment frags. */
21249 bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
21250 #endif
21253 /* MD interface: Initialization. */
21255 static void
21256 set_constant_flonums (void)
21258 int i;
21260 for (i = 0; i < NUM_FLOAT_VALS; i++)
21261 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
21262 abort ();
21265 /* Auto-select Thumb mode if it's the only available instruction set for the
21266 given architecture. */
21268 static void
21269 autoselect_thumb_from_cpu_variant (void)
21271 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
21272 opcode_select (16);
21275 void
21276 md_begin (void)
21278 unsigned mach;
21279 unsigned int i;
21281 if ( (arm_ops_hsh = hash_new ()) == NULL
21282 || (arm_cond_hsh = hash_new ()) == NULL
21283 || (arm_shift_hsh = hash_new ()) == NULL
21284 || (arm_psr_hsh = hash_new ()) == NULL
21285 || (arm_v7m_psr_hsh = hash_new ()) == NULL
21286 || (arm_reg_hsh = hash_new ()) == NULL
21287 || (arm_reloc_hsh = hash_new ()) == NULL
21288 || (arm_barrier_opt_hsh = hash_new ()) == NULL)
21289 as_fatal (_("virtual memory exhausted"));
21291 for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
21292 hash_insert (arm_ops_hsh, insns[i].template, (void *) (insns + i));
21293 for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
21294 hash_insert (arm_cond_hsh, conds[i].template, (void *) (conds + i));
21295 for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
21296 hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
21297 for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
21298 hash_insert (arm_psr_hsh, psrs[i].template, (void *) (psrs + i));
21299 for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
21300 hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template, (void *) (v7m_psrs + i));
21301 for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
21302 hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
21303 for (i = 0;
21304 i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
21305 i++)
21306 hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template,
21307 (void *) (barrier_opt_names + i));
21308 #ifdef OBJ_ELF
21309 for (i = 0; i < sizeof (reloc_names) / sizeof (struct reloc_entry); i++)
21310 hash_insert (arm_reloc_hsh, reloc_names[i].name, (void *) (reloc_names + i));
21311 #endif
21313 set_constant_flonums ();
21315 /* Set the cpu variant based on the command-line options. We prefer
21316 -mcpu= over -march= if both are set (as for GCC); and we prefer
21317 -mfpu= over any other way of setting the floating point unit.
21318 Use of legacy options with new options is faulted. */
21319 if (legacy_cpu)
21321 if (mcpu_cpu_opt || march_cpu_opt)
21322 as_bad (_("use of old and new-style options to set CPU type"));
21324 mcpu_cpu_opt = legacy_cpu;
21326 else if (!mcpu_cpu_opt)
21327 mcpu_cpu_opt = march_cpu_opt;
21329 if (legacy_fpu)
21331 if (mfpu_opt)
21332 as_bad (_("use of old and new-style options to set FPU type"));
21334 mfpu_opt = legacy_fpu;
21336 else if (!mfpu_opt)
21338 #if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
21339 || defined (TE_NetBSD) || defined (TE_VXWORKS))
21340 /* Some environments specify a default FPU. If they don't, infer it
21341 from the processor. */
21342 if (mcpu_fpu_opt)
21343 mfpu_opt = mcpu_fpu_opt;
21344 else
21345 mfpu_opt = march_fpu_opt;
21346 #else
21347 mfpu_opt = &fpu_default;
21348 #endif
21351 if (!mfpu_opt)
21353 if (mcpu_cpu_opt != NULL)
21354 mfpu_opt = &fpu_default;
21355 else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
21356 mfpu_opt = &fpu_arch_vfp_v2;
21357 else
21358 mfpu_opt = &fpu_arch_fpa;
21361 #ifdef CPU_DEFAULT
21362 if (!mcpu_cpu_opt)
21364 mcpu_cpu_opt = &cpu_default;
21365 selected_cpu = cpu_default;
21367 #else
21368 if (mcpu_cpu_opt)
21369 selected_cpu = *mcpu_cpu_opt;
21370 else
21371 mcpu_cpu_opt = &arm_arch_any;
21372 #endif
21374 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
21376 autoselect_thumb_from_cpu_variant ();
21378 arm_arch_used = thumb_arch_used = arm_arch_none;
21380 #if defined OBJ_COFF || defined OBJ_ELF
21382 unsigned int flags = 0;
21384 #if defined OBJ_ELF
21385 flags = meabi_flags;
21387 switch (meabi_flags)
21389 case EF_ARM_EABI_UNKNOWN:
21390 #endif
21391 /* Set the flags in the private structure. */
21392 if (uses_apcs_26) flags |= F_APCS26;
21393 if (support_interwork) flags |= F_INTERWORK;
21394 if (uses_apcs_float) flags |= F_APCS_FLOAT;
21395 if (pic_code) flags |= F_PIC;
21396 if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
21397 flags |= F_SOFT_FLOAT;
21399 switch (mfloat_abi_opt)
21401 case ARM_FLOAT_ABI_SOFT:
21402 case ARM_FLOAT_ABI_SOFTFP:
21403 flags |= F_SOFT_FLOAT;
21404 break;
21406 case ARM_FLOAT_ABI_HARD:
21407 if (flags & F_SOFT_FLOAT)
21408 as_bad (_("hard-float conflicts with specified fpu"));
21409 break;
21412 /* Using pure-endian doubles (even if soft-float). */
21413 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
21414 flags |= F_VFP_FLOAT;
21416 #if defined OBJ_ELF
21417 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
21418 flags |= EF_ARM_MAVERICK_FLOAT;
21419 break;
21421 case EF_ARM_EABI_VER4:
21422 case EF_ARM_EABI_VER5:
21423 /* No additional flags to set. */
21424 break;
21426 default:
21427 abort ();
21429 #endif
21430 bfd_set_private_flags (stdoutput, flags);
21432     /* We have run out of flags in the COFF header to encode the
21433 status of ATPCS support, so instead we create a dummy,
21434 empty, debug section called .arm.atpcs. */
21435 if (atpcs)
21437 asection * sec;
21439 sec = bfd_make_section (stdoutput, ".arm.atpcs");
21441 if (sec != NULL)
21443 bfd_set_section_flags
21444 (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
21445 bfd_set_section_size (stdoutput, sec, 0);
21446 bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
21450 #endif
21452 /* Record the CPU type as well. */
21453 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
21454 mach = bfd_mach_arm_iWMMXt2;
21455 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
21456 mach = bfd_mach_arm_iWMMXt;
21457 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
21458 mach = bfd_mach_arm_XScale;
21459 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
21460 mach = bfd_mach_arm_ep9312;
21461 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
21462 mach = bfd_mach_arm_5TE;
21463 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
21465 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
21466 mach = bfd_mach_arm_5T;
21467 else
21468 mach = bfd_mach_arm_5;
21470 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
21472 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
21473 mach = bfd_mach_arm_4T;
21474 else
21475 mach = bfd_mach_arm_4;
21477 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
21478 mach = bfd_mach_arm_3M;
21479 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
21480 mach = bfd_mach_arm_3;
21481 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
21482 mach = bfd_mach_arm_2a;
21483 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
21484 mach = bfd_mach_arm_2;
21485 else
21486 mach = bfd_mach_arm_unknown;
21488 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
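/* As an illustration of the defaulting logic above, assuming a target
   without a built-in FPU default (i.e. none of EABI_DEFAULT, TE_LINUX,
   TE_NetBSD or TE_VXWORKS is defined):

       as -mcpu=arm926ej-s foo.s

   -mcpu= fills in mcpu_cpu_opt and mcpu_fpu_opt from the arm_cpus table
   below (ARM_ARCH_V5TEJ with a default FPU of FPU_ARCH_VFP_V2).  With no
   -mfpu= given, mfpu_opt falls back to that default, the two feature sets
   are merged into cpu_variant, and the v5E feature bit makes the code
   above pick bfd_mach_arm_5TE for bfd_set_arch_mach.  */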
21491 /* Command line processing. */
21493 /* md_parse_option
21494 Invocation line includes a switch not recognized by the base assembler.
21495 See if it's a processor-specific option.
21497 This routine is somewhat complicated by the need for backwards
21498 compatibility (since older releases of gcc can't be changed).
21499 The new options try to make the interface as compatible as
21500 possible with GCC.
21502 New options (supported) are:
21504 -mcpu=<cpu name> Assemble for selected processor
21505 -march=<architecture name> Assemble for selected architecture
21506 -mfpu=<fpu architecture> Assemble for selected FPU.
21507 -EB/-mbig-endian Big-endian
21508 -EL/-mlittle-endian Little-endian
21509 -k Generate PIC code
21510 -mthumb Start in Thumb mode
21511 -mthumb-interwork Code supports ARM/Thumb interworking
21513 -m[no-]warn-deprecated Warn about deprecated features
21515 For now we will also provide support for:
21517 -mapcs-32 32-bit Program counter
21518 -mapcs-26 26-bit Program counter
21519    -mapcs-float              Floats passed in FP registers
21520 -mapcs-reentrant Reentrant code
21521 -matpcs
21522 (sometime these will probably be replaced with -mapcs=<list of options>
21523 and -matpcs=<list of options>)
21525    The remaining options are only supported for backwards compatibility.
21526 Cpu variants, the arm part is optional:
21527 -m[arm]1 Currently not supported.
21528 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
21529 -m[arm]3 Arm 3 processor
21530 -m[arm]6[xx], Arm 6 processors
21531 -m[arm]7[xx][t][[d]m] Arm 7 processors
21532 -m[arm]8[10] Arm 8 processors
21533 -m[arm]9[20][tdmi] Arm 9 processors
21534 -mstrongarm[110[0]] StrongARM processors
21535 -mxscale XScale processors
21536 -m[arm]v[2345[t[e]]] Arm architectures
21537 -mall All (except the ARM1)
21538 FP variants:
21539 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
21540 -mfpe-old (No float load/store multiples)
21541 -mvfpxd VFP Single precision
21542 -mvfp All VFP
21543 -mno-fpu Disable all floating point instructions
21545 The following CPU names are recognized:
21546 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
21547 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
21548    arm700i, arm710, arm710t, arm720, arm720t, arm740t, arm710c,
21549 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
21550 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
21551    arm10t, arm10e, arm1020t, arm1020e, arm10200e,
21552 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
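/* For example, typical new-style invocations built from the options
   documented above might look like this (illustrative only; the CPU,
   architecture and FPU names must come from the arm_cpus, arm_archs and
   arm_fpus tables below):

       as -mcpu=arm7tdmi -mthumb -mthumb-interwork -o foo.o foo.s
       as -march=armv5te -mfpu=vfpv2 -mfloat-abi=softfp -o bar.o bar.s
       as -mbig-endian -k -o baz.o baz.s
*/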
21556 const char * md_shortopts = "m:k";
21558 #ifdef ARM_BI_ENDIAN
21559 #define OPTION_EB (OPTION_MD_BASE + 0)
21560 #define OPTION_EL (OPTION_MD_BASE + 1)
21561 #else
21562 #if TARGET_BYTES_BIG_ENDIAN
21563 #define OPTION_EB (OPTION_MD_BASE + 0)
21564 #else
21565 #define OPTION_EL (OPTION_MD_BASE + 1)
21566 #endif
21567 #endif
21568 #define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
21570 struct option md_longopts[] =
21572 #ifdef OPTION_EB
21573 {"EB", no_argument, NULL, OPTION_EB},
21574 #endif
21575 #ifdef OPTION_EL
21576 {"EL", no_argument, NULL, OPTION_EL},
21577 #endif
21578 {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
21579 {NULL, no_argument, NULL, 0}
21582 size_t md_longopts_size = sizeof (md_longopts);
21584 struct arm_option_table
21586 char *option; /* Option name to match. */
21587 char *help; /* Help information. */
21588 int *var; /* Variable to change. */
21589 int value; /* What to change it to. */
21590 char *deprecated; /* If non-null, print this message. */
21593 struct arm_option_table arm_opts[] =
21595 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
21596 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
21597 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
21598 &support_interwork, 1, NULL},
21599 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
21600 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
21601 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
21602 1, NULL},
21603 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
21604 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
21605 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
21606 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
21607 NULL},
21609   /* These are recognized by the assembler, but have no effect on code.  */
21610 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
21611 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
21613 {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
21614 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
21615 &warn_on_deprecated, 0, NULL},
21616 {NULL, NULL, NULL, 0, NULL}
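/* Reading the table above together with md_parse_option below, a simple
   flag option just stores its value into the named variable, for
   instance:

       -mapcs-26          stores 1 into uses_apcs_26
       -mlittle-endian    stores 0 into target_big_endian
       -mapcs-frame       matches an entry with a NULL var, so it is
                          accepted but changes nothing.  */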
21619 struct arm_legacy_option_table
21621 char *option; /* Option name to match. */
21622 const arm_feature_set **var; /* Variable to change. */
21623 const arm_feature_set value; /* What to change it to. */
21624 char *deprecated; /* If non-null, print this message. */
21627 const struct arm_legacy_option_table arm_legacy_opts[] =
21629 /* DON'T add any new processors to this list -- we want the whole list
21630 to go away... Add them to the processors table instead. */
21631 {"marm1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
21632 {"m1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
21633 {"marm2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
21634 {"m2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
21635 {"marm250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
21636 {"m250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
21637 {"marm3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
21638 {"m3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
21639 {"marm6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
21640 {"m6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
21641 {"marm600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
21642 {"m600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
21643 {"marm610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
21644 {"m610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
21645 {"marm620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
21646 {"m620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
21647 {"marm7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
21648 {"m7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
21649 {"marm70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
21650 {"m70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
21651 {"marm700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
21652 {"m700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
21653 {"marm700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
21654 {"m700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
21655 {"marm710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
21656 {"m710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
21657 {"marm710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
21658 {"m710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
21659 {"marm720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
21660 {"m720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
21661 {"marm7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
21662 {"m7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
21663 {"marm7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
21664 {"m7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
21665 {"marm7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
21666 {"m7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
21667 {"marm7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
21668 {"m7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
21669 {"marm7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
21670 {"m7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
21671 {"marm7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
21672 {"m7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
21673 {"marm7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
21674 {"m7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
21675 {"marm7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
21676 {"m7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
21677 {"marm7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
21678 {"m7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
21679 {"marm7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
21680 {"m7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
21681 {"marm710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
21682 {"m710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
21683 {"marm720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
21684 {"m720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
21685 {"marm740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
21686 {"m740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
21687 {"marm8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
21688 {"m8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
21689 {"marm810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
21690 {"m810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
21691 {"marm9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
21692 {"m9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
21693 {"marm9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
21694 {"m9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
21695 {"marm920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
21696 {"m920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
21697 {"marm940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
21698 {"m940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
21699 {"mstrongarm", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=strongarm")},
21700 {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
21701 N_("use -mcpu=strongarm110")},
21702 {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
21703 N_("use -mcpu=strongarm1100")},
21704 {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
21705 N_("use -mcpu=strongarm1110")},
21706 {"mxscale", &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
21707 {"miwmmxt", &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
21708 {"mall", &legacy_cpu, ARM_ANY, N_("use -mcpu=all")},
21710 /* Architecture variants -- don't add any more to this list either. */
21711 {"mv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
21712 {"marmv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
21713 {"mv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
21714 {"marmv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
21715 {"mv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
21716 {"marmv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
21717 {"mv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
21718 {"marmv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
21719 {"mv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
21720 {"marmv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
21721 {"mv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
21722 {"marmv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
21723 {"mv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
21724 {"marmv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
21725 {"mv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
21726 {"marmv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
21727 {"mv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
21728 {"marmv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
21730 /* Floating point variants -- don't add any more to this list either. */
21731 {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
21732 {"mfpa10", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
21733 {"mfpa11", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
21734 {"mno-fpu", &legacy_fpu, ARM_ARCH_NONE,
21735 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
21737 {NULL, NULL, ARM_ARCH_NONE, NULL}
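/* For instance, assembling with the legacy option "-m7tdmi" selects
   ARM_ARCH_V4T via legacy_cpu and, when deprecation warnings are enabled,
   md_parse_option prints something like:

       option `-m7tdmi' is deprecated: use -mcpu=arm7tdmi
*/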
21740 struct arm_cpu_option_table
21742 char *name;
21743 const arm_feature_set value;
21744 /* For some CPUs we assume an FPU unless the user explicitly sets
21745 -mfpu=... */
21746 const arm_feature_set default_fpu;
21747 /* The canonical name of the CPU, or NULL to use NAME converted to upper
21748 case. */
21749 const char *canonical_name;
21752 /* This list should, at a minimum, contain all the cpu names
21753 recognized by GCC. */
21754 static const struct arm_cpu_option_table arm_cpus[] =
21756 {"all", ARM_ANY, FPU_ARCH_FPA, NULL},
21757 {"arm1", ARM_ARCH_V1, FPU_ARCH_FPA, NULL},
21758 {"arm2", ARM_ARCH_V2, FPU_ARCH_FPA, NULL},
21759 {"arm250", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
21760 {"arm3", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
21761 {"arm6", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
21762 {"arm60", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
21763 {"arm600", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
21764 {"arm610", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
21765 {"arm620", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
21766 {"arm7", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
21767 {"arm7m", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
21768 {"arm7d", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
21769 {"arm7dm", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
21770 {"arm7di", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
21771 {"arm7dmi", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
21772 {"arm70", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
21773 {"arm700", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
21774 {"arm700i", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
21775 {"arm710", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
21776 {"arm710t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
21777 {"arm720", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
21778 {"arm720t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
21779 {"arm740t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
21780 {"arm710c", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
21781 {"arm7100", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
21782 {"arm7500", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
21783 {"arm7500fe", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
21784 {"arm7t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
21785 {"arm7tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
21786 {"arm7tdmi-s", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
21787 {"arm8", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
21788 {"arm810", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
21789 {"strongarm", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
21790 {"strongarm1", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
21791 {"strongarm110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
21792 {"strongarm1100", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
21793 {"strongarm1110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
21794 {"arm9", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
21795 {"arm920", ARM_ARCH_V4T, FPU_ARCH_FPA, "ARM920T"},
21796 {"arm920t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
21797 {"arm922t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
21798 {"arm940t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
21799 {"arm9tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
21800 {"fa526", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
21801 {"fa626", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
21802 /* For V5 or later processors we default to using VFP; but the user
21803 should really set the FPU type explicitly. */
21804 {"arm9e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
21805 {"arm9e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
21806 {"arm926ej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
21807 {"arm926ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
21808 {"arm926ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
21809 {"arm946e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
21810 {"arm946e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM946E-S"},
21811 {"arm946e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
21812 {"arm966e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
21813 {"arm966e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM966E-S"},
21814 {"arm966e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
21815 {"arm968e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
21816 {"arm10t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
21817 {"arm10tdmi", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
21818 {"arm10e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
21819 {"arm1020", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM1020E"},
21820 {"arm1020t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
21821 {"arm1020e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
21822 {"arm1022e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
21823 {"arm1026ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM1026EJ-S"},
21824 {"arm1026ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
21825 {"fa626te", ARM_ARCH_V5TE, FPU_NONE, NULL},
21826 {"fa726te", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
21827 {"arm1136js", ARM_ARCH_V6, FPU_NONE, "ARM1136J-S"},
21828 {"arm1136j-s", ARM_ARCH_V6, FPU_NONE, NULL},
21829 {"arm1136jfs", ARM_ARCH_V6, FPU_ARCH_VFP_V2, "ARM1136JF-S"},
21830 {"arm1136jf-s", ARM_ARCH_V6, FPU_ARCH_VFP_V2, NULL},
21831 {"mpcore", ARM_ARCH_V6K, FPU_ARCH_VFP_V2, NULL},
21832 {"mpcorenovfp", ARM_ARCH_V6K, FPU_NONE, NULL},
21833 {"arm1156t2-s", ARM_ARCH_V6T2, FPU_NONE, NULL},
21834 {"arm1156t2f-s", ARM_ARCH_V6T2, FPU_ARCH_VFP_V2, NULL},
21835 {"arm1176jz-s", ARM_ARCH_V6ZK, FPU_NONE, NULL},
21836 {"arm1176jzf-s", ARM_ARCH_V6ZK, FPU_ARCH_VFP_V2, NULL},
21837 {"cortex-a8", ARM_ARCH_V7A, ARM_FEATURE (0, FPU_VFP_V3
21838 | FPU_NEON_EXT_V1),
21839 NULL},
21840 {"cortex-a9", ARM_ARCH_V7A, ARM_FEATURE (0, FPU_VFP_V3
21841 | FPU_NEON_EXT_V1),
21842 NULL},
21843 {"cortex-r4", ARM_ARCH_V7R, FPU_NONE, NULL},
21844 {"cortex-m3", ARM_ARCH_V7M, FPU_NONE, NULL},
21845 {"cortex-m1", ARM_ARCH_V6M, FPU_NONE, NULL},
21846 {"cortex-m0", ARM_ARCH_V6M, FPU_NONE, NULL},
21847 /* ??? XSCALE is really an architecture. */
21848 {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
21849 /* ??? iwmmxt is not a processor. */
21850 {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL},
21851 {"iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL},
21852 {"i80200", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
21853 /* Maverick */
21854 {"ep9312", ARM_FEATURE (ARM_AEXT_V4T, ARM_CEXT_MAVERICK), FPU_ARCH_MAVERICK, "ARM920T"},
21855 {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL}
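/* As an example of how an entry is used: "-mcpu=arm1136jfs" selects
   ARM_ARCH_V6 with a default FPU of FPU_ARCH_VFP_V2, and because the
   entry supplies a canonical name, selected_cpu_name (and hence the
   Tag_CPU_name attribute on ELF targets) becomes "ARM1136JF-S".  For
   entries with a NULL canonical name, e.g. "arm920t", the option text is
   simply upper-cased, giving "ARM920T".  */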
21858 struct arm_arch_option_table
21860 char *name;
21861 const arm_feature_set value;
21862 const arm_feature_set default_fpu;
21865 /* This list should, at a minimum, contain all the architecture names
21866 recognized by GCC. */
21867 static const struct arm_arch_option_table arm_archs[] =
21869 {"all", ARM_ANY, FPU_ARCH_FPA},
21870 {"armv1", ARM_ARCH_V1, FPU_ARCH_FPA},
21871 {"armv2", ARM_ARCH_V2, FPU_ARCH_FPA},
21872 {"armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA},
21873 {"armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA},
21874 {"armv3", ARM_ARCH_V3, FPU_ARCH_FPA},
21875 {"armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA},
21876 {"armv4", ARM_ARCH_V4, FPU_ARCH_FPA},
21877 {"armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA},
21878 {"armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA},
21879 {"armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA},
21880 {"armv5", ARM_ARCH_V5, FPU_ARCH_VFP},
21881 {"armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP},
21882 {"armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP},
21883 {"armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP},
21884 {"armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP},
21885 {"armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP},
21886 {"armv6", ARM_ARCH_V6, FPU_ARCH_VFP},
21887 {"armv6j", ARM_ARCH_V6, FPU_ARCH_VFP},
21888 {"armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP},
21889 {"armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP},
21890 {"armv6zk", ARM_ARCH_V6ZK, FPU_ARCH_VFP},
21891 {"armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP},
21892 {"armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP},
21893 {"armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP},
21894 {"armv6zkt2", ARM_ARCH_V6ZKT2, FPU_ARCH_VFP},
21895 {"armv6-m", ARM_ARCH_V6M, FPU_ARCH_VFP},
21896 {"armv7", ARM_ARCH_V7, FPU_ARCH_VFP},
21897 /* The official spelling of the ARMv7 profile variants is the dashed form.
21898 Accept the non-dashed form for compatibility with old toolchains. */
21899 {"armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP},
21900 {"armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP},
21901 {"armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP},
21902 {"armv7-a", ARM_ARCH_V7A, FPU_ARCH_VFP},
21903 {"armv7-r", ARM_ARCH_V7R, FPU_ARCH_VFP},
21904 {"armv7-m", ARM_ARCH_V7M, FPU_ARCH_VFP},
21905 {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP},
21906 {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP},
21907 {"iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP},
21908 {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE}
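/* For example, "-march=armv7-a" (or the legacy spelling "armv7a") selects
   ARM_ARCH_V7A with FPU_ARCH_VFP as the architecture's default FPU.
   Unlike -mcpu=, the architecture name is copied verbatim into
   selected_cpu_name, so aeabi_set_public_attributes later strips the
   leading "armv" and upper-cases the remainder ("7-A") when it emits
   Tag_CPU_name.  */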
21911 /* ISA extensions in the co-processor space. */
21912 struct arm_option_cpu_value_table
21914 char *name;
21915 const arm_feature_set value;
21918 static const struct arm_option_cpu_value_table arm_extensions[] =
21920 {"maverick", ARM_FEATURE (0, ARM_CEXT_MAVERICK)},
21921 {"xscale", ARM_FEATURE (0, ARM_CEXT_XSCALE)},
21922 {"iwmmxt", ARM_FEATURE (0, ARM_CEXT_IWMMXT)},
21923 {"iwmmxt2", ARM_FEATURE (0, ARM_CEXT_IWMMXT2)},
21924 {NULL, ARM_ARCH_NONE}
21927 /* This list should, at a minimum, contain all the fpu names
21928 recognized by GCC. */
21929 static const struct arm_option_cpu_value_table arm_fpus[] =
21931 {"softfpa", FPU_NONE},
21932 {"fpe", FPU_ARCH_FPE},
21933 {"fpe2", FPU_ARCH_FPE},
21934 {"fpe3", FPU_ARCH_FPA}, /* Third release supports LFM/SFM. */
21935 {"fpa", FPU_ARCH_FPA},
21936 {"fpa10", FPU_ARCH_FPA},
21937 {"fpa11", FPU_ARCH_FPA},
21938 {"arm7500fe", FPU_ARCH_FPA},
21939 {"softvfp", FPU_ARCH_VFP},
21940 {"softvfp+vfp", FPU_ARCH_VFP_V2},
21941 {"vfp", FPU_ARCH_VFP_V2},
21942 {"vfp9", FPU_ARCH_VFP_V2},
21943   {"vfp3",              FPU_ARCH_VFP_V3}, /* For backwards compatibility.  */
21944 {"vfp10", FPU_ARCH_VFP_V2},
21945 {"vfp10-r0", FPU_ARCH_VFP_V1},
21946 {"vfpxd", FPU_ARCH_VFP_V1xD},
21947 {"vfpv2", FPU_ARCH_VFP_V2},
21948 {"vfpv3", FPU_ARCH_VFP_V3},
21949 {"vfpv3-d16", FPU_ARCH_VFP_V3D16},
21950 {"arm1020t", FPU_ARCH_VFP_V1},
21951 {"arm1020e", FPU_ARCH_VFP_V2},
21952 {"arm1136jfs", FPU_ARCH_VFP_V2},
21953 {"arm1136jf-s", FPU_ARCH_VFP_V2},
21954 {"maverick", FPU_ARCH_MAVERICK},
21955 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1},
21956 {"neon-fp16", FPU_ARCH_NEON_FP16},
21957 {NULL, ARM_ARCH_NONE}
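/* For example, "-mfpu=neon" selects FPU_ARCH_VFP_V3_PLUS_NEON_V1 and
   "-mfpu=vfpv3-d16" selects FPU_ARCH_VFP_V3D16; several entries such as
   "vfp9" and "vfp10" are simply aliases for FPU_ARCH_VFP_V2.  */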
21960 struct arm_option_value_table
21962 char *name;
21963 long value;
21966 static const struct arm_option_value_table arm_float_abis[] =
21968 {"hard", ARM_FLOAT_ABI_HARD},
21969 {"softfp", ARM_FLOAT_ABI_SOFTFP},
21970 {"soft", ARM_FLOAT_ABI_SOFT},
21971 {NULL, 0}
21974 #ifdef OBJ_ELF
21975 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
21976 static const struct arm_option_value_table arm_eabis[] =
21978 {"gnu", EF_ARM_EABI_UNKNOWN},
21979 {"4", EF_ARM_EABI_VER4},
21980 {"5", EF_ARM_EABI_VER5},
21981 {NULL, 0}
21983 #endif
21985 struct arm_long_option_table
21987 char * option; /* Substring to match. */
21988 char * help; /* Help information. */
21989 int (* func) (char * subopt); /* Function to decode sub-option. */
21990 char * deprecated; /* If non-null, print this message. */
21993 static bfd_boolean
21994 arm_parse_extension (char * str, const arm_feature_set **opt_p)
21996 arm_feature_set *ext_set = xmalloc (sizeof (arm_feature_set));
21998 /* Copy the feature set, so that we can modify it. */
21999 *ext_set = **opt_p;
22000 *opt_p = ext_set;
22002 while (str != NULL && *str != 0)
22004 const struct arm_option_cpu_value_table * opt;
22005 char * ext;
22006 int optlen;
22008 if (*str != '+')
22010 as_bad (_("invalid architectural extension"));
22011 return FALSE;
22014 str++;
22015 ext = strchr (str, '+');
22017 if (ext != NULL)
22018 optlen = ext - str;
22019 else
22020 optlen = strlen (str);
22022 if (optlen == 0)
22024 as_bad (_("missing architectural extension"));
22025 return FALSE;
22028 for (opt = arm_extensions; opt->name != NULL; opt++)
22029 if (strncmp (opt->name, str, optlen) == 0)
22031 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
22032 break;
22035 if (opt->name == NULL)
22037 as_bad (_("unknown architectural extension `%s'"), str);
22038 return FALSE;
22041 str = ext;
22044 return TRUE;
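/* To illustrate the loop above: given a CPU or architecture option ending
   in "+iwmmxt+maverick", str points at the first '+'; each iteration
   steps past it, measures the name up to the next '+', looks it up in
   arm_extensions, and ORs the matching feature bits into the copied
   feature set, so both ARM_CEXT_IWMMXT and ARM_CEXT_MAVERICK end up set
   in the returned *opt_p.  */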
22047 static bfd_boolean
22048 arm_parse_cpu (char * str)
22050 const struct arm_cpu_option_table * opt;
22051 char * ext = strchr (str, '+');
22052 int optlen;
22054 if (ext != NULL)
22055 optlen = ext - str;
22056 else
22057 optlen = strlen (str);
22059 if (optlen == 0)
22061 as_bad (_("missing cpu name `%s'"), str);
22062 return FALSE;
22065 for (opt = arm_cpus; opt->name != NULL; opt++)
22066 if (strncmp (opt->name, str, optlen) == 0)
22068 mcpu_cpu_opt = &opt->value;
22069 mcpu_fpu_opt = &opt->default_fpu;
22070 if (opt->canonical_name)
22071 strcpy (selected_cpu_name, opt->canonical_name);
22072 else
22074 int i;
22076 for (i = 0; i < optlen; i++)
22077 selected_cpu_name[i] = TOUPPER (opt->name[i]);
22078 selected_cpu_name[i] = 0;
22081 if (ext != NULL)
22082 return arm_parse_extension (ext, &mcpu_cpu_opt);
22084 return TRUE;
22087 as_bad (_("unknown cpu `%s'"), str);
22088 return FALSE;
22091 static bfd_boolean
22092 arm_parse_arch (char * str)
22094 const struct arm_arch_option_table *opt;
22095 char *ext = strchr (str, '+');
22096 int optlen;
22098 if (ext != NULL)
22099 optlen = ext - str;
22100 else
22101 optlen = strlen (str);
22103 if (optlen == 0)
22105 as_bad (_("missing architecture name `%s'"), str);
22106 return FALSE;
22109 for (opt = arm_archs; opt->name != NULL; opt++)
22110     if (strncmp (opt->name, str, optlen) == 0)
22112 march_cpu_opt = &opt->value;
22113 march_fpu_opt = &opt->default_fpu;
22114 strcpy (selected_cpu_name, opt->name);
22116 if (ext != NULL)
22117 return arm_parse_extension (ext, &march_cpu_opt);
22119 return TRUE;
22122 as_bad (_("unknown architecture `%s'\n"), str);
22123 return FALSE;
22126 static bfd_boolean
22127 arm_parse_fpu (char * str)
22129 const struct arm_option_cpu_value_table * opt;
22131 for (opt = arm_fpus; opt->name != NULL; opt++)
22132 if (streq (opt->name, str))
22134 mfpu_opt = &opt->value;
22135 return TRUE;
22138 as_bad (_("unknown floating point format `%s'\n"), str);
22139 return FALSE;
22142 static bfd_boolean
22143 arm_parse_float_abi (char * str)
22145 const struct arm_option_value_table * opt;
22147 for (opt = arm_float_abis; opt->name != NULL; opt++)
22148 if (streq (opt->name, str))
22150 mfloat_abi_opt = opt->value;
22151 return TRUE;
22154 as_bad (_("unknown floating point abi `%s'\n"), str);
22155 return FALSE;
22158 #ifdef OBJ_ELF
22159 static bfd_boolean
22160 arm_parse_eabi (char * str)
22162 const struct arm_option_value_table *opt;
22164 for (opt = arm_eabis; opt->name != NULL; opt++)
22165 if (streq (opt->name, str))
22167 meabi_flags = opt->value;
22168 return TRUE;
22170 as_bad (_("unknown EABI `%s'\n"), str);
22171 return FALSE;
22173 #endif
22175 static bfd_boolean
22176 arm_parse_it_mode (char * str)
22178 bfd_boolean ret = TRUE;
22180 if (streq ("arm", str))
22181 implicit_it_mode = IMPLICIT_IT_MODE_ARM;
22182 else if (streq ("thumb", str))
22183 implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
22184 else if (streq ("always", str))
22185 implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
22186 else if (streq ("never", str))
22187 implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
22188 else
22190 as_bad (_("unknown implicit IT mode `%s', should be "\
22191 "arm, thumb, always, or never."), str);
22192 ret = FALSE;
22195 return ret;
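/* E.g. "-mimplicit-it=thumb" sets implicit_it_mode to
   IMPLICIT_IT_MODE_THUMB, while anything outside arm/thumb/always/never
   is rejected with the diagnostic above.  */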
22198 struct arm_long_option_table arm_long_opts[] =
22200 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
22201 arm_parse_cpu, NULL},
22202 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
22203 arm_parse_arch, NULL},
22204 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
22205 arm_parse_fpu, NULL},
22206 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
22207 arm_parse_float_abi, NULL},
22208 #ifdef OBJ_ELF
22209 {"meabi=", N_("<ver>\t\t assemble for eabi version <ver>"),
22210 arm_parse_eabi, NULL},
22211 #endif
22212 {"mimplicit-it=", N_("<mode>\t controls implicit insertion of IT instructions"),
22213 arm_parse_it_mode, NULL},
22214 {NULL, NULL, 0, NULL}
22218 int md_parse_option (int c, char * arg)
22220 struct arm_option_table *opt;
22221 const struct arm_legacy_option_table *fopt;
22222 struct arm_long_option_table *lopt;
22224 switch (c)
22226 #ifdef OPTION_EB
22227 case OPTION_EB:
22228 target_big_endian = 1;
22229 break;
22230 #endif
22232 #ifdef OPTION_EL
22233 case OPTION_EL:
22234 target_big_endian = 0;
22235 break;
22236 #endif
22238 case OPTION_FIX_V4BX:
22239 fix_v4bx = TRUE;
22240 break;
22242 case 'a':
22243 /* Listing option. Just ignore these, we don't support additional
22244 ones. */
22245 return 0;
22247 default:
22248 for (opt = arm_opts; opt->option != NULL; opt++)
22250 if (c == opt->option[0]
22251 && ((arg == NULL && opt->option[1] == 0)
22252 || streq (arg, opt->option + 1)))
22254 /* If the option is deprecated, tell the user. */
22255 if (warn_on_deprecated && opt->deprecated != NULL)
22256 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
22257 arg ? arg : "", _(opt->deprecated));
22259 if (opt->var != NULL)
22260 *opt->var = opt->value;
22262 return 1;
22266 for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
22268 if (c == fopt->option[0]
22269 && ((arg == NULL && fopt->option[1] == 0)
22270 || streq (arg, fopt->option + 1)))
22272 /* If the option is deprecated, tell the user. */
22273 if (warn_on_deprecated && fopt->deprecated != NULL)
22274 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
22275 arg ? arg : "", _(fopt->deprecated));
22277 if (fopt->var != NULL)
22278 *fopt->var = &fopt->value;
22280 return 1;
22284 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
22286 /* These options are expected to have an argument. */
22287 if (c == lopt->option[0]
22288 && arg != NULL
22289 && strncmp (arg, lopt->option + 1,
22290 strlen (lopt->option + 1)) == 0)
22292 /* If the option is deprecated, tell the user. */
22293 if (warn_on_deprecated && lopt->deprecated != NULL)
22294 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
22295 _(lopt->deprecated));
22297         /* Call the sub-option parser.  */
22298 return lopt->func (arg + strlen (lopt->option) - 1);
22302 return 0;
22305 return 1;
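/* A worked example of the dispatch above, assuming the option
   "-mfpu=neon": c is 'm' and arg is "fpu=neon".  Neither arm_opts nor
   arm_legacy_opts matches the whole string, but the arm_long_opts entry
   "mfpu=" matches as a prefix, so arm_parse_fpu is called with the
   remainder "neon" (arg + strlen ("mfpu=") - 1).  */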
22308 void
22309 md_show_usage (FILE * fp)
22311 struct arm_option_table *opt;
22312 struct arm_long_option_table *lopt;
22314 fprintf (fp, _(" ARM-specific assembler options:\n"));
22316 for (opt = arm_opts; opt->option != NULL; opt++)
22317 if (opt->help != NULL)
22318 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
22320 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
22321 if (lopt->help != NULL)
22322 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
22324 #ifdef OPTION_EB
22325 fprintf (fp, _("\
22326 -EB assemble code for a big-endian cpu\n"));
22327 #endif
22329 #ifdef OPTION_EL
22330 fprintf (fp, _("\
22331 -EL assemble code for a little-endian cpu\n"));
22332 #endif
22334 fprintf (fp, _("\
22335 --fix-v4bx Allow BX in ARMv4 code\n"));
22339 #ifdef OBJ_ELF
22340 typedef struct
22342 int val;
22343 arm_feature_set flags;
22344 } cpu_arch_ver_table;
22346 /* Mapping from CPU features to EABI CPU arch values. Table must be sorted
22347    with the fewest features first.  */
22348 static const cpu_arch_ver_table cpu_arch_ver[] =
22350 {1, ARM_ARCH_V4},
22351 {2, ARM_ARCH_V4T},
22352 {3, ARM_ARCH_V5},
22353 {3, ARM_ARCH_V5T},
22354 {4, ARM_ARCH_V5TE},
22355 {5, ARM_ARCH_V5TEJ},
22356 {6, ARM_ARCH_V6},
22357 {7, ARM_ARCH_V6Z},
22358 {9, ARM_ARCH_V6K},
22359 {11, ARM_ARCH_V6M},
22360 {8, ARM_ARCH_V6T2},
22361 {10, ARM_ARCH_V7A},
22362 {10, ARM_ARCH_V7R},
22363 {10, ARM_ARCH_V7M},
22364 {0, ARM_ARCH_NONE}
22367 /* Set an attribute if it has not already been set by the user. */
22368 static void
22369 aeabi_set_attribute_int (int tag, int value)
22371 if (tag < 1
22372 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
22373 || !attributes_set_explicitly[tag])
22374 bfd_elf_add_proc_attr_int (stdoutput, tag, value);
22377 static void
22378 aeabi_set_attribute_string (int tag, const char *value)
22380 if (tag < 1
22381 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
22382 || !attributes_set_explicitly[tag])
22383 bfd_elf_add_proc_attr_string (stdoutput, tag, value);
22386 /* Set the public EABI object attributes. */
22387 static void
22388 aeabi_set_public_attributes (void)
22390 int arch;
22391 arm_feature_set flags;
22392 arm_feature_set tmp;
22393 const cpu_arch_ver_table *p;
22395 /* Choose the architecture based on the capabilities of the requested cpu
22396 (if any) and/or the instructions actually used. */
22397 ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
22398 ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
22399 ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
22400   /* Allow the user to override the reported architecture.  */
22401 if (object_arch)
22403 ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
22404 ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
22407 tmp = flags;
22408 arch = 0;
22409 for (p = cpu_arch_ver; p->val; p++)
22411 if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
22413 arch = p->val;
22414 ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
22418 /* Tag_CPU_name. */
22419 if (selected_cpu_name[0])
22421 char *p;
22423 p = selected_cpu_name;
22424 if (strncmp (p, "armv", 4) == 0)
22426 int i;
22428 p += 4;
22429 for (i = 0; p[i]; i++)
22430 p[i] = TOUPPER (p[i]);
22432 aeabi_set_attribute_string (Tag_CPU_name, p);
22434 /* Tag_CPU_arch. */
22435 aeabi_set_attribute_int (Tag_CPU_arch, arch);
22436 /* Tag_CPU_arch_profile. */
22437 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
22438 aeabi_set_attribute_int (Tag_CPU_arch_profile, 'A');
22439 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
22440 aeabi_set_attribute_int (Tag_CPU_arch_profile, 'R');
22441 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_m))
22442 aeabi_set_attribute_int (Tag_CPU_arch_profile, 'M');
22443 /* Tag_ARM_ISA_use. */
22444 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
22445 || arch == 0)
22446 aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);
22447 /* Tag_THUMB_ISA_use. */
22448 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
22449 || arch == 0)
22450 aeabi_set_attribute_int (Tag_THUMB_ISA_use,
22451 ARM_CPU_HAS_FEATURE (flags, arm_arch_t2) ? 2 : 1);
22452 /* Tag_VFP_arch. */
22453 if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
22454 aeabi_set_attribute_int (Tag_VFP_arch, 3);
22455 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3))
22456 aeabi_set_attribute_int (Tag_VFP_arch, 4);
22457 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
22458 aeabi_set_attribute_int (Tag_VFP_arch, 2);
22459 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
22460 || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
22461 aeabi_set_attribute_int (Tag_VFP_arch, 1);
22462 /* Tag_WMMX_arch. */
22463 if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
22464 aeabi_set_attribute_int (Tag_WMMX_arch, 2);
22465 else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
22466 aeabi_set_attribute_int (Tag_WMMX_arch, 1);
22467 /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch). */
22468 if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
22469 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
22470 /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch). */
22471 if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_fp16))
22472 aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);
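/* A rough example of the result, assuming "-mcpu=cortex-a8" on an EABI
   target: selected_cpu_name is "CORTEX-A8", so Tag_CPU_name becomes
   "CORTEX-A8"; the v7-A feature set maps to Tag_CPU_arch 10 via the
   cpu_arch_ver table; Tag_CPU_arch_profile is 'A'; Tag_ARM_ISA_use is 1
   and Tag_THUMB_ISA_use is 2 (Thumb-2); and the Cortex-A8 default FPU
   (VFPv3 plus Neon) also sets Tag_Advanced_SIMD_arch to 1 and one of the
   VFPv3 Tag_VFP_arch values.  */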
22475 /* Add the default contents for the .ARM.attributes section. */
22476 void
22477 arm_md_end (void)
22479 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
22480 return;
22482 aeabi_set_public_attributes ();
22484 #endif /* OBJ_ELF */
22487 /* Parse a .cpu directive. */
22489 static void
22490 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
22492 const struct arm_cpu_option_table *opt;
22493 char *name;
22494 char saved_char;
22496 name = input_line_pointer;
22497 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
22498 input_line_pointer++;
22499 saved_char = *input_line_pointer;
22500 *input_line_pointer = 0;
22502 /* Skip the first "all" entry. */
22503 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
22504 if (streq (opt->name, name))
22506 mcpu_cpu_opt = &opt->value;
22507 selected_cpu = opt->value;
22508 if (opt->canonical_name)
22509 strcpy (selected_cpu_name, opt->canonical_name);
22510 else
22512 int i;
22513 for (i = 0; opt->name[i]; i++)
22514 selected_cpu_name[i] = TOUPPER (opt->name[i]);
22515 selected_cpu_name[i] = 0;
22517 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
22518 *input_line_pointer = saved_char;
22519 demand_empty_rest_of_line ();
22520 return;
22522 as_bad (_("unknown cpu `%s'"), name);
22523 *input_line_pointer = saved_char;
22524 ignore_rest_of_line ();
22528 /* Parse a .arch directive. */
22530 static void
22531 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
22533 const struct arm_arch_option_table *opt;
22534 char saved_char;
22535 char *name;
22537 name = input_line_pointer;
22538 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
22539 input_line_pointer++;
22540 saved_char = *input_line_pointer;
22541 *input_line_pointer = 0;
22543 /* Skip the first "all" entry. */
22544 for (opt = arm_archs + 1; opt->name != NULL; opt++)
22545 if (streq (opt->name, name))
22547 mcpu_cpu_opt = &opt->value;
22548 selected_cpu = opt->value;
22549 strcpy (selected_cpu_name, opt->name);
22550 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
22551 *input_line_pointer = saved_char;
22552 demand_empty_rest_of_line ();
22553 return;
22556 as_bad (_("unknown architecture `%s'\n"), name);
22557 *input_line_pointer = saved_char;
22558 ignore_rest_of_line ();
22562 /* Parse a .object_arch directive. */
22564 static void
22565 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
22567 const struct arm_arch_option_table *opt;
22568 char saved_char;
22569 char *name;
22571 name = input_line_pointer;
22572 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
22573 input_line_pointer++;
22574 saved_char = *input_line_pointer;
22575 *input_line_pointer = 0;
22577 /* Skip the first "all" entry. */
22578 for (opt = arm_archs + 1; opt->name != NULL; opt++)
22579 if (streq (opt->name, name))
22581 object_arch = &opt->value;
22582 *input_line_pointer = saved_char;
22583 demand_empty_rest_of_line ();
22584 return;
22587 as_bad (_("unknown architecture `%s'\n"), name);
22588 *input_line_pointer = saved_char;
22589 ignore_rest_of_line ();
22592 /* Parse a .fpu directive. */
22594 static void
22595 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
22597 const struct arm_option_cpu_value_table *opt;
22598 char saved_char;
22599 char *name;
22601 name = input_line_pointer;
22602 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
22603 input_line_pointer++;
22604 saved_char = *input_line_pointer;
22605 *input_line_pointer = 0;
22607 for (opt = arm_fpus; opt->name != NULL; opt++)
22608 if (streq (opt->name, name))
22610 mfpu_opt = &opt->value;
22611 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
22612 *input_line_pointer = saved_char;
22613 demand_empty_rest_of_line ();
22614 return;
22617 as_bad (_("unknown floating point format `%s'\n"), name);
22618 *input_line_pointer = saved_char;
22619 ignore_rest_of_line ();
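/* Together, the directive handlers above accept source like the
   following (names taken from the tables earlier in this file):

       .cpu arm1176jzf-s
       .arch armv6zk
       .object_arch armv4t
       .fpu vfpv2

   .cpu, .arch and .fpu update the active cpu_variant used for
   assembling, while .object_arch only overrides the architecture
   recorded in the object's attributes.  */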
22622 /* Copy symbol information. */
22624 void
22625 arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
22627 ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
22630 #ifdef OBJ_ELF
22631 /* Given a symbolic attribute NAME, return the proper integer value.
22632 Returns -1 if the attribute is not known. */
22635 int arm_convert_symbolic_attribute (const char *name)
22637 static const struct
22639 const char * name;
22640 const int tag;
22642 attribute_table[] =
22644 /* When you modify this table you should
22645 also modify the list in doc/c-arm.texi. */
22646 #define T(tag) {#tag, tag}
22647 T (Tag_CPU_raw_name),
22648 T (Tag_CPU_name),
22649 T (Tag_CPU_arch),
22650 T (Tag_CPU_arch_profile),
22651 T (Tag_ARM_ISA_use),
22652 T (Tag_THUMB_ISA_use),
22653 T (Tag_VFP_arch),
22654 T (Tag_WMMX_arch),
22655 T (Tag_Advanced_SIMD_arch),
22656 T (Tag_PCS_config),
22657 T (Tag_ABI_PCS_R9_use),
22658 T (Tag_ABI_PCS_RW_data),
22659 T (Tag_ABI_PCS_RO_data),
22660 T (Tag_ABI_PCS_GOT_use),
22661 T (Tag_ABI_PCS_wchar_t),
22662 T (Tag_ABI_FP_rounding),
22663 T (Tag_ABI_FP_denormal),
22664 T (Tag_ABI_FP_exceptions),
22665 T (Tag_ABI_FP_user_exceptions),
22666 T (Tag_ABI_FP_number_model),
22667 T (Tag_ABI_align8_needed),
22668 T (Tag_ABI_align8_preserved),
22669 T (Tag_ABI_enum_size),
22670 T (Tag_ABI_HardFP_use),
22671 T (Tag_ABI_VFP_args),
22672 T (Tag_ABI_WMMX_args),
22673 T (Tag_ABI_optimization_goals),
22674 T (Tag_ABI_FP_optimization_goals),
22675 T (Tag_compatibility),
22676 T (Tag_CPU_unaligned_access),
22677 T (Tag_VFP_HP_extension),
22678 T (Tag_ABI_FP_16bit_format),
22679 T (Tag_nodefaults),
22680 T (Tag_also_compatible_with),
22681 T (Tag_conformance),
22682 T (Tag_T2EE_use),
22683 T (Tag_Virtualization_use),
22684 T (Tag_MPextension_use)
22685 #undef T
22687 unsigned int i;
22689 if (name == NULL)
22690 return -1;
22692 for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
22693 if (streq (name, attribute_table[i].name))
22694 return attribute_table[i].tag;
22696 return -1;
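/* For example, this lookup is what lets the object-attribute directive
   handled elsewhere in this file (.eabi_attribute) accept a symbolic tag
   name in place of its number, so that

       .eabi_attribute Tag_ABI_enum_size, 2

   can be written instead of the equivalent numeric tag.  */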
22700 /* Apply the symbol value for relocations only when they refer to
22701    local symbols and the selected architecture has the feature needed
22702    for BLX and simple ARM/Thumb state switches (ARMv5T and later).  */
22704 int arm_apply_sym_value (struct fix * fixP)
22706 if (fixP->fx_addsy
22707 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
22708 && !S_IS_EXTERNAL (fixP->fx_addsy))
22710 switch (fixP->fx_r_type)
22712 case BFD_RELOC_ARM_PCREL_BLX:
22713 case BFD_RELOC_THUMB_PCREL_BRANCH23:
22714 if (ARM_IS_FUNC (fixP->fx_addsy))
22715 return 1;
22716 break;
22718 case BFD_RELOC_ARM_PCREL_CALL:
22719 case BFD_RELOC_THUMB_PCREL_BLX:
22720 if (THUMB_IS_FUNC (fixP->fx_addsy))
22721 return 1;
22722 break;
22724 default:
22725 break;
22729 return 0;
22731 #endif /* OBJ_ELF */