Add support for ARM half-precision conversion instructions.
[binutils.git] / gas / config / tc-arm.c
1 /* tc-arm.c -- Assemble for the ARM
2 Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
3 2004, 2005, 2006, 2007, 2008
4 Free Software Foundation, Inc.
5 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
6 Modified by David Taylor (dtaylor@armltd.co.uk)
7 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
8 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
9 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
11 This file is part of GAS, the GNU Assembler.
13 GAS is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 3, or (at your option)
16 any later version.
18 GAS is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
23 You should have received a copy of the GNU General Public License
24 along with GAS; see the file COPYING. If not, write to the Free
25 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
26 02110-1301, USA. */
28 #include <limits.h>
29 #include <stdarg.h>
30 #define NO_RELOC 0
31 #include "as.h"
32 #include "safe-ctype.h"
33 #include "subsegs.h"
34 #include "obstack.h"
36 #include "opcode/arm.h"
38 #ifdef OBJ_ELF
39 #include "elf/arm.h"
40 #include "dw2gencfi.h"
41 #endif
43 #include "dwarf2dbg.h"
45 #define WARN_DEPRECATED 1
47 #ifdef OBJ_ELF
48 /* Must be at least the size of the largest unwind opcode (currently two). */
49 #define ARM_OPCODE_CHUNK_SIZE 8
51 /* This structure holds the unwinding state. */
53 static struct
55 symbolS * proc_start;
56 symbolS * table_entry;
57 symbolS * personality_routine;
58 int personality_index;
59 /* The segment containing the function. */
60 segT saved_seg;
61 subsegT saved_subseg;
62 /* Opcodes generated from this function. */
63 unsigned char * opcodes;
64 int opcode_count;
65 int opcode_alloc;
66 /* The number of bytes pushed to the stack. */
67 offsetT frame_size;
68 /* We don't add stack adjustment opcodes immediately so that we can merge
69 multiple adjustments. We can also omit the final adjustment
70 when using a frame pointer. */
71 offsetT pending_offset;
72 /* These two fields are set by both unwind_movsp and unwind_setfp. They
73 hold the reg+offset to use when restoring sp from a frame pointer. */
74 offsetT fp_offset;
75 int fp_reg;
76 /* Nonzero if an unwind_setfp directive has been seen. */
77 unsigned fp_used:1;
78 /* Nonzero if the last opcode restores sp from fp_reg. */
79 unsigned sp_restored:1;
80 } unwind;
82 /* Bit N, if set, indicates that an R_ARM_NONE relocation has already been
83 output for __aeabi_unwind_cpp_prN. This enables dependencies to be
84 emitted only once per section, to save unnecessary bloat. */
85 static unsigned int marked_pr_dependency = 0;
87 #endif /* OBJ_ELF */
89 /* Results from operand parsing worker functions. */
91 typedef enum
93 PARSE_OPERAND_SUCCESS,
94 PARSE_OPERAND_FAIL,
95 PARSE_OPERAND_FAIL_NO_BACKTRACK
96 } parse_operand_result;
98 enum arm_float_abi
100 ARM_FLOAT_ABI_HARD,
101 ARM_FLOAT_ABI_SOFTFP,
102 ARM_FLOAT_ABI_SOFT
105 /* Types of processor to assemble for. */
106 #ifndef CPU_DEFAULT
107 #if defined __XSCALE__
108 #define CPU_DEFAULT ARM_ARCH_XSCALE
109 #else
110 #if defined __thumb__
111 #define CPU_DEFAULT ARM_ARCH_V5T
112 #endif
113 #endif
114 #endif
116 #ifndef FPU_DEFAULT
117 # ifdef TE_LINUX
118 # define FPU_DEFAULT FPU_ARCH_FPA
119 # elif defined (TE_NetBSD)
120 # ifdef OBJ_ELF
121 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
122 # else
123 /* Legacy a.out format. */
124 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
125 # endif
126 # elif defined (TE_VXWORKS)
127 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
128 # else
129 /* For backwards compatibility, default to FPA. */
130 # define FPU_DEFAULT FPU_ARCH_FPA
131 # endif
132 #endif /* ifndef FPU_DEFAULT */
134 #define streq(a, b) (strcmp (a, b) == 0)
136 static arm_feature_set cpu_variant;
137 static arm_feature_set arm_arch_used;
138 static arm_feature_set thumb_arch_used;
140 /* Flags stored in private area of BFD structure. */
141 static int uses_apcs_26 = FALSE;
142 static int atpcs = FALSE;
143 static int support_interwork = FALSE;
144 static int uses_apcs_float = FALSE;
145 static int pic_code = FALSE;
146 static int fix_v4bx = FALSE;
148 /* Variables that we set while parsing command-line options. Once all
149 options have been read we re-process these values to set the real
150 assembly flags. */
151 static const arm_feature_set *legacy_cpu = NULL;
152 static const arm_feature_set *legacy_fpu = NULL;
154 static const arm_feature_set *mcpu_cpu_opt = NULL;
155 static const arm_feature_set *mcpu_fpu_opt = NULL;
156 static const arm_feature_set *march_cpu_opt = NULL;
157 static const arm_feature_set *march_fpu_opt = NULL;
158 static const arm_feature_set *mfpu_opt = NULL;
159 static const arm_feature_set *object_arch = NULL;
161 /* Constants for known architecture features. */
162 static const arm_feature_set fpu_default = FPU_DEFAULT;
163 static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
164 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
165 static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
166 static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
167 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
168 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
169 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
170 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
172 #ifdef CPU_DEFAULT
173 static const arm_feature_set cpu_default = CPU_DEFAULT;
174 #endif
176 static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0);
177 static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V2, 0);
178 static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0);
179 static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0);
180 static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0);
181 static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0);
182 static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0);
183 static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0);
184 static const arm_feature_set arm_ext_v4t_5 =
185 ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0);
186 static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0);
187 static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0);
188 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0);
189 static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0);
190 static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0);
191 static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0);
192 static const arm_feature_set arm_ext_v6z = ARM_FEATURE (ARM_EXT_V6Z, 0);
193 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0);
194 static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0);
195 static const arm_feature_set arm_ext_barrier = ARM_FEATURE (ARM_EXT_BARRIER, 0);
196 static const arm_feature_set arm_ext_msr = ARM_FEATURE (ARM_EXT_THUMB_MSR, 0);
197 static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0);
198 static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0);
199 static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0);
200 static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0);
201 static const arm_feature_set arm_ext_m =
202 ARM_FEATURE (ARM_EXT_V6M | ARM_EXT_V7M, 0);
204 static const arm_feature_set arm_arch_any = ARM_ANY;
205 static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1);
206 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
207 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
209 static const arm_feature_set arm_cext_iwmmxt2 =
210 ARM_FEATURE (0, ARM_CEXT_IWMMXT2);
211 static const arm_feature_set arm_cext_iwmmxt =
212 ARM_FEATURE (0, ARM_CEXT_IWMMXT);
213 static const arm_feature_set arm_cext_xscale =
214 ARM_FEATURE (0, ARM_CEXT_XSCALE);
215 static const arm_feature_set arm_cext_maverick =
216 ARM_FEATURE (0, ARM_CEXT_MAVERICK);
217 static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1);
218 static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2);
219 static const arm_feature_set fpu_vfp_ext_v1xd =
220 ARM_FEATURE (0, FPU_VFP_EXT_V1xD);
221 static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1);
222 static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2);
223 static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3);
224 static const arm_feature_set fpu_vfp_ext_d32 =
225 ARM_FEATURE (0, FPU_VFP_EXT_D32);
226 static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1);
227 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
228 ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
229 static const arm_feature_set fpu_neon_fp16 = ARM_FEATURE (0, FPU_NEON_FP16);
231 static int mfloat_abi_opt = -1;
232 /* Record user cpu selection for object attributes. */
233 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
234 /* Must be long enough to hold any of the names in arm_cpus. */
235 static char selected_cpu_name[16];
236 #ifdef OBJ_ELF
237 # ifdef EABI_DEFAULT
238 static int meabi_flags = EABI_DEFAULT;
239 # else
240 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
241 # endif
243 bfd_boolean
244 arm_is_eabi (void)
246 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
248 #endif
250 #ifdef OBJ_ELF
251 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
252 symbolS * GOT_symbol;
253 #endif
255 /* 0: assemble for ARM,
256 1: assemble for Thumb,
257 2: assemble for Thumb even though target CPU does not support Thumb
258 instructions. */
259 static int thumb_mode = 0;
261 /* If unified_syntax is true, we are processing the new unified
262 ARM/Thumb syntax. Important differences from the old ARM mode:
264 - Immediate operands do not require a # prefix.
265 - Conditional affixes always appear at the end of the
266 instruction. (For backward compatibility, those instructions
267 that formerly had them in the middle, continue to accept them
268 there.)
269 - The IT instruction may appear, and if it does is validated
270 against subsequent conditional affixes. It does not generate
271 machine code.
273 Important differences from the old Thumb mode:
275 - Immediate operands do not require a # prefix.
276 - Most of the V6T2 instructions are only available in unified mode.
277 - The .N and .W suffixes are recognized and honored (it is an error
278 if they cannot be honored).
279 - All instructions set the flags if and only if they have an 's' affix.
280 - Conditional affixes may be used. They are validated against
281 preceding IT instructions. Unlike ARM mode, you cannot use a
282 conditional affix except in the scope of an IT instruction. */
284 static bfd_boolean unified_syntax = FALSE;
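/* For instance (illustrative only): in the old divided ARM syntax a
   flag-setting conditional add is written "addeqs r0, r1, #1", with the
   condition before the 's'; in unified syntax the same instruction is
   written "addseq r0, r1, #1" (or "addseq r0, r1, 1", since the '#'
   prefix is optional), with the conditional affix last.  The old
   spelling is still accepted for backward compatibility, as noted
   above.  */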
286 enum neon_el_type
288 NT_invtype,
289 NT_untyped,
290 NT_integer,
291 NT_float,
292 NT_poly,
293 NT_signed,
294 NT_unsigned
297 struct neon_type_el
299 enum neon_el_type type;
300 unsigned size;
303 #define NEON_MAX_TYPE_ELS 4
305 struct neon_type
307 struct neon_type_el el[NEON_MAX_TYPE_ELS];
308 unsigned elems;
311 struct arm_it
313 const char * error;
314 unsigned long instruction;
315 int size;
316 int size_req;
317 int cond;
318 /* "uncond_value" is set to the value in place of the conditional field in
319 unconditional versions of the instruction, or -1 if nothing is
320 appropriate. */
321 int uncond_value;
322 struct neon_type vectype;
323 /* Set to the opcode if the instruction needs relaxation.
324 Zero if the instruction is not relaxed. */
325 unsigned long relax;
326 struct
328 bfd_reloc_code_real_type type;
329 expressionS exp;
330 int pc_rel;
331 } reloc;
333 struct
335 unsigned reg;
336 signed int imm;
337 struct neon_type_el vectype;
338 unsigned present : 1; /* Operand present. */
339 unsigned isreg : 1; /* Operand was a register. */
340 unsigned immisreg : 1; /* .imm field is a second register. */
341 unsigned isscalar : 1; /* Operand is a (Neon) scalar. */
342 unsigned immisalign : 1; /* Immediate is an alignment specifier. */
343 unsigned immisfloat : 1; /* Immediate was parsed as a float. */
344 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
345 instructions. This allows us to disambiguate ARM <-> vector insns. */
346 unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits. */
347 unsigned isvec : 1; /* Is a single, double or quad VFP/Neon reg. */
348 unsigned isquad : 1; /* Operand is Neon quad-precision register. */
349 unsigned issingle : 1; /* Operand is VFP single-precision register. */
350 unsigned hasreloc : 1; /* Operand has relocation suffix. */
351 unsigned writeback : 1; /* Operand has trailing ! */
352 unsigned preind : 1; /* Preindexed address. */
353 unsigned postind : 1; /* Postindexed address. */
354 unsigned negative : 1; /* Index register was negated. */
355 unsigned shifted : 1; /* Shift applied to operation. */
356 unsigned shift_kind : 3; /* Shift operation (enum shift_kind). */
357 } operands[6];
360 static struct arm_it inst;
362 #define NUM_FLOAT_VALS 8
364 const char * fp_const[] =
366 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
369 /* Number of littlenums required to hold an extended precision number. */
370 #define MAX_LITTLENUMS 6
372 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
374 #define FAIL (-1)
375 #define SUCCESS (0)
377 #define SUFF_S 1
378 #define SUFF_D 2
379 #define SUFF_E 3
380 #define SUFF_P 4
382 #define CP_T_X 0x00008000
383 #define CP_T_Y 0x00400000
385 #define CONDS_BIT 0x00100000
386 #define LOAD_BIT 0x00100000
388 #define DOUBLE_LOAD_FLAG 0x00000001
390 struct asm_cond
392 const char * template;
393 unsigned long value;
396 #define COND_ALWAYS 0xE
398 struct asm_psr
400 const char *template;
401 unsigned long field;
404 struct asm_barrier_opt
406 const char *template;
407 unsigned long value;
410 /* The bit that distinguishes CPSR and SPSR. */
411 #define SPSR_BIT (1 << 22)
413 /* The individual PSR flag bits. */
414 #define PSR_c (1 << 16)
415 #define PSR_x (1 << 17)
416 #define PSR_s (1 << 18)
417 #define PSR_f (1 << 19)
419 struct reloc_entry
421 char *name;
422 bfd_reloc_code_real_type reloc;
425 enum vfp_reg_pos
427 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
428 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
431 enum vfp_ldstm_type
433 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
436 /* Bits for DEFINED field in neon_typed_alias. */
437 #define NTA_HASTYPE 1
438 #define NTA_HASINDEX 2
440 struct neon_typed_alias
442 unsigned char defined;
443 unsigned char index;
444 struct neon_type_el eltype;
447 /* ARM register categories. This includes coprocessor numbers and various
448 architecture extensions' registers. */
449 enum arm_reg_type
451 REG_TYPE_RN,
452 REG_TYPE_CP,
453 REG_TYPE_CN,
454 REG_TYPE_FN,
455 REG_TYPE_VFS,
456 REG_TYPE_VFD,
457 REG_TYPE_NQ,
458 REG_TYPE_VFSD,
459 REG_TYPE_NDQ,
460 REG_TYPE_NSDQ,
461 REG_TYPE_VFC,
462 REG_TYPE_MVF,
463 REG_TYPE_MVD,
464 REG_TYPE_MVFX,
465 REG_TYPE_MVDX,
466 REG_TYPE_MVAX,
467 REG_TYPE_DSPSC,
468 REG_TYPE_MMXWR,
469 REG_TYPE_MMXWC,
470 REG_TYPE_MMXWCG,
471 REG_TYPE_XSCALE,
474 /* Structure for a hash table entry for a register.
475 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
476 information which states whether a vector type or index is specified (for a
477 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
478 struct reg_entry
480 const char *name;
481 unsigned char number;
482 unsigned char type;
483 unsigned char builtin;
484 struct neon_typed_alias *neon;
487 /* Diagnostics used when we don't get a register of the expected type. */
488 const char *const reg_expected_msgs[] =
490 N_("ARM register expected"),
491 N_("bad or missing co-processor number"),
492 N_("co-processor register expected"),
493 N_("FPA register expected"),
494 N_("VFP single precision register expected"),
495 N_("VFP/Neon double precision register expected"),
496 N_("Neon quad precision register expected"),
497 N_("VFP single or double precision register expected"),
498 N_("Neon double or quad precision register expected"),
499 N_("VFP single, double or Neon quad precision register expected"),
500 N_("VFP system register expected"),
501 N_("Maverick MVF register expected"),
502 N_("Maverick MVD register expected"),
503 N_("Maverick MVFX register expected"),
504 N_("Maverick MVDX register expected"),
505 N_("Maverick MVAX register expected"),
506 N_("Maverick DSPSC register expected"),
507 N_("iWMMXt data register expected"),
508 N_("iWMMXt control register expected"),
509 N_("iWMMXt scalar register expected"),
510 N_("XScale accumulator register expected"),
513 /* Some well known registers that we refer to directly elsewhere. */
514 #define REG_SP 13
515 #define REG_LR 14
516 #define REG_PC 15
518 /* ARM instructions take 4 bytes in the object file, Thumb instructions
519 take 2: */
520 #define INSN_SIZE 4
522 struct asm_opcode
524 /* Basic string to match. */
525 const char *template;
527 /* Parameters to instruction. */
528 unsigned char operands[8];
530 /* Conditional tag - see opcode_lookup. */
531 unsigned int tag : 4;
533 /* Basic instruction code. */
534 unsigned int avalue : 28;
536 /* Thumb-format instruction code. */
537 unsigned int tvalue;
539 /* Which architecture variant provides this instruction. */
540 const arm_feature_set *avariant;
541 const arm_feature_set *tvariant;
543 /* Function to call to encode instruction in ARM format. */
544 void (* aencode) (void);
546 /* Function to call to encode instruction in Thumb format. */
547 void (* tencode) (void);
550 /* Defines for various bits that we will want to toggle. */
551 #define INST_IMMEDIATE 0x02000000
552 #define OFFSET_REG 0x02000000
553 #define HWOFFSET_IMM 0x00400000
554 #define SHIFT_BY_REG 0x00000010
555 #define PRE_INDEX 0x01000000
556 #define INDEX_UP 0x00800000
557 #define WRITE_BACK 0x00200000
558 #define LDM_TYPE_2_OR_3 0x00400000
559 #define CPSI_MMOD 0x00020000
561 #define LITERAL_MASK 0xf000f000
562 #define OPCODE_MASK 0xfe1fffff
563 #define V4_STR_BIT 0x00000020
565 #define T2_SUBS_PC_LR 0xf3de8f00
567 #define DATA_OP_SHIFT 21
569 #define T2_OPCODE_MASK 0xfe1fffff
570 #define T2_DATA_OP_SHIFT 21
572 /* Codes to distinguish the arithmetic instructions. */
573 #define OPCODE_AND 0
574 #define OPCODE_EOR 1
575 #define OPCODE_SUB 2
576 #define OPCODE_RSB 3
577 #define OPCODE_ADD 4
578 #define OPCODE_ADC 5
579 #define OPCODE_SBC 6
580 #define OPCODE_RSC 7
581 #define OPCODE_TST 8
582 #define OPCODE_TEQ 9
583 #define OPCODE_CMP 10
584 #define OPCODE_CMN 11
585 #define OPCODE_ORR 12
586 #define OPCODE_MOV 13
587 #define OPCODE_BIC 14
588 #define OPCODE_MVN 15
590 #define T2_OPCODE_AND 0
591 #define T2_OPCODE_BIC 1
592 #define T2_OPCODE_ORR 2
593 #define T2_OPCODE_ORN 3
594 #define T2_OPCODE_EOR 4
595 #define T2_OPCODE_ADD 8
596 #define T2_OPCODE_ADC 10
597 #define T2_OPCODE_SBC 11
598 #define T2_OPCODE_SUB 13
599 #define T2_OPCODE_RSB 14
601 #define T_OPCODE_MUL 0x4340
602 #define T_OPCODE_TST 0x4200
603 #define T_OPCODE_CMN 0x42c0
604 #define T_OPCODE_NEG 0x4240
605 #define T_OPCODE_MVN 0x43c0
607 #define T_OPCODE_ADD_R3 0x1800
608 #define T_OPCODE_SUB_R3 0x1a00
609 #define T_OPCODE_ADD_HI 0x4400
610 #define T_OPCODE_ADD_ST 0xb000
611 #define T_OPCODE_SUB_ST 0xb080
612 #define T_OPCODE_ADD_SP 0xa800
613 #define T_OPCODE_ADD_PC 0xa000
614 #define T_OPCODE_ADD_I8 0x3000
615 #define T_OPCODE_SUB_I8 0x3800
616 #define T_OPCODE_ADD_I3 0x1c00
617 #define T_OPCODE_SUB_I3 0x1e00
619 #define T_OPCODE_ASR_R 0x4100
620 #define T_OPCODE_LSL_R 0x4080
621 #define T_OPCODE_LSR_R 0x40c0
622 #define T_OPCODE_ROR_R 0x41c0
623 #define T_OPCODE_ASR_I 0x1000
624 #define T_OPCODE_LSL_I 0x0000
625 #define T_OPCODE_LSR_I 0x0800
627 #define T_OPCODE_MOV_I8 0x2000
628 #define T_OPCODE_CMP_I8 0x2800
629 #define T_OPCODE_CMP_LR 0x4280
630 #define T_OPCODE_MOV_HR 0x4600
631 #define T_OPCODE_CMP_HR 0x4500
633 #define T_OPCODE_LDR_PC 0x4800
634 #define T_OPCODE_LDR_SP 0x9800
635 #define T_OPCODE_STR_SP 0x9000
636 #define T_OPCODE_LDR_IW 0x6800
637 #define T_OPCODE_STR_IW 0x6000
638 #define T_OPCODE_LDR_IH 0x8800
639 #define T_OPCODE_STR_IH 0x8000
640 #define T_OPCODE_LDR_IB 0x7800
641 #define T_OPCODE_STR_IB 0x7000
642 #define T_OPCODE_LDR_RW 0x5800
643 #define T_OPCODE_STR_RW 0x5000
644 #define T_OPCODE_LDR_RH 0x5a00
645 #define T_OPCODE_STR_RH 0x5200
646 #define T_OPCODE_LDR_RB 0x5c00
647 #define T_OPCODE_STR_RB 0x5400
649 #define T_OPCODE_PUSH 0xb400
650 #define T_OPCODE_POP 0xbc00
652 #define T_OPCODE_BRANCH 0xe000
654 #define THUMB_SIZE 2 /* Size of thumb instruction. */
655 #define THUMB_PP_PC_LR 0x0100
656 #define THUMB_LOAD_BIT 0x0800
657 #define THUMB2_LOAD_BIT 0x00100000
659 #define BAD_ARGS _("bad arguments to instruction")
660 #define BAD_PC _("r15 not allowed here")
661 #define BAD_COND _("instruction cannot be conditional")
662 #define BAD_OVERLAP _("registers may not be the same")
663 #define BAD_HIREG _("lo register required")
664 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
665 #define BAD_ADDR_MODE _("instruction does not accept this addressing mode")
666 #define BAD_BRANCH _("branch must be last instruction in IT block")
667 #define BAD_NOT_IT _("instruction not allowed in IT block")
668 #define BAD_FPU _("selected FPU does not support instruction")
670 static struct hash_control *arm_ops_hsh;
671 static struct hash_control *arm_cond_hsh;
672 static struct hash_control *arm_shift_hsh;
673 static struct hash_control *arm_psr_hsh;
674 static struct hash_control *arm_v7m_psr_hsh;
675 static struct hash_control *arm_reg_hsh;
676 static struct hash_control *arm_reloc_hsh;
677 static struct hash_control *arm_barrier_opt_hsh;
679 /* Stuff needed to resolve the label ambiguity
682 label: <insn>
683 may differ from:
685 label:
686 <insn> */
688 symbolS * last_label_seen;
689 static int label_is_thumb_function_name = FALSE;
691 /* Literal pool structure. Held on a per-section
692 and per-sub-section basis. */
694 #define MAX_LITERAL_POOL_SIZE 1024
695 typedef struct literal_pool
697 expressionS literals [MAX_LITERAL_POOL_SIZE];
698 unsigned int next_free_entry;
699 unsigned int id;
700 symbolS * symbol;
701 segT section;
702 subsegT sub_section;
703 struct literal_pool * next;
704 } literal_pool;
706 /* Pointer to a linked list of literal pools. */
707 literal_pool * list_of_pools = NULL;
709 /* State variables for IT block handling. */
710 static int current_it_mask = 0;
711 static int current_cc;
713 /* Pure syntax. */
715 /* This array holds the chars that always start a comment. If the
716 pre-processor is disabled, these aren't very useful. */
717 const char comment_chars[] = "@";
719 /* This array holds the chars that only start a comment at the beginning of
720 a line. If the line seems to have the form '# 123 filename'
721 .line and .file directives will appear in the pre-processed output. */
722 /* Note that input_file.c hand checks for '#' at the beginning of the
723 first line of the input file. This is because the compiler outputs
724 #NO_APP at the beginning of its output. */
725 /* Also note that comments like this one will always work. */
726 const char line_comment_chars[] = "#";
728 const char line_separator_chars[] = ";";
730 /* Chars that can be used to separate mant
731 from exp in floating point numbers. */
732 const char EXP_CHARS[] = "eE";
734 /* Chars that mean this number is a floating point constant. */
735 /* As in 0f12.456 */
736 /* or 0d1.2345e12 */
738 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
740 /* Prefix characters that indicate the start of an immediate
741 value. */
742 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
744 /* Separator character handling. */
746 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
748 static inline int
749 skip_past_char (char ** str, char c)
750 {
751 if (**str == c)
752 {
753 (*str)++;
754 return SUCCESS;
755 }
756 else
757 return FAIL;
758 }
759 #define skip_past_comma(str) skip_past_char (str, ',')
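/* Illustrative sketch (not part of the assembler, kept out of the build):
   how the separator helpers above advance a parse cursor over "r0,r1".  */
#if 0
static void
separator_helpers_example (void)
{
  char buf[] = "r0,r1";
  char *p = buf + 2;		/* Points at the ','.  */

  if (skip_past_comma (&p) == SUCCESS)	/* Consumes the ','.  */
    skip_whitespace (p);		/* Skips at most one space; the input
					   scrubber collapses longer runs.  */
  /* p now points at "r1".  */
}
#endif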
761 /* Arithmetic expressions (possibly involving symbols). */
763 /* Return TRUE if anything in the expression is a bignum. */
765 static int
766 walk_no_bignums (symbolS * sp)
768 if (symbol_get_value_expression (sp)->X_op == O_big)
769 return 1;
771 if (symbol_get_value_expression (sp)->X_add_symbol)
773 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
774 || (symbol_get_value_expression (sp)->X_op_symbol
775 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
778 return 0;
781 static int in_my_get_expression = 0;
783 /* Third argument to my_get_expression. */
784 #define GE_NO_PREFIX 0
785 #define GE_IMM_PREFIX 1
786 #define GE_OPT_PREFIX 2
787 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
788 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
789 #define GE_OPT_PREFIX_BIG 3
791 static int
792 my_get_expression (expressionS * ep, char ** str, int prefix_mode)
794 char * save_in;
795 segT seg;
797 /* In unified syntax, all prefixes are optional. */
798 if (unified_syntax)
799 prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
800 : GE_OPT_PREFIX;
802 switch (prefix_mode)
804 case GE_NO_PREFIX: break;
805 case GE_IMM_PREFIX:
806 if (!is_immediate_prefix (**str))
808 inst.error = _("immediate expression requires a # prefix");
809 return FAIL;
811 (*str)++;
812 break;
813 case GE_OPT_PREFIX:
814 case GE_OPT_PREFIX_BIG:
815 if (is_immediate_prefix (**str))
816 (*str)++;
817 break;
818 default: abort ();
821 memset (ep, 0, sizeof (expressionS));
823 save_in = input_line_pointer;
824 input_line_pointer = *str;
825 in_my_get_expression = 1;
826 seg = expression (ep);
827 in_my_get_expression = 0;
829 if (ep->X_op == O_illegal)
831 /* We found a bad expression in md_operand(). */
832 *str = input_line_pointer;
833 input_line_pointer = save_in;
834 if (inst.error == NULL)
835 inst.error = _("bad expression");
836 return 1;
839 #ifdef OBJ_AOUT
840 if (seg != absolute_section
841 && seg != text_section
842 && seg != data_section
843 && seg != bss_section
844 && seg != undefined_section)
846 inst.error = _("bad segment");
847 *str = input_line_pointer;
848 input_line_pointer = save_in;
849 return 1;
851 #endif
853 /* Get rid of any bignums now, so that we don't generate an error for which
854 we can't establish a line number later on. Big numbers are never valid
855 in instructions, which is where this routine is always called. */
856 if (prefix_mode != GE_OPT_PREFIX_BIG
857 && (ep->X_op == O_big
858 || (ep->X_add_symbol
859 && (walk_no_bignums (ep->X_add_symbol)
860 || (ep->X_op_symbol
861 && walk_no_bignums (ep->X_op_symbol))))))
863 inst.error = _("invalid constant");
864 *str = input_line_pointer;
865 input_line_pointer = save_in;
866 return 1;
869 *str = input_line_pointer;
870 input_line_pointer = save_in;
871 return 0;
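/* Illustrative sketch (not built): reading an immediate such as "#4".
   my_get_expression returns zero on success and fills in the expression;
   with GE_IMM_PREFIX the leading '#' (or '$') is required unless unified
   syntax is in effect.  */
#if 0
static void
my_get_expression_example (void)
{
  expressionS ex;
  char buf[] = "#4";
  char *p = buf;

  if (my_get_expression (&ex, &p, GE_IMM_PREFIX) == 0)
    {
      /* ex.X_op == O_constant and ex.X_add_number == 4; p now points
	 past the digits.  */
    }
}
#endif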
874 /* Turn a string in input_line_pointer into a floating point constant
875 of type TYPE, and store the appropriate bytes in *LITP. The number
876 of bytes emitted is stored in *SIZEP. An error message is
877 returned, or NULL on OK.
879 Note that fp constants aren't represented in the normal way on the ARM.
880 In big endian mode, things are as expected. However, in little endian
881 mode fp constants are big-endian word-wise, and little-endian byte-wise
882 within the words. For example, (double) 1.1 in big endian mode is
883 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
884 the byte sequence 99 99 f1 3f 9a 99 99 99.
886 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
888 char *
889 md_atof (int type, char * litP, int * sizeP)
891 int prec;
892 LITTLENUM_TYPE words[MAX_LITTLENUMS];
893 char *t;
894 int i;
896 switch (type)
898 case 'f':
899 case 'F':
900 case 's':
901 case 'S':
902 prec = 2;
903 break;
905 case 'd':
906 case 'D':
907 case 'r':
908 case 'R':
909 prec = 4;
910 break;
912 case 'x':
913 case 'X':
914 prec = 5;
915 break;
917 case 'p':
918 case 'P':
919 prec = 5;
920 break;
922 default:
923 *sizeP = 0;
924 return _("Unrecognized or unsupported floating point constant");
927 t = atof_ieee (input_line_pointer, type, words);
928 if (t)
929 input_line_pointer = t;
930 *sizeP = prec * sizeof (LITTLENUM_TYPE);
932 if (target_big_endian)
934 for (i = 0; i < prec; i++)
936 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
937 litP += sizeof (LITTLENUM_TYPE);
940 else
942 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
943 for (i = prec - 1; i >= 0; i--)
945 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
946 litP += sizeof (LITTLENUM_TYPE);
948 else
949 /* For a 4 byte float the order of elements in `words' is 1 0.
950 For an 8 byte float the order is 1 0 3 2. */
951 for (i = 0; i < prec; i += 2)
953 md_number_to_chars (litP, (valueT) words[i + 1],
954 sizeof (LITTLENUM_TYPE));
955 md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
956 (valueT) words[i], sizeof (LITTLENUM_TYPE));
957 litP += 2 * sizeof (LITTLENUM_TYPE);
961 return NULL;
964 /* We handle all bad expressions here, so that we can report the faulty
965 instruction in the error message. */
966 void
967 md_operand (expressionS * expr)
969 if (in_my_get_expression)
970 expr->X_op = O_illegal;
973 /* Immediate values. */
975 /* Generic immediate-value read function for use in directives.
976 Accepts anything that 'expression' can fold to a constant.
977 *val receives the number. */
978 #ifdef OBJ_ELF
979 static int
980 immediate_for_directive (int *val)
982 expressionS exp;
983 exp.X_op = O_illegal;
985 if (is_immediate_prefix (*input_line_pointer))
987 input_line_pointer++;
988 expression (&exp);
991 if (exp.X_op != O_constant)
993 as_bad (_("expected #constant"));
994 ignore_rest_of_line ();
995 return FAIL;
997 *val = exp.X_add_number;
998 return SUCCESS;
1000 #endif
1002 /* Register parsing. */
1004 /* Generic register parser. CCP points to what should be the
1005 beginning of a register name. If it is indeed a valid register
1006 name, advance CCP over it and return the reg_entry structure;
1007 otherwise return NULL. Does not issue diagnostics. */
1009 static struct reg_entry *
1010 arm_reg_parse_multi (char **ccp)
1012 char *start = *ccp;
1013 char *p;
1014 struct reg_entry *reg;
1016 #ifdef REGISTER_PREFIX
1017 if (*start != REGISTER_PREFIX)
1018 return NULL;
1019 start++;
1020 #endif
1021 #ifdef OPTIONAL_REGISTER_PREFIX
1022 if (*start == OPTIONAL_REGISTER_PREFIX)
1023 start++;
1024 #endif
1026 p = start;
1027 if (!ISALPHA (*p) || !is_name_beginner (*p))
1028 return NULL;
1031 p++;
1032 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1034 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1036 if (!reg)
1037 return NULL;
1039 *ccp = p;
1040 return reg;
1043 static int
1044 arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
1045 enum arm_reg_type type)
1047 /* Alternative syntaxes are accepted for a few register classes. */
1048 switch (type)
1050 case REG_TYPE_MVF:
1051 case REG_TYPE_MVD:
1052 case REG_TYPE_MVFX:
1053 case REG_TYPE_MVDX:
1054 /* Generic coprocessor register names are allowed for these. */
1055 if (reg && reg->type == REG_TYPE_CN)
1056 return reg->number;
1057 break;
1059 case REG_TYPE_CP:
1060 /* For backward compatibility, a bare number is valid here. */
1062 unsigned long processor = strtoul (start, ccp, 10);
1063 if (*ccp != start && processor <= 15)
1064 return processor;
1067 case REG_TYPE_MMXWC:
1068 /* WC includes WCG. ??? I'm not sure this is true for all
1069 instructions that take WC registers. */
1070 if (reg && reg->type == REG_TYPE_MMXWCG)
1071 return reg->number;
1072 break;
1074 default:
1075 break;
1078 return FAIL;
1081 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1082 return value is the register number or FAIL. */
1084 static int
1085 arm_reg_parse (char **ccp, enum arm_reg_type type)
1087 char *start = *ccp;
1088 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1089 int ret;
1091 /* Do not allow a scalar (reg+index) to parse as a register. */
1092 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1093 return FAIL;
1095 if (reg && reg->type == type)
1096 return reg->number;
1098 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1099 return ret;
1101 *ccp = start;
1102 return FAIL;
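/* Illustrative sketch (not built, and assuming the built-in register
   table has been set up by md_begin): arm_reg_parse on a core register.
   For the built-in name "r3" with type REG_TYPE_RN it returns 3 and
   advances the cursor; on a type mismatch it returns FAIL and leaves
   the cursor untouched.  */
#if 0
static void
arm_reg_parse_example (void)
{
  char buf[] = "r3,r4";
  char *p = buf;
  int regno = arm_reg_parse (&p, REG_TYPE_RN);	/* 3; p points at ','.  */
}
#endif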
1105 /* Parse a Neon type specifier. *STR should point at the leading '.'
1106 character. Does no verification at this stage that the type fits the opcode
1107 properly. E.g.,
1109 .i32.i32.s16
1110 .s32.f32
1111 .u16
1113 Can all be legally parsed by this function.
1115 Fills in neon_type struct pointer with parsed information, and updates STR
1116 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1117 type, FAIL if not. */
1119 static int
1120 parse_neon_type (struct neon_type *type, char **str)
1122 char *ptr = *str;
1124 if (type)
1125 type->elems = 0;
1127 while (type->elems < NEON_MAX_TYPE_ELS)
1129 enum neon_el_type thistype = NT_untyped;
1130 unsigned thissize = -1u;
1132 if (*ptr != '.')
1133 break;
1135 ptr++;
1137 /* Just a size without an explicit type. */
1138 if (ISDIGIT (*ptr))
1139 goto parsesize;
1141 switch (TOLOWER (*ptr))
1143 case 'i': thistype = NT_integer; break;
1144 case 'f': thistype = NT_float; break;
1145 case 'p': thistype = NT_poly; break;
1146 case 's': thistype = NT_signed; break;
1147 case 'u': thistype = NT_unsigned; break;
1148 case 'd':
1149 thistype = NT_float;
1150 thissize = 64;
1151 ptr++;
1152 goto done;
1153 default:
1154 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1155 return FAIL;
1158 ptr++;
1160 /* .f is an abbreviation for .f32. */
1161 if (thistype == NT_float && !ISDIGIT (*ptr))
1162 thissize = 32;
1163 else
1165 parsesize:
1166 thissize = strtoul (ptr, &ptr, 10);
1168 if (thissize != 8 && thissize != 16 && thissize != 32
1169 && thissize != 64)
1171 as_bad (_("bad size %d in type specifier"), thissize);
1172 return FAIL;
1176 done:
1177 if (type)
1179 type->el[type->elems].type = thistype;
1180 type->el[type->elems].size = thissize;
1181 type->elems++;
1185 /* Empty/missing type is not a successful parse. */
1186 if (type->elems == 0)
1187 return FAIL;
1189 *str = ptr;
1191 return SUCCESS;
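/* Illustrative sketch (not built): parsing the specifier ".s32.f32"
   with parse_neon_type above yields two elements,
   el[0] = { NT_signed, 32 } and el[1] = { NT_float, 32 }.  */
#if 0
static void
parse_neon_type_example (void)
{
  struct neon_type t;
  char buf[] = ".s32.f32";
  char *p = buf;

  if (parse_neon_type (&t, &p) == SUCCESS)
    {
      /* t.elems == 2; p now points at the terminating NUL.  */
    }
}
#endif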
1194 /* Errors may be set multiple times during parsing or bit encoding
1195 (particularly in the Neon bits), but usually the earliest error which is set
1196 will be the most meaningful. Avoid overwriting it with later (cascading)
1197 errors by calling this function. */
1199 static void
1200 first_error (const char *err)
1202 if (!inst.error)
1203 inst.error = err;
1206 /* Parse a single type, e.g. ".s32", leading period included. */
1207 static int
1208 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1210 char *str = *ccp;
1211 struct neon_type optype;
1213 if (*str == '.')
1215 if (parse_neon_type (&optype, &str) == SUCCESS)
1217 if (optype.elems == 1)
1218 *vectype = optype.el[0];
1219 else
1221 first_error (_("only one type should be specified for operand"));
1222 return FAIL;
1225 else
1227 first_error (_("vector type expected"));
1228 return FAIL;
1231 else
1232 return FAIL;
1234 *ccp = str;
1236 return SUCCESS;
1239 /* Special meanings for indices (which have a range of 0-7), which will fit into
1240 a 4-bit integer. */
1242 #define NEON_ALL_LANES 15
1243 #define NEON_INTERLEAVE_LANES 14
1245 /* Parse either a register or a scalar, with an optional type. Return the
1246 register number, and optionally fill in the actual type of the register
1247 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1248 type/index information in *TYPEINFO. */
1250 static int
1251 parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
1252 enum arm_reg_type *rtype,
1253 struct neon_typed_alias *typeinfo)
1255 char *str = *ccp;
1256 struct reg_entry *reg = arm_reg_parse_multi (&str);
1257 struct neon_typed_alias atype;
1258 struct neon_type_el parsetype;
1260 atype.defined = 0;
1261 atype.index = -1;
1262 atype.eltype.type = NT_invtype;
1263 atype.eltype.size = -1;
1265 /* Try alternate syntax for some types of register. Note these are mutually
1266 exclusive with the Neon syntax extensions. */
1267 if (reg == NULL)
1269 int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
1270 if (altreg != FAIL)
1271 *ccp = str;
1272 if (typeinfo)
1273 *typeinfo = atype;
1274 return altreg;
1277 /* Undo polymorphism when a set of register types may be accepted. */
1278 if ((type == REG_TYPE_NDQ
1279 && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
1280 || (type == REG_TYPE_VFSD
1281 && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
1282 || (type == REG_TYPE_NSDQ
1283 && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
1284 || reg->type == REG_TYPE_NQ))
1285 || (type == REG_TYPE_MMXWC
1286 && (reg->type == REG_TYPE_MMXWCG)))
1287 type = reg->type;
1289 if (type != reg->type)
1290 return FAIL;
1292 if (reg->neon)
1293 atype = *reg->neon;
1295 if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
1297 if ((atype.defined & NTA_HASTYPE) != 0)
1299 first_error (_("can't redefine type for operand"));
1300 return FAIL;
1302 atype.defined |= NTA_HASTYPE;
1303 atype.eltype = parsetype;
1306 if (skip_past_char (&str, '[') == SUCCESS)
1308 if (type != REG_TYPE_VFD)
1310 first_error (_("only D registers may be indexed"));
1311 return FAIL;
1314 if ((atype.defined & NTA_HASINDEX) != 0)
1316 first_error (_("can't change index for operand"));
1317 return FAIL;
1320 atype.defined |= NTA_HASINDEX;
1322 if (skip_past_char (&str, ']') == SUCCESS)
1323 atype.index = NEON_ALL_LANES;
1324 else
1326 expressionS exp;
1328 my_get_expression (&exp, &str, GE_NO_PREFIX);
1330 if (exp.X_op != O_constant)
1332 first_error (_("constant expression required"));
1333 return FAIL;
1336 if (skip_past_char (&str, ']') == FAIL)
1337 return FAIL;
1339 atype.index = exp.X_add_number;
1343 if (typeinfo)
1344 *typeinfo = atype;
1346 if (rtype)
1347 *rtype = type;
1349 *ccp = str;
1351 return reg->number;
1354 /* Like arm_reg_parse, but allow the following extra features:
1355 - If RTYPE is non-zero, return the (possibly restricted) type of the
1356 register (e.g. Neon double or quad reg when either has been requested).
1357 - If this is a Neon vector type with additional type information, fill
1358 in the struct pointed to by VECTYPE (if non-NULL).
1359 This function will fault on encountering a scalar. */
1361 static int
1362 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1363 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1365 struct neon_typed_alias atype;
1366 char *str = *ccp;
1367 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1369 if (reg == FAIL)
1370 return FAIL;
1372 /* Do not allow a scalar (reg+index) to parse as a register. */
1373 if ((atype.defined & NTA_HASINDEX) != 0)
1375 first_error (_("register operand expected, but got scalar"));
1376 return FAIL;
1379 if (vectype)
1380 *vectype = atype.eltype;
1382 *ccp = str;
1384 return reg;
1387 #define NEON_SCALAR_REG(X) ((X) >> 4)
1388 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1390 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1391 have enough information to be able to do a good job bounds-checking. So, we
1392 just do easy checks here, and do further checks later. */
1394 static int
1395 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1397 int reg;
1398 char *str = *ccp;
1399 struct neon_typed_alias atype;
1401 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1403 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1404 return FAIL;
1406 if (atype.index == NEON_ALL_LANES)
1408 first_error (_("scalar must have an index"));
1409 return FAIL;
1411 else if (atype.index >= 64 / elsize)
1413 first_error (_("scalar index out of range"));
1414 return FAIL;
1417 if (type)
1418 *type = atype.eltype;
1420 *ccp = str;
1422 return reg * 16 + atype.index;
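/* Illustrative sketch (not built): parse_scalar packs the D register
   number and the lane index into a single value which callers split with
   NEON_SCALAR_REG and NEON_SCALAR_INDEX above.  For "d5[1]" and a 32-bit
   element size it returns 5 * 16 + 1 == 81.  */
#if 0
static void
parse_scalar_example (void)
{
  struct neon_type_el eltype;
  char buf[] = "d5[1]";
  char *p = buf;
  int scalar = parse_scalar (&p, 32, &eltype);

  /* NEON_SCALAR_REG (scalar) == 5, NEON_SCALAR_INDEX (scalar) == 1.  */
}
#endif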
1425 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1426 static long
1427 parse_reg_list (char ** strp)
1429 char * str = * strp;
1430 long range = 0;
1431 int another_range;
1433 /* We come back here if we get ranges concatenated by '+' or '|'. */
1436 another_range = 0;
1438 if (*str == '{')
1440 int in_range = 0;
1441 int cur_reg = -1;
1443 str++;
1446 int reg;
1448 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1450 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1451 return FAIL;
1454 if (in_range)
1456 int i;
1458 if (reg <= cur_reg)
1460 first_error (_("bad range in register list"));
1461 return FAIL;
1464 for (i = cur_reg + 1; i < reg; i++)
1466 if (range & (1 << i))
1467 as_tsktsk
1468 (_("Warning: duplicated register (r%d) in register list"),
1470 else
1471 range |= 1 << i;
1473 in_range = 0;
1476 if (range & (1 << reg))
1477 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1478 reg);
1479 else if (reg <= cur_reg)
1480 as_tsktsk (_("Warning: register range not in ascending order"));
1482 range |= 1 << reg;
1483 cur_reg = reg;
1485 while (skip_past_comma (&str) != FAIL
1486 || (in_range = 1, *str++ == '-'));
1487 str--;
1489 if (*str++ != '}')
1491 first_error (_("missing `}'"));
1492 return FAIL;
1495 else
1497 expressionS expr;
1499 if (my_get_expression (&expr, &str, GE_NO_PREFIX))
1500 return FAIL;
1502 if (expr.X_op == O_constant)
1504 if (expr.X_add_number
1505 != (expr.X_add_number & 0x0000ffff))
1507 inst.error = _("invalid register mask");
1508 return FAIL;
1511 if ((range & expr.X_add_number) != 0)
1513 int regno = range & expr.X_add_number;
1515 regno &= -regno;
1516 regno = (1 << regno) - 1;
1517 as_tsktsk
1518 (_("Warning: duplicated register (r%d) in register list"),
1519 regno);
1522 range |= expr.X_add_number;
1524 else
1526 if (inst.reloc.type != 0)
1528 inst.error = _("expression too complex");
1529 return FAIL;
1532 memcpy (&inst.reloc.exp, &expr, sizeof (expressionS));
1533 inst.reloc.type = BFD_RELOC_ARM_MULTI;
1534 inst.reloc.pc_rel = 0;
1538 if (*str == '|' || *str == '+')
1540 str++;
1541 another_range = 1;
1544 while (another_range);
1546 *strp = str;
1547 return range;
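/* Illustrative sketch (not built): "{r0,r2-r4}" parses to the mask 0x1d
   (bits 0, 2, 3 and 4), assuming whitespace has already been removed by
   the input scrubber as it is for real instruction operands.  */
#if 0
static void
parse_reg_list_example (void)
{
  char buf[] = "{r0,r2-r4}";
  char *p = buf;
  long mask = parse_reg_list (&p);	/* 0x1d, or FAIL on error.  */
}
#endif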
1550 /* Types of registers in a list. */
1552 enum reg_list_els
1554 REGLIST_VFP_S,
1555 REGLIST_VFP_D,
1556 REGLIST_NEON_D
1559 /* Parse a VFP register list. If the string is invalid return FAIL.
1560 Otherwise return the number of registers, and set PBASE to the first
1561 register. Parses registers of type ETYPE.
1562 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1563 - Q registers can be used to specify pairs of D registers
1564 - { } can be omitted from around a singleton register list
1565 FIXME: This is not implemented, as it would require backtracking in
1566 some cases, e.g.:
1567 vtbl.8 d3,d4,d5
1568 This could be done (the meaning isn't really ambiguous), but doesn't
1569 fit in well with the current parsing framework.
1570 - 32 D registers may be used (also true for VFPv3).
1571 FIXME: Types are ignored in these register lists, which is probably a
1572 bug. */
1574 static int
1575 parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
1577 char *str = *ccp;
1578 int base_reg;
1579 int new_base;
1580 enum arm_reg_type regtype = 0;
1581 int max_regs = 0;
1582 int count = 0;
1583 int warned = 0;
1584 unsigned long mask = 0;
1585 int i;
1587 if (*str != '{')
1589 inst.error = _("expecting {");
1590 return FAIL;
1593 str++;
1595 switch (etype)
1597 case REGLIST_VFP_S:
1598 regtype = REG_TYPE_VFS;
1599 max_regs = 32;
1600 break;
1602 case REGLIST_VFP_D:
1603 regtype = REG_TYPE_VFD;
1604 break;
1606 case REGLIST_NEON_D:
1607 regtype = REG_TYPE_NDQ;
1608 break;
1611 if (etype != REGLIST_VFP_S)
1613 /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */
1614 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
1616 max_regs = 32;
1617 if (thumb_mode)
1618 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
1619 fpu_vfp_ext_d32);
1620 else
1621 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
1622 fpu_vfp_ext_d32);
1624 else
1625 max_regs = 16;
1628 base_reg = max_regs;
1632 int setmask = 1, addregs = 1;
1634 new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);
1636 if (new_base == FAIL)
1638 first_error (_(reg_expected_msgs[regtype]));
1639 return FAIL;
1642 if (new_base >= max_regs)
1644 first_error (_("register out of range in list"));
1645 return FAIL;
1648 /* Note: a value of 2 * n is returned for the register Q<n>. */
1649 if (regtype == REG_TYPE_NQ)
1651 setmask = 3;
1652 addregs = 2;
1655 if (new_base < base_reg)
1656 base_reg = new_base;
1658 if (mask & (setmask << new_base))
1660 first_error (_("invalid register list"));
1661 return FAIL;
1664 if ((mask >> new_base) != 0 && ! warned)
1666 as_tsktsk (_("register list not in ascending order"));
1667 warned = 1;
1670 mask |= setmask << new_base;
1671 count += addregs;
1673 if (*str == '-') /* We have the start of a range expression */
1675 int high_range;
1677 str++;
1679 if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
1680 == FAIL)
1682 inst.error = gettext (reg_expected_msgs[regtype]);
1683 return FAIL;
1686 if (high_range >= max_regs)
1688 first_error (_("register out of range in list"));
1689 return FAIL;
1692 if (regtype == REG_TYPE_NQ)
1693 high_range = high_range + 1;
1695 if (high_range <= new_base)
1697 inst.error = _("register range not in ascending order");
1698 return FAIL;
1701 for (new_base += addregs; new_base <= high_range; new_base += addregs)
1703 if (mask & (setmask << new_base))
1705 inst.error = _("invalid register list");
1706 return FAIL;
1709 mask |= setmask << new_base;
1710 count += addregs;
1714 while (skip_past_comma (&str) != FAIL);
1716 str++;
1718 /* Sanity check -- should have raised a parse error above. */
1719 if (count == 0 || count > max_regs)
1720 abort ();
1722 *pbase = base_reg;
1724 /* Final test -- the registers must be consecutive. */
1725 mask >>= base_reg;
1726 for (i = 0; i < count; i++)
1728 if ((mask & (1u << i)) == 0)
1730 inst.error = _("non-contiguous register range");
1731 return FAIL;
1735 *ccp = str;
1737 return count;
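/* Illustrative sketch (not built): "{d0-d3}" with REGLIST_VFP_D sets
   *pbase to 0 and returns 4; the registers must be consecutive, so a
   list such as "{d0,d2}" is rejected as non-contiguous.  */
#if 0
static void
parse_vfp_reg_list_example (void)
{
  unsigned int base;
  char buf[] = "{d0-d3}";
  char *p = buf;
  int count = parse_vfp_reg_list (&p, &base, REGLIST_VFP_D);	/* 4.  */
}
#endif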
1740 /* True if two alias types are the same. */
1742 static int
1743 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1745 if (!a && !b)
1746 return 1;
1748 if (!a || !b)
1749 return 0;
1751 if (a->defined != b->defined)
1752 return 0;
1754 if ((a->defined & NTA_HASTYPE) != 0
1755 && (a->eltype.type != b->eltype.type
1756 || a->eltype.size != b->eltype.size))
1757 return 0;
1759 if ((a->defined & NTA_HASINDEX) != 0
1760 && (a->index != b->index))
1761 return 0;
1763 return 1;
1766 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1767 The base register is put in *PBASE.
1768 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1769 the return value.
1770 The register stride (minus one) is put in bit 4 of the return value.
1771 Bits [6:5] encode the list length (minus one).
1772 The type of the list elements is put in *ELTYPE, if non-NULL. */
1774 #define NEON_LANE(X) ((X) & 0xf)
1775 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1776 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
1778 static int
1779 parse_neon_el_struct_list (char **str, unsigned *pbase,
1780 struct neon_type_el *eltype)
1782 char *ptr = *str;
1783 int base_reg = -1;
1784 int reg_incr = -1;
1785 int count = 0;
1786 int lane = -1;
1787 int leading_brace = 0;
1788 enum arm_reg_type rtype = REG_TYPE_NDQ;
1789 int addregs = 1;
1790 const char *const incr_error = "register stride must be 1 or 2";
1791 const char *const type_error = "mismatched element/structure types in list";
1792 struct neon_typed_alias firsttype;
1794 if (skip_past_char (&ptr, '{') == SUCCESS)
1795 leading_brace = 1;
1799 struct neon_typed_alias atype;
1800 int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);
1802 if (getreg == FAIL)
1804 first_error (_(reg_expected_msgs[rtype]));
1805 return FAIL;
1808 if (base_reg == -1)
1810 base_reg = getreg;
1811 if (rtype == REG_TYPE_NQ)
1813 reg_incr = 1;
1814 addregs = 2;
1816 firsttype = atype;
1818 else if (reg_incr == -1)
1820 reg_incr = getreg - base_reg;
1821 if (reg_incr < 1 || reg_incr > 2)
1823 first_error (_(incr_error));
1824 return FAIL;
1827 else if (getreg != base_reg + reg_incr * count)
1829 first_error (_(incr_error));
1830 return FAIL;
1833 if (!neon_alias_types_same (&atype, &firsttype))
1835 first_error (_(type_error));
1836 return FAIL;
1839 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
1840 modes. */
1841 if (ptr[0] == '-')
1843 struct neon_typed_alias htype;
1844 int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
1845 if (lane == -1)
1846 lane = NEON_INTERLEAVE_LANES;
1847 else if (lane != NEON_INTERLEAVE_LANES)
1849 first_error (_(type_error));
1850 return FAIL;
1852 if (reg_incr == -1)
1853 reg_incr = 1;
1854 else if (reg_incr != 1)
1856 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
1857 return FAIL;
1859 ptr++;
1860 hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
1861 if (hireg == FAIL)
1863 first_error (_(reg_expected_msgs[rtype]));
1864 return FAIL;
1866 if (!neon_alias_types_same (&htype, &firsttype))
1868 first_error (_(type_error));
1869 return FAIL;
1871 count += hireg + dregs - getreg;
1872 continue;
1875 /* If we're using Q registers, we can't use [] or [n] syntax. */
1876 if (rtype == REG_TYPE_NQ)
1878 count += 2;
1879 continue;
1882 if ((atype.defined & NTA_HASINDEX) != 0)
1884 if (lane == -1)
1885 lane = atype.index;
1886 else if (lane != atype.index)
1888 first_error (_(type_error));
1889 return FAIL;
1892 else if (lane == -1)
1893 lane = NEON_INTERLEAVE_LANES;
1894 else if (lane != NEON_INTERLEAVE_LANES)
1896 first_error (_(type_error));
1897 return FAIL;
1899 count++;
1901 while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);
1903 /* No lane set by [x]. We must be interleaving structures. */
1904 if (lane == -1)
1905 lane = NEON_INTERLEAVE_LANES;
1907 /* Sanity check. */
1908 if (lane == -1 || base_reg == -1 || count < 1 || count > 4
1909 || (count > 1 && reg_incr == -1))
1911 first_error (_("error parsing element/structure list"));
1912 return FAIL;
1915 if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
1917 first_error (_("expected }"));
1918 return FAIL;
1921 if (reg_incr == -1)
1922 reg_incr = 1;
1924 if (eltype)
1925 *eltype = firsttype.eltype;
1927 *pbase = base_reg;
1928 *str = ptr;
1930 return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
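/* Illustrative sketch (not built): for the list "{d0[1],d2[1]}" the
   function above returns a value X with NEON_LANE (X) == 1,
   NEON_REG_STRIDE (X) == 2 and NEON_REGLIST_LENGTH (X) == 2, and sets
   *pbase to 0.  */
#if 0
static void
parse_neon_el_struct_list_example (void)
{
  unsigned base;
  struct neon_type_el eltype;
  char buf[] = "{d0[1],d2[1]}";
  char *p = buf;
  int x = parse_neon_el_struct_list (&p, &base, &eltype);
}
#endif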
1933 /* Parse an explicit relocation suffix on an expression. This is
1934 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
1935 arm_reloc_hsh contains no entries, so this function can only
1936 succeed if there is no () after the word. Returns -1 on error,
1937 BFD_RELOC_UNUSED if there wasn't any suffix. */
1938 static int
1939 parse_reloc (char **str)
1941 struct reloc_entry *r;
1942 char *p, *q;
1944 if (**str != '(')
1945 return BFD_RELOC_UNUSED;
1947 p = *str + 1;
1948 q = p;
1950 while (*q && *q != ')' && *q != ',')
1951 q++;
1952 if (*q != ')')
1953 return -1;
1955 if ((r = hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
1956 return -1;
1958 *str = q + 1;
1959 return r->reloc;
1962 /* Directives: register aliases. */
1964 static struct reg_entry *
1965 insert_reg_alias (char *str, int number, int type)
1967 struct reg_entry *new;
1968 const char *name;
1970 if ((new = hash_find (arm_reg_hsh, str)) != 0)
1972 if (new->builtin)
1973 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
1975 /* Only warn about a redefinition if it's not defined as the
1976 same register. */
1977 else if (new->number != number || new->type != type)
1978 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1980 return NULL;
1983 name = xstrdup (str);
1984 new = xmalloc (sizeof (struct reg_entry));
1986 new->name = name;
1987 new->number = number;
1988 new->type = type;
1989 new->builtin = FALSE;
1990 new->neon = NULL;
1992 if (hash_insert (arm_reg_hsh, name, (void *) new))
1993 abort ();
1995 return new;
1998 static void
1999 insert_neon_reg_alias (char *str, int number, int type,
2000 struct neon_typed_alias *atype)
2002 struct reg_entry *reg = insert_reg_alias (str, number, type);
2004 if (!reg)
2006 first_error (_("attempt to redefine typed alias"));
2007 return;
2010 if (atype)
2012 reg->neon = xmalloc (sizeof (struct neon_typed_alias));
2013 *reg->neon = *atype;
2017 /* Look for the .req directive. This is of the form:
2019 new_register_name .req existing_register_name
2021 If we find one, or if it looks sufficiently like one that we want to
2022 handle any error here, return TRUE. Otherwise return FALSE. */
2024 static bfd_boolean
2025 create_register_alias (char * newname, char *p)
2027 struct reg_entry *old;
2028 char *oldname, *nbuf;
2029 size_t nlen;
2031 /* The input scrubber ensures that whitespace after the mnemonic is
2032 collapsed to single spaces. */
2033 oldname = p;
2034 if (strncmp (oldname, " .req ", 6) != 0)
2035 return FALSE;
2037 oldname += 6;
2038 if (*oldname == '\0')
2039 return FALSE;
2041 old = hash_find (arm_reg_hsh, oldname);
2042 if (!old)
2044 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
2045 return TRUE;
2048 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2049 the desired alias name, and p points to its end. If not, then
2050 the desired alias name is in the global original_case_string. */
2051 #ifdef TC_CASE_SENSITIVE
2052 nlen = p - newname;
2053 #else
2054 newname = original_case_string;
2055 nlen = strlen (newname);
2056 #endif
2058 nbuf = alloca (nlen + 1);
2059 memcpy (nbuf, newname, nlen);
2060 nbuf[nlen] = '\0';
2062 /* Create aliases under the new name as stated; an all-lowercase
2063 version of the new name; and an all-uppercase version of the new
2064 name. */
2065 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
2067 for (p = nbuf; *p; p++)
2068 *p = TOUPPER (*p);
2070 if (strncmp (nbuf, newname, nlen))
2072 /* If this attempt to create an additional alias fails, do not bother
2073 trying to create the all-lower case alias. We will fail and issue
2074 a second, duplicate error message. This situation arises when the
2075 programmer does something like:
2076 foo .req r0
2077 Foo .req r1
2078 The second .req creates the "Foo" alias but then fails to create
2079 the artificial FOO alias because it has already been created by the
2080 first .req. */
2081 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
2082 return TRUE;
2085 for (p = nbuf; *p; p++)
2086 *p = TOLOWER (*p);
2088 if (strncmp (nbuf, newname, nlen))
2089 insert_reg_alias (nbuf, old->number, old->type);
2092 return TRUE;
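/* Illustrative example (assembler input, not C; "acc" is just a sample
   alias name):

       acc .req r11

   makes "acc" usable wherever r11 is accepted; as the code above shows,
   the all-upper-case and all-lower-case spellings of the alias are
   registered as well.  */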
2095 /* Create a Neon typed/indexed register alias using directives, e.g.:
2096 X .dn d5.s32[1]
2097 Y .qn 6.s16
2098 Z .dn d7
2099 T .dn Z[0]
2100 These typed registers can be used instead of the types specified after the
2101 Neon mnemonic, so long as all operands given have types. Types can also be
2102 specified directly, e.g.:
2103 vadd d0.s32, d1.s32, d2.s32 */
2105 static int
2106 create_neon_reg_alias (char *newname, char *p)
2108 enum arm_reg_type basetype;
2109 struct reg_entry *basereg;
2110 struct reg_entry mybasereg;
2111 struct neon_type ntype;
2112 struct neon_typed_alias typeinfo;
2113 char *namebuf, *nameend;
2114 int namelen;
2116 typeinfo.defined = 0;
2117 typeinfo.eltype.type = NT_invtype;
2118 typeinfo.eltype.size = -1;
2119 typeinfo.index = -1;
2121 nameend = p;
2123 if (strncmp (p, " .dn ", 5) == 0)
2124 basetype = REG_TYPE_VFD;
2125 else if (strncmp (p, " .qn ", 5) == 0)
2126 basetype = REG_TYPE_NQ;
2127 else
2128 return 0;
2130 p += 5;
2132 if (*p == '\0')
2133 return 0;
2135 basereg = arm_reg_parse_multi (&p);
2137 if (basereg && basereg->type != basetype)
2139 as_bad (_("bad type for register"));
2140 return 0;
2143 if (basereg == NULL)
2145 expressionS exp;
2146 /* Try parsing as an integer. */
2147 my_get_expression (&exp, &p, GE_NO_PREFIX);
2148 if (exp.X_op != O_constant)
2150 as_bad (_("expression must be constant"));
2151 return 0;
2153 basereg = &mybasereg;
2154 basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
2155 : exp.X_add_number;
2156 basereg->neon = 0;
2159 if (basereg->neon)
2160 typeinfo = *basereg->neon;
2162 if (parse_neon_type (&ntype, &p) == SUCCESS)
2164 /* We got a type. */
2165 if (typeinfo.defined & NTA_HASTYPE)
2167 as_bad (_("can't redefine the type of a register alias"));
2168 return 0;
2171 typeinfo.defined |= NTA_HASTYPE;
2172 if (ntype.elems != 1)
2174 as_bad (_("you must specify a single type only"));
2175 return 0;
2177 typeinfo.eltype = ntype.el[0];
2180 if (skip_past_char (&p, '[') == SUCCESS)
2182 expressionS exp;
2183 /* We got a scalar index. */
2185 if (typeinfo.defined & NTA_HASINDEX)
2187 as_bad (_("can't redefine the index of a scalar alias"));
2188 return 0;
2191 my_get_expression (&exp, &p, GE_NO_PREFIX);
2193 if (exp.X_op != O_constant)
2195 as_bad (_("scalar index must be constant"));
2196 return 0;
2199 typeinfo.defined |= NTA_HASINDEX;
2200 typeinfo.index = exp.X_add_number;
2202 if (skip_past_char (&p, ']') == FAIL)
2204 as_bad (_("expecting ]"));
2205 return 0;
2209 namelen = nameend - newname;
2210 namebuf = alloca (namelen + 1);
2211 strncpy (namebuf, newname, namelen);
2212 namebuf[namelen] = '\0';
2214 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2215 typeinfo.defined != 0 ? &typeinfo : NULL);
2217 /* Insert name in all uppercase. */
2218 for (p = namebuf; *p; p++)
2219 *p = TOUPPER (*p);
2221 if (strncmp (namebuf, newname, namelen))
2222 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2223 typeinfo.defined != 0 ? &typeinfo : NULL);
2225 /* Insert name in all lowercase. */
2226 for (p = namebuf; *p; p++)
2227 *p = TOLOWER (*p);
2229 if (strncmp (namebuf, newname, namelen))
2230 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2231 typeinfo.defined != 0 ? &typeinfo : NULL);
2233 return 1;
2236 /* Should never be called, as .req goes between the alias and the
2237 register name, not at the beginning of the line. */
2238 static void
2239 s_req (int a ATTRIBUTE_UNUSED)
2241 as_bad (_("invalid syntax for .req directive"));
2244 static void
2245 s_dn (int a ATTRIBUTE_UNUSED)
2247 as_bad (_("invalid syntax for .dn directive"));
2250 static void
2251 s_qn (int a ATTRIBUTE_UNUSED)
2253 as_bad (_("invalid syntax for .qn directive"));
2256 /* The .unreq directive deletes an alias which was previously defined
2257 by .req. For example:
2259 my_alias .req r11
2260 .unreq my_alias */
2262 static void
2263 s_unreq (int a ATTRIBUTE_UNUSED)
2265 char * name;
2266 char saved_char;
2268 name = input_line_pointer;
2270 while (*input_line_pointer != 0
2271 && *input_line_pointer != ' '
2272 && *input_line_pointer != '\n')
2273 ++input_line_pointer;
2275 saved_char = *input_line_pointer;
2276 *input_line_pointer = 0;
2278 if (!*name)
2279 as_bad (_("invalid syntax for .unreq directive"));
2280 else
2282 struct reg_entry *reg = hash_find (arm_reg_hsh, name);
2284 if (!reg)
2285 as_bad (_("unknown register alias '%s'"), name);
2286 else if (reg->builtin)
2287 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
2288 name);
2289 else
2291 char * p;
2292 char * nbuf;
2294 hash_delete (arm_reg_hsh, name, FALSE);
2295 free ((char *) reg->name);
2296 if (reg->neon)
2297 free (reg->neon);
2298 free (reg);
2300 /* Also locate the all upper case and all lower case versions.
2301 Do not complain if we cannot find one or the other as it
2302 was probably deleted above. */
2304 nbuf = strdup (name);
2305 for (p = nbuf; *p; p++)
2306 *p = TOUPPER (*p);
2307 reg = hash_find (arm_reg_hsh, nbuf);
2308 if (reg)
2310 hash_delete (arm_reg_hsh, nbuf, FALSE);
2311 free ((char *) reg->name);
2312 if (reg->neon)
2313 free (reg->neon);
2314 free (reg);
2317 for (p = nbuf; *p; p++)
2318 *p = TOLOWER (*p);
2319 reg = hash_find (arm_reg_hsh, nbuf);
2320 if (reg)
2322 hash_delete (arm_reg_hsh, nbuf, FALSE);
2323 free ((char *) reg->name);
2324 if (reg->neon)
2325 free (reg->neon);
2326 free (reg);
2329 free (nbuf);
2333 *input_line_pointer = saved_char;
2334 demand_empty_rest_of_line ();
2337 /* Directives: Instruction set selection. */
2339 #ifdef OBJ_ELF
2340 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2341 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2342 Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
2343 and $d had type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2345 static enum mstate mapstate = MAP_UNDEFINED;
2347 void
2348 mapping_state (enum mstate state)
2350 symbolS * symbolP;
2351 const char * symname;
2352 int type;
2354 if (mapstate == state)
2355 /* The mapping symbol has already been emitted.
2356 There is nothing else to do. */
2357 return;
2359 mapstate = state;
2361 switch (state)
2363 case MAP_DATA:
2364 symname = "$d";
2365 type = BSF_NO_FLAGS;
2366 break;
2367 case MAP_ARM:
2368 symname = "$a";
2369 type = BSF_NO_FLAGS;
2370 break;
2371 case MAP_THUMB:
2372 symname = "$t";
2373 type = BSF_NO_FLAGS;
2374 break;
2375 case MAP_UNDEFINED:
2376 return;
2377 default:
2378 abort ();
2381 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
2383 symbolP = symbol_new (symname, now_seg, (valueT) frag_now_fix (), frag_now);
2384 symbol_table_insert (symbolP);
2385 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
2387 switch (state)
2389 case MAP_ARM:
2390 THUMB_SET_FUNC (symbolP, 0);
2391 ARM_SET_THUMB (symbolP, 0);
2392 ARM_SET_INTERWORK (symbolP, support_interwork);
2393 break;
2395 case MAP_THUMB:
2396 THUMB_SET_FUNC (symbolP, 1);
2397 ARM_SET_THUMB (symbolP, 1);
2398 ARM_SET_INTERWORK (symbolP, support_interwork);
2399 break;
2401 case MAP_DATA:
2402 default:
2403 return;
2406 #else
2407 #define mapping_state(x) /* nothing */
2408 #endif
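/* Illustrative example (not from the original sources): assembling
     .arm
     add r0, r0, #1
     .word 0x12345678
   makes mapping_state above emit a "$a" mapping symbol at the .arm
   directive (if not already in ARM state) and a "$d" symbol before the
   literal data produced by .word.  */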
2410 /* Find the real, Thumb encoded start of a Thumb function. */
2412 static symbolS *
2413 find_real_start (symbolS * symbolP)
2415 char * real_start;
2416 const char * name = S_GET_NAME (symbolP);
2417 symbolS * new_target;
2419 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2420 #define STUB_NAME ".real_start_of"
2422 if (name == NULL)
2423 abort ();
2425 /* The compiler may generate BL instructions to local labels because
2426 it needs to perform a branch to a far away location. These labels
2427 do not have a corresponding ".real_start_of" label. We check
2428 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2429 the ".real_start_of" convention for nonlocal branches. */
2430 if (S_IS_LOCAL (symbolP) || name[0] == '.')
2431 return symbolP;
2433 real_start = ACONCAT ((STUB_NAME, name, NULL));
2434 new_target = symbol_find (real_start);
2436 if (new_target == NULL)
2438 as_warn (_("Failed to find real start of function: %s\n"), name);
2439 new_target = symbolP;
2442 return new_target;
2445 static void
2446 opcode_select (int width)
2448 switch (width)
2450 case 16:
2451 if (! thumb_mode)
2453 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2454 as_bad (_("selected processor does not support THUMB opcodes"));
2456 thumb_mode = 1;
2457 /* No need to force the alignment, since we will have been
2458 coming from ARM mode, which is word-aligned. */
2459 record_alignment (now_seg, 1);
2461 mapping_state (MAP_THUMB);
2462 break;
2464 case 32:
2465 if (thumb_mode)
2467 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2468 as_bad (_("selected processor does not support ARM opcodes"));
2470 thumb_mode = 0;
2472 if (!need_pass_2)
2473 frag_align (2, 0, 0);
2475 record_alignment (now_seg, 1);
2477 mapping_state (MAP_ARM);
2478 break;
2480 default:
2481 as_bad (_("invalid instruction size selected (%d)"), width);
2485 static void
2486 s_arm (int ignore ATTRIBUTE_UNUSED)
2488 opcode_select (32);
2489 demand_empty_rest_of_line ();
2492 static void
2493 s_thumb (int ignore ATTRIBUTE_UNUSED)
2495 opcode_select (16);
2496 demand_empty_rest_of_line ();
2499 static void
2500 s_code (int unused ATTRIBUTE_UNUSED)
2502 int temp;
2504 temp = get_absolute_expression ();
2505 switch (temp)
2507 case 16:
2508 case 32:
2509 opcode_select (temp);
2510 break;
2512 default:
2513 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
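/* Usage sketch (illustrative): ".code 32" selects ARM state and
   ".code 16" selects Thumb state via opcode_select; any other operand
   triggers the diagnostic above.  */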
2517 static void
2518 s_force_thumb (int ignore ATTRIBUTE_UNUSED)
2520 /* If we are not already in thumb mode go into it, EVEN if
2521 the target processor does not support thumb instructions.
2522 This is used by gcc/config/arm/lib1funcs.asm for example
2523 to compile interworking support functions even if the
2524 target processor should not support interworking. */
2525 if (! thumb_mode)
2527 thumb_mode = 2;
2528 record_alignment (now_seg, 1);
2531 demand_empty_rest_of_line ();
2534 static void
2535 s_thumb_func (int ignore ATTRIBUTE_UNUSED)
2537 s_thumb (0);
2539 /* The following label is the name/address of the start of a Thumb function.
2540 We need to know this for the interworking support. */
2541 label_is_thumb_function_name = TRUE;
2544 /* Perform a .set directive, but also mark the alias as
2545 being a thumb function. */
2547 static void
2548 s_thumb_set (int equiv)
2550 /* XXX the following is a duplicate of the code for s_set() in read.c
2551 We cannot just call that code as we need to get at the symbol that
2552 is created. */
2553 char * name;
2554 char delim;
2555 char * end_name;
2556 symbolS * symbolP;
2558 /* Especial apologies for the random logic:
2559 This just grew, and could be parsed much more simply!
2560 Dean - in haste. */
2561 name = input_line_pointer;
2562 delim = get_symbol_end ();
2563 end_name = input_line_pointer;
2564 *end_name = delim;
2566 if (*input_line_pointer != ',')
2568 *end_name = 0;
2569 as_bad (_("expected comma after name \"%s\""), name);
2570 *end_name = delim;
2571 ignore_rest_of_line ();
2572 return;
2575 input_line_pointer++;
2576 *end_name = 0;
2578 if (name[0] == '.' && name[1] == '\0')
2580 /* XXX - this should not happen to .thumb_set. */
2581 abort ();
2584 if ((symbolP = symbol_find (name)) == NULL
2585 && (symbolP = md_undefined_symbol (name)) == NULL)
2587 #ifndef NO_LISTING
2588 /* When doing symbol listings, play games with dummy fragments living
2589 outside the normal fragment chain to record the file and line info
2590 for this symbol. */
2591 if (listing & LISTING_SYMBOLS)
2593 extern struct list_info_struct * listing_tail;
2594 fragS * dummy_frag = xmalloc (sizeof (fragS));
2596 memset (dummy_frag, 0, sizeof (fragS));
2597 dummy_frag->fr_type = rs_fill;
2598 dummy_frag->line = listing_tail;
2599 symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
2600 dummy_frag->fr_symbol = symbolP;
2602 else
2603 #endif
2604 symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);
2606 #ifdef OBJ_COFF
2607 /* "set" symbols are local unless otherwise specified. */
2608 SF_SET_LOCAL (symbolP);
2609 #endif /* OBJ_COFF */
2610 } /* Make a new symbol. */
2612 symbol_table_insert (symbolP);
2614 * end_name = delim;
2616 if (equiv
2617 && S_IS_DEFINED (symbolP)
2618 && S_GET_SEGMENT (symbolP) != reg_section)
2619 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));
2621 pseudo_set (symbolP);
2623 demand_empty_rest_of_line ();
2625 /* XXX Now we come to the Thumb specific bit of code. */
2627 THUMB_SET_FUNC (symbolP, 1);
2628 ARM_SET_THUMB (symbolP, 1);
2629 #if defined OBJ_ELF || defined OBJ_COFF
2630 ARM_SET_INTERWORK (symbolP, support_interwork);
2631 #endif
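/* Illustrative use (not from the original sources):
     .thumb_set my_alias, my_thumb_func
   behaves like ".set" but additionally marks "my_alias" as a Thumb
   function for interworking purposes.  */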
2634 /* Directives: Mode selection. */
2636 /* .syntax [unified|divided] - choose the new unified syntax
2637 (same for Arm and Thumb encoding, modulo slight differences in what
2638 can be represented) or the old divergent syntax for each mode. */
2639 static void
2640 s_syntax (int unused ATTRIBUTE_UNUSED)
2642 char *name, delim;
2644 name = input_line_pointer;
2645 delim = get_symbol_end ();
2647 if (!strcasecmp (name, "unified"))
2648 unified_syntax = TRUE;
2649 else if (!strcasecmp (name, "divided"))
2650 unified_syntax = FALSE;
2651 else
2653 as_bad (_("unrecognized syntax mode \"%s\""), name);
2654 return;
2656 *input_line_pointer = delim;
2657 demand_empty_rest_of_line ();
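/* Example (illustrative): ".syntax unified" switches to the unified
   ARM/Thumb syntax; ".syntax divided" restores the traditional
   per-mode syntax.  */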
2660 /* Directives: sectioning and alignment. */
2662 /* Same as s_align_ptwo but align 0 => align 2. */
2664 static void
2665 s_align (int unused ATTRIBUTE_UNUSED)
2667 int temp;
2668 bfd_boolean fill_p;
2669 long temp_fill;
2670 long max_alignment = 15;
2672 temp = get_absolute_expression ();
2673 if (temp > max_alignment)
2674 as_bad (_("alignment too large: %d assumed"), temp = max_alignment);
2675 else if (temp < 0)
2677 as_bad (_("alignment negative. 0 assumed."));
2678 temp = 0;
2681 if (*input_line_pointer == ',')
2683 input_line_pointer++;
2684 temp_fill = get_absolute_expression ();
2685 fill_p = TRUE;
2687 else
2689 fill_p = FALSE;
2690 temp_fill = 0;
2693 if (!temp)
2694 temp = 2;
2696 /* Only make a frag if we HAVE to. */
2697 if (temp && !need_pass_2)
2699 if (!fill_p && subseg_text_p (now_seg))
2700 frag_align_code (temp, 0);
2701 else
2702 frag_align (temp, (int) temp_fill, 0);
2704 demand_empty_rest_of_line ();
2706 record_alignment (now_seg, temp);
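/* Example (illustrative): ".align 3" pads to an 8-byte boundary, while
   ".align 0" is promoted to ".align 2" (a 4-byte boundary) by the code
   above.  */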
2709 static void
2710 s_bss (int ignore ATTRIBUTE_UNUSED)
2712 /* We don't support putting frags in the BSS segment; we fake it by
2713 marking in_bss, then looking at s_skip for clues. */
2714 subseg_set (bss_section, 0);
2715 demand_empty_rest_of_line ();
2716 mapping_state (MAP_DATA);
2719 static void
2720 s_even (int ignore ATTRIBUTE_UNUSED)
2722 /* Never make a frag if we expect an extra pass. */
2723 if (!need_pass_2)
2724 frag_align (1, 0, 0);
2726 record_alignment (now_seg, 1);
2728 demand_empty_rest_of_line ();
2731 /* Directives: Literal pools. */
2733 static literal_pool *
2734 find_literal_pool (void)
2736 literal_pool * pool;
2738 for (pool = list_of_pools; pool != NULL; pool = pool->next)
2740 if (pool->section == now_seg
2741 && pool->sub_section == now_subseg)
2742 break;
2745 return pool;
2748 static literal_pool *
2749 find_or_make_literal_pool (void)
2751 /* Next literal pool ID number. */
2752 static unsigned int latest_pool_num = 1;
2753 literal_pool * pool;
2755 pool = find_literal_pool ();
2757 if (pool == NULL)
2759 /* Create a new pool. */
2760 pool = xmalloc (sizeof (* pool));
2761 if (! pool)
2762 return NULL;
2764 pool->next_free_entry = 0;
2765 pool->section = now_seg;
2766 pool->sub_section = now_subseg;
2767 pool->next = list_of_pools;
2768 pool->symbol = NULL;
2770 /* Add it to the list. */
2771 list_of_pools = pool;
2774 /* New pools, and emptied pools, will have a NULL symbol. */
2775 if (pool->symbol == NULL)
2777 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
2778 (valueT) 0, &zero_address_frag);
2779 pool->id = latest_pool_num ++;
2782 /* Done. */
2783 return pool;
2786 /* Add the literal in the global 'inst'
2787 structure to the relevant literal pool. */
2789 static int
2790 add_to_lit_pool (void)
2792 literal_pool * pool;
2793 unsigned int entry;
2795 pool = find_or_make_literal_pool ();
2797 /* Check if this literal value is already in the pool. */
2798 for (entry = 0; entry < pool->next_free_entry; entry ++)
2800 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
2801 && (inst.reloc.exp.X_op == O_constant)
2802 && (pool->literals[entry].X_add_number
2803 == inst.reloc.exp.X_add_number)
2804 && (pool->literals[entry].X_unsigned
2805 == inst.reloc.exp.X_unsigned))
2806 break;
2808 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
2809 && (inst.reloc.exp.X_op == O_symbol)
2810 && (pool->literals[entry].X_add_number
2811 == inst.reloc.exp.X_add_number)
2812 && (pool->literals[entry].X_add_symbol
2813 == inst.reloc.exp.X_add_symbol)
2814 && (pool->literals[entry].X_op_symbol
2815 == inst.reloc.exp.X_op_symbol))
2816 break;
2819 /* Do we need to create a new entry? */
2820 if (entry == pool->next_free_entry)
2822 if (entry >= MAX_LITERAL_POOL_SIZE)
2824 inst.error = _("literal pool overflow");
2825 return FAIL;
2828 pool->literals[entry] = inst.reloc.exp;
2829 pool->next_free_entry += 1;
2832 inst.reloc.exp.X_op = O_symbol;
2833 inst.reloc.exp.X_add_number = ((int) entry) * 4;
2834 inst.reloc.exp.X_add_symbol = pool->symbol;
2836 return SUCCESS;
2839 /* Can't use symbol_new here, so have to create a symbol and then at
2840 a later date assign it a value. That's what these functions do. */
2842 static void
2843 symbol_locate (symbolS * symbolP,
2844 const char * name, /* It is copied, the caller can modify. */
2845 segT segment, /* Segment identifier (SEG_<something>). */
2846 valueT valu, /* Symbol value. */
2847 fragS * frag) /* Associated fragment. */
2849 unsigned int name_length;
2850 char * preserved_copy_of_name;
2852 name_length = strlen (name) + 1; /* +1 for \0. */
2853 obstack_grow (&notes, name, name_length);
2854 preserved_copy_of_name = obstack_finish (&notes);
2856 #ifdef tc_canonicalize_symbol_name
2857 preserved_copy_of_name =
2858 tc_canonicalize_symbol_name (preserved_copy_of_name);
2859 #endif
2861 S_SET_NAME (symbolP, preserved_copy_of_name);
2863 S_SET_SEGMENT (symbolP, segment);
2864 S_SET_VALUE (symbolP, valu);
2865 symbol_clear_list_pointers (symbolP);
2867 symbol_set_frag (symbolP, frag);
2869 /* Link to end of symbol chain. */
2871 extern int symbol_table_frozen;
2873 if (symbol_table_frozen)
2874 abort ();
2877 symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);
2879 obj_symbol_new_hook (symbolP);
2881 #ifdef tc_symbol_new_hook
2882 tc_symbol_new_hook (symbolP);
2883 #endif
2885 #ifdef DEBUG_SYMS
2886 verify_symbol_chain (symbol_rootP, symbol_lastP);
2887 #endif /* DEBUG_SYMS */
2891 static void
2892 s_ltorg (int ignored ATTRIBUTE_UNUSED)
2894 unsigned int entry;
2895 literal_pool * pool;
2896 char sym_name[20];
2898 pool = find_literal_pool ();
2899 if (pool == NULL
2900 || pool->symbol == NULL
2901 || pool->next_free_entry == 0)
2902 return;
2904 mapping_state (MAP_DATA);
2906 /* Align the pool, since it is accessed with word loads.
2907 Only make a frag if we have to. */
2908 if (!need_pass_2)
2909 frag_align (2, 0, 0);
2911 record_alignment (now_seg, 2);
2913 sprintf (sym_name, "$$lit_\002%x", pool->id);
2915 symbol_locate (pool->symbol, sym_name, now_seg,
2916 (valueT) frag_now_fix (), frag_now);
2917 symbol_table_insert (pool->symbol);
2919 ARM_SET_THUMB (pool->symbol, thumb_mode);
2921 #if defined OBJ_COFF || defined OBJ_ELF
2922 ARM_SET_INTERWORK (pool->symbol, support_interwork);
2923 #endif
2925 for (entry = 0; entry < pool->next_free_entry; entry ++)
2926 /* First output the expression in the instruction to the pool. */
2927 emit_expr (&(pool->literals[entry]), 4); /* .word */
2929 /* Mark the pool as empty. */
2930 pool->next_free_entry = 0;
2931 pool->symbol = NULL;
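/* Sketch of the usual flow (illustrative):
     ldr r0, =0x12345678   @ constant is deferred into the literal pool
     ...
     .ltorg                @ the pool is dumped here as word data
   The "$$lit_..." symbol above labels the dumped pool so the ldr's
   PC-relative offset can be fixed up against it.  */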
2934 #ifdef OBJ_ELF
2935 /* Forward declarations for functions below, in the MD interface
2936 section. */
2937 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
2938 static valueT create_unwind_entry (int);
2939 static void start_unwind_section (const segT, int);
2940 static void add_unwind_opcode (valueT, int);
2941 static void flush_pending_unwind (void);
2943 /* Directives: Data. */
2945 static void
2946 s_arm_elf_cons (int nbytes)
2948 expressionS exp;
2950 #ifdef md_flush_pending_output
2951 md_flush_pending_output ();
2952 #endif
2954 if (is_it_end_of_statement ())
2956 demand_empty_rest_of_line ();
2957 return;
2960 #ifdef md_cons_align
2961 md_cons_align (nbytes);
2962 #endif
2964 mapping_state (MAP_DATA);
2967 int reloc;
2968 char *base = input_line_pointer;
2970 expression (& exp);
2972 if (exp.X_op != O_symbol)
2973 emit_expr (&exp, (unsigned int) nbytes);
2974 else
2976 char *before_reloc = input_line_pointer;
2977 reloc = parse_reloc (&input_line_pointer);
2978 if (reloc == -1)
2980 as_bad (_("unrecognized relocation suffix"));
2981 ignore_rest_of_line ();
2982 return;
2984 else if (reloc == BFD_RELOC_UNUSED)
2985 emit_expr (&exp, (unsigned int) nbytes);
2986 else
2988 reloc_howto_type *howto = bfd_reloc_type_lookup (stdoutput, reloc);
2989 int size = bfd_get_reloc_size (howto);
2991 if (reloc == BFD_RELOC_ARM_PLT32)
2993 as_bad (_("(plt) is only valid on branch targets"));
2994 reloc = BFD_RELOC_UNUSED;
2995 size = 0;
2998 if (size > nbytes)
2999 as_bad (_("%s relocations do not fit in %d bytes"),
3000 howto->name, nbytes);
3001 else
3003 /* We've parsed an expression stopping at O_symbol.
3004 But there may be more expression left now that we
3005 have parsed the relocation marker. Parse it again.
3006 XXX Surely there is a cleaner way to do this. */
3007 char *p = input_line_pointer;
3008 int offset;
3009 char *save_buf = alloca (input_line_pointer - base);
3010 memcpy (save_buf, base, input_line_pointer - base);
3011 memmove (base + (input_line_pointer - before_reloc),
3012 base, before_reloc - base);
3014 input_line_pointer = base + (input_line_pointer-before_reloc);
3015 expression (&exp);
3016 memcpy (base, save_buf, p - base);
3018 offset = nbytes - size;
3019 p = frag_more ((int) nbytes);
3020 fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
3021 size, &exp, 0, reloc);
3026 while (*input_line_pointer++ == ',');
3028 /* Put terminator back into stream. */
3029 input_line_pointer --;
3030 demand_empty_rest_of_line ();
3034 /* Parse a .rel31 directive. */
3036 static void
3037 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
3039 expressionS exp;
3040 char *p;
3041 valueT highbit;
3043 highbit = 0;
3044 if (*input_line_pointer == '1')
3045 highbit = 0x80000000;
3046 else if (*input_line_pointer != '0')
3047 as_bad (_("expected 0 or 1"));
3049 input_line_pointer++;
3050 if (*input_line_pointer != ',')
3051 as_bad (_("missing comma"));
3052 input_line_pointer++;
3054 #ifdef md_flush_pending_output
3055 md_flush_pending_output ();
3056 #endif
3058 #ifdef md_cons_align
3059 md_cons_align (4);
3060 #endif
3062 mapping_state (MAP_DATA);
3064 expression (&exp);
3066 p = frag_more (4);
3067 md_number_to_chars (p, highbit, 4);
3068 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
3069 BFD_RELOC_ARM_PREL31);
3071 demand_empty_rest_of_line ();
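/* Illustrative use (not from the original sources):
     .rel31 0, some_symbol
   emits a 4-byte field whose top bit comes from the first operand and
   whose remaining 31 bits receive a BFD_RELOC_ARM_PREL31 fixup against
   "some_symbol".  */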
3074 /* Directives: AEABI stack-unwind tables. */
3076 /* Parse an unwind_fnstart directive. Simply records the current location. */
3078 static void
3079 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
3081 demand_empty_rest_of_line ();
3082 /* Mark the start of the function. */
3083 unwind.proc_start = expr_build_dot ();
3085 /* Reset the rest of the unwind info. */
3086 unwind.opcode_count = 0;
3087 unwind.table_entry = NULL;
3088 unwind.personality_routine = NULL;
3089 unwind.personality_index = -1;
3090 unwind.frame_size = 0;
3091 unwind.fp_offset = 0;
3092 unwind.fp_reg = 13;
3093 unwind.fp_used = 0;
3094 unwind.sp_restored = 0;
3098 /* Parse a handlerdata directive. Creates the exception handling table entry
3099 for the function. */
3101 static void
3102 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3104 demand_empty_rest_of_line ();
3105 if (unwind.table_entry)
3106 as_bad (_("duplicate .handlerdata directive"));
3108 create_unwind_entry (1);
3111 /* Parse an unwind_fnend directive. Generates the index table entry. */
3113 static void
3114 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
3116 long where;
3117 char *ptr;
3118 valueT val;
3120 demand_empty_rest_of_line ();
3122 /* Add eh table entry. */
3123 if (unwind.table_entry == NULL)
3124 val = create_unwind_entry (0);
3125 else
3126 val = 0;
3128 /* Add index table entry. This is two words. */
3129 start_unwind_section (unwind.saved_seg, 1);
3130 frag_align (2, 0, 0);
3131 record_alignment (now_seg, 2);
3133 ptr = frag_more (8);
3134 where = frag_now_fix () - 8;
3136 /* Self relative offset of the function start. */
3137 fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
3138 BFD_RELOC_ARM_PREL31);
3140 /* Indicate dependency on EHABI-defined personality routines to the
3141 linker, if it hasn't been done already. */
3142 if (unwind.personality_index >= 0 && unwind.personality_index < 3
3143 && !(marked_pr_dependency & (1 << unwind.personality_index)))
3145 static const char *const name[] =
3147 "__aeabi_unwind_cpp_pr0",
3148 "__aeabi_unwind_cpp_pr1",
3149 "__aeabi_unwind_cpp_pr2"
3151 symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
3152 fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
3153 marked_pr_dependency |= 1 << unwind.personality_index;
3154 seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
3155 = marked_pr_dependency;
3158 if (val)
3159 /* Inline exception table entry. */
3160 md_number_to_chars (ptr + 4, val, 4);
3161 else
3162 /* Self relative offset of the table entry. */
3163 fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
3164 BFD_RELOC_ARM_PREL31);
3166 /* Restore the original section. */
3167 subseg_set (unwind.saved_seg, unwind.saved_subseg);
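/* Typical directive sequence (illustrative):
     .fnstart
     .save {r4, r5, lr}
     ...                 @ function body
     .fnend
   .fnstart records the start of the region; .fnend emits the two-word
   index entry built above, plus any exception table entry.  */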
3171 /* Parse an unwind_cantunwind directive. */
3173 static void
3174 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3176 demand_empty_rest_of_line ();
3177 if (unwind.personality_routine || unwind.personality_index != -1)
3178 as_bad (_("personality routine specified for cantunwind frame"));
3180 unwind.personality_index = -2;
3184 /* Parse a personalityindex directive. */
3186 static void
3187 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3189 expressionS exp;
3191 if (unwind.personality_routine || unwind.personality_index != -1)
3192 as_bad (_("duplicate .personalityindex directive"));
3194 expression (&exp);
3196 if (exp.X_op != O_constant
3197 || exp.X_add_number < 0 || exp.X_add_number > 15)
3199 as_bad (_("bad personality routine number"));
3200 ignore_rest_of_line ();
3201 return;
3204 unwind.personality_index = exp.X_add_number;
3206 demand_empty_rest_of_line ();
3210 /* Parse a personality directive. */
3212 static void
3213 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
3215 char *name, *p, c;
3217 if (unwind.personality_routine || unwind.personality_index != -1)
3218 as_bad (_("duplicate .personality directive"));
3220 name = input_line_pointer;
3221 c = get_symbol_end ();
3222 p = input_line_pointer;
3223 unwind.personality_routine = symbol_find_or_make (name);
3224 *p = c;
3225 demand_empty_rest_of_line ();
3229 /* Parse a directive saving core registers. */
3231 static void
3232 s_arm_unwind_save_core (void)
3234 valueT op;
3235 long range;
3236 int n;
3238 range = parse_reg_list (&input_line_pointer);
3239 if (range == FAIL)
3241 as_bad (_("expected register list"));
3242 ignore_rest_of_line ();
3243 return;
3246 demand_empty_rest_of_line ();
3248 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3249 into .unwind_save {..., sp...}. We aren't bothered about the value of
3250 ip because it is clobbered by calls. */
3251 if (unwind.sp_restored && unwind.fp_reg == 12
3252 && (range & 0x3000) == 0x1000)
3254 unwind.opcode_count--;
3255 unwind.sp_restored = 0;
3256 range = (range | 0x2000) & ~0x1000;
3257 unwind.pending_offset = 0;
3260 /* Pop r4-r15. */
3261 if (range & 0xfff0)
3263 /* See if we can use the short opcodes. These pop a block of up to 8
3264 registers starting with r4, plus maybe r14. */
3265 for (n = 0; n < 8; n++)
3267 /* Break at the first non-saved register. */
3268 if ((range & (1 << (n + 4))) == 0)
3269 break;
3271 /* See if there are any other bits set. */
3272 if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
3274 /* Use the long form. */
3275 op = 0x8000 | ((range >> 4) & 0xfff);
3276 add_unwind_opcode (op, 2);
3278 else
3280 /* Use the short form. */
3281 if (range & 0x4000)
3282 op = 0xa8; /* Pop r14. */
3283 else
3284 op = 0xa0; /* Do not pop r14. */
3285 op |= (n - 1);
3286 add_unwind_opcode (op, 1);
3290 /* Pop r0-r3. */
3291 if (range & 0xf)
3293 op = 0xb100 | (range & 0xf);
3294 add_unwind_opcode (op, 2);
3297 /* Record the number of bytes pushed. */
3298 for (n = 0; n < 16; n++)
3300 if (range & (1 << n))
3301 unwind.frame_size += 4;
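/* Examples (illustrative): ".save {r4-r7, lr}" fits the short form and
   becomes the single byte 0xab (0xa8 | 3), whereas ".save {r0, r8}"
   needs the long 0x8000-based opcode for r8 plus a 0xb10x opcode for
   r0.  */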
3306 /* Parse a directive saving FPA registers. */
3308 static void
3309 s_arm_unwind_save_fpa (int reg)
3311 expressionS exp;
3312 int num_regs;
3313 valueT op;
3315 /* Get the number of registers to transfer. */
3316 if (skip_past_comma (&input_line_pointer) != FAIL)
3317 expression (&exp);
3318 else
3319 exp.X_op = O_illegal;
3321 if (exp.X_op != O_constant)
3323 as_bad (_("expected , <constant>"));
3324 ignore_rest_of_line ();
3325 return;
3328 num_regs = exp.X_add_number;
3330 if (num_regs < 1 || num_regs > 4)
3332 as_bad (_("number of registers must be in the range [1:4]"));
3333 ignore_rest_of_line ();
3334 return;
3337 demand_empty_rest_of_line ();
3339 if (reg == 4)
3341 /* Short form. */
3342 op = 0xb4 | (num_regs - 1);
3343 add_unwind_opcode (op, 1);
3345 else
3347 /* Long form. */
3348 op = 0xc800 | (reg << 4) | (num_regs - 1);
3349 add_unwind_opcode (op, 2);
3351 unwind.frame_size += num_regs * 12;
3355 /* Parse a directive saving VFP registers for ARMv6 and above. */
3357 static void
3358 s_arm_unwind_save_vfp_armv6 (void)
3360 int count;
3361 unsigned int start;
3362 valueT op;
3363 int num_vfpv3_regs = 0;
3364 int num_regs_below_16;
3366 count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
3367 if (count == FAIL)
3369 as_bad (_("expected register list"));
3370 ignore_rest_of_line ();
3371 return;
3374 demand_empty_rest_of_line ();
3376 /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
3377 than FSTMX/FLDMX-style ones). */
3379 /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31. */
3380 if (start >= 16)
3381 num_vfpv3_regs = count;
3382 else if (start + count > 16)
3383 num_vfpv3_regs = start + count - 16;
3385 if (num_vfpv3_regs > 0)
3387 int start_offset = start > 16 ? start - 16 : 0;
3388 op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
3389 add_unwind_opcode (op, 2);
3392 /* Generate opcode for registers numbered in the range 0 .. 15. */
3393 num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
3394 assert (num_regs_below_16 + num_vfpv3_regs == count);
3395 if (num_regs_below_16 > 0)
3397 op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
3398 add_unwind_opcode (op, 2);
3401 unwind.frame_size += count * 8;
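/* Example (illustrative): ".vsave {d8-d11}" reaches this function via
   s_arm_unwind_save below and emits 0xc983 (0xc900 | start 8 << 4 |
   count - 1), adding 32 bytes to the recorded frame size.  */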
3405 /* Parse a directive saving VFP registers for pre-ARMv6. */
3407 static void
3408 s_arm_unwind_save_vfp (void)
3410 int count;
3411 unsigned int reg;
3412 valueT op;
3414 count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
3415 if (count == FAIL)
3417 as_bad (_("expected register list"));
3418 ignore_rest_of_line ();
3419 return;
3422 demand_empty_rest_of_line ();
3424 if (reg == 8)
3426 /* Short form. */
3427 op = 0xb8 | (count - 1);
3428 add_unwind_opcode (op, 1);
3430 else
3432 /* Long form. */
3433 op = 0xb300 | (reg << 4) | (count - 1);
3434 add_unwind_opcode (op, 2);
3436 unwind.frame_size += count * 8 + 4;
3440 /* Parse a directive saving iWMMXt data registers. */
3442 static void
3443 s_arm_unwind_save_mmxwr (void)
3445 int reg;
3446 int hi_reg;
3447 int i;
3448 unsigned mask = 0;
3449 valueT op;
3451 if (*input_line_pointer == '{')
3452 input_line_pointer++;
3456 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3458 if (reg == FAIL)
3460 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
3461 goto error;
3464 if (mask >> reg)
3465 as_tsktsk (_("register list not in ascending order"));
3466 mask |= 1 << reg;
3468 if (*input_line_pointer == '-')
3470 input_line_pointer++;
3471 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3472 if (hi_reg == FAIL)
3474 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
3475 goto error;
3477 else if (reg >= hi_reg)
3479 as_bad (_("bad register range"));
3480 goto error;
3482 for (; reg < hi_reg; reg++)
3483 mask |= 1 << reg;
3486 while (skip_past_comma (&input_line_pointer) != FAIL);
3488 if (*input_line_pointer == '}')
3489 input_line_pointer++;
3491 demand_empty_rest_of_line ();
3493 /* Generate any deferred opcodes because we're going to be looking at
3494 the list. */
3495 flush_pending_unwind ();
3497 for (i = 0; i < 16; i++)
3499 if (mask & (1 << i))
3500 unwind.frame_size += 8;
3503 /* Attempt to combine with a previous opcode. We do this because gcc
3504 likes to output separate unwind directives for a single block of
3505 registers. */
3506 if (unwind.opcode_count > 0)
3508 i = unwind.opcodes[unwind.opcode_count - 1];
3509 if ((i & 0xf8) == 0xc0)
3511 i &= 7;
3512 /* Only merge if the blocks are contiguous. */
3513 if (i < 6)
3515 if ((mask & 0xfe00) == (1 << 9))
3517 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
3518 unwind.opcode_count--;
3521 else if (i == 6 && unwind.opcode_count >= 2)
3523 i = unwind.opcodes[unwind.opcode_count - 2];
3524 reg = i >> 4;
3525 i &= 0xf;
3527 op = 0xffff << (reg - 1);
3528 if (reg > 0
3529 && ((mask & op) == (1u << (reg - 1))))
3531 op = (1 << (reg + i + 1)) - 1;
3532 op &= ~((1 << reg) - 1);
3533 mask |= op;
3534 unwind.opcode_count -= 2;
3540 hi_reg = 15;
3541 /* We want to generate opcodes in the order the registers have been
3542 saved, i.e. descending order. */
3543 for (reg = 15; reg >= -1; reg--)
3545 /* Save registers in blocks. */
3546 if (reg < 0
3547 || !(mask & (1 << reg)))
3549 /* We found an unsaved reg. Generate opcodes to save the
3550 preceding block. */
3551 if (reg != hi_reg)
3553 if (reg == 9)
3555 /* Short form. */
3556 op = 0xc0 | (hi_reg - 10);
3557 add_unwind_opcode (op, 1);
3559 else
3561 /* Long form. */
3562 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
3563 add_unwind_opcode (op, 2);
3566 hi_reg = reg - 1;
3570 return;
3571 error:
3572 ignore_rest_of_line ();
3575 static void
3576 s_arm_unwind_save_mmxwcg (void)
3578 int reg;
3579 int hi_reg;
3580 unsigned mask = 0;
3581 valueT op;
3583 if (*input_line_pointer == '{')
3584 input_line_pointer++;
3588 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
3590 if (reg == FAIL)
3592 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
3593 goto error;
3596 reg -= 8;
3597 if (mask >> reg)
3598 as_tsktsk (_("register list not in ascending order"));
3599 mask |= 1 << reg;
3601 if (*input_line_pointer == '-')
3603 input_line_pointer++;
3604 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
3605 if (hi_reg == FAIL)
3607 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
3608 goto error;
3610 else if (reg >= hi_reg)
3612 as_bad (_("bad register range"));
3613 goto error;
3615 for (; reg < hi_reg; reg++)
3616 mask |= 1 << reg;
3619 while (skip_past_comma (&input_line_pointer) != FAIL);
3621 if (*input_line_pointer == '}')
3622 input_line_pointer++;
3624 demand_empty_rest_of_line ();
3626 /* Generate any deferred opcodes because we're going to be looking at
3627 the list. */
3628 flush_pending_unwind ();
3630 for (reg = 0; reg < 16; reg++)
3632 if (mask & (1 << reg))
3633 unwind.frame_size += 4;
3635 op = 0xc700 | mask;
3636 add_unwind_opcode (op, 2);
3637 return;
3638 error:
3639 ignore_rest_of_line ();
3643 /* Parse an unwind_save directive.
3644 If the argument is non-zero, this is a .vsave directive. */
3646 static void
3647 s_arm_unwind_save (int arch_v6)
3649 char *peek;
3650 struct reg_entry *reg;
3651 bfd_boolean had_brace = FALSE;
3653 /* Figure out what sort of save we have. */
3654 peek = input_line_pointer;
3656 if (*peek == '{')
3658 had_brace = TRUE;
3659 peek++;
3662 reg = arm_reg_parse_multi (&peek);
3664 if (!reg)
3666 as_bad (_("register expected"));
3667 ignore_rest_of_line ();
3668 return;
3671 switch (reg->type)
3673 case REG_TYPE_FN:
3674 if (had_brace)
3676 as_bad (_("FPA .unwind_save does not take a register list"));
3677 ignore_rest_of_line ();
3678 return;
3680 input_line_pointer = peek;
3681 s_arm_unwind_save_fpa (reg->number);
3682 return;
3684 case REG_TYPE_RN: s_arm_unwind_save_core (); return;
3685 case REG_TYPE_VFD:
3686 if (arch_v6)
3687 s_arm_unwind_save_vfp_armv6 ();
3688 else
3689 s_arm_unwind_save_vfp ();
3690 return;
3691 case REG_TYPE_MMXWR: s_arm_unwind_save_mmxwr (); return;
3692 case REG_TYPE_MMXWCG: s_arm_unwind_save_mmxwcg (); return;
3694 default:
3695 as_bad (_(".unwind_save does not support this kind of register"));
3696 ignore_rest_of_line ();
3701 /* Parse an unwind_movsp directive. */
3703 static void
3704 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
3706 int reg;
3707 valueT op;
3708 int offset;
3710 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3711 if (reg == FAIL)
3713 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
3714 ignore_rest_of_line ();
3715 return;
3718 /* Optional constant. */
3719 if (skip_past_comma (&input_line_pointer) != FAIL)
3721 if (immediate_for_directive (&offset) == FAIL)
3722 return;
3724 else
3725 offset = 0;
3727 demand_empty_rest_of_line ();
3729 if (reg == REG_SP || reg == REG_PC)
3731 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
3732 return;
3735 if (unwind.fp_reg != REG_SP)
3736 as_bad (_("unexpected .unwind_movsp directive"));
3738 /* Generate opcode to restore the value. */
3739 op = 0x90 | reg;
3740 add_unwind_opcode (op, 1);
3742 /* Record the information for later. */
3743 unwind.fp_reg = reg;
3744 unwind.fp_offset = unwind.frame_size - offset;
3745 unwind.sp_restored = 1;
3748 /* Parse an unwind_pad directive. */
3750 static void
3751 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
3753 int offset;
3755 if (immediate_for_directive (&offset) == FAIL)
3756 return;
3758 if (offset & 3)
3760 as_bad (_("stack increment must be multiple of 4"));
3761 ignore_rest_of_line ();
3762 return;
3765 /* Don't generate any opcodes, just record the details for later. */
3766 unwind.frame_size += offset;
3767 unwind.pending_offset += offset;
3769 demand_empty_rest_of_line ();
3772 /* Parse an unwind_setfp directive. */
3774 static void
3775 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
3777 int sp_reg;
3778 int fp_reg;
3779 int offset;
3781 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3782 if (skip_past_comma (&input_line_pointer) == FAIL)
3783 sp_reg = FAIL;
3784 else
3785 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3787 if (fp_reg == FAIL || sp_reg == FAIL)
3789 as_bad (_("expected <reg>, <reg>"));
3790 ignore_rest_of_line ();
3791 return;
3794 /* Optional constant. */
3795 if (skip_past_comma (&input_line_pointer) != FAIL)
3797 if (immediate_for_directive (&offset) == FAIL)
3798 return;
3800 else
3801 offset = 0;
3803 demand_empty_rest_of_line ();
3805 if (sp_reg != 13 && sp_reg != unwind.fp_reg)
3807 as_bad (_("register must be either sp or set by a previous "
3808 "unwind_movsp directive"));
3809 return;
3812 /* Don't generate any opcodes, just record the information for later. */
3813 unwind.fp_reg = fp_reg;
3814 unwind.fp_used = 1;
3815 if (sp_reg == 13)
3816 unwind.fp_offset = unwind.frame_size - offset;
3817 else
3818 unwind.fp_offset -= offset;
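/* Example (illustrative): after ".setfp fp, sp, #8" the code above
   records fp_reg = 11 and fp_offset = frame_size - 8; no unwind opcode
   is emitted until the information is actually needed.  */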
3821 /* Parse an unwind_raw directive. */
3823 static void
3824 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
3826 expressionS exp;
3827 /* This is an arbitrary limit. */
3828 unsigned char op[16];
3829 int count;
3831 expression (&exp);
3832 if (exp.X_op == O_constant
3833 && skip_past_comma (&input_line_pointer) != FAIL)
3835 unwind.frame_size += exp.X_add_number;
3836 expression (&exp);
3838 else
3839 exp.X_op = O_illegal;
3841 if (exp.X_op != O_constant)
3843 as_bad (_("expected <offset>, <opcode>"));
3844 ignore_rest_of_line ();
3845 return;
3848 count = 0;
3850 /* Parse the opcode. */
3851 for (;;)
3853 if (count >= 16)
3855 as_bad (_("unwind opcode too long"));
3856 ignore_rest_of_line ();
3858 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
3860 as_bad (_("invalid unwind opcode"));
3861 ignore_rest_of_line ();
3862 return;
3864 op[count++] = exp.X_add_number;
3866 /* Parse the next byte. */
3867 if (skip_past_comma (&input_line_pointer) == FAIL)
3868 break;
3870 expression (&exp);
3873 /* Add the opcode bytes in reverse order. */
3874 while (count--)
3875 add_unwind_opcode (op[count], 1);
3877 demand_empty_rest_of_line ();
3881 /* Parse a .eabi_attribute directive. */
3883 static void
3884 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
3886 s_vendor_attribute (OBJ_ATTR_PROC);
3888 #endif /* OBJ_ELF */
3890 static void s_arm_arch (int);
3891 static void s_arm_object_arch (int);
3892 static void s_arm_cpu (int);
3893 static void s_arm_fpu (int);
3895 #ifdef TE_PE
3897 static void
3898 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
3900 expressionS exp;
3904 expression (&exp);
3905 if (exp.X_op == O_symbol)
3906 exp.X_op = O_secrel;
3908 emit_expr (&exp, 4);
3910 while (*input_line_pointer++ == ',');
3912 input_line_pointer--;
3913 demand_empty_rest_of_line ();
3915 #endif /* TE_PE */
3917 /* This table describes all the machine specific pseudo-ops the assembler
3918 has to support. The fields are:
3919 pseudo-op name without dot
3920 function to call to execute this pseudo-op
3921 Integer arg to pass to the function. */
3923 const pseudo_typeS md_pseudo_table[] =
3925 /* Never called because '.req' does not start a line. */
3926 { "req", s_req, 0 },
3927 /* Following two are likewise never called. */
3928 { "dn", s_dn, 0 },
3929 { "qn", s_qn, 0 },
3930 { "unreq", s_unreq, 0 },
3931 { "bss", s_bss, 0 },
3932 { "align", s_align, 0 },
3933 { "arm", s_arm, 0 },
3934 { "thumb", s_thumb, 0 },
3935 { "code", s_code, 0 },
3936 { "force_thumb", s_force_thumb, 0 },
3937 { "thumb_func", s_thumb_func, 0 },
3938 { "thumb_set", s_thumb_set, 0 },
3939 { "even", s_even, 0 },
3940 { "ltorg", s_ltorg, 0 },
3941 { "pool", s_ltorg, 0 },
3942 { "syntax", s_syntax, 0 },
3943 { "cpu", s_arm_cpu, 0 },
3944 { "arch", s_arm_arch, 0 },
3945 { "object_arch", s_arm_object_arch, 0 },
3946 { "fpu", s_arm_fpu, 0 },
3947 #ifdef OBJ_ELF
3948 { "word", s_arm_elf_cons, 4 },
3949 { "long", s_arm_elf_cons, 4 },
3950 { "rel31", s_arm_rel31, 0 },
3951 { "fnstart", s_arm_unwind_fnstart, 0 },
3952 { "fnend", s_arm_unwind_fnend, 0 },
3953 { "cantunwind", s_arm_unwind_cantunwind, 0 },
3954 { "personality", s_arm_unwind_personality, 0 },
3955 { "personalityindex", s_arm_unwind_personalityindex, 0 },
3956 { "handlerdata", s_arm_unwind_handlerdata, 0 },
3957 { "save", s_arm_unwind_save, 0 },
3958 { "vsave", s_arm_unwind_save, 1 },
3959 { "movsp", s_arm_unwind_movsp, 0 },
3960 { "pad", s_arm_unwind_pad, 0 },
3961 { "setfp", s_arm_unwind_setfp, 0 },
3962 { "unwind_raw", s_arm_unwind_raw, 0 },
3963 { "eabi_attribute", s_arm_eabi_attribute, 0 },
3964 #else
3965 { "word", cons, 4},
3967 /* These are used for dwarf. */
3968 {"2byte", cons, 2},
3969 {"4byte", cons, 4},
3970 {"8byte", cons, 8},
3971 /* These are used for dwarf2. */
3972 { "file", (void (*) (int)) dwarf2_directive_file, 0 },
3973 { "loc", dwarf2_directive_loc, 0 },
3974 { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
3975 #endif
3976 { "extend", float_cons, 'x' },
3977 { "ldouble", float_cons, 'x' },
3978 { "packed", float_cons, 'p' },
3979 #ifdef TE_PE
3980 {"secrel32", pe_directive_secrel, 0},
3981 #endif
3982 { 0, 0, 0 }
3985 /* Parser functions used exclusively in instruction operands. */
3987 /* Generic immediate-value read function for use in insn parsing.
3988 STR points to the beginning of the immediate (the leading #);
3989 VAL receives the value; if the value is outside [MIN, MAX]
3990 issue an error. PREFIX_OPT is true if the immediate prefix is
3991 optional. */
3993 static int
3994 parse_immediate (char **str, int *val, int min, int max,
3995 bfd_boolean prefix_opt)
3997 expressionS exp;
3998 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
3999 if (exp.X_op != O_constant)
4001 inst.error = _("constant expression required");
4002 return FAIL;
4005 if (exp.X_add_number < min || exp.X_add_number > max)
4007 inst.error = _("immediate value out of range");
4008 return FAIL;
4011 *val = exp.X_add_number;
4012 return SUCCESS;
4015 /* Less-generic immediate-value read function with the possibility of loading a
4016 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4017 instructions. Puts the result directly in inst.operands[i]. */
4019 static int
4020 parse_big_immediate (char **str, int i)
4022 expressionS exp;
4023 char *ptr = *str;
4025 my_get_expression (&exp, &ptr, GE_OPT_PREFIX_BIG);
4027 if (exp.X_op == O_constant)
4029 inst.operands[i].imm = exp.X_add_number & 0xffffffff;
4030 /* If we're on a 64-bit host, then a 64-bit number can be returned using
4031 O_constant. We have to be careful not to break compilation for
4032 32-bit X_add_number, though. */
4033 if ((exp.X_add_number & ~0xffffffffl) != 0)
4035 /* X >> 32 is illegal if sizeof (exp.X_add_number) == 4. */
4036 inst.operands[i].reg = ((exp.X_add_number >> 16) >> 16) & 0xffffffff;
4037 inst.operands[i].regisimm = 1;
4040 else if (exp.X_op == O_big
4041 && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 32
4042 && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number <= 64)
4044 unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;
4045 /* Bignums have their least significant bits in
4046 generic_bignum[0]. Make sure we put 32 bits in imm and
4047 32 bits in reg, in a (hopefully) portable way. */
4048 assert (parts != 0);
4049 inst.operands[i].imm = 0;
4050 for (j = 0; j < parts; j++, idx++)
4051 inst.operands[i].imm |= generic_bignum[idx]
4052 << (LITTLENUM_NUMBER_OF_BITS * j);
4053 inst.operands[i].reg = 0;
4054 for (j = 0; j < parts; j++, idx++)
4055 inst.operands[i].reg |= generic_bignum[idx]
4056 << (LITTLENUM_NUMBER_OF_BITS * j);
4057 inst.operands[i].regisimm = 1;
4059 else
4060 return FAIL;
4062 *str = ptr;
4064 return SUCCESS;
4067 /* Returns the pseudo-register number of an FPA immediate constant,
4068 or FAIL if there isn't a valid constant here. */
4070 static int
4071 parse_fpa_immediate (char ** str)
4073 LITTLENUM_TYPE words[MAX_LITTLENUMS];
4074 char * save_in;
4075 expressionS exp;
4076 int i;
4077 int j;
4079 /* First try to match exact strings; this is to guarantee
4080 that some formats will work even for cross assembly. */
4082 for (i = 0; fp_const[i]; i++)
4084 if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
4086 char *start = *str;
4088 *str += strlen (fp_const[i]);
4089 if (is_end_of_line[(unsigned char) **str])
4090 return i + 8;
4091 *str = start;
4095 /* Just because we didn't get a match doesn't mean that the constant
4096 isn't valid, just that it is in a format that we don't
4097 automatically recognize. Try parsing it with the standard
4098 expression routines. */
4100 memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));
4102 /* Look for a raw floating point number. */
4103 if ((save_in = atof_ieee (*str, 'x', words)) != NULL
4104 && is_end_of_line[(unsigned char) *save_in])
4106 for (i = 0; i < NUM_FLOAT_VALS; i++)
4108 for (j = 0; j < MAX_LITTLENUMS; j++)
4110 if (words[j] != fp_values[i][j])
4111 break;
4114 if (j == MAX_LITTLENUMS)
4116 *str = save_in;
4117 return i + 8;
4122 /* Try to parse a more complex expression; this will probably fail
4123 unless the code uses a floating-point prefix (e.g. "0f"). */
4124 save_in = input_line_pointer;
4125 input_line_pointer = *str;
4126 if (expression (&exp) == absolute_section
4127 && exp.X_op == O_big
4128 && exp.X_add_number < 0)
4130 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4131 Ditto for 15. */
4132 if (gen_to_words (words, 5, (long) 15) == 0)
4134 for (i = 0; i < NUM_FLOAT_VALS; i++)
4136 for (j = 0; j < MAX_LITTLENUMS; j++)
4138 if (words[j] != fp_values[i][j])
4139 break;
4142 if (j == MAX_LITTLENUMS)
4144 *str = input_line_pointer;
4145 input_line_pointer = save_in;
4146 return i + 8;
4152 *str = input_line_pointer;
4153 input_line_pointer = save_in;
4154 inst.error = _("invalid FPA immediate expression");
4155 return FAIL;
4158 /* Returns 1 if a number has "quarter-precision" float format
4159 0baBbbbbbc defgh000 00000000 00000000. */
4161 static int
4162 is_quarter_float (unsigned imm)
4164 int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
4165 return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
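/* For instance (illustrative), 1.0f == 0x3f800000 passes: bits 0-18 are
   all zero and bits 25-30 equal the pattern selected by bit 29; a value
   such as 0.3f (0x3e99999a) fails because its low bits are nonzero.  */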
4168 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4169 0baBbbbbbc defgh000 00000000 00000000.
4170 The zero and minus-zero cases need special handling, since they can't be
4171 encoded in the "quarter-precision" float format, but can nonetheless be
4172 loaded as integer constants. */
4174 static unsigned
4175 parse_qfloat_immediate (char **ccp, int *immed)
4177 char *str = *ccp;
4178 char *fpnum;
4179 LITTLENUM_TYPE words[MAX_LITTLENUMS];
4180 int found_fpchar = 0;
4182 skip_past_char (&str, '#');
4184 /* We must not accidentally parse an integer as a floating-point number. Make
4185 sure that the value we parse is not an integer by checking for special
4186 characters '.' or 'e'.
4187 FIXME: This is a horrible hack, but doing better is tricky because type
4188 information isn't in a very usable state at parse time. */
4189 fpnum = str;
4190 skip_whitespace (fpnum);
4192 if (strncmp (fpnum, "0x", 2) == 0)
4193 return FAIL;
4194 else
4196 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
4197 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
4199 found_fpchar = 1;
4200 break;
4203 if (!found_fpchar)
4204 return FAIL;
4207 if ((str = atof_ieee (str, 's', words)) != NULL)
4209 unsigned fpword = 0;
4210 int i;
4212 /* Our FP word must be 32 bits (single-precision FP). */
4213 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
4215 fpword <<= LITTLENUM_NUMBER_OF_BITS;
4216 fpword |= words[i];
4219 if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
4220 *immed = fpword;
4221 else
4222 return FAIL;
4224 *ccp = str;
4226 return SUCCESS;
4229 return FAIL;
4232 /* Shift operands. */
4233 enum shift_kind
4235 SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
4238 struct asm_shift_name
4240 const char *name;
4241 enum shift_kind kind;
4244 /* Third argument to parse_shift. */
4245 enum parse_shift_mode
4247 NO_SHIFT_RESTRICT, /* Any kind of shift is accepted. */
4248 SHIFT_IMMEDIATE, /* Shift operand must be an immediate. */
4249 SHIFT_LSL_OR_ASR_IMMEDIATE, /* Shift must be LSL or ASR immediate. */
4250 SHIFT_ASR_IMMEDIATE, /* Shift must be ASR immediate. */
4251 SHIFT_LSL_IMMEDIATE, /* Shift must be LSL immediate. */
4254 /* Parse a <shift> specifier on an ARM data processing instruction.
4255 This has three forms:
4257 (LSL|LSR|ASL|ASR|ROR) Rs
4258 (LSL|LSR|ASL|ASR|ROR) #imm
4259 RRX
4261 Note that ASL is assimilated to LSL in the instruction encoding, and
4262 RRX to ROR #0 (which cannot be written as such). */
4264 static int
4265 parse_shift (char **str, int i, enum parse_shift_mode mode)
4267 const struct asm_shift_name *shift_name;
4268 enum shift_kind shift;
4269 char *s = *str;
4270 char *p = s;
4271 int reg;
4273 for (p = *str; ISALPHA (*p); p++)
4276 if (p == *str)
4278 inst.error = _("shift expression expected");
4279 return FAIL;
4282 shift_name = hash_find_n (arm_shift_hsh, *str, p - *str);
4284 if (shift_name == NULL)
4286 inst.error = _("shift expression expected");
4287 return FAIL;
4290 shift = shift_name->kind;
4292 switch (mode)
4294 case NO_SHIFT_RESTRICT:
4295 case SHIFT_IMMEDIATE: break;
4297 case SHIFT_LSL_OR_ASR_IMMEDIATE:
4298 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
4300 inst.error = _("'LSL' or 'ASR' required");
4301 return FAIL;
4303 break;
4305 case SHIFT_LSL_IMMEDIATE:
4306 if (shift != SHIFT_LSL)
4308 inst.error = _("'LSL' required");
4309 return FAIL;
4311 break;
4313 case SHIFT_ASR_IMMEDIATE:
4314 if (shift != SHIFT_ASR)
4316 inst.error = _("'ASR' required");
4317 return FAIL;
4319 break;
4321 default: abort ();
4324 if (shift != SHIFT_RRX)
4326 /* Whitespace can appear here if the next thing is a bare digit. */
4327 skip_whitespace (p);
4329 if (mode == NO_SHIFT_RESTRICT
4330 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4332 inst.operands[i].imm = reg;
4333 inst.operands[i].immisreg = 1;
4335 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4336 return FAIL;
4338 inst.operands[i].shift_kind = shift;
4339 inst.operands[i].shifted = 1;
4340 *str = p;
4341 return SUCCESS;
4344 /* Parse a <shifter_operand> for an ARM data processing instruction:
4346 #<immediate>
4347 #<immediate>, <rotate>
4348 <Rm>
4349 <Rm>, <shift>
4351 where <shift> is defined by parse_shift above, and <rotate> is a
4352 multiple of 2 between 0 and 30. Validation of immediate operands
4353 is deferred to md_apply_fix. */
4355 static int
4356 parse_shifter_operand (char **str, int i)
4358 int value;
4359 expressionS expr;
4361 if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
4363 inst.operands[i].reg = value;
4364 inst.operands[i].isreg = 1;
4366 /* parse_shift will override this if appropriate */
4367 inst.reloc.exp.X_op = O_constant;
4368 inst.reloc.exp.X_add_number = 0;
4370 if (skip_past_comma (str) == FAIL)
4371 return SUCCESS;
4373 /* Shift operation on register. */
4374 return parse_shift (str, i, NO_SHIFT_RESTRICT);
4377 if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
4378 return FAIL;
4380 if (skip_past_comma (str) == SUCCESS)
4382 /* #x, y -- i.e. explicit rotation by Y. */
4383 if (my_get_expression (&expr, str, GE_NO_PREFIX))
4384 return FAIL;
4386 if (expr.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
4388 inst.error = _("constant expression expected");
4389 return FAIL;
4392 value = expr.X_add_number;
4393 if (value < 0 || value > 30 || value % 2 != 0)
4395 inst.error = _("invalid rotation");
4396 return FAIL;
4398 if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
4400 inst.error = _("invalid constant");
4401 return FAIL;
4404 /* Convert to decoded value. md_apply_fix will put it back. */
4405 inst.reloc.exp.X_add_number
4406 = (((inst.reloc.exp.X_add_number << (32 - value))
4407 | (inst.reloc.exp.X_add_number >> value)) & 0xffffffff);
4410 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
4411 inst.reloc.pc_rel = 0;
4412 return SUCCESS;
4415 /* Group relocation information. Each entry in the table contains the
4416 textual name of the relocation as may appear in assembler source
4417 and must end with a colon.
4418 Along with this textual name are the relocation codes to be used if
4419 the corresponding instruction is an ALU instruction (ADD or SUB only),
4420 an LDR, an LDRS, or an LDC. */
4422 struct group_reloc_table_entry
4424 const char *name;
4425 int alu_code;
4426 int ldr_code;
4427 int ldrs_code;
4428 int ldc_code;
4431 typedef enum
4433 /* Varieties of non-ALU group relocation. */
4435 GROUP_LDR,
4436 GROUP_LDRS,
4437 GROUP_LDC
4438 } group_reloc_type;
4440 static struct group_reloc_table_entry group_reloc_table[] =
4441 { /* Program counter relative: */
4442 { "pc_g0_nc",
4443 BFD_RELOC_ARM_ALU_PC_G0_NC, /* ALU */
4444 0, /* LDR */
4445 0, /* LDRS */
4446 0 }, /* LDC */
4447 { "pc_g0",
4448 BFD_RELOC_ARM_ALU_PC_G0, /* ALU */
4449 BFD_RELOC_ARM_LDR_PC_G0, /* LDR */
4450 BFD_RELOC_ARM_LDRS_PC_G0, /* LDRS */
4451 BFD_RELOC_ARM_LDC_PC_G0 }, /* LDC */
4452 { "pc_g1_nc",
4453 BFD_RELOC_ARM_ALU_PC_G1_NC, /* ALU */
4454 0, /* LDR */
4455 0, /* LDRS */
4456 0 }, /* LDC */
4457 { "pc_g1",
4458 BFD_RELOC_ARM_ALU_PC_G1, /* ALU */
4459 BFD_RELOC_ARM_LDR_PC_G1, /* LDR */
4460 BFD_RELOC_ARM_LDRS_PC_G1, /* LDRS */
4461 BFD_RELOC_ARM_LDC_PC_G1 }, /* LDC */
4462 { "pc_g2",
4463 BFD_RELOC_ARM_ALU_PC_G2, /* ALU */
4464 BFD_RELOC_ARM_LDR_PC_G2, /* LDR */
4465 BFD_RELOC_ARM_LDRS_PC_G2, /* LDRS */
4466 BFD_RELOC_ARM_LDC_PC_G2 }, /* LDC */
4467 /* Section base relative */
4468 { "sb_g0_nc",
4469 BFD_RELOC_ARM_ALU_SB_G0_NC, /* ALU */
4470 0, /* LDR */
4471 0, /* LDRS */
4472 0 }, /* LDC */
4473 { "sb_g0",
4474 BFD_RELOC_ARM_ALU_SB_G0, /* ALU */
4475 BFD_RELOC_ARM_LDR_SB_G0, /* LDR */
4476 BFD_RELOC_ARM_LDRS_SB_G0, /* LDRS */
4477 BFD_RELOC_ARM_LDC_SB_G0 }, /* LDC */
4478 { "sb_g1_nc",
4479 BFD_RELOC_ARM_ALU_SB_G1_NC, /* ALU */
4480 0, /* LDR */
4481 0, /* LDRS */
4482 0 }, /* LDC */
4483 { "sb_g1",
4484 BFD_RELOC_ARM_ALU_SB_G1, /* ALU */
4485 BFD_RELOC_ARM_LDR_SB_G1, /* LDR */
4486 BFD_RELOC_ARM_LDRS_SB_G1, /* LDRS */
4487 BFD_RELOC_ARM_LDC_SB_G1 }, /* LDC */
4488 { "sb_g2",
4489 BFD_RELOC_ARM_ALU_SB_G2, /* ALU */
4490 BFD_RELOC_ARM_LDR_SB_G2, /* LDR */
4491 BFD_RELOC_ARM_LDRS_SB_G2, /* LDRS */
4492 BFD_RELOC_ARM_LDC_SB_G2 } }; /* LDC */
4494 /* Given the address of a pointer pointing to the textual name of a group
4495 relocation as may appear in assembler source, attempt to find its details
4496 in group_reloc_table. The pointer will be updated to the character after
4497 the trailing colon. On failure, FAIL will be returned; SUCCESS
4498 otherwise. On success, *entry will be updated to point at the relevant
4499 group_reloc_table entry. */
4501 static int
4502 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
4504 unsigned int i;
4505 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
4507 int length = strlen (group_reloc_table[i].name);
4509 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
4510 && (*str)[length] == ':')
4512 *out = &group_reloc_table[i];
4513 *str += (length + 1);
4514 return SUCCESS;
4518 return FAIL;
4521 /* Parse a <shifter_operand> for an ARM data processing instruction
4522 (as for parse_shifter_operand) where group relocations are allowed:
4524 #<immediate>
4525 #<immediate>, <rotate>
4526 #:<group_reloc>:<expression>
4527 <Rm>
4528 <Rm>, <shift>
4530 where <group_reloc> is one of the strings defined in group_reloc_table.
4531 The hashes are optional.
4533 Everything else is as for parse_shifter_operand. */
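/* For example, using the grammar above, an ADD with a group relocation on
   its shifter operand might be written as
       add r0, r1, #:pc_g0_nc:(sym)
   or, since the hash is optional, as
       add r0, r1, :pc_g0_nc:(sym)
   where "sym" stands for any symbol or expression.  */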
4535 static parse_operand_result
4536 parse_shifter_operand_group_reloc (char **str, int i)
4538 /* Determine if we have the sequence of characters #: or just :
4539 coming next. If we do, then we check for a group relocation.
4540 If we don't, punt the whole lot to parse_shifter_operand. */
4542 if (((*str)[0] == '#' && (*str)[1] == ':')
4543 || (*str)[0] == ':')
4545 struct group_reloc_table_entry *entry;
4547 if ((*str)[0] == '#')
4548 (*str) += 2;
4549 else
4550 (*str)++;
4552 /* Try to parse a group relocation. Anything else is an error. */
4553 if (find_group_reloc_table_entry (str, &entry) == FAIL)
4555 inst.error = _("unknown group relocation");
4556 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4559 /* We now have the group relocation table entry corresponding to
4560 the name in the assembler source. Next, we parse the expression. */
4561 if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
4562 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4564 /* Record the relocation type (always the ALU variant here). */
4565 inst.reloc.type = entry->alu_code;
4566 assert (inst.reloc.type != 0);
4568 return PARSE_OPERAND_SUCCESS;
4570 else
4571 return parse_shifter_operand (str, i) == SUCCESS
4572 ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
4574 /* Never reached. */
4577 /* Parse all forms of an ARM address expression. Information is written
4578 to inst.operands[i] and/or inst.reloc.
4580 Preindexed addressing (.preind=1):
4582 [Rn, #offset] .reg=Rn .reloc.exp=offset
4583 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4584 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4585 .shift_kind=shift .reloc.exp=shift_imm
4587 These three may have a trailing ! which causes .writeback to be set also.
4589 Postindexed addressing (.postind=1, .writeback=1):
4591 [Rn], #offset .reg=Rn .reloc.exp=offset
4592 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4593 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4594 .shift_kind=shift .reloc.exp=shift_imm
4596 Unindexed addressing (.preind=0, .postind=0):
4598 [Rn], {option} .reg=Rn .imm=option .immisreg=0
4600 Other:
4602 [Rn]{!} shorthand for [Rn,#0]{!}
4603 =immediate .isreg=0 .reloc.exp=immediate
4604 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
4606 It is the caller's responsibility to check for addressing modes not
4607 supported by the instruction, and to set inst.reloc.type. */
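/* As an illustration, the forms above correspond to source operands such as
       ldr r0, [r1, #4]            (preindexed, immediate offset)
       ldr r0, [r1, r2, lsl #2]!   (preindexed, scaled register, writeback)
       ldr r0, [r1], #4            (postindexed)
       ldr r0, =0x12345678         (load-constant pseudo)
   though which of these are legal depends on the particular instruction.  */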
4609 static parse_operand_result
4610 parse_address_main (char **str, int i, int group_relocations,
4611 group_reloc_type group_type)
4613 char *p = *str;
4614 int reg;
4616 if (skip_past_char (&p, '[') == FAIL)
4618 if (skip_past_char (&p, '=') == FAIL)
4620 /* bare address - translate to PC-relative offset */
4621 inst.reloc.pc_rel = 1;
4622 inst.operands[i].reg = REG_PC;
4623 inst.operands[i].isreg = 1;
4624 inst.operands[i].preind = 1;
4626 /* else a load-constant pseudo op, no special treatment needed here */
4628 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
4629 return PARSE_OPERAND_FAIL;
4631 *str = p;
4632 return PARSE_OPERAND_SUCCESS;
4635 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
4637 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
4638 return PARSE_OPERAND_FAIL;
4640 inst.operands[i].reg = reg;
4641 inst.operands[i].isreg = 1;
4643 if (skip_past_comma (&p) == SUCCESS)
4645 inst.operands[i].preind = 1;
4647 if (*p == '+') p++;
4648 else if (*p == '-') p++, inst.operands[i].negative = 1;
4650 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4652 inst.operands[i].imm = reg;
4653 inst.operands[i].immisreg = 1;
4655 if (skip_past_comma (&p) == SUCCESS)
4656 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
4657 return PARSE_OPERAND_FAIL;
4659 else if (skip_past_char (&p, ':') == SUCCESS)
4661 /* FIXME: '@' should be used here, but it's filtered out by generic
4662 code before we get to see it here. This may be subject to
4663 change. */
4664 expressionS exp;
4665 my_get_expression (&exp, &p, GE_NO_PREFIX);
4666 if (exp.X_op != O_constant)
4668 inst.error = _("alignment must be constant");
4669 return PARSE_OPERAND_FAIL;
4671 inst.operands[i].imm = exp.X_add_number << 8;
4672 inst.operands[i].immisalign = 1;
4673 /* Alignments are not pre-indexes. */
4674 inst.operands[i].preind = 0;
4676 else
4678 if (inst.operands[i].negative)
4680 inst.operands[i].negative = 0;
4681 p--;
4684 if (group_relocations
4685 && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
4687 struct group_reloc_table_entry *entry;
4689 /* Skip over the #: or : sequence. */
4690 if (*p == '#')
4691 p += 2;
4692 else
4693 p++;
4695 /* Try to parse a group relocation. Anything else is an
4696 error. */
4697 if (find_group_reloc_table_entry (&p, &entry) == FAIL)
4699 inst.error = _("unknown group relocation");
4700 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4703 /* We now have the group relocation table entry corresponding to
4704 the name in the assembler source. Next, we parse the
4705 expression. */
4706 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
4707 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4709 /* Record the relocation type. */
4710 switch (group_type)
4712 case GROUP_LDR:
4713 inst.reloc.type = entry->ldr_code;
4714 break;
4716 case GROUP_LDRS:
4717 inst.reloc.type = entry->ldrs_code;
4718 break;
4720 case GROUP_LDC:
4721 inst.reloc.type = entry->ldc_code;
4722 break;
4724 default:
4725 assert (0);
4728 if (inst.reloc.type == 0)
4730 inst.error = _("this group relocation is not allowed on this instruction");
4731 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4734 else
4735 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4736 return PARSE_OPERAND_FAIL;
4740 if (skip_past_char (&p, ']') == FAIL)
4742 inst.error = _("']' expected");
4743 return PARSE_OPERAND_FAIL;
4746 if (skip_past_char (&p, '!') == SUCCESS)
4747 inst.operands[i].writeback = 1;
4749 else if (skip_past_comma (&p) == SUCCESS)
4751 if (skip_past_char (&p, '{') == SUCCESS)
4753 /* [Rn], {expr} - unindexed, with option */
4754 if (parse_immediate (&p, &inst.operands[i].imm,
4755 0, 255, TRUE) == FAIL)
4756 return PARSE_OPERAND_FAIL;
4758 if (skip_past_char (&p, '}') == FAIL)
4760 inst.error = _("'}' expected at end of 'option' field");
4761 return PARSE_OPERAND_FAIL;
4763 if (inst.operands[i].preind)
4765 inst.error = _("cannot combine index with option");
4766 return PARSE_OPERAND_FAIL;
4768 *str = p;
4769 return PARSE_OPERAND_SUCCESS;
4771 else
4773 inst.operands[i].postind = 1;
4774 inst.operands[i].writeback = 1;
4776 if (inst.operands[i].preind)
4778 inst.error = _("cannot combine pre- and post-indexing");
4779 return PARSE_OPERAND_FAIL;
4782 if (*p == '+') p++;
4783 else if (*p == '-') p++, inst.operands[i].negative = 1;
4785 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4787 /* We might be using the immediate for alignment already. If we
4788 are, OR the register number into the low-order bits. */
4789 if (inst.operands[i].immisalign)
4790 inst.operands[i].imm |= reg;
4791 else
4792 inst.operands[i].imm = reg;
4793 inst.operands[i].immisreg = 1;
4795 if (skip_past_comma (&p) == SUCCESS)
4796 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
4797 return PARSE_OPERAND_FAIL;
4799 else
4801 if (inst.operands[i].negative)
4803 inst.operands[i].negative = 0;
4804 p--;
4806 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4807 return PARSE_OPERAND_FAIL;
4812 /* If at this point neither .preind nor .postind is set, we have a
4813 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
4814 if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
4816 inst.operands[i].preind = 1;
4817 inst.reloc.exp.X_op = O_constant;
4818 inst.reloc.exp.X_add_number = 0;
4820 *str = p;
4821 return PARSE_OPERAND_SUCCESS;
4824 static int
4825 parse_address (char **str, int i)
4827 return parse_address_main (str, i, 0, 0) == PARSE_OPERAND_SUCCESS
4828 ? SUCCESS : FAIL;
4831 static parse_operand_result
4832 parse_address_group_reloc (char **str, int i, group_reloc_type type)
4834 return parse_address_main (str, i, 1, type);
4837 /* Parse an operand for a MOVW or MOVT instruction. */
4838 static int
4839 parse_half (char **str)
4841 char * p;
4843 p = *str;
4844 skip_past_char (&p, '#');
4845 if (strncasecmp (p, ":lower16:", 9) == 0)
4846 inst.reloc.type = BFD_RELOC_ARM_MOVW;
4847 else if (strncasecmp (p, ":upper16:", 9) == 0)
4848 inst.reloc.type = BFD_RELOC_ARM_MOVT;
4850 if (inst.reloc.type != BFD_RELOC_UNUSED)
4852 p += 9;
4853 skip_whitespace (p);
4856 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
4857 return FAIL;
4859 if (inst.reloc.type == BFD_RELOC_UNUSED)
4861 if (inst.reloc.exp.X_op != O_constant)
4863 inst.error = _("constant expression expected");
4864 return FAIL;
4866 if (inst.reloc.exp.X_add_number < 0
4867 || inst.reloc.exp.X_add_number > 0xffff)
4869 inst.error = _("immediate value out of range");
4870 return FAIL;
4873 *str = p;
4874 return SUCCESS;
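/* Typical source forms handled here are
       movw r0, #:lower16:sym
       movt r0, #:upper16:sym
   as well as plain 16-bit constants such as  movw r0, #0x1234
   (sym being a placeholder for any symbol).  */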
4877 /* Miscellaneous. */
4879 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
4880 or a bitmask suitable to be or-ed into the ARM msr instruction. */
4881 static int
4882 parse_psr (char **str)
4884 char *p;
4885 unsigned long psr_field;
4886 const struct asm_psr *psr;
4887 char *start;
4889 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
4890 feature for ease of use and backwards compatibility. */
4891 p = *str;
4892 if (strncasecmp (p, "SPSR", 4) == 0)
4893 psr_field = SPSR_BIT;
4894 else if (strncasecmp (p, "CPSR", 4) == 0)
4895 psr_field = 0;
4896 else
4898 start = p;
4899 do
4900 p++;
4901 while (ISALNUM (*p) || *p == '_');
4903 psr = hash_find_n (arm_v7m_psr_hsh, start, p - start);
4904 if (!psr)
4905 return FAIL;
4907 *str = p;
4908 return psr->field;
4911 p += 4;
4912 if (*p == '_')
4914 /* A suffix follows. */
4915 p++;
4916 start = p;
4918 do
4919 p++;
4920 while (ISALNUM (*p) || *p == '_');
4922 psr = hash_find_n (arm_psr_hsh, start, p - start);
4923 if (!psr)
4924 goto error;
4926 psr_field |= psr->field;
4928 else
4930 if (ISALNUM (*p))
4931 goto error; /* Garbage after "[CS]PSR". */
4933 psr_field |= (PSR_c | PSR_f);
4935 *str = p;
4936 return psr_field;
4938 error:
4939 inst.error = _("flag for {c}psr instruction expected");
4940 return FAIL;
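/* Accepted operands therefore include bare CPSR or SPSR, which default to
   the c and f fields, and suffixed forms such as CPSR_c or SPSR_fsxc, the
   suffix being looked up in arm_psr_hsh.  */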
4943 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
4944 value suitable for splatting into the AIF field of the instruction. */
4946 static int
4947 parse_cps_flags (char **str)
4949 int val = 0;
4950 int saw_a_flag = 0;
4951 char *s = *str;
4953 for (;;)
4954 switch (*s++)
4956 case '\0': case ',':
4957 goto done;
4959 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
4960 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
4961 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
4963 default:
4964 inst.error = _("unrecognized CPS flag");
4965 return FAIL;
4968 done:
4969 if (saw_a_flag == 0)
4971 inst.error = _("missing CPS flags");
4972 return FAIL;
4975 *str = s - 1;
4976 return val;
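/* For instance, the flags operand of  cpsie if  parses to 0x3 (I and F),
   and that of  cpsid aif  to 0x7.  */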
4979 /* Parse an endian specifier ("BE" or "LE", case insensitive);
4980 returns 1 for big-endian, 0 for little-endian, FAIL for an error. */
4982 static int
4983 parse_endian_specifier (char **str)
4985 int big_endian;
4986 char *s = *str;
4988 if (strncasecmp (s, "BE", 2) == 0)
4989 big_endian = 1;
4990 else if (strncasecmp (s, "LE", 2) == 0)
4991 big_endian = 0;
4992 else
4994 inst.error = _("valid endian specifiers are be or le");
4995 return FAIL;
4998 if (ISALNUM (s[2]) || s[2] == '_')
5000 inst.error = _("valid endian specifiers are be or le");
5001 return FAIL;
5004 *str = s + 2;
5005 return big_endian;
5008 /* Parse a rotation specifier: ROR #0, #8, #16, #24. Returns a
5009 value suitable for poking into the rotate field of an sxt or sxta
5010 instruction, or FAIL on error. */
5012 static int
5013 parse_ror (char **str)
5015 int rot;
5016 char *s = *str;
5018 if (strncasecmp (s, "ROR", 3) == 0)
5019 s += 3;
5020 else
5022 inst.error = _("missing rotation field after comma");
5023 return FAIL;
5026 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
5027 return FAIL;
5029 switch (rot)
5031 case 0: *str = s; return 0x0;
5032 case 8: *str = s; return 0x1;
5033 case 16: *str = s; return 0x2;
5034 case 24: *str = s; return 0x3;
5036 default:
5037 inst.error = _("rotation can only be 0, 8, 16, or 24");
5038 return FAIL;
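/* This rotation appears in the extend instructions; for example
       sxtb r0, r1, ror #16
   selects the field value 0x2.  */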
5042 /* Parse a conditional code (from conds[] below). The value returned is in the
5043 range 0 .. 14, or FAIL. */
5044 static int
5045 parse_cond (char **str)
5047 char *q;
5048 const struct asm_cond *c;
5049 int n;
5050 /* Condition codes are always 2 characters, so matching up to
5051 3 characters is sufficient. */
5052 char cond[3];
5054 q = *str;
5055 n = 0;
5056 while (ISALPHA (*q) && n < 3)
5058 cond[n] = TOLOWER(*q);
5059 q++;
5060 n++;
5063 c = hash_find_n (arm_cond_hsh, cond, n);
5064 if (!c)
5066 inst.error = _("condition required");
5067 return FAIL;
5070 *str = q;
5071 return c->value;
5074 /* Parse an option for a barrier instruction. Returns the encoding for the
5075 option, or FAIL. */
5076 static int
5077 parse_barrier (char **str)
5079 char *p, *q;
5080 const struct asm_barrier_opt *o;
5082 p = q = *str;
5083 while (ISALPHA (*q))
5084 q++;
5086 o = hash_find_n (arm_barrier_opt_hsh, p, q - p);
5087 if (!o)
5088 return FAIL;
5090 *str = q;
5091 return o->value;
5094 /* Parse the operands of a table branch instruction. Similar to a memory
5095 operand. */
5096 static int
5097 parse_tb (char **str)
5099 char * p = *str;
5100 int reg;
5102 if (skip_past_char (&p, '[') == FAIL)
5104 inst.error = _("'[' expected");
5105 return FAIL;
5108 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5110 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5111 return FAIL;
5113 inst.operands[0].reg = reg;
5115 if (skip_past_comma (&p) == FAIL)
5117 inst.error = _("',' expected");
5118 return FAIL;
5121 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5123 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5124 return FAIL;
5126 inst.operands[0].imm = reg;
5128 if (skip_past_comma (&p) == SUCCESS)
5130 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
5131 return FAIL;
5132 if (inst.reloc.exp.X_add_number != 1)
5134 inst.error = _("invalid shift");
5135 return FAIL;
5137 inst.operands[0].shifted = 1;
5140 if (skip_past_char (&p, ']') == FAIL)
5142 inst.error = _("']' expected");
5143 return FAIL;
5145 *str = p;
5146 return SUCCESS;
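/* The operands accepted here are thus the Thumb-2 table branch forms, e.g.
       tbb [r0, r1]
       tbh [r0, r1, lsl #1]  */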
5149 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
5150 information on the types the operands can take and how they are encoded.
5151 Up to four operands may be read; this function handles setting the
5152 ".present" field for each read operand itself.
5153 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
5154 else returns FAIL. */
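/* Concrete examples of the cases referred to below include
       vmov.32  d0[1], r2          (case 4)
       vmov     d0, r2, r3         (case 5)
       vmov.f32 s0, s1             (case 8)
       vmov     s0, s1, r2, r3     (case 15)
   among others; see do_neon_mov for the full list.  */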
5156 static int
5157 parse_neon_mov (char **str, int *which_operand)
5159 int i = *which_operand, val;
5160 enum arm_reg_type rtype;
5161 char *ptr = *str;
5162 struct neon_type_el optype;
5164 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
5166 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
5167 inst.operands[i].reg = val;
5168 inst.operands[i].isscalar = 1;
5169 inst.operands[i].vectype = optype;
5170 inst.operands[i++].present = 1;
5172 if (skip_past_comma (&ptr) == FAIL)
5173 goto wanted_comma;
5175 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5176 goto wanted_arm;
5178 inst.operands[i].reg = val;
5179 inst.operands[i].isreg = 1;
5180 inst.operands[i].present = 1;
5182 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
5183 != FAIL)
5185 /* Cases 0, 1, 2, 3, 5 (D only). */
5186 if (skip_past_comma (&ptr) == FAIL)
5187 goto wanted_comma;
5189 inst.operands[i].reg = val;
5190 inst.operands[i].isreg = 1;
5191 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
5192 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5193 inst.operands[i].isvec = 1;
5194 inst.operands[i].vectype = optype;
5195 inst.operands[i++].present = 1;
5197 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5199 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
5200 Case 13: VMOV <Sd>, <Rm> */
5201 inst.operands[i].reg = val;
5202 inst.operands[i].isreg = 1;
5203 inst.operands[i].present = 1;
5205 if (rtype == REG_TYPE_NQ)
5207 first_error (_("can't use Neon quad register here"));
5208 return FAIL;
5210 else if (rtype != REG_TYPE_VFS)
5212 i++;
5213 if (skip_past_comma (&ptr) == FAIL)
5214 goto wanted_comma;
5215 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5216 goto wanted_arm;
5217 inst.operands[i].reg = val;
5218 inst.operands[i].isreg = 1;
5219 inst.operands[i].present = 1;
5222 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
5223 &optype)) != FAIL)
5225 /* Case 0: VMOV<c><q> <Qd>, <Qm>
5226 Case 1: VMOV<c><q> <Dd>, <Dm>
5227 Case 8: VMOV.F32 <Sd>, <Sm>
5228 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */
5230 inst.operands[i].reg = val;
5231 inst.operands[i].isreg = 1;
5232 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
5233 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5234 inst.operands[i].isvec = 1;
5235 inst.operands[i].vectype = optype;
5236 inst.operands[i].present = 1;
5238 if (skip_past_comma (&ptr) == SUCCESS)
5240 /* Case 15. */
5241 i++;
5243 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5244 goto wanted_arm;
5246 inst.operands[i].reg = val;
5247 inst.operands[i].isreg = 1;
5248 inst.operands[i++].present = 1;
5250 if (skip_past_comma (&ptr) == FAIL)
5251 goto wanted_comma;
5253 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5254 goto wanted_arm;
5256 inst.operands[i].reg = val;
5257 inst.operands[i].isreg = 1;
5258 inst.operands[i++].present = 1;
5261 else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
5262 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
5263 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
5264 Case 10: VMOV.F32 <Sd>, #<imm>
5265 Case 11: VMOV.F64 <Dd>, #<imm> */
5266 inst.operands[i].immisfloat = 1;
5267 else if (parse_big_immediate (&ptr, i) == SUCCESS)
5268 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
5269 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
5270 ;
5271 else
5273 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
5274 return FAIL;
5277 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5279 /* Cases 6, 7. */
5280 inst.operands[i].reg = val;
5281 inst.operands[i].isreg = 1;
5282 inst.operands[i++].present = 1;
5284 if (skip_past_comma (&ptr) == FAIL)
5285 goto wanted_comma;
5287 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
5289 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
5290 inst.operands[i].reg = val;
5291 inst.operands[i].isscalar = 1;
5292 inst.operands[i].present = 1;
5293 inst.operands[i].vectype = optype;
5295 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5297 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
5298 inst.operands[i].reg = val;
5299 inst.operands[i].isreg = 1;
5300 inst.operands[i++].present = 1;
5302 if (skip_past_comma (&ptr) == FAIL)
5303 goto wanted_comma;
5305 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
5306 == FAIL)
5308 first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
5309 return FAIL;
5312 inst.operands[i].reg = val;
5313 inst.operands[i].isreg = 1;
5314 inst.operands[i].isvec = 1;
5315 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5316 inst.operands[i].vectype = optype;
5317 inst.operands[i].present = 1;
5319 if (rtype == REG_TYPE_VFS)
5321 /* Case 14. */
5322 i++;
5323 if (skip_past_comma (&ptr) == FAIL)
5324 goto wanted_comma;
5325 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
5326 &optype)) == FAIL)
5328 first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
5329 return FAIL;
5331 inst.operands[i].reg = val;
5332 inst.operands[i].isreg = 1;
5333 inst.operands[i].isvec = 1;
5334 inst.operands[i].issingle = 1;
5335 inst.operands[i].vectype = optype;
5336 inst.operands[i].present = 1;
5339 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
5340 != FAIL)
5342 /* Case 13. */
5343 inst.operands[i].reg = val;
5344 inst.operands[i].isreg = 1;
5345 inst.operands[i].isvec = 1;
5346 inst.operands[i].issingle = 1;
5347 inst.operands[i].vectype = optype;
5348 inst.operands[i++].present = 1;
5351 else
5353 first_error (_("parse error"));
5354 return FAIL;
5357 /* Successfully parsed the operands. Update args. */
5358 *which_operand = i;
5359 *str = ptr;
5360 return SUCCESS;
5362 wanted_comma:
5363 first_error (_("expected comma"));
5364 return FAIL;
5366 wanted_arm:
5367 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
5368 return FAIL;
5371 /* Matcher codes for parse_operands. */
5372 enum operand_parse_code
5374 OP_stop, /* end of line */
5376 OP_RR, /* ARM register */
5377 OP_RRnpc, /* ARM register, not r15 */
5378 OP_RRnpcb, /* ARM register, not r15, in square brackets */
5379 OP_RRw, /* ARM register, not r15, optional trailing ! */
5380 OP_RCP, /* Coprocessor number */
5381 OP_RCN, /* Coprocessor register */
5382 OP_RF, /* FPA register */
5383 OP_RVS, /* VFP single precision register */
5384 OP_RVD, /* VFP double precision register (0..15) */
5385 OP_RND, /* Neon double precision register (0..31) */
5386 OP_RNQ, /* Neon quad precision register */
5387 OP_RVSD, /* VFP single or double precision register */
5388 OP_RNDQ, /* Neon double or quad precision register */
5389 OP_RNSDQ, /* Neon single, double or quad precision register */
5390 OP_RNSC, /* Neon scalar D[X] */
5391 OP_RVC, /* VFP control register */
5392 OP_RMF, /* Maverick F register */
5393 OP_RMD, /* Maverick D register */
5394 OP_RMFX, /* Maverick FX register */
5395 OP_RMDX, /* Maverick DX register */
5396 OP_RMAX, /* Maverick AX register */
5397 OP_RMDS, /* Maverick DSPSC register */
5398 OP_RIWR, /* iWMMXt wR register */
5399 OP_RIWC, /* iWMMXt wC register */
5400 OP_RIWG, /* iWMMXt wCG register */
5401 OP_RXA, /* XScale accumulator register */
5403 OP_REGLST, /* ARM register list */
5404 OP_VRSLST, /* VFP single-precision register list */
5405 OP_VRDLST, /* VFP double-precision register list */
5406 OP_VRSDLST, /* VFP single or double-precision register list (& quad) */
5407 OP_NRDLST, /* Neon double-precision register list (d0-d31, qN aliases) */
5408 OP_NSTRLST, /* Neon element/structure list */
5410 OP_NILO, /* Neon immediate/logic operands 2 or 2+3. (VBIC, VORR...) */
5411 OP_RNDQ_I0, /* Neon D or Q reg, or immediate zero. */
5412 OP_RVSD_I0, /* VFP S or D reg, or immediate zero. */
5413 OP_RR_RNSC, /* ARM reg or Neon scalar. */
5414 OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar. */
5415 OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar. */
5416 OP_RND_RNSC, /* Neon D reg, or Neon scalar. */
5417 OP_VMOV, /* Neon VMOV operands. */
5418 OP_RNDQ_IMVNb,/* Neon D or Q reg, or immediate good for VMVN. */
5419 OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift. */
5420 OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2. */
5422 OP_I0, /* immediate zero */
5423 OP_I7, /* immediate value 0 .. 7 */
5424 OP_I15, /* 0 .. 15 */
5425 OP_I16, /* 1 .. 16 */
5426 OP_I16z, /* 0 .. 16 */
5427 OP_I31, /* 0 .. 31 */
5428 OP_I31w, /* 0 .. 31, optional trailing ! */
5429 OP_I32, /* 1 .. 32 */
5430 OP_I32z, /* 0 .. 32 */
5431 OP_I63, /* 0 .. 63 */
5432 OP_I63s, /* -64 .. 63 */
5433 OP_I64, /* 1 .. 64 */
5434 OP_I64z, /* 0 .. 64 */
5435 OP_I255, /* 0 .. 255 */
5437 OP_I4b, /* immediate, prefix optional, 1 .. 4 */
5438 OP_I7b, /* 0 .. 7 */
5439 OP_I15b, /* 0 .. 15 */
5440 OP_I31b, /* 0 .. 31 */
5442 OP_SH, /* shifter operand */
5443 OP_SHG, /* shifter operand with possible group relocation */
5444 OP_ADDR, /* Memory address expression (any mode) */
5445 OP_ADDRGLDR, /* Mem addr expr (any mode) with possible LDR group reloc */
5446 OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
5447 OP_ADDRGLDC, /* Mem addr expr (any mode) with possible LDC group reloc */
5448 OP_EXP, /* arbitrary expression */
5449 OP_EXPi, /* same, with optional immediate prefix */
5450 OP_EXPr, /* same, with optional relocation suffix */
5451 OP_HALF, /* 0 .. 65535 or low/high reloc. */
5453 OP_CPSF, /* CPS flags */
5454 OP_ENDI, /* Endianness specifier */
5455 OP_PSR, /* CPSR/SPSR mask for msr */
5456 OP_COND, /* conditional code */
5457 OP_TB, /* Table branch. */
5459 OP_RVC_PSR, /* CPSR/SPSR mask for msr, or VFP control register. */
5460 OP_APSR_RR, /* ARM register or "APSR_nzcv". */
5462 OP_RRnpc_I0, /* ARM register or literal 0 */
5463 OP_RR_EXr, /* ARM register or expression with opt. reloc suff. */
5464 OP_RR_EXi, /* ARM register or expression with imm prefix */
5465 OP_RF_IF, /* FPA register or immediate */
5466 OP_RIWR_RIWC, /* iWMMXt R or C reg */
5467 OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */
5469 /* Optional operands. */
5470 OP_oI7b, /* immediate, prefix optional, 0 .. 7 */
5471 OP_oI31b, /* 0 .. 31 */
5472 OP_oI32b, /* 1 .. 32 */
5473 OP_oIffffb, /* 0 .. 65535 */
5474 OP_oI255c, /* curly-brace enclosed, 0 .. 255 */
5476 OP_oRR, /* ARM register */
5477 OP_oRRnpc, /* ARM register, not the PC */
5478 OP_oRRw, /* ARM register, not r15, optional trailing ! */
5479 OP_oRND, /* Optional Neon double precision register */
5480 OP_oRNQ, /* Optional Neon quad precision register */
5481 OP_oRNDQ, /* Optional Neon double or quad precision register */
5482 OP_oRNSDQ, /* Optional single, double or quad precision vector register */
5483 OP_oSHll, /* LSL immediate */
5484 OP_oSHar, /* ASR immediate */
5485 OP_oSHllar, /* LSL or ASR immediate */
5486 OP_oROR, /* ROR 0/8/16/24 */
5487 OP_oBARRIER, /* Option argument for a barrier instruction. */
5489 OP_FIRST_OPTIONAL = OP_oI7b
5492 /* Generic instruction operand parser. This does no encoding and no
5493 semantic validation; it merely squirrels values away in the inst
5494 structure. Returns SUCCESS or FAIL depending on whether the
5495 specified grammar matched. */
5496 static int
5497 parse_operands (char *str, const unsigned char *pattern)
5499 unsigned const char *upat = pattern;
5500 char *backtrack_pos = 0;
5501 const char *backtrack_error = 0;
5502 int i, val, backtrack_index = 0;
5503 enum arm_reg_type rtype;
5504 parse_operand_result result;
5506 #define po_char_or_fail(chr) do { \
5507 if (skip_past_char (&str, chr) == FAIL) \
5508 goto bad_args; \
5509 } while (0)
5511 #define po_reg_or_fail(regtype) do { \
5512 val = arm_typed_reg_parse (&str, regtype, &rtype, \
5513 &inst.operands[i].vectype); \
5514 if (val == FAIL) \
5516 first_error (_(reg_expected_msgs[regtype])); \
5517 goto failure; \
5519 inst.operands[i].reg = val; \
5520 inst.operands[i].isreg = 1; \
5521 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
5522 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
5523 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
5524 || rtype == REG_TYPE_VFD \
5525 || rtype == REG_TYPE_NQ); \
5526 } while (0)
5528 #define po_reg_or_goto(regtype, label) do { \
5529 val = arm_typed_reg_parse (&str, regtype, &rtype, \
5530 &inst.operands[i].vectype); \
5531 if (val == FAIL) \
5532 goto label; \
5534 inst.operands[i].reg = val; \
5535 inst.operands[i].isreg = 1; \
5536 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
5537 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
5538 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
5539 || rtype == REG_TYPE_VFD \
5540 || rtype == REG_TYPE_NQ); \
5541 } while (0)
5543 #define po_imm_or_fail(min, max, popt) do { \
5544 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
5545 goto failure; \
5546 inst.operands[i].imm = val; \
5547 } while (0)
5549 #define po_scalar_or_goto(elsz, label) do { \
5550 val = parse_scalar (&str, elsz, &inst.operands[i].vectype); \
5551 if (val == FAIL) \
5552 goto label; \
5553 inst.operands[i].reg = val; \
5554 inst.operands[i].isscalar = 1; \
5555 } while (0)
5557 #define po_misc_or_fail(expr) do { \
5558 if (expr) \
5559 goto failure; \
5560 } while (0)
5562 #define po_misc_or_fail_no_backtrack(expr) do { \
5563 result = expr; \
5564 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)\
5565 backtrack_pos = 0; \
5566 if (result != PARSE_OPERAND_SUCCESS) \
5567 goto failure; \
5568 } while (0)
5570 skip_whitespace (str);
5572 for (i = 0; upat[i] != OP_stop; i++)
5574 if (upat[i] >= OP_FIRST_OPTIONAL)
5576 /* Remember where we are in case we need to backtrack. */
5577 assert (!backtrack_pos);
5578 backtrack_pos = str;
5579 backtrack_error = inst.error;
5580 backtrack_index = i;
5583 if (i > 0 && (i > 1 || inst.operands[0].present))
5584 po_char_or_fail (',');
5586 switch (upat[i])
5588 /* Registers */
5589 case OP_oRRnpc:
5590 case OP_RRnpc:
5591 case OP_oRR:
5592 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
5593 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
5594 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
5595 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
5596 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
5597 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
5598 case OP_oRND:
5599 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
5600 case OP_RVC:
5601 po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
5602 break;
5603 /* Also accept generic coprocessor regs for unknown registers. */
5604 coproc_reg:
5605 po_reg_or_fail (REG_TYPE_CN);
5606 break;
5607 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
5608 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
5609 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
5610 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
5611 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
5612 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
5613 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
5614 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
5615 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
5616 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
5617 case OP_oRNQ:
5618 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
5619 case OP_oRNDQ:
5620 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
5621 case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD); break;
5622 case OP_oRNSDQ:
5623 case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ); break;
5625 /* Neon scalar. Using an element size of 8 means that some invalid
5626 scalars are accepted here, so deal with those in later code. */
5627 case OP_RNSC: po_scalar_or_goto (8, failure); break;
5629 /* WARNING: We can expand to two operands here. This has the potential
5630 to totally confuse the backtracking mechanism! It will be OK at
5631 least as long as we don't try to use optional args as well,
5632 though. */
5633 case OP_NILO:
5635 po_reg_or_goto (REG_TYPE_NDQ, try_imm);
5636 inst.operands[i].present = 1;
5637 i++;
5638 skip_past_comma (&str);
5639 po_reg_or_goto (REG_TYPE_NDQ, one_reg_only);
5640 break;
5641 one_reg_only:
5642 /* Optional register operand was omitted. Unfortunately, it's in
5643 operands[i-1] and we need it to be in inst.operands[i]. Fix that
5644 here (this is a bit grotty). */
5645 inst.operands[i] = inst.operands[i-1];
5646 inst.operands[i-1].present = 0;
5647 break;
5648 try_imm:
5649 /* There's a possibility of getting a 64-bit immediate here, so
5650 we need special handling. */
5651 if (parse_big_immediate (&str, i) == FAIL)
5653 inst.error = _("immediate value is out of range");
5654 goto failure;
5657 break;
5659 case OP_RNDQ_I0:
5661 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
5662 break;
5663 try_imm0:
5664 po_imm_or_fail (0, 0, TRUE);
5666 break;
5668 case OP_RVSD_I0:
5669 po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
5670 break;
5672 case OP_RR_RNSC:
5674 po_scalar_or_goto (8, try_rr);
5675 break;
5676 try_rr:
5677 po_reg_or_fail (REG_TYPE_RN);
5679 break;
5681 case OP_RNSDQ_RNSC:
5683 po_scalar_or_goto (8, try_nsdq);
5684 break;
5685 try_nsdq:
5686 po_reg_or_fail (REG_TYPE_NSDQ);
5688 break;
5690 case OP_RNDQ_RNSC:
5692 po_scalar_or_goto (8, try_ndq);
5693 break;
5694 try_ndq:
5695 po_reg_or_fail (REG_TYPE_NDQ);
5697 break;
5699 case OP_RND_RNSC:
5701 po_scalar_or_goto (8, try_vfd);
5702 break;
5703 try_vfd:
5704 po_reg_or_fail (REG_TYPE_VFD);
5706 break;
5708 case OP_VMOV:
5709 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
5710 not careful then bad things might happen. */
5711 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
5712 break;
5714 case OP_RNDQ_IMVNb:
5716 po_reg_or_goto (REG_TYPE_NDQ, try_mvnimm);
5717 break;
5718 try_mvnimm:
5719 /* There's a possibility of getting a 64-bit immediate here, so
5720 we need special handling. */
5721 if (parse_big_immediate (&str, i) == FAIL)
5723 inst.error = _("immediate value is out of range");
5724 goto failure;
5727 break;
5729 case OP_RNDQ_I63b:
5731 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
5732 break;
5733 try_shimm:
5734 po_imm_or_fail (0, 63, TRUE);
5736 break;
5738 case OP_RRnpcb:
5739 po_char_or_fail ('[');
5740 po_reg_or_fail (REG_TYPE_RN);
5741 po_char_or_fail (']');
5742 break;
5744 case OP_RRw:
5745 case OP_oRRw:
5746 po_reg_or_fail (REG_TYPE_RN);
5747 if (skip_past_char (&str, '!') == SUCCESS)
5748 inst.operands[i].writeback = 1;
5749 break;
5751 /* Immediates */
5752 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
5753 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
5754 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
5755 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
5756 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
5757 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
5758 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
5759 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
5760 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
5761 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
5762 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
5763 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
5765 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
5766 case OP_oI7b:
5767 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
5768 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
5769 case OP_oI31b:
5770 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
5771 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
5772 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
5774 /* Immediate variants */
5775 case OP_oI255c:
5776 po_char_or_fail ('{');
5777 po_imm_or_fail (0, 255, TRUE);
5778 po_char_or_fail ('}');
5779 break;
5781 case OP_I31w:
5782 /* The expression parser chokes on a trailing !, so we have
5783 to find it first and zap it. */
5785 char *s = str;
5786 while (*s && *s != ',')
5787 s++;
5788 if (s[-1] == '!')
5790 s[-1] = '\0';
5791 inst.operands[i].writeback = 1;
5793 po_imm_or_fail (0, 31, TRUE);
5794 if (str == s - 1)
5795 str = s;
5797 break;
5799 /* Expressions */
5800 case OP_EXPi: EXPi:
5801 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5802 GE_OPT_PREFIX));
5803 break;
5805 case OP_EXP:
5806 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5807 GE_NO_PREFIX));
5808 break;
5810 case OP_EXPr: EXPr:
5811 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5812 GE_NO_PREFIX));
5813 if (inst.reloc.exp.X_op == O_symbol)
5815 val = parse_reloc (&str);
5816 if (val == -1)
5818 inst.error = _("unrecognized relocation suffix");
5819 goto failure;
5821 else if (val != BFD_RELOC_UNUSED)
5823 inst.operands[i].imm = val;
5824 inst.operands[i].hasreloc = 1;
5827 break;
5829 /* Operand for MOVW or MOVT. */
5830 case OP_HALF:
5831 po_misc_or_fail (parse_half (&str));
5832 break;
5834 /* Register or expression */
5835 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
5836 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
5838 /* Register or immediate */
5839 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
5840 I0: po_imm_or_fail (0, 0, FALSE); break;
5842 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
5843 IF:
5844 if (!is_immediate_prefix (*str))
5845 goto bad_args;
5846 str++;
5847 val = parse_fpa_immediate (&str);
5848 if (val == FAIL)
5849 goto failure;
5850 /* FPA immediates are encoded as registers 8-15.
5851 parse_fpa_immediate has already applied the offset. */
5852 inst.operands[i].reg = val;
5853 inst.operands[i].isreg = 1;
5854 break;
5856 case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
5857 I32z: po_imm_or_fail (0, 32, FALSE); break;
5859 /* Two kinds of register */
5860 case OP_RIWR_RIWC:
5862 struct reg_entry *rege = arm_reg_parse_multi (&str);
5863 if (!rege
5864 || (rege->type != REG_TYPE_MMXWR
5865 && rege->type != REG_TYPE_MMXWC
5866 && rege->type != REG_TYPE_MMXWCG))
5868 inst.error = _("iWMMXt data or control register expected");
5869 goto failure;
5871 inst.operands[i].reg = rege->number;
5872 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
5874 break;
5876 case OP_RIWC_RIWG:
5878 struct reg_entry *rege = arm_reg_parse_multi (&str);
5879 if (!rege
5880 || (rege->type != REG_TYPE_MMXWC
5881 && rege->type != REG_TYPE_MMXWCG))
5883 inst.error = _("iWMMXt control register expected");
5884 goto failure;
5886 inst.operands[i].reg = rege->number;
5887 inst.operands[i].isreg = 1;
5889 break;
5891 /* Misc */
5892 case OP_CPSF: val = parse_cps_flags (&str); break;
5893 case OP_ENDI: val = parse_endian_specifier (&str); break;
5894 case OP_oROR: val = parse_ror (&str); break;
5895 case OP_PSR: val = parse_psr (&str); break;
5896 case OP_COND: val = parse_cond (&str); break;
5897 case OP_oBARRIER:val = parse_barrier (&str); break;
5899 case OP_RVC_PSR:
5900 po_reg_or_goto (REG_TYPE_VFC, try_psr);
5901 inst.operands[i].isvec = 1; /* Mark VFP control reg as vector. */
5902 break;
5903 try_psr:
5904 val = parse_psr (&str);
5905 break;
5907 case OP_APSR_RR:
5908 po_reg_or_goto (REG_TYPE_RN, try_apsr);
5909 break;
5910 try_apsr:
5911 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
5912 instruction). */
5913 if (strncasecmp (str, "APSR_", 5) == 0)
5915 unsigned found = 0;
5916 str += 5;
5917 while (found < 15)
5918 switch (*str++)
5920 case 'c': found = (found & 1) ? 16 : found | 1; break;
5921 case 'n': found = (found & 2) ? 16 : found | 2; break;
5922 case 'z': found = (found & 4) ? 16 : found | 4; break;
5923 case 'v': found = (found & 8) ? 16 : found | 8; break;
5924 default: found = 16;
5926 if (found != 15)
5927 goto failure;
5928 inst.operands[i].isvec = 1;
5930 else
5931 goto failure;
5932 break;
5934 case OP_TB:
5935 po_misc_or_fail (parse_tb (&str));
5936 break;
5938 /* Register lists */
5939 case OP_REGLST:
5940 val = parse_reg_list (&str);
5941 if (*str == '^')
5943 inst.operands[1].writeback = 1;
5944 str++;
5946 break;
5948 case OP_VRSLST:
5949 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
5950 break;
5952 case OP_VRDLST:
5953 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
5954 break;
5956 case OP_VRSDLST:
5957 /* Allow Q registers too. */
5958 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
5959 REGLIST_NEON_D);
5960 if (val == FAIL)
5962 inst.error = NULL;
5963 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
5964 REGLIST_VFP_S);
5965 inst.operands[i].issingle = 1;
5967 break;
5969 case OP_NRDLST:
5970 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
5971 REGLIST_NEON_D);
5972 break;
5974 case OP_NSTRLST:
5975 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
5976 &inst.operands[i].vectype);
5977 break;
5979 /* Addressing modes */
5980 case OP_ADDR:
5981 po_misc_or_fail (parse_address (&str, i));
5982 break;
5984 case OP_ADDRGLDR:
5985 po_misc_or_fail_no_backtrack (
5986 parse_address_group_reloc (&str, i, GROUP_LDR));
5987 break;
5989 case OP_ADDRGLDRS:
5990 po_misc_or_fail_no_backtrack (
5991 parse_address_group_reloc (&str, i, GROUP_LDRS));
5992 break;
5994 case OP_ADDRGLDC:
5995 po_misc_or_fail_no_backtrack (
5996 parse_address_group_reloc (&str, i, GROUP_LDC));
5997 break;
5999 case OP_SH:
6000 po_misc_or_fail (parse_shifter_operand (&str, i));
6001 break;
6003 case OP_SHG:
6004 po_misc_or_fail_no_backtrack (
6005 parse_shifter_operand_group_reloc (&str, i));
6006 break;
6008 case OP_oSHll:
6009 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
6010 break;
6012 case OP_oSHar:
6013 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
6014 break;
6016 case OP_oSHllar:
6017 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
6018 break;
6020 default:
6021 as_fatal (_("unhandled operand code %d"), upat[i]);
6024 /* Various value-based sanity checks and shared operations. We
6025 do not signal immediate failures for the register constraints;
6026 this allows a syntax error to take precedence. */
6027 switch (upat[i])
6029 case OP_oRRnpc:
6030 case OP_RRnpc:
6031 case OP_RRnpcb:
6032 case OP_RRw:
6033 case OP_oRRw:
6034 case OP_RRnpc_I0:
6035 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
6036 inst.error = BAD_PC;
6037 break;
6039 case OP_CPSF:
6040 case OP_ENDI:
6041 case OP_oROR:
6042 case OP_PSR:
6043 case OP_RVC_PSR:
6044 case OP_COND:
6045 case OP_oBARRIER:
6046 case OP_REGLST:
6047 case OP_VRSLST:
6048 case OP_VRDLST:
6049 case OP_VRSDLST:
6050 case OP_NRDLST:
6051 case OP_NSTRLST:
6052 if (val == FAIL)
6053 goto failure;
6054 inst.operands[i].imm = val;
6055 break;
6057 default:
6058 break;
6061 /* If we get here, this operand was successfully parsed. */
6062 inst.operands[i].present = 1;
6063 continue;
6065 bad_args:
6066 inst.error = BAD_ARGS;
6068 failure:
6069 if (!backtrack_pos)
6071 /* The parse routine should already have set inst.error, but set a
6072 default here just in case. */
6073 if (!inst.error)
6074 inst.error = _("syntax error");
6075 return FAIL;
6078 /* Do not backtrack over a trailing optional argument that
6079 absorbed some text. We will only fail again, with the
6080 'garbage following instruction' error message, which is
6081 probably less helpful than the current one. */
6082 if (backtrack_index == i && backtrack_pos != str
6083 && upat[i+1] == OP_stop)
6085 if (!inst.error)
6086 inst.error = _("syntax error");
6087 return FAIL;
6090 /* Try again, skipping the optional argument at backtrack_pos. */
6091 str = backtrack_pos;
6092 inst.error = backtrack_error;
6093 inst.operands[backtrack_index].present = 0;
6094 i = backtrack_index;
6095 backtrack_pos = 0;
6098 /* Check that we have parsed all the arguments. */
6099 if (*str != '\0' && !inst.error)
6100 inst.error = _("garbage following instruction");
6102 return inst.error ? FAIL : SUCCESS;
6105 #undef po_char_or_fail
6106 #undef po_reg_or_fail
6107 #undef po_reg_or_goto
6108 #undef po_imm_or_fail
6109 #undef po_scalar_or_goto
6111 /* Shorthand macro for instruction encoding functions issuing errors. */
6112 #define constraint(expr, err) do { \
6113 if (expr) \
6115 inst.error = err; \
6116 return; \
6118 } while (0)
6120 /* Functions for operand encoding. ARM, then Thumb. */
6122 #define rotate_left(v, n) (v << n | v >> (32 - n))
6124 /* If VAL can be encoded in the immediate field of an ARM instruction,
6125 return the encoded form. Otherwise, return FAIL. */
6127 static unsigned int
6128 encode_arm_immediate (unsigned int val)
6130 unsigned int a, i;
6132 for (i = 0; i < 32; i += 2)
6133 if ((a = rotate_left (val, i)) <= 0xff)
6134 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
6136 return FAIL;
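/* A worked example: 0xff000000 is representable because rotating it left by
   8 gives 0xff, so the value returned is 0xff | (8 << 7) = 0x4ff, i.e.
   imm8 = 0xff with a rotate field of 4 (rotate right by 8).  */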
6139 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
6140 return the encoded form. Otherwise, return FAIL. */
6141 static unsigned int
6142 encode_thumb32_immediate (unsigned int val)
6144 unsigned int a, i;
6146 if (val <= 0xff)
6147 return val;
6149 for (i = 1; i <= 24; i++)
6151 a = val >> i;
6152 if ((val & ~(0xff << i)) == 0)
6153 return ((val >> i) & 0x7f) | ((32 - i) << 7);
6156 a = val & 0xff;
6157 if (val == ((a << 16) | a))
6158 return 0x100 | a;
6159 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
6160 return 0x300 | a;
6162 a = val & 0xff00;
6163 if (val == ((a << 16) | a))
6164 return 0x200 | (a >> 8);
6166 return FAIL;
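/* Worked examples of the replicated-byte forms above:
       0x00ab00ab -> 0x1ab
       0xab00ab00 -> 0x2ab
       0xabababab -> 0x3ab  */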
6168 /* Encode a VFP SP or DP register number into inst.instruction. */
6170 static void
6171 encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
6173 if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
6174 && reg > 15)
6176 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
6178 if (thumb_mode)
6179 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
6180 fpu_vfp_ext_d32);
6181 else
6182 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
6183 fpu_vfp_ext_d32);
6185 else
6187 first_error (_("D register out of range for selected VFP version"));
6188 return;
6192 switch (pos)
6194 case VFP_REG_Sd:
6195 inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
6196 break;
6198 case VFP_REG_Sn:
6199 inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
6200 break;
6202 case VFP_REG_Sm:
6203 inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
6204 break;
6206 case VFP_REG_Dd:
6207 inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
6208 break;
6210 case VFP_REG_Dn:
6211 inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
6212 break;
6214 case VFP_REG_Dm:
6215 inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
6216 break;
6218 default:
6219 abort ();
6223 /* Encode a <shift> in an ARM-format instruction. The immediate,
6224 if any, is handled by md_apply_fix. */
6225 static void
6226 encode_arm_shift (int i)
6228 if (inst.operands[i].shift_kind == SHIFT_RRX)
6229 inst.instruction |= SHIFT_ROR << 5;
6230 else
6232 inst.instruction |= inst.operands[i].shift_kind << 5;
6233 if (inst.operands[i].immisreg)
6235 inst.instruction |= SHIFT_BY_REG;
6236 inst.instruction |= inst.operands[i].imm << 8;
6238 else
6239 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
6243 static void
6244 encode_arm_shifter_operand (int i)
6246 if (inst.operands[i].isreg)
6248 inst.instruction |= inst.operands[i].reg;
6249 encode_arm_shift (i);
6251 else
6252 inst.instruction |= INST_IMMEDIATE;
6255 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
6256 static void
6257 encode_arm_addr_mode_common (int i, bfd_boolean is_t)
6259 assert (inst.operands[i].isreg);
6260 inst.instruction |= inst.operands[i].reg << 16;
6262 if (inst.operands[i].preind)
6264 if (is_t)
6266 inst.error = _("instruction does not accept preindexed addressing");
6267 return;
6269 inst.instruction |= PRE_INDEX;
6270 if (inst.operands[i].writeback)
6271 inst.instruction |= WRITE_BACK;
6274 else if (inst.operands[i].postind)
6276 assert (inst.operands[i].writeback);
6277 if (is_t)
6278 inst.instruction |= WRITE_BACK;
6280 else /* unindexed - only for coprocessor */
6282 inst.error = _("instruction does not accept unindexed addressing");
6283 return;
6286 if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
6287 && (((inst.instruction & 0x000f0000) >> 16)
6288 == ((inst.instruction & 0x0000f000) >> 12)))
6289 as_warn ((inst.instruction & LOAD_BIT)
6290 ? _("destination register same as write-back base")
6291 : _("source register same as write-back base"));
6294 /* inst.operands[i] was set up by parse_address. Encode it into an
6295 ARM-format mode 2 load or store instruction. If is_t is true,
6296 reject forms that cannot be used with a T instruction (i.e. not
6297 post-indexed). */
6298 static void
6299 encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
6301 encode_arm_addr_mode_common (i, is_t);
6303 if (inst.operands[i].immisreg)
6305 inst.instruction |= INST_IMMEDIATE; /* yes, this is backwards */
6306 inst.instruction |= inst.operands[i].imm;
6307 if (!inst.operands[i].negative)
6308 inst.instruction |= INDEX_UP;
6309 if (inst.operands[i].shifted)
6311 if (inst.operands[i].shift_kind == SHIFT_RRX)
6312 inst.instruction |= SHIFT_ROR << 5;
6313 else
6315 inst.instruction |= inst.operands[i].shift_kind << 5;
6316 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
6320 else /* immediate offset in inst.reloc */
6322 if (inst.reloc.type == BFD_RELOC_UNUSED)
6323 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
6327 /* inst.operands[i] was set up by parse_address. Encode it into an
6328 ARM-format mode 3 load or store instruction. Reject forms that
6329 cannot be used with such instructions. If is_t is true, reject
6330 forms that cannot be used with a T instruction (i.e. not
6331 post-indexed). */
6332 static void
6333 encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
6335 if (inst.operands[i].immisreg && inst.operands[i].shifted)
6337 inst.error = _("instruction does not accept scaled register index");
6338 return;
6341 encode_arm_addr_mode_common (i, is_t);
6343 if (inst.operands[i].immisreg)
6345 inst.instruction |= inst.operands[i].imm;
6346 if (!inst.operands[i].negative)
6347 inst.instruction |= INDEX_UP;
6349 else /* immediate offset in inst.reloc */
6351 inst.instruction |= HWOFFSET_IMM;
6352 if (inst.reloc.type == BFD_RELOC_UNUSED)
6353 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
6357 /* inst.operands[i] was set up by parse_address. Encode it into an
6358 ARM-format instruction. Reject all forms which cannot be encoded
6359 into a coprocessor load/store instruction. If wb_ok is false,
6360 reject use of writeback; if unind_ok is false, reject use of
6361 unindexed addressing. If reloc_override is not 0, use it instead
6362 of BFD_RELOC_ARM_CP_OFF_IMM, unless the initial relocation is a group one
6363 (in which case it is preserved). */
6365 static int
6366 encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
6368 inst.instruction |= inst.operands[i].reg << 16;
6370 assert (!(inst.operands[i].preind && inst.operands[i].postind));
6372 if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
6374 assert (!inst.operands[i].writeback);
6375 if (!unind_ok)
6377 inst.error = _("instruction does not support unindexed addressing");
6378 return FAIL;
6380 inst.instruction |= inst.operands[i].imm;
6381 inst.instruction |= INDEX_UP;
6382 return SUCCESS;
6385 if (inst.operands[i].preind)
6386 inst.instruction |= PRE_INDEX;
6388 if (inst.operands[i].writeback)
6390 if (inst.operands[i].reg == REG_PC)
6392 inst.error = _("pc may not be used with write-back");
6393 return FAIL;
6395 if (!wb_ok)
6397 inst.error = _("instruction does not support writeback");
6398 return FAIL;
6400 inst.instruction |= WRITE_BACK;
6403 if (reloc_override)
6404 inst.reloc.type = reloc_override;
6405 else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
6406 || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
6407 && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
6409 if (thumb_mode)
6410 inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
6411 else
6412 inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
6415 return SUCCESS;
6418 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
6419 Determine whether it can be performed with a move instruction; if
6420 it can, convert inst.instruction to that move instruction and
6421 return 1; if it can't, convert inst.instruction to a literal-pool
6422 load and return 0. If this is not a valid thing to do in the
6423 current context, set inst.error and return 1.
6425 inst.operands[i] describes the destination register. */
6427 static int
6428 move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3)
6430 unsigned long tbit;
6432 if (thumb_p)
6433 tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
6434 else
6435 tbit = LOAD_BIT;
6437 if ((inst.instruction & tbit) == 0)
6439 inst.error = _("invalid pseudo operation");
6440 return 1;
6442 if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol)
6444 inst.error = _("constant expression expected");
6445 return 1;
6447 if (inst.reloc.exp.X_op == O_constant)
6449 if (thumb_p)
6451 if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
6453 /* This can be done with a mov(1) instruction. */
6454 inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
6455 inst.instruction |= inst.reloc.exp.X_add_number;
6456 return 1;
6459 else
6461 int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
6462 if (value != FAIL)
6464 /* This can be done with a mov instruction. */
6465 inst.instruction &= LITERAL_MASK;
6466 inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
6467 inst.instruction |= value & 0xfff;
6468 return 1;
6471 value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
6472 if (value != FAIL)
6474 /* This can be done with a mvn instruction. */
6475 inst.instruction &= LITERAL_MASK;
6476 inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
6477 inst.instruction |= value & 0xfff;
6478 return 1;
6483 if (add_to_lit_pool () == FAIL)
6485 inst.error = _("literal pool insertion failed");
6486 return 1;
6488 inst.operands[1].reg = REG_PC;
6489 inst.operands[1].isreg = 1;
6490 inst.operands[1].preind = 1;
6491 inst.reloc.pc_rel = 1;
6492 inst.reloc.type = (thumb_p
6493 ? BFD_RELOC_ARM_THUMB_OFFSET
6494 : (mode_3
6495 ? BFD_RELOC_ARM_HWLITERAL
6496 : BFD_RELOC_ARM_LITERAL));
6497 return 0;
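/* Thus  ldr r0, =0xff0000  becomes  mov r0, #0xff0000  because the constant
   has a valid immediate encoding, whereas  ldr r0, =0x12345678  falls back
   to a literal-pool load.  */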
6500 /* Functions for instruction encoding, sorted by sub-architecture.
6501 First some generics; their names are taken from the conventional
6502 bit positions for register arguments in ARM format instructions. */
6504 static void
6505 do_noargs (void)
6509 static void
6510 do_rd (void)
6512 inst.instruction |= inst.operands[0].reg << 12;
6515 static void
6516 do_rd_rm (void)
6518 inst.instruction |= inst.operands[0].reg << 12;
6519 inst.instruction |= inst.operands[1].reg;
6522 static void
6523 do_rd_rn (void)
6525 inst.instruction |= inst.operands[0].reg << 12;
6526 inst.instruction |= inst.operands[1].reg << 16;
6529 static void
6530 do_rn_rd (void)
6532 inst.instruction |= inst.operands[0].reg << 16;
6533 inst.instruction |= inst.operands[1].reg << 12;
6536 static void
6537 do_rd_rm_rn (void)
6539 unsigned Rn = inst.operands[2].reg;
6540 /* Enforce restrictions on SWP instruction. */
6541 if ((inst.instruction & 0x0fbfffff) == 0x01000090)
6542 constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
6543 _("Rn must not overlap other operands"));
6544 inst.instruction |= inst.operands[0].reg << 12;
6545 inst.instruction |= inst.operands[1].reg;
6546 inst.instruction |= Rn << 16;
6549 static void
6550 do_rd_rn_rm (void)
6552 inst.instruction |= inst.operands[0].reg << 12;
6553 inst.instruction |= inst.operands[1].reg << 16;
6554 inst.instruction |= inst.operands[2].reg;
6557 static void
6558 do_rm_rd_rn (void)
6560 inst.instruction |= inst.operands[0].reg;
6561 inst.instruction |= inst.operands[1].reg << 12;
6562 inst.instruction |= inst.operands[2].reg << 16;
6565 static void
6566 do_imm0 (void)
6568 inst.instruction |= inst.operands[0].imm;
6571 static void
6572 do_rd_cpaddr (void)
6574 inst.instruction |= inst.operands[0].reg << 12;
6575 encode_arm_cp_address (1, TRUE, TRUE, 0);
6578 /* ARM instructions, in alphabetical order by function name (except
6579 that wrapper functions appear immediately after the function they
6580 wrap). */
6582 /* This is a pseudo-op of the form "adr rd, label" to be converted
6583 into a relative address of the form "add rd, pc, #label-.-8". */
6585 static void
6586 do_adr (void)
6588 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
6590 /* Frag hacking will turn this into a sub instruction if the offset turns
6591 out to be negative. */
6592 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
6593 inst.reloc.pc_rel = 1;
6594 inst.reloc.exp.X_add_number -= 8;
6597 /* This is a pseudo-op of the form "adrl rd, label" to be converted
6598 into a relative address of the form:
6599 "add rd, pc, #low(label-.-8)"
6600 "add rd, rd, #high(label-.-8)" */
6602 static void
6603 do_adrl (void)
6605 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
6607 /* Frag hacking will turn this into a sub instruction if the offset turns
6608 out to be negative. */
6609 inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
6610 inst.reloc.pc_rel = 1;
6611 inst.size = INSN_SIZE * 2;
6612 inst.reloc.exp.X_add_number -= 8;
6615 static void
6616 do_arit (void)
6618 if (!inst.operands[1].present)
6619 inst.operands[1].reg = inst.operands[0].reg;
6620 inst.instruction |= inst.operands[0].reg << 12;
6621 inst.instruction |= inst.operands[1].reg << 16;
6622 encode_arm_shifter_operand (2);
6625 static void
6626 do_barrier (void)
6628 if (inst.operands[0].present)
6630 constraint ((inst.instruction & 0xf0) != 0x40
6631 && inst.operands[0].imm != 0xf,
6632 _("bad barrier type"));
6633 inst.instruction |= inst.operands[0].imm;
6635 else
6636 inst.instruction |= 0xf;
6639 static void
6640 do_bfc (void)
6642 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
6643 constraint (msb > 32, _("bit-field extends past end of register"));
6644 /* The instruction encoding stores the LSB and MSB,
6645 not the LSB and width. */
6646 inst.instruction |= inst.operands[0].reg << 12;
6647 inst.instruction |= inst.operands[1].imm << 7;
6648 inst.instruction |= (msb - 1) << 16;
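/* For example, "bfc r0, #8, #4" clears bits 11:8 of r0: the LSB field
   (opcode bits 11:7) holds 8 and the MSB field (opcode bits 20:16)
   holds 8 + 4 - 1 = 11.  */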
6651 static void
6652 do_bfi (void)
6654 unsigned int msb;
6656 /* #0 in second position is alternative syntax for bfc, which is
6657 the same instruction but with REG_PC in the Rm field. */
6658 if (!inst.operands[1].isreg)
6659 inst.operands[1].reg = REG_PC;
6661 msb = inst.operands[2].imm + inst.operands[3].imm;
6662 constraint (msb > 32, _("bit-field extends past end of register"));
6663 /* The instruction encoding stores the LSB and MSB,
6664 not the LSB and width. */
6665 inst.instruction |= inst.operands[0].reg << 12;
6666 inst.instruction |= inst.operands[1].reg;
6667 inst.instruction |= inst.operands[2].imm << 7;
6668 inst.instruction |= (msb - 1) << 16;
6671 static void
6672 do_bfx (void)
6674 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
6675 _("bit-field extends past end of register"));
6676 inst.instruction |= inst.operands[0].reg << 12;
6677 inst.instruction |= inst.operands[1].reg;
6678 inst.instruction |= inst.operands[2].imm << 7;
6679 inst.instruction |= (inst.operands[3].imm - 1) << 16;
6682 /* ARM V5 breakpoint instruction (argument parse)
6683 BKPT <16 bit unsigned immediate>
6684 Instruction is not conditional.
6685 The bit pattern given in insns[] has the COND_ALWAYS condition,
6686 and it is an error if the caller tried to override that. */
6688 static void
6689 do_bkpt (void)
6691 /* Top 12 of 16 bits to bits 19:8. */
6692 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
6694 /* Bottom 4 of 16 bits to bits 3:0. */
6695 inst.instruction |= inst.operands[0].imm & 0xf;
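/* For example, "bkpt 0xabcd" places 0xabc in bits 19:8 and 0xd in bits
   3:0; assuming the standard BKPT base pattern 0xe1200070 from the
   opcode table, the result is 0xe12abc7d.  */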
6698 static void
6699 encode_branch (int default_reloc)
6701 if (inst.operands[0].hasreloc)
6703 constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32,
6704 _("the only suffix valid here is '(plt)'"));
6705 inst.reloc.type = BFD_RELOC_ARM_PLT32;
6707 else
6709 inst.reloc.type = default_reloc;
6711 inst.reloc.pc_rel = 1;
6714 static void
6715 do_branch (void)
6717 #ifdef OBJ_ELF
6718 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6719 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
6720 else
6721 #endif
6722 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
6725 static void
6726 do_bl (void)
6728 #ifdef OBJ_ELF
6729 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6731 if (inst.cond == COND_ALWAYS)
6732 encode_branch (BFD_RELOC_ARM_PCREL_CALL);
6733 else
6734 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
6736 else
6737 #endif
6738 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
6741 /* ARM V5 branch-link-exchange instruction (argument parse)
6742 BLX <target_addr> i.e. BLX(1)
6743 BLX{<condition>} <Rm> i.e. BLX(2)
6744 Unfortunately, there are two different opcodes for this mnemonic.
6745 So, the insns[].value is not used, and the code here zaps values
6746 into inst.instruction.
6747 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
6749 static void
6750 do_blx (void)
6752 if (inst.operands[0].isreg)
6754 /* Arg is a register; the opcode provided by insns[] is correct.
6755 It is not illegal to do "blx pc", just useless. */
6756 if (inst.operands[0].reg == REG_PC)
6757 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
6759 inst.instruction |= inst.operands[0].reg;
6761 else
6763 /* Arg is an address; this instruction cannot be executed
6764 conditionally, and the opcode must be adjusted. */
6765 constraint (inst.cond != COND_ALWAYS, BAD_COND);
6766 inst.instruction = 0xfa000000;
6767 #ifdef OBJ_ELF
6768 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6769 encode_branch (BFD_RELOC_ARM_PCREL_CALL);
6770 else
6771 #endif
6772 encode_branch (BFD_RELOC_ARM_PCREL_BLX);
6776 static void
6777 do_bx (void)
6779 bfd_boolean want_reloc;
6781 if (inst.operands[0].reg == REG_PC)
6782 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
6784 inst.instruction |= inst.operands[0].reg;
6785 /* Output R_ARM_V4BX relocations if this is an EABI object that looks like
6786 it is for ARMv4t or earlier. */
6787 want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
6788 if (object_arch && !ARM_CPU_HAS_FEATURE (*object_arch, arm_ext_v5))
6789 want_reloc = TRUE;
6791 #ifdef OBJ_ELF
6792 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
6793 #endif
6794 want_reloc = FALSE;
6796 if (want_reloc)
6797 inst.reloc.type = BFD_RELOC_ARM_V4BX;
6801 /* ARM v5TEJ. Jump to Jazelle code. */
6803 static void
6804 do_bxj (void)
6806 if (inst.operands[0].reg == REG_PC)
6807 as_tsktsk (_("use of r15 in bxj is not really useful"));
6809 inst.instruction |= inst.operands[0].reg;
6812 /* Co-processor data operation:
6813 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
6814 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
6815 static void
6816 do_cdp (void)
6818 inst.instruction |= inst.operands[0].reg << 8;
6819 inst.instruction |= inst.operands[1].imm << 20;
6820 inst.instruction |= inst.operands[2].reg << 12;
6821 inst.instruction |= inst.operands[3].reg << 16;
6822 inst.instruction |= inst.operands[4].reg;
6823 inst.instruction |= inst.operands[5].imm << 5;
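/* Field layout, as implied by the shifts above: coproc in bits 11:8,
   opcode_1 in bits 23:20, CRd in bits 15:12, CRn in bits 19:16, CRm in
   bits 3:0 and opcode_2 in bits 7:5.  */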
6826 static void
6827 do_cmp (void)
6829 inst.instruction |= inst.operands[0].reg << 16;
6830 encode_arm_shifter_operand (1);
6833 /* Transfer between coprocessor and ARM registers.
6834 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
6835 MRC2
6836 MCR{cond}
6837 MCR2
6839 No special properties. */
6841 static void
6842 do_co_reg (void)
6844 inst.instruction |= inst.operands[0].reg << 8;
6845 inst.instruction |= inst.operands[1].imm << 21;
6846 inst.instruction |= inst.operands[2].reg << 12;
6847 inst.instruction |= inst.operands[3].reg << 16;
6848 inst.instruction |= inst.operands[4].reg;
6849 inst.instruction |= inst.operands[5].imm << 5;
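/* Same layout as do_cdp except that opcode_1 is only three bits wide
   here (bits 23:21); bit 20 is the direction bit that distinguishes
   MRC from MCR.  */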
6852 /* Transfer between coprocessor register and pair of ARM registers.
6853 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
6854 MCRR2
6855 MRRC{cond}
6856 MRRC2
6858 Two XScale instructions are special cases of these:
6860 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
6861 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
6863 Result unpredictable if Rd or Rn is R15. */
6865 static void
6866 do_co_reg2c (void)
6868 inst.instruction |= inst.operands[0].reg << 8;
6869 inst.instruction |= inst.operands[1].imm << 4;
6870 inst.instruction |= inst.operands[2].reg << 12;
6871 inst.instruction |= inst.operands[3].reg << 16;
6872 inst.instruction |= inst.operands[4].reg;
6875 static void
6876 do_cpsi (void)
6878 inst.instruction |= inst.operands[0].imm << 6;
6879 if (inst.operands[1].present)
6881 inst.instruction |= CPSI_MMOD;
6882 inst.instruction |= inst.operands[1].imm;
6886 static void
6887 do_dbg (void)
6889 inst.instruction |= inst.operands[0].imm;
6892 static void
6893 do_it (void)
6895 /* There is no IT instruction in ARM mode. We
6896 process it but do not generate code for it. */
6897 inst.size = 0;
6900 static void
6901 do_ldmstm (void)
6903 int base_reg = inst.operands[0].reg;
6904 int range = inst.operands[1].imm;
6906 inst.instruction |= base_reg << 16;
6907 inst.instruction |= range;
6909 if (inst.operands[1].writeback)
6910 inst.instruction |= LDM_TYPE_2_OR_3;
6912 if (inst.operands[0].writeback)
6914 inst.instruction |= WRITE_BACK;
6915 /* Check for unpredictable uses of writeback. */
6916 if (inst.instruction & LOAD_BIT)
6918 /* Not allowed in LDM type 2. */
6919 if ((inst.instruction & LDM_TYPE_2_OR_3)
6920 && ((range & (1 << REG_PC)) == 0))
6921 as_warn (_("writeback of base register is UNPREDICTABLE"));
6922 /* Only allowed if base reg not in list for other types. */
6923 else if (range & (1 << base_reg))
6924 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
6926 else /* STM. */
6928 /* Not allowed for type 2. */
6929 if (inst.instruction & LDM_TYPE_2_OR_3)
6930 as_warn (_("writeback of base register is UNPREDICTABLE"));
6931 /* Only allowed if base reg not in list, or first in list. */
6932 else if ((range & (1 << base_reg))
6933 && (range & ((1 << base_reg) - 1)))
6934 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
6939 /* ARMv5TE load-consecutive (argument parse)
6940 Mode is like LDRH.
6942 LDRccD R, mode
6943 STRccD R, mode. */
6945 static void
6946 do_ldrd (void)
6948 constraint (inst.operands[0].reg % 2 != 0,
6949 _("first destination register must be even"));
6950 constraint (inst.operands[1].present
6951 && inst.operands[1].reg != inst.operands[0].reg + 1,
6952 _("can only load two consecutive registers"));
6953 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
6954 constraint (!inst.operands[2].isreg, _("'[' expected"));
6956 if (!inst.operands[1].present)
6957 inst.operands[1].reg = inst.operands[0].reg + 1;
6959 if (inst.instruction & LOAD_BIT)
6961 /* encode_arm_addr_mode_3 will diagnose overlap between the base
6962 register and the first register written; we have to diagnose
6963 overlap between the base and the second register written here. */
6965 if (inst.operands[2].reg == inst.operands[1].reg
6966 && (inst.operands[2].writeback || inst.operands[2].postind))
6967 as_warn (_("base register written back, and overlaps "
6968 "second destination register"));
6970 /* For an index-register load, the index register must not overlap the
6971 destination (even if not write-back). */
6972 else if (inst.operands[2].immisreg
6973 && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
6974 || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
6975 as_warn (_("index register overlaps destination register"));
6978 inst.instruction |= inst.operands[0].reg << 12;
6979 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
6982 static void
6983 do_ldrex (void)
6985 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
6986 || inst.operands[1].postind || inst.operands[1].writeback
6987 || inst.operands[1].immisreg || inst.operands[1].shifted
6988 || inst.operands[1].negative
6989 /* This can arise if the programmer has written
6990 strex rN, rM, foo
6991 or if they have mistakenly used a register name as the last
6992 operand, eg:
6993 strex rN, rM, rX
6994 It is very difficult to distinguish between these two cases
6995 because "rX" might actually be a label, i.e. the register
6996 name has been occluded by a symbol of the same name. So we
6997 just generate a general 'bad addressing mode' type error
6998 message and leave it up to the programmer to discover the
6999 true cause and fix their mistake. */
7000 || (inst.operands[1].reg == REG_PC),
7001 BAD_ADDR_MODE);
7003 constraint (inst.reloc.exp.X_op != O_constant
7004 || inst.reloc.exp.X_add_number != 0,
7005 _("offset must be zero in ARM encoding"));
7007 inst.instruction |= inst.operands[0].reg << 12;
7008 inst.instruction |= inst.operands[1].reg << 16;
7009 inst.reloc.type = BFD_RELOC_UNUSED;
7012 static void
7013 do_ldrexd (void)
7015 constraint (inst.operands[0].reg % 2 != 0,
7016 _("even register required"));
7017 constraint (inst.operands[1].present
7018 && inst.operands[1].reg != inst.operands[0].reg + 1,
7019 _("can only load two consecutive registers"));
7020 /* If op 1 were present and equal to PC, this function wouldn't
7021 have been called in the first place. */
7022 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
7024 inst.instruction |= inst.operands[0].reg << 12;
7025 inst.instruction |= inst.operands[2].reg << 16;
7028 static void
7029 do_ldst (void)
7031 inst.instruction |= inst.operands[0].reg << 12;
7032 if (!inst.operands[1].isreg)
7033 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/FALSE))
7034 return;
7035 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
7038 static void
7039 do_ldstt (void)
7041 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
7042 reject [Rn,...]. */
7043 if (inst.operands[1].preind)
7045 constraint (inst.reloc.exp.X_op != O_constant
7046 || inst.reloc.exp.X_add_number != 0,
7047 _("this instruction requires a post-indexed address"));
7049 inst.operands[1].preind = 0;
7050 inst.operands[1].postind = 1;
7051 inst.operands[1].writeback = 1;
7053 inst.instruction |= inst.operands[0].reg << 12;
7054 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
7057 /* Halfword and signed-byte load/store operations. */
7059 static void
7060 do_ldstv4 (void)
7062 inst.instruction |= inst.operands[0].reg << 12;
7063 if (!inst.operands[1].isreg)
7064 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/TRUE))
7065 return;
7066 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
7069 static void
7070 do_ldsttv4 (void)
7072 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
7073 reject [Rn,...]. */
7074 if (inst.operands[1].preind)
7076 constraint (inst.reloc.exp.X_op != O_constant
7077 || inst.reloc.exp.X_add_number != 0,
7078 _("this instruction requires a post-indexed address"));
7080 inst.operands[1].preind = 0;
7081 inst.operands[1].postind = 1;
7082 inst.operands[1].writeback = 1;
7084 inst.instruction |= inst.operands[0].reg << 12;
7085 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
7088 /* Co-processor register load/store.
7089 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
7090 static void
7091 do_lstc (void)
7093 inst.instruction |= inst.operands[0].reg << 8;
7094 inst.instruction |= inst.operands[1].reg << 12;
7095 encode_arm_cp_address (2, TRUE, TRUE, 0);
7098 static void
7099 do_mlas (void)
7101 /* This restriction does not apply to mls (nor to mla in v6 or later). */
7102 if (inst.operands[0].reg == inst.operands[1].reg
7103 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
7104 && !(inst.instruction & 0x00400000))
7105 as_tsktsk (_("Rd and Rm should be different in mla"));
7107 inst.instruction |= inst.operands[0].reg << 16;
7108 inst.instruction |= inst.operands[1].reg;
7109 inst.instruction |= inst.operands[2].reg << 8;
7110 inst.instruction |= inst.operands[3].reg << 12;
7113 static void
7114 do_mov (void)
7116 inst.instruction |= inst.operands[0].reg << 12;
7117 encode_arm_shifter_operand (1);
7120 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
7121 static void
7122 do_mov16 (void)
7124 bfd_vma imm;
7125 bfd_boolean top;
7127 top = (inst.instruction & 0x00400000) != 0;
7128 constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
7129 _(":lower16: not allowed this instruction"));
7130 constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
7131 _(":upper16: not allowed instruction"));
7132 inst.instruction |= inst.operands[0].reg << 12;
7133 if (inst.reloc.type == BFD_RELOC_UNUSED)
7135 imm = inst.reloc.exp.X_add_number;
7136 /* The value is in two pieces: 0:11, 16:19. */
7137 inst.instruction |= (imm & 0x00000fff);
7138 inst.instruction |= (imm & 0x0000f000) << 4;
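/* For example, "movw r0, #0x1234" puts 0x234 in bits 11:0 and 0x1 in
   bits 19:16.  */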
7142 static void do_vfp_nsyn_opcode (const char *);
7144 static int
7145 do_vfp_nsyn_mrs (void)
7147 if (inst.operands[0].isvec)
7149 if (inst.operands[1].reg != 1)
7150 first_error (_("operand 1 must be FPSCR"));
7151 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
7152 memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
7153 do_vfp_nsyn_opcode ("fmstat");
7155 else if (inst.operands[1].isvec)
7156 do_vfp_nsyn_opcode ("fmrx");
7157 else
7158 return FAIL;
7160 return SUCCESS;
7163 static int
7164 do_vfp_nsyn_msr (void)
7166 if (inst.operands[0].isvec)
7167 do_vfp_nsyn_opcode ("fmxr");
7168 else
7169 return FAIL;
7171 return SUCCESS;
7174 static void
7175 do_mrs (void)
7177 if (do_vfp_nsyn_mrs () == SUCCESS)
7178 return;
7180 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
7181 constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
7182 != (PSR_c|PSR_f),
7183 _("'CPSR' or 'SPSR' expected"));
7184 inst.instruction |= inst.operands[0].reg << 12;
7185 inst.instruction |= (inst.operands[1].imm & SPSR_BIT);
7188 /* Two possible forms:
7189 "{C|S}PSR_<field>, Rm",
7190 "{C|S}PSR_f, #expression". */
7192 static void
7193 do_msr (void)
7195 if (do_vfp_nsyn_msr () == SUCCESS)
7196 return;
7198 inst.instruction |= inst.operands[0].imm;
7199 if (inst.operands[1].isreg)
7200 inst.instruction |= inst.operands[1].reg;
7201 else
7203 inst.instruction |= INST_IMMEDIATE;
7204 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
7205 inst.reloc.pc_rel = 0;
7209 static void
7210 do_mul (void)
7212 if (!inst.operands[2].present)
7213 inst.operands[2].reg = inst.operands[0].reg;
7214 inst.instruction |= inst.operands[0].reg << 16;
7215 inst.instruction |= inst.operands[1].reg;
7216 inst.instruction |= inst.operands[2].reg << 8;
7218 if (inst.operands[0].reg == inst.operands[1].reg
7219 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
7220 as_tsktsk (_("Rd and Rm should be different in mul"));
7223 /* Long Multiply Parser
7224 UMULL RdLo, RdHi, Rm, Rs
7225 SMULL RdLo, RdHi, Rm, Rs
7226 UMLAL RdLo, RdHi, Rm, Rs
7227 SMLAL RdLo, RdHi, Rm, Rs. */
7229 static void
7230 do_mull (void)
7232 inst.instruction |= inst.operands[0].reg << 12;
7233 inst.instruction |= inst.operands[1].reg << 16;
7234 inst.instruction |= inst.operands[2].reg;
7235 inst.instruction |= inst.operands[3].reg << 8;
7237 /* rdhi and rdlo must be different. */
7238 if (inst.operands[0].reg == inst.operands[1].reg)
7239 as_tsktsk (_("rdhi and rdlo must be different"));
7241 /* rdhi, rdlo and rm must all be different before armv6. */
7242 if ((inst.operands[0].reg == inst.operands[2].reg
7243 || inst.operands[1].reg == inst.operands[2].reg)
7244 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
7245 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
7248 static void
7249 do_nop (void)
7251 if (inst.operands[0].present)
7253 /* Architectural NOP hints are CPSR sets with no bits selected. */
7254 inst.instruction &= 0xf0000000;
7255 inst.instruction |= 0x0320f000 + inst.operands[0].imm;
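/* The architectural hint numbers are NOP=0, YIELD=1, WFE=2, WFI=3 and
   SEV=4, so an operand of 4 (the SEV hint) yields 0x0320f004, with the
   condition preserved in bits 31:28.  */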
7259 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
7260 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
7261 Condition defaults to COND_ALWAYS.
7262 Error if Rd, Rn or Rm are R15. */
7264 static void
7265 do_pkhbt (void)
7267 inst.instruction |= inst.operands[0].reg << 12;
7268 inst.instruction |= inst.operands[1].reg << 16;
7269 inst.instruction |= inst.operands[2].reg;
7270 if (inst.operands[3].present)
7271 encode_arm_shift (3);
7274 /* ARM V6 PKHTB (Argument Parse). */
7276 static void
7277 do_pkhtb (void)
7279 if (!inst.operands[3].present)
7281 /* If the shift specifier is omitted, turn the instruction
7282 into pkhbt rd, rm, rn. */
7283 inst.instruction &= 0xfff00010;
7284 inst.instruction |= inst.operands[0].reg << 12;
7285 inst.instruction |= inst.operands[1].reg;
7286 inst.instruction |= inst.operands[2].reg << 16;
7288 else
7290 inst.instruction |= inst.operands[0].reg << 12;
7291 inst.instruction |= inst.operands[1].reg << 16;
7292 inst.instruction |= inst.operands[2].reg;
7293 encode_arm_shift (3);
7297 /* ARMv5TE: Preload-Cache
7299 PLD <addr_mode>
7301 Syntactically, like LDR with B=1, W=0, L=1. */
7303 static void
7304 do_pld (void)
7306 constraint (!inst.operands[0].isreg,
7307 _("'[' expected after PLD mnemonic"));
7308 constraint (inst.operands[0].postind,
7309 _("post-indexed expression used in preload instruction"));
7310 constraint (inst.operands[0].writeback,
7311 _("writeback used in preload instruction"));
7312 constraint (!inst.operands[0].preind,
7313 _("unindexed addressing used in preload instruction"));
7314 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
7317 /* ARMv7: PLI <addr_mode> */
7318 static void
7319 do_pli (void)
7321 constraint (!inst.operands[0].isreg,
7322 _("'[' expected after PLI mnemonic"));
7323 constraint (inst.operands[0].postind,
7324 _("post-indexed expression used in preload instruction"));
7325 constraint (inst.operands[0].writeback,
7326 _("writeback used in preload instruction"));
7327 constraint (!inst.operands[0].preind,
7328 _("unindexed addressing used in preload instruction"));
7329 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
7330 inst.instruction &= ~PRE_INDEX;
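/* The PLI encoding has bit 24 clear where PLD has it set, so the P bit
   added by encode_arm_addr_mode_2 is stripped again here.  */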
7333 static void
7334 do_push_pop (void)
7336 inst.operands[1] = inst.operands[0];
7337 memset (&inst.operands[0], 0, sizeof inst.operands[0]);
7338 inst.operands[0].isreg = 1;
7339 inst.operands[0].writeback = 1;
7340 inst.operands[0].reg = REG_SP;
7341 do_ldmstm ();
7344 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
7345 word at the specified address and the following word
7346 respectively.
7347 Unconditionally executed.
7348 Error if Rn is R15. */
7350 static void
7351 do_rfe (void)
7353 inst.instruction |= inst.operands[0].reg << 16;
7354 if (inst.operands[0].writeback)
7355 inst.instruction |= WRITE_BACK;
7358 /* ARM V6 ssat (argument parse). */
7360 static void
7361 do_ssat (void)
7363 inst.instruction |= inst.operands[0].reg << 12;
7364 inst.instruction |= (inst.operands[1].imm - 1) << 16;
7365 inst.instruction |= inst.operands[2].reg;
7367 if (inst.operands[3].present)
7368 encode_arm_shift (3);
7371 /* ARM V6 usat (argument parse). */
7373 static void
7374 do_usat (void)
7376 inst.instruction |= inst.operands[0].reg << 12;
7377 inst.instruction |= inst.operands[1].imm << 16;
7378 inst.instruction |= inst.operands[2].reg;
7380 if (inst.operands[3].present)
7381 encode_arm_shift (3);
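/* Note the asymmetry with do_ssat above: SSAT's saturation operand is
   1..32 and is encoded as the value minus one, whereas USAT's is 0..31
   and is encoded unchanged.  */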
7384 /* ARM V6 ssat16 (argument parse). */
7386 static void
7387 do_ssat16 (void)
7389 inst.instruction |= inst.operands[0].reg << 12;
7390 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
7391 inst.instruction |= inst.operands[2].reg;
7394 static void
7395 do_usat16 (void)
7397 inst.instruction |= inst.operands[0].reg << 12;
7398 inst.instruction |= inst.operands[1].imm << 16;
7399 inst.instruction |= inst.operands[2].reg;
7402 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
7403 preserving the other bits.
7405 setend <endian_specifier>, where <endian_specifier> is either
7406 BE or LE. */
7408 static void
7409 do_setend (void)
7411 if (inst.operands[0].imm)
7412 inst.instruction |= 0x200;
7415 static void
7416 do_shift (void)
7418 unsigned int Rm = (inst.operands[1].present
7419 ? inst.operands[1].reg
7420 : inst.operands[0].reg);
7422 inst.instruction |= inst.operands[0].reg << 12;
7423 inst.instruction |= Rm;
7424 if (inst.operands[2].isreg) /* Rd, {Rm,} Rs */
7426 inst.instruction |= inst.operands[2].reg << 8;
7427 inst.instruction |= SHIFT_BY_REG;
7429 else
7430 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7433 static void
7434 do_smc (void)
7436 inst.reloc.type = BFD_RELOC_ARM_SMC;
7437 inst.reloc.pc_rel = 0;
7440 static void
7441 do_swi (void)
7443 inst.reloc.type = BFD_RELOC_ARM_SWI;
7444 inst.reloc.pc_rel = 0;
7447 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
7448 SMLAxy{cond} Rd,Rm,Rs,Rn
7449 SMLAWy{cond} Rd,Rm,Rs,Rn
7450 Error if any register is R15. */
7452 static void
7453 do_smla (void)
7455 inst.instruction |= inst.operands[0].reg << 16;
7456 inst.instruction |= inst.operands[1].reg;
7457 inst.instruction |= inst.operands[2].reg << 8;
7458 inst.instruction |= inst.operands[3].reg << 12;
7461 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
7462 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
7463 Error if any register is R15.
7464 Warning if Rdlo == Rdhi. */
7466 static void
7467 do_smlal (void)
7469 inst.instruction |= inst.operands[0].reg << 12;
7470 inst.instruction |= inst.operands[1].reg << 16;
7471 inst.instruction |= inst.operands[2].reg;
7472 inst.instruction |= inst.operands[3].reg << 8;
7474 if (inst.operands[0].reg == inst.operands[1].reg)
7475 as_tsktsk (_("rdhi and rdlo must be different"));
7478 /* ARM V5E (El Segundo) signed-multiply (argument parse)
7479 SMULxy{cond} Rd,Rm,Rs
7480 Error if any register is R15. */
7482 static void
7483 do_smul (void)
7485 inst.instruction |= inst.operands[0].reg << 16;
7486 inst.instruction |= inst.operands[1].reg;
7487 inst.instruction |= inst.operands[2].reg << 8;
7490 /* ARM V6 srs (argument parse). The variable fields in the encoding are
7491 the same for both ARM and Thumb-2. */
7493 static void
7494 do_srs (void)
7496 int reg;
7498 if (inst.operands[0].present)
7500 reg = inst.operands[0].reg;
7501 constraint (reg != 13, _("SRS base register must be r13"));
7503 else
7504 reg = 13;
7506 inst.instruction |= reg << 16;
7507 inst.instruction |= inst.operands[1].imm;
7508 if (inst.operands[0].writeback || inst.operands[1].writeback)
7509 inst.instruction |= WRITE_BACK;
7512 /* ARM V6 strex (argument parse). */
7514 static void
7515 do_strex (void)
7517 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
7518 || inst.operands[2].postind || inst.operands[2].writeback
7519 || inst.operands[2].immisreg || inst.operands[2].shifted
7520 || inst.operands[2].negative
7521 /* See comment in do_ldrex(). */
7522 || (inst.operands[2].reg == REG_PC),
7523 BAD_ADDR_MODE);
7525 constraint (inst.operands[0].reg == inst.operands[1].reg
7526 || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
7528 constraint (inst.reloc.exp.X_op != O_constant
7529 || inst.reloc.exp.X_add_number != 0,
7530 _("offset must be zero in ARM encoding"));
7532 inst.instruction |= inst.operands[0].reg << 12;
7533 inst.instruction |= inst.operands[1].reg;
7534 inst.instruction |= inst.operands[2].reg << 16;
7535 inst.reloc.type = BFD_RELOC_UNUSED;
7538 static void
7539 do_strexd (void)
7541 constraint (inst.operands[1].reg % 2 != 0,
7542 _("even register required"));
7543 constraint (inst.operands[2].present
7544 && inst.operands[2].reg != inst.operands[1].reg + 1,
7545 _("can only store two consecutive registers"));
7546 /* If op 2 were present and equal to PC, this function wouldn't
7547 have been called in the first place. */
7548 constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));
7550 constraint (inst.operands[0].reg == inst.operands[1].reg
7551 || inst.operands[0].reg == inst.operands[1].reg + 1
7552 || inst.operands[0].reg == inst.operands[3].reg,
7553 BAD_OVERLAP);
7555 inst.instruction |= inst.operands[0].reg << 12;
7556 inst.instruction |= inst.operands[1].reg;
7557 inst.instruction |= inst.operands[3].reg << 16;
7560 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
7561 extends it to 32-bits, and adds the result to a value in another
7562 register. You can specify a rotation by 0, 8, 16, or 24 bits
7563 before extracting the 16-bit value.
7564 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
7565 Condition defaults to COND_ALWAYS.
7566 Error if any register uses R15. */
7568 static void
7569 do_sxtah (void)
7571 inst.instruction |= inst.operands[0].reg << 12;
7572 inst.instruction |= inst.operands[1].reg << 16;
7573 inst.instruction |= inst.operands[2].reg;
7574 inst.instruction |= inst.operands[3].imm << 10;
7577 /* ARM V6 SXTH.
7579 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
7580 Condition defaults to COND_ALWAYS.
7581 Error if any register uses R15. */
7583 static void
7584 do_sxth (void)
7586 inst.instruction |= inst.operands[0].reg << 12;
7587 inst.instruction |= inst.operands[1].reg;
7588 inst.instruction |= inst.operands[2].imm << 10;
7591 /* VFP instructions. In a logical order: SP variant first, monad
7592 before dyad, arithmetic then move then load/store. */
7594 static void
7595 do_vfp_sp_monadic (void)
7597 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7598 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
7601 static void
7602 do_vfp_sp_dyadic (void)
7604 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7605 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
7606 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
7609 static void
7610 do_vfp_sp_compare_z (void)
7612 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7615 static void
7616 do_vfp_dp_sp_cvt (void)
7618 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7619 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
7622 static void
7623 do_vfp_sp_dp_cvt (void)
7625 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7626 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
7629 static void
7630 do_vfp_reg_from_sp (void)
7632 inst.instruction |= inst.operands[0].reg << 12;
7633 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
7636 static void
7637 do_vfp_reg2_from_sp2 (void)
7639 constraint (inst.operands[2].imm != 2,
7640 _("only two consecutive VFP SP registers allowed here"));
7641 inst.instruction |= inst.operands[0].reg << 12;
7642 inst.instruction |= inst.operands[1].reg << 16;
7643 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
7646 static void
7647 do_vfp_sp_from_reg (void)
7649 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
7650 inst.instruction |= inst.operands[1].reg << 12;
7653 static void
7654 do_vfp_sp2_from_reg2 (void)
7656 constraint (inst.operands[0].imm != 2,
7657 _("only two consecutive VFP SP registers allowed here"));
7658 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
7659 inst.instruction |= inst.operands[1].reg << 12;
7660 inst.instruction |= inst.operands[2].reg << 16;
7663 static void
7664 do_vfp_sp_ldst (void)
7666 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7667 encode_arm_cp_address (1, FALSE, TRUE, 0);
7670 static void
7671 do_vfp_dp_ldst (void)
7673 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7674 encode_arm_cp_address (1, FALSE, TRUE, 0);
7678 static void
7679 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
7681 if (inst.operands[0].writeback)
7682 inst.instruction |= WRITE_BACK;
7683 else
7684 constraint (ldstm_type != VFP_LDSTMIA,
7685 _("this addressing mode requires base-register writeback"));
7686 inst.instruction |= inst.operands[0].reg << 16;
7687 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
7688 inst.instruction |= inst.operands[1].imm;
7691 static void
7692 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
7694 int count;
7696 if (inst.operands[0].writeback)
7697 inst.instruction |= WRITE_BACK;
7698 else
7699 constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
7700 _("this addressing mode requires base-register writeback"));
7702 inst.instruction |= inst.operands[0].reg << 16;
7703 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7705 count = inst.operands[1].imm << 1;
7706 if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
7707 count += 1;
7709 inst.instruction |= count;
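/* Each D register occupies two words, and the FLDMX/FSTMX forms add
   one more, so e.g. a transfer of d0-d3 encodes a count of 8 for the
   plain forms and 9 for the X forms.  */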
7712 static void
7713 do_vfp_sp_ldstmia (void)
7715 vfp_sp_ldstm (VFP_LDSTMIA);
7718 static void
7719 do_vfp_sp_ldstmdb (void)
7721 vfp_sp_ldstm (VFP_LDSTMDB);
7724 static void
7725 do_vfp_dp_ldstmia (void)
7727 vfp_dp_ldstm (VFP_LDSTMIA);
7730 static void
7731 do_vfp_dp_ldstmdb (void)
7733 vfp_dp_ldstm (VFP_LDSTMDB);
7736 static void
7737 do_vfp_xp_ldstmia (void)
7739 vfp_dp_ldstm (VFP_LDSTMIAX);
7742 static void
7743 do_vfp_xp_ldstmdb (void)
7745 vfp_dp_ldstm (VFP_LDSTMDBX);
7748 static void
7749 do_vfp_dp_rd_rm (void)
7751 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7752 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
7755 static void
7756 do_vfp_dp_rn_rd (void)
7758 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
7759 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7762 static void
7763 do_vfp_dp_rd_rn (void)
7765 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7766 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
7769 static void
7770 do_vfp_dp_rd_rn_rm (void)
7772 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7773 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
7774 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
7777 static void
7778 do_vfp_dp_rd (void)
7780 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7783 static void
7784 do_vfp_dp_rm_rd_rn (void)
7786 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
7787 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7788 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
7791 /* VFPv3 instructions. */
7792 static void
7793 do_vfp_sp_const (void)
7795 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7796 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
7797 inst.instruction |= (inst.operands[1].imm & 0x0f);
7800 static void
7801 do_vfp_dp_const (void)
7803 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7804 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
7805 inst.instruction |= (inst.operands[1].imm & 0x0f);
7808 static void
7809 vfp_conv (int srcsize)
7811 unsigned immbits = srcsize - inst.operands[1].imm;
7812 inst.instruction |= (immbits & 1) << 5;
7813 inst.instruction |= (immbits >> 1);
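/* The encoded value is srcsize minus the number of fraction bits,
   split so that its LSB lands in opcode bit 5 and the remaining bits
   in bits 3:0; e.g. a 16-bit conversion with 7 fraction bits gives
   immbits = 9, i.e. bit 5 set and 4 in bits 3:0.  */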
7816 static void
7817 do_vfp_sp_conv_16 (void)
7819 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7820 vfp_conv (16);
7823 static void
7824 do_vfp_dp_conv_16 (void)
7826 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7827 vfp_conv (16);
7830 static void
7831 do_vfp_sp_conv_32 (void)
7833 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7834 vfp_conv (32);
7837 static void
7838 do_vfp_dp_conv_32 (void)
7840 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7841 vfp_conv (32);
7844 /* FPA instructions. Also in a logical order. */
7846 static void
7847 do_fpa_cmp (void)
7849 inst.instruction |= inst.operands[0].reg << 16;
7850 inst.instruction |= inst.operands[1].reg;
7853 static void
7854 do_fpa_ldmstm (void)
7856 inst.instruction |= inst.operands[0].reg << 12;
7857 switch (inst.operands[1].imm)
7859 case 1: inst.instruction |= CP_T_X; break;
7860 case 2: inst.instruction |= CP_T_Y; break;
7861 case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
7862 case 4: break;
7863 default: abort ();
7866 if (inst.instruction & (PRE_INDEX | INDEX_UP))
7868 /* The instruction specified "ea" or "fd", so we can only accept
7869 [Rn]{!}. The instruction does not really support stacking or
7870 unstacking, so we have to emulate these by setting appropriate
7871 bits and offsets. */
7872 constraint (inst.reloc.exp.X_op != O_constant
7873 || inst.reloc.exp.X_add_number != 0,
7874 _("this instruction does not support indexing"));
7876 if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
7877 inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;
7879 if (!(inst.instruction & INDEX_UP))
7880 inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;
7882 if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
7884 inst.operands[2].preind = 0;
7885 inst.operands[2].postind = 1;
7889 encode_arm_cp_address (2, TRUE, TRUE, 0);
7892 /* iWMMXt instructions: strictly in alphabetical order. */
7894 static void
7895 do_iwmmxt_tandorc (void)
7897 constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
7900 static void
7901 do_iwmmxt_textrc (void)
7903 inst.instruction |= inst.operands[0].reg << 12;
7904 inst.instruction |= inst.operands[1].imm;
7907 static void
7908 do_iwmmxt_textrm (void)
7910 inst.instruction |= inst.operands[0].reg << 12;
7911 inst.instruction |= inst.operands[1].reg << 16;
7912 inst.instruction |= inst.operands[2].imm;
7915 static void
7916 do_iwmmxt_tinsr (void)
7918 inst.instruction |= inst.operands[0].reg << 16;
7919 inst.instruction |= inst.operands[1].reg << 12;
7920 inst.instruction |= inst.operands[2].imm;
7923 static void
7924 do_iwmmxt_tmia (void)
7926 inst.instruction |= inst.operands[0].reg << 5;
7927 inst.instruction |= inst.operands[1].reg;
7928 inst.instruction |= inst.operands[2].reg << 12;
7931 static void
7932 do_iwmmxt_waligni (void)
7934 inst.instruction |= inst.operands[0].reg << 12;
7935 inst.instruction |= inst.operands[1].reg << 16;
7936 inst.instruction |= inst.operands[2].reg;
7937 inst.instruction |= inst.operands[3].imm << 20;
7940 static void
7941 do_iwmmxt_wmerge (void)
7943 inst.instruction |= inst.operands[0].reg << 12;
7944 inst.instruction |= inst.operands[1].reg << 16;
7945 inst.instruction |= inst.operands[2].reg;
7946 inst.instruction |= inst.operands[3].imm << 21;
7949 static void
7950 do_iwmmxt_wmov (void)
7952 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
7953 inst.instruction |= inst.operands[0].reg << 12;
7954 inst.instruction |= inst.operands[1].reg << 16;
7955 inst.instruction |= inst.operands[1].reg;
7958 static void
7959 do_iwmmxt_wldstbh (void)
7961 int reloc;
7962 inst.instruction |= inst.operands[0].reg << 12;
7963 if (thumb_mode)
7964 reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
7965 else
7966 reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
7967 encode_arm_cp_address (1, TRUE, FALSE, reloc);
7970 static void
7971 do_iwmmxt_wldstw (void)
7973 /* RIWR_RIWC clears .isreg for a control register. */
7974 if (!inst.operands[0].isreg)
7976 constraint (inst.cond != COND_ALWAYS, BAD_COND);
7977 inst.instruction |= 0xf0000000;
7980 inst.instruction |= inst.operands[0].reg << 12;
7981 encode_arm_cp_address (1, TRUE, TRUE, 0);
7984 static void
7985 do_iwmmxt_wldstd (void)
7987 inst.instruction |= inst.operands[0].reg << 12;
7988 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
7989 && inst.operands[1].immisreg)
7991 inst.instruction &= ~0x1a000ff;
7992 inst.instruction |= (0xf << 28);
7993 if (inst.operands[1].preind)
7994 inst.instruction |= PRE_INDEX;
7995 if (!inst.operands[1].negative)
7996 inst.instruction |= INDEX_UP;
7997 if (inst.operands[1].writeback)
7998 inst.instruction |= WRITE_BACK;
7999 inst.instruction |= inst.operands[1].reg << 16;
8000 inst.instruction |= inst.reloc.exp.X_add_number << 4;
8001 inst.instruction |= inst.operands[1].imm;
8003 else
8004 encode_arm_cp_address (1, TRUE, FALSE, 0);
8007 static void
8008 do_iwmmxt_wshufh (void)
8010 inst.instruction |= inst.operands[0].reg << 12;
8011 inst.instruction |= inst.operands[1].reg << 16;
8012 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
8013 inst.instruction |= (inst.operands[2].imm & 0x0f);
8016 static void
8017 do_iwmmxt_wzero (void)
8019 /* WZERO reg is an alias for WANDN reg, reg, reg. */
8020 inst.instruction |= inst.operands[0].reg;
8021 inst.instruction |= inst.operands[0].reg << 12;
8022 inst.instruction |= inst.operands[0].reg << 16;
8025 static void
8026 do_iwmmxt_wrwrwr_or_imm5 (void)
8028 if (inst.operands[2].isreg)
8029 do_rd_rn_rm ();
8030 else {
8031 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
8032 _("immediate operand requires iWMMXt2"));
8033 do_rd_rn ();
8034 if (inst.operands[2].imm == 0)
8036 switch ((inst.instruction >> 20) & 0xf)
8038 case 4:
8039 case 5:
8040 case 6:
8041 case 7:
8042 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
8043 inst.operands[2].imm = 16;
8044 inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
8045 break;
8046 case 8:
8047 case 9:
8048 case 10:
8049 case 11:
8050 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
8051 inst.operands[2].imm = 32;
8052 inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
8053 break;
8054 case 12:
8055 case 13:
8056 case 14:
8057 case 15:
8059 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
8060 unsigned long wrn;
8061 wrn = (inst.instruction >> 16) & 0xf;
8062 inst.instruction &= 0xff0fff0f;
8063 inst.instruction |= wrn;
8064 /* Bail out here; the instruction is now assembled. */
8065 return;
8069 /* Map 32 -> 0, etc. */
8070 inst.operands[2].imm &= 0x1f;
8071 inst.instruction |= (0xf << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
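/* The 5-bit shift count is scattered: bit 4 goes to opcode bit 8 and
   bits 3:0 stay in bits 3:0, so e.g. a count of 20 sets bit 8 and puts
   4 in the low nibble.  */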
8075 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
8076 operations first, then control, shift, and load/store. */
8078 /* Insns like "foo X,Y,Z". */
8080 static void
8081 do_mav_triple (void)
8083 inst.instruction |= inst.operands[0].reg << 16;
8084 inst.instruction |= inst.operands[1].reg;
8085 inst.instruction |= inst.operands[2].reg << 12;
8088 /* Insns like "foo W,X,Y,Z".
8089 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
8091 static void
8092 do_mav_quad (void)
8094 inst.instruction |= inst.operands[0].reg << 5;
8095 inst.instruction |= inst.operands[1].reg << 12;
8096 inst.instruction |= inst.operands[2].reg << 16;
8097 inst.instruction |= inst.operands[3].reg;
8100 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
8101 static void
8102 do_mav_dspsc (void)
8104 inst.instruction |= inst.operands[1].reg << 12;
8107 /* Maverick shift immediate instructions.
8108 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
8109 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
8111 static void
8112 do_mav_shift (void)
8114 int imm = inst.operands[2].imm;
8116 inst.instruction |= inst.operands[0].reg << 12;
8117 inst.instruction |= inst.operands[1].reg << 16;
8119 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
8120 Bits 5-7 of the insn should have bits 4-6 of the immediate.
8121 Bit 4 should be 0. */
8122 imm = (imm & 0xf) | ((imm & 0x70) << 1);
8124 inst.instruction |= imm;
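/* For example, a shift amount of 0x25 encodes as 0x45: the low nibble
   (5) stays in bits 3:0 and immediate bits 6:4 (2) move up to
   bits 7:5.  */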
8127 /* XScale instructions. Also sorted arithmetic before move. */
8129 /* Xscale multiply-accumulate (argument parse)
8130 MIAcc acc0,Rm,Rs
8131 MIAPHcc acc0,Rm,Rs
8132 MIAxycc acc0,Rm,Rs. */
8134 static void
8135 do_xsc_mia (void)
8137 inst.instruction |= inst.operands[1].reg;
8138 inst.instruction |= inst.operands[2].reg << 12;
8141 /* Xscale move-accumulator-register (argument parse)
8143 MARcc acc0,RdLo,RdHi. */
8145 static void
8146 do_xsc_mar (void)
8148 inst.instruction |= inst.operands[1].reg << 12;
8149 inst.instruction |= inst.operands[2].reg << 16;
8152 /* Xscale move-register-accumulator (argument parse)
8154 MRAcc RdLo,RdHi,acc0. */
8156 static void
8157 do_xsc_mra (void)
8159 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
8160 inst.instruction |= inst.operands[0].reg << 12;
8161 inst.instruction |= inst.operands[1].reg << 16;
8164 /* Encoding functions relevant only to Thumb. */
8166 /* inst.operands[i] is a shifted-register operand; encode
8167 it into inst.instruction in the format used by Thumb32. */
8169 static void
8170 encode_thumb32_shifted_operand (int i)
8172 unsigned int value = inst.reloc.exp.X_add_number;
8173 unsigned int shift = inst.operands[i].shift_kind;
8175 constraint (inst.operands[i].immisreg,
8176 _("shift by register not allowed in thumb mode"));
8177 inst.instruction |= inst.operands[i].reg;
8178 if (shift == SHIFT_RRX)
8179 inst.instruction |= SHIFT_ROR << 4;
8180 else
8182 constraint (inst.reloc.exp.X_op != O_constant,
8183 _("expression too complex"));
8185 constraint (value > 32
8186 || (value == 32 && (shift == SHIFT_LSL
8187 || shift == SHIFT_ROR)),
8188 _("shift expression is too large"));
8190 if (value == 0)
8191 shift = SHIFT_LSL;
8192 else if (value == 32)
8193 value = 0;
8195 inst.instruction |= shift << 4;
8196 inst.instruction |= (value & 0x1c) << 10;
8197 inst.instruction |= (value & 0x03) << 6;
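/* The immediate shift amount is split Thumb-2 style: value bits 4:2
   form imm3 (opcode bits 14:12) and bits 1:0 form imm2 (bits 7:6),
   with the shift type in bits 5:4; e.g. LSR #5 gives imm3 = 1 and
   imm2 = 1.  */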
8202 /* inst.operands[i] was set up by parse_address. Encode it into a
8203 Thumb32 format load or store instruction. Reject forms that cannot
8204 be used with such instructions. If is_t is true, reject forms that
8205 cannot be used with a T instruction; if is_d is true, reject forms
8206 that cannot be used with a D instruction. */
8208 static void
8209 encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
8211 bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
8213 constraint (!inst.operands[i].isreg,
8214 _("Instruction does not support =N addresses"));
8216 inst.instruction |= inst.operands[i].reg << 16;
8217 if (inst.operands[i].immisreg)
8219 constraint (is_pc, _("cannot use register index with PC-relative addressing"));
8220 constraint (is_t || is_d, _("cannot use register index with this instruction"));
8221 constraint (inst.operands[i].negative,
8222 _("Thumb does not support negative register indexing"));
8223 constraint (inst.operands[i].postind,
8224 _("Thumb does not support register post-indexing"));
8225 constraint (inst.operands[i].writeback,
8226 _("Thumb does not support register indexing with writeback"));
8227 constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
8228 _("Thumb supports only LSL in shifted register indexing"));
8230 inst.instruction |= inst.operands[i].imm;
8231 if (inst.operands[i].shifted)
8233 constraint (inst.reloc.exp.X_op != O_constant,
8234 _("expression too complex"));
8235 constraint (inst.reloc.exp.X_add_number < 0
8236 || inst.reloc.exp.X_add_number > 3,
8237 _("shift out of range"));
8238 inst.instruction |= inst.reloc.exp.X_add_number << 4;
8240 inst.reloc.type = BFD_RELOC_UNUSED;
8242 else if (inst.operands[i].preind)
8244 constraint (is_pc && inst.operands[i].writeback,
8245 _("cannot use writeback with PC-relative addressing"));
8246 constraint (is_t && inst.operands[i].writeback,
8247 _("cannot use writeback with this instruction"));
8249 if (is_d)
8251 inst.instruction |= 0x01000000;
8252 if (inst.operands[i].writeback)
8253 inst.instruction |= 0x00200000;
8255 else
8257 inst.instruction |= 0x00000c00;
8258 if (inst.operands[i].writeback)
8259 inst.instruction |= 0x00000100;
8261 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
8263 else if (inst.operands[i].postind)
8265 assert (inst.operands[i].writeback);
8266 constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
8267 constraint (is_t, _("cannot use post-indexing with this instruction"));
8269 if (is_d)
8270 inst.instruction |= 0x00200000;
8271 else
8272 inst.instruction |= 0x00000900;
8273 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
8275 else /* unindexed - only for coprocessor */
8276 inst.error = _("instruction does not accept unindexed addressing");
8279 /* Table of Thumb instructions which exist in both 16- and 32-bit
8280 encodings (the latter only in post-V6T2 cores). The index is the
8281 value used in the insns table below. When there is more than one
8282 possible 16-bit encoding for the instruction, this table always
8283 holds variant (1).
8284 Also contains several pseudo-instructions used during relaxation. */
8285 #define T16_32_TAB \
8286 X(adc, 4140, eb400000), \
8287 X(adcs, 4140, eb500000), \
8288 X(add, 1c00, eb000000), \
8289 X(adds, 1c00, eb100000), \
8290 X(addi, 0000, f1000000), \
8291 X(addis, 0000, f1100000), \
8292 X(add_pc,000f, f20f0000), \
8293 X(add_sp,000d, f10d0000), \
8294 X(adr, 000f, f20f0000), \
8295 X(and, 4000, ea000000), \
8296 X(ands, 4000, ea100000), \
8297 X(asr, 1000, fa40f000), \
8298 X(asrs, 1000, fa50f000), \
8299 X(b, e000, f000b000), \
8300 X(bcond, d000, f0008000), \
8301 X(bic, 4380, ea200000), \
8302 X(bics, 4380, ea300000), \
8303 X(cmn, 42c0, eb100f00), \
8304 X(cmp, 2800, ebb00f00), \
8305 X(cpsie, b660, f3af8400), \
8306 X(cpsid, b670, f3af8600), \
8307 X(cpy, 4600, ea4f0000), \
8308 X(dec_sp,80dd, f1ad0d00), \
8309 X(eor, 4040, ea800000), \
8310 X(eors, 4040, ea900000), \
8311 X(inc_sp,00dd, f10d0d00), \
8312 X(ldmia, c800, e8900000), \
8313 X(ldr, 6800, f8500000), \
8314 X(ldrb, 7800, f8100000), \
8315 X(ldrh, 8800, f8300000), \
8316 X(ldrsb, 5600, f9100000), \
8317 X(ldrsh, 5e00, f9300000), \
8318 X(ldr_pc,4800, f85f0000), \
8319 X(ldr_pc2,4800, f85f0000), \
8320 X(ldr_sp,9800, f85d0000), \
8321 X(lsl, 0000, fa00f000), \
8322 X(lsls, 0000, fa10f000), \
8323 X(lsr, 0800, fa20f000), \
8324 X(lsrs, 0800, fa30f000), \
8325 X(mov, 2000, ea4f0000), \
8326 X(movs, 2000, ea5f0000), \
8327 X(mul, 4340, fb00f000), \
8328 X(muls, 4340, ffffffff), /* no 32b muls */ \
8329 X(mvn, 43c0, ea6f0000), \
8330 X(mvns, 43c0, ea7f0000), \
8331 X(neg, 4240, f1c00000), /* rsb #0 */ \
8332 X(negs, 4240, f1d00000), /* rsbs #0 */ \
8333 X(orr, 4300, ea400000), \
8334 X(orrs, 4300, ea500000), \
8335 X(pop, bc00, e8bd0000), /* ldmia sp!,... */ \
8336 X(push, b400, e92d0000), /* stmdb sp!,... */ \
8337 X(rev, ba00, fa90f080), \
8338 X(rev16, ba40, fa90f090), \
8339 X(revsh, bac0, fa90f0b0), \
8340 X(ror, 41c0, fa60f000), \
8341 X(rors, 41c0, fa70f000), \
8342 X(sbc, 4180, eb600000), \
8343 X(sbcs, 4180, eb700000), \
8344 X(stmia, c000, e8800000), \
8345 X(str, 6000, f8400000), \
8346 X(strb, 7000, f8000000), \
8347 X(strh, 8000, f8200000), \
8348 X(str_sp,9000, f84d0000), \
8349 X(sub, 1e00, eba00000), \
8350 X(subs, 1e00, ebb00000), \
8351 X(subi, 8000, f1a00000), \
8352 X(subis, 8000, f1b00000), \
8353 X(sxtb, b240, fa4ff080), \
8354 X(sxth, b200, fa0ff080), \
8355 X(tst, 4200, ea100f00), \
8356 X(uxtb, b2c0, fa5ff080), \
8357 X(uxth, b280, fa1ff080), \
8358 X(nop, bf00, f3af8000), \
8359 X(yield, bf10, f3af8001), \
8360 X(wfe, bf20, f3af8002), \
8361 X(wfi, bf30, f3af8003), \
8362 X(sev, bf40, f3af8004),
8364 /* To catch errors in encoding functions, the codes are all offset by
8365 0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
8366 as 16-bit instructions. */
8367 #define X(a,b,c) T_MNEM_##a
8368 enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
8369 #undef X
8371 #define X(a,b,c) 0x##b
8372 static const unsigned short thumb_op16[] = { T16_32_TAB };
8373 #define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
8374 #undef X
8376 #define X(a,b,c) 0x##c
8377 static const unsigned int thumb_op32[] = { T16_32_TAB };
8378 #define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
8379 #define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
8380 #undef X
8381 #undef T16_32_TAB
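/* The X macro above is expanded three times: once to build the T_MNEM_
   enumeration, once for thumb_op16[] and once for thumb_op32[].  For
   example, the "adc" entry yields T_MNEM_adc, 0x4140 and 0xeb400000
   respectively.  */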
8383 /* Thumb instruction encoders, in alphabetical order. */
8385 /* ADDW or SUBW. */
8386 static void
8387 do_t_add_sub_w (void)
8389 int Rd, Rn;
8391 Rd = inst.operands[0].reg;
8392 Rn = inst.operands[1].reg;
8394 constraint (Rd == 15, _("PC not allowed as destination"));
8395 inst.instruction |= (Rn << 16) | (Rd << 8);
8396 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
8399 /* Parse an add or subtract instruction. We get here with inst.instruction
8400 equalling any of T_MNEM_add, adds, sub, or subs. */
8402 static void
8403 do_t_add_sub (void)
8405 int Rd, Rs, Rn;
8407 Rd = inst.operands[0].reg;
8408 Rs = (inst.operands[1].present
8409 ? inst.operands[1].reg /* Rd, Rs, foo */
8410 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
8412 if (unified_syntax)
8414 bfd_boolean flags;
8415 bfd_boolean narrow;
8416 int opcode;
8418 flags = (inst.instruction == T_MNEM_adds
8419 || inst.instruction == T_MNEM_subs);
8420 if (flags)
8421 narrow = (current_it_mask == 0);
8422 else
8423 narrow = (current_it_mask != 0);
8424 if (!inst.operands[2].isreg)
8426 int add;
8428 add = (inst.instruction == T_MNEM_add
8429 || inst.instruction == T_MNEM_adds);
8430 opcode = 0;
8431 if (inst.size_req != 4)
8433 /* Attempt to use a narrow opcode, with relaxation if
8434 appropriate. */
8435 if (Rd == REG_SP && Rs == REG_SP && !flags)
8436 opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
8437 else if (Rd <= 7 && Rs == REG_SP && add && !flags)
8438 opcode = T_MNEM_add_sp;
8439 else if (Rd <= 7 && Rs == REG_PC && add && !flags)
8440 opcode = T_MNEM_add_pc;
8441 else if (Rd <= 7 && Rs <= 7 && narrow)
8443 if (flags)
8444 opcode = add ? T_MNEM_addis : T_MNEM_subis;
8445 else
8446 opcode = add ? T_MNEM_addi : T_MNEM_subi;
8448 if (opcode)
8450 inst.instruction = THUMB_OP16(opcode);
8451 inst.instruction |= (Rd << 4) | Rs;
8452 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
8453 if (inst.size_req != 2)
8454 inst.relax = opcode;
8456 else
8457 constraint (inst.size_req == 2, BAD_HIREG);
8459 if (inst.size_req == 4
8460 || (inst.size_req != 2 && !opcode))
8462 if (Rd == REG_PC)
8464 constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
8465 _("only SUBS PC, LR, #const allowed"));
8466 constraint (inst.reloc.exp.X_op != O_constant,
8467 _("expression too complex"));
8468 constraint (inst.reloc.exp.X_add_number < 0
8469 || inst.reloc.exp.X_add_number > 0xff,
8470 _("immediate value out of range"));
8471 inst.instruction = T2_SUBS_PC_LR
8472 | inst.reloc.exp.X_add_number;
8473 inst.reloc.type = BFD_RELOC_UNUSED;
8474 return;
8476 else if (Rs == REG_PC)
8478 /* Always use addw/subw. */
8479 inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
8480 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
8482 else
8484 inst.instruction = THUMB_OP32 (inst.instruction);
8485 inst.instruction = (inst.instruction & 0xe1ffffff)
8486 | 0x10000000;
8487 if (flags)
8488 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8489 else
8490 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
8492 inst.instruction |= Rd << 8;
8493 inst.instruction |= Rs << 16;
8496 else
8498 Rn = inst.operands[2].reg;
8499 /* See if we can do this with a 16-bit instruction. */
8500 if (!inst.operands[2].shifted && inst.size_req != 4)
8502 if (Rd > 7 || Rs > 7 || Rn > 7)
8503 narrow = FALSE;
8505 if (narrow)
8507 inst.instruction = ((inst.instruction == T_MNEM_adds
8508 || inst.instruction == T_MNEM_add)
8509 ? T_OPCODE_ADD_R3
8510 : T_OPCODE_SUB_R3);
8511 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
8512 return;
8515 if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
8517 /* Thumb-1 cores (except v6-M) require at least one high
8518 register in a narrow non-flag-setting add. */
8519 if (Rd > 7 || Rn > 7
8520 || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
8521 || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
8523 if (Rd == Rn)
8525 Rn = Rs;
8526 Rs = Rd;
8528 inst.instruction = T_OPCODE_ADD_HI;
8529 inst.instruction |= (Rd & 8) << 4;
8530 inst.instruction |= (Rd & 7);
8531 inst.instruction |= Rn << 3;
8532 return;
8536 /* If we get here, it can't be done in 16 bits. */
8537 constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
8538 _("shift must be constant"));
8539 inst.instruction = THUMB_OP32 (inst.instruction);
8540 inst.instruction |= Rd << 8;
8541 inst.instruction |= Rs << 16;
8542 encode_thumb32_shifted_operand (2);
8545 else
8547 constraint (inst.instruction == T_MNEM_adds
8548 || inst.instruction == T_MNEM_subs,
8549 BAD_THUMB32);
8551 if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
8553 constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
8554 || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
8555 BAD_HIREG);
8557 inst.instruction = (inst.instruction == T_MNEM_add
8558 ? 0x0000 : 0x8000);
8559 inst.instruction |= (Rd << 4) | Rs;
8560 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
8561 return;
8564 Rn = inst.operands[2].reg;
8565 constraint (inst.operands[2].shifted, _("unshifted register required"));
8567 /* We now have Rd, Rs, and Rn set to registers. */
8568 if (Rd > 7 || Rs > 7 || Rn > 7)
8570 /* Can't do this for SUB. */
8571 constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
8572 inst.instruction = T_OPCODE_ADD_HI;
8573 inst.instruction |= (Rd & 8) << 4;
8574 inst.instruction |= (Rd & 7);
8575 if (Rs == Rd)
8576 inst.instruction |= Rn << 3;
8577 else if (Rn == Rd)
8578 inst.instruction |= Rs << 3;
8579 else
8580 constraint (1, _("dest must overlap one source register"));
8582 else
8584 inst.instruction = (inst.instruction == T_MNEM_add
8585 ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
8586 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
8591 static void
8592 do_t_adr (void)
8594 if (unified_syntax && inst.size_req == 0 && inst.operands[0].reg <= 7)
8596 /* Defer to section relaxation. */
8597 inst.relax = inst.instruction;
8598 inst.instruction = THUMB_OP16 (inst.instruction);
8599 inst.instruction |= inst.operands[0].reg << 4;
8601 else if (unified_syntax && inst.size_req != 2)
8603 /* Generate a 32-bit opcode. */
8604 inst.instruction = THUMB_OP32 (inst.instruction);
8605 inst.instruction |= inst.operands[0].reg << 8;
8606 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
8607 inst.reloc.pc_rel = 1;
8609 else
8611 /* Generate a 16-bit opcode. */
8612 inst.instruction = THUMB_OP16 (inst.instruction);
8613 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
8614 inst.reloc.exp.X_add_number -= 4; /* PC relative adjust. */
8615 inst.reloc.pc_rel = 1;
8617 inst.instruction |= inst.operands[0].reg << 4;
8621 /* Arithmetic instructions for which there is just one 16-bit
8622 instruction encoding, and it allows only two low registers.
8623 For maximal compatibility with ARM syntax, we allow three register
8624 operands even when Thumb-32 instructions are not available, as long
8625 as the first two are identical. For instance, both "sbc r0,r1" and
8626 "sbc r0,r0,r1" are allowed. */
8627 static void
8628 do_t_arit3 (void)
8630 int Rd, Rs, Rn;
8632 Rd = inst.operands[0].reg;
8633 Rs = (inst.operands[1].present
8634 ? inst.operands[1].reg /* Rd, Rs, foo */
8635 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
8636 Rn = inst.operands[2].reg;
8638 if (unified_syntax)
8640 if (!inst.operands[2].isreg)
8642 /* For an immediate, we always generate a 32-bit opcode;
8643 section relaxation will shrink it later if possible. */
8644 inst.instruction = THUMB_OP32 (inst.instruction);
8645 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
8646 inst.instruction |= Rd << 8;
8647 inst.instruction |= Rs << 16;
8648 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8650 else
8652 bfd_boolean narrow;
8654 /* See if we can do this with a 16-bit instruction. */
8655 if (THUMB_SETS_FLAGS (inst.instruction))
8656 narrow = current_it_mask == 0;
8657 else
8658 narrow = current_it_mask != 0;
8660 if (Rd > 7 || Rn > 7 || Rs > 7)
8661 narrow = FALSE;
8662 if (inst.operands[2].shifted)
8663 narrow = FALSE;
8664 if (inst.size_req == 4)
8665 narrow = FALSE;
8667 if (narrow
8668 && Rd == Rs)
8670 inst.instruction = THUMB_OP16 (inst.instruction);
8671 inst.instruction |= Rd;
8672 inst.instruction |= Rn << 3;
8673 return;
8676 /* If we get here, it can't be done in 16 bits. */
8677 constraint (inst.operands[2].shifted
8678 && inst.operands[2].immisreg,
8679 _("shift must be constant"));
8680 inst.instruction = THUMB_OP32 (inst.instruction);
8681 inst.instruction |= Rd << 8;
8682 inst.instruction |= Rs << 16;
8683 encode_thumb32_shifted_operand (2);
8686 else
8688 /* On its face this is a lie - the instruction does set the
8689 flags. However, the only supported mnemonic in this mode
8690 says it doesn't. */
8691 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
8693 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
8694 _("unshifted register required"));
8695 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
8696 constraint (Rd != Rs,
8697 _("dest and source1 must be the same register"));
8699 inst.instruction = THUMB_OP16 (inst.instruction);
8700 inst.instruction |= Rd;
8701 inst.instruction |= Rn << 3;
8705 /* Similarly, but for instructions where the arithmetic operation is
8706 commutative, so we can allow either of them to be different from
8707 the destination operand in a 16-bit instruction. For instance, all
8708 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
8709 accepted. */
8710 static void
8711 do_t_arit3c (void)
8713 int Rd, Rs, Rn;
8715 Rd = inst.operands[0].reg;
8716 Rs = (inst.operands[1].present
8717 ? inst.operands[1].reg /* Rd, Rs, foo */
8718 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
8719 Rn = inst.operands[2].reg;
8721 if (unified_syntax)
8723 if (!inst.operands[2].isreg)
8725 /* For an immediate, we always generate a 32-bit opcode;
8726 section relaxation will shrink it later if possible. */
8727 inst.instruction = THUMB_OP32 (inst.instruction);
8728 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
8729 inst.instruction |= Rd << 8;
8730 inst.instruction |= Rs << 16;
8731 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8733 else
8735 bfd_boolean narrow;
8737 /* See if we can do this with a 16-bit instruction. */
8738 if (THUMB_SETS_FLAGS (inst.instruction))
8739 narrow = current_it_mask == 0;
8740 else
8741 narrow = current_it_mask != 0;
8743 if (Rd > 7 || Rn > 7 || Rs > 7)
8744 narrow = FALSE;
8745 if (inst.operands[2].shifted)
8746 narrow = FALSE;
8747 if (inst.size_req == 4)
8748 narrow = FALSE;
8750 if (narrow)
8752 if (Rd == Rs)
8754 inst.instruction = THUMB_OP16 (inst.instruction);
8755 inst.instruction |= Rd;
8756 inst.instruction |= Rn << 3;
8757 return;
8759 if (Rd == Rn)
8761 inst.instruction = THUMB_OP16 (inst.instruction);
8762 inst.instruction |= Rd;
8763 inst.instruction |= Rs << 3;
8764 return;
8768 /* If we get here, it can't be done in 16 bits. */
8769 constraint (inst.operands[2].shifted
8770 && inst.operands[2].immisreg,
8771 _("shift must be constant"));
8772 inst.instruction = THUMB_OP32 (inst.instruction);
8773 inst.instruction |= Rd << 8;
8774 inst.instruction |= Rs << 16;
8775 encode_thumb32_shifted_operand (2);
8778 else
8780 /* On its face this is a lie - the instruction does set the
8781 flags. However, the only supported mnemonic in this mode
8782 says it doesn't. */
8783 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
8785 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
8786 _("unshifted register required"));
8787 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
8789 inst.instruction = THUMB_OP16 (inst.instruction);
8790 inst.instruction |= Rd;
8792 if (Rd == Rs)
8793 inst.instruction |= Rn << 3;
8794 else if (Rd == Rn)
8795 inst.instruction |= Rs << 3;
8796 else
8797 constraint (1, _("dest must overlap one source register"));
8801 static void
8802 do_t_barrier (void)
8804 if (inst.operands[0].present)
8806 constraint ((inst.instruction & 0xf0) != 0x40
8807 && inst.operands[0].imm != 0xf,
8808 _("bad barrier type"));
8809 inst.instruction |= inst.operands[0].imm;
8811 else
8812 inst.instruction |= 0xf;
8815 static void
8816 do_t_bfc (void)
8818 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
8819 constraint (msb > 32, _("bit-field extends past end of register"));
8820 /* The instruction encoding stores the LSB and MSB,
8821 not the LSB and width. */
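/* Worked example (illustrative): "bfc r0, #8, #4" has lsb = 8 and
   width = 4, so msb = 12 and the value written into the MSB field below is
   msb - 1 = 11, i.e. the cleared field spans bits 8-11.  */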
8822 inst.instruction |= inst.operands[0].reg << 8;
8823 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
8824 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
8825 inst.instruction |= msb - 1;
8828 static void
8829 do_t_bfi (void)
8831 unsigned int msb;
8833 /* #0 in second position is alternative syntax for bfc, which is
8834 the same instruction but with REG_PC in the Rm field. */
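/* So, for example, "bfi r1, #0, #4, #8" is accepted and produces the same
   instruction word as "bfc r1, #4, #8" (illustrative).  */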
8835 if (!inst.operands[1].isreg)
8836 inst.operands[1].reg = REG_PC;
8838 msb = inst.operands[2].imm + inst.operands[3].imm;
8839 constraint (msb > 32, _("bit-field extends past end of register"));
8840 /* The instruction encoding stores the LSB and MSB,
8841 not the LSB and width. */
8842 inst.instruction |= inst.operands[0].reg << 8;
8843 inst.instruction |= inst.operands[1].reg << 16;
8844 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
8845 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
8846 inst.instruction |= msb - 1;
8849 static void
8850 do_t_bfx (void)
8852 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
8853 _("bit-field extends past end of register"));
8854 inst.instruction |= inst.operands[0].reg << 8;
8855 inst.instruction |= inst.operands[1].reg << 16;
8856 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
8857 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
8858 inst.instruction |= inst.operands[3].imm - 1;
8861 /* ARM V5 Thumb BLX (argument parse)
8862 BLX <target_addr> which is BLX(1)
8863 BLX <Rm> which is BLX(2)
8864 Unfortunately, there are two different opcodes for this mnemonic.
8865 So, the insns[].value is not used, and the code here zaps values
8866 into inst.instruction.
8868 ??? How to take advantage of the additional two bits of displacement
8869 available in Thumb32 mode? Need new relocation? */
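/* Illustrative: "blx r3" takes the register form (BLX(2)) and only needs
   the register ORed into the opcode, while "blx some_label" takes the
   immediate form (BLX(1)), which is why inst.instruction is rewritten to
   0xf000e800 and a PC-relative relocation is attached below.  */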
8871 static void
8872 do_t_blx (void)
8874 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8875 if (inst.operands[0].isreg)
8876 /* We have a register, so this is BLX(2). */
8877 inst.instruction |= inst.operands[0].reg << 3;
8878 else
8880 /* No register. This must be BLX(1). */
8881 inst.instruction = 0xf000e800;
8882 #ifdef OBJ_ELF
8883 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
8884 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
8885 else
8886 #endif
8887 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BLX;
8888 inst.reloc.pc_rel = 1;
8892 static void
8893 do_t_branch (void)
8895 int opcode;
8896 int cond;
8898 if (current_it_mask)
8900 /* Conditional branches inside IT blocks are encoded as unconditional
8901 branches. */
8902 cond = COND_ALWAYS;
8903 /* A branch must be the last instruction in an IT block. */
8904 constraint (current_it_mask != 0x10, BAD_BRANCH);
8906 else
8907 cond = inst.cond;
8909 if (cond != COND_ALWAYS)
8910 opcode = T_MNEM_bcond;
8911 else
8912 opcode = inst.instruction;
8914 if (unified_syntax && inst.size_req == 4)
8916 inst.instruction = THUMB_OP32(opcode);
8917 if (cond == COND_ALWAYS)
8918 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH25;
8919 else
8921 assert (cond != 0xF);
8922 inst.instruction |= cond << 22;
8923 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH20;
8926 else
8928 inst.instruction = THUMB_OP16(opcode);
8929 if (cond == COND_ALWAYS)
8930 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH12;
8931 else
8933 inst.instruction |= cond << 8;
8934 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH9;
8936 /* Allow section relaxation. */
8937 if (unified_syntax && inst.size_req != 2)
8938 inst.relax = opcode;
8941 inst.reloc.pc_rel = 1;
8944 static void
8945 do_t_bkpt (void)
8947 constraint (inst.cond != COND_ALWAYS,
8948 _("instruction is always unconditional"));
8949 if (inst.operands[0].present)
8951 constraint (inst.operands[0].imm > 255,
8952 _("immediate value out of range"));
8953 inst.instruction |= inst.operands[0].imm;
8957 static void
8958 do_t_branch23 (void)
8960 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8961 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
8962 inst.reloc.pc_rel = 1;
8964 /* If the destination of the branch is a defined symbol which does not have
8965 the THUMB_FUNC attribute, then we must be calling a function which has
8966 the (interfacearm) attribute. We look for the Thumb entry point to that
8967 function and change the branch to refer to that function instead. */
8968 if ( inst.reloc.exp.X_op == O_symbol
8969 && inst.reloc.exp.X_add_symbol != NULL
8970 && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
8971 && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
8972 inst.reloc.exp.X_add_symbol =
8973 find_real_start (inst.reloc.exp.X_add_symbol);
8976 static void
8977 do_t_bx (void)
8979 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8980 inst.instruction |= inst.operands[0].reg << 3;
8981 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
8982 should cause the alignment to be checked once it is known. This is
8983 because BX PC only works if the instruction is word aligned. */
8986 static void
8987 do_t_bxj (void)
8989 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8990 if (inst.operands[0].reg == REG_PC)
8991 as_tsktsk (_("use of r15 in bxj is not really useful"));
8993 inst.instruction |= inst.operands[0].reg << 16;
8996 static void
8997 do_t_clz (void)
8999 inst.instruction |= inst.operands[0].reg << 8;
9000 inst.instruction |= inst.operands[1].reg << 16;
9001 inst.instruction |= inst.operands[1].reg;
9004 static void
9005 do_t_cps (void)
9007 constraint (current_it_mask, BAD_NOT_IT);
9008 inst.instruction |= inst.operands[0].imm;
9011 static void
9012 do_t_cpsi (void)
9014 constraint (current_it_mask, BAD_NOT_IT);
9015 if (unified_syntax
9016 && (inst.operands[1].present || inst.size_req == 4)
9017 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
9019 unsigned int imod = (inst.instruction & 0x0030) >> 4;
9020 inst.instruction = 0xf3af8000;
9021 inst.instruction |= imod << 9;
9022 inst.instruction |= inst.operands[0].imm << 5;
9023 if (inst.operands[1].present)
9024 inst.instruction |= 0x100 | inst.operands[1].imm;
9026 else
9028 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
9029 && (inst.operands[0].imm & 4),
9030 _("selected processor does not support 'A' form "
9031 "of this instruction"));
9032 constraint (inst.operands[1].present || inst.size_req == 4,
9033 _("Thumb does not support the 2-argument "
9034 "form of this instruction"));
9035 inst.instruction |= inst.operands[0].imm;
9039 /* THUMB CPY instruction (argument parse). */
9041 static void
9042 do_t_cpy (void)
9044 if (inst.size_req == 4)
9046 inst.instruction = THUMB_OP32 (T_MNEM_mov);
9047 inst.instruction |= inst.operands[0].reg << 8;
9048 inst.instruction |= inst.operands[1].reg;
9050 else
9052 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
9053 inst.instruction |= (inst.operands[0].reg & 0x7);
9054 inst.instruction |= inst.operands[1].reg << 3;
9058 static void
9059 do_t_cbz (void)
9061 constraint (current_it_mask, BAD_NOT_IT);
9062 constraint (inst.operands[0].reg > 7, BAD_HIREG);
9063 inst.instruction |= inst.operands[0].reg;
9064 inst.reloc.pc_rel = 1;
9065 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
9068 static void
9069 do_t_dbg (void)
9071 inst.instruction |= inst.operands[0].imm;
9074 static void
9075 do_t_div (void)
9077 if (!inst.operands[1].present)
9078 inst.operands[1].reg = inst.operands[0].reg;
9079 inst.instruction |= inst.operands[0].reg << 8;
9080 inst.instruction |= inst.operands[1].reg << 16;
9081 inst.instruction |= inst.operands[2].reg;
9084 static void
9085 do_t_hint (void)
9087 if (unified_syntax && inst.size_req == 4)
9088 inst.instruction = THUMB_OP32 (inst.instruction);
9089 else
9090 inst.instruction = THUMB_OP16 (inst.instruction);
9093 static void
9094 do_t_it (void)
9096 unsigned int cond = inst.operands[0].imm;
9098 constraint (current_it_mask, BAD_NOT_IT);
9099 current_it_mask = (inst.instruction & 0xf) | 0x10;
9100 current_cc = cond;
9102 /* If the condition is a negative condition, invert the mask. */
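/* Roughly: the parsed mask holds one bit per 't'/'e' suffix followed by a
   trailing 1 marking the end of the block, and is built as if the first
   condition had bit 0 set.  For the "negative" (even) conditions only the
   suffix bits above that trailing 1 need inverting, hence the three XOR
   cases below (one, two or three suffix bits).  */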
9103 if ((cond & 0x1) == 0x0)
9105 unsigned int mask = inst.instruction & 0x000f;
9107 if ((mask & 0x7) == 0)
9108 /* no conversion needed */;
9109 else if ((mask & 0x3) == 0)
9110 mask ^= 0x8;
9111 else if ((mask & 0x1) == 0)
9112 mask ^= 0xC;
9113 else
9114 mask ^= 0xE;
9116 inst.instruction &= 0xfff0;
9117 inst.instruction |= mask;
9120 inst.instruction |= cond << 4;
9123 /* Helper function used for both push/pop and ldm/stm. */
9124 static void
9125 encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
9127 bfd_boolean load;
9129 load = (inst.instruction & (1 << 20)) != 0;
9131 if (mask & (1 << 13))
9132 inst.error = _("SP not allowed in register list");
9133 if (load)
9135 if (mask & (1 << 14)
9136 && mask & (1 << 15))
9137 inst.error = _("LR and PC should not both be in register list");
9139 if ((mask & (1 << base)) != 0
9140 && writeback)
9141 as_warn (_("base register should not be in register list "
9142 "when written back"));
9144 else
9146 if (mask & (1 << 15))
9147 inst.error = _("PC not allowed in register list");
9149 if (mask & (1 << base))
9150 as_warn (_("value stored for r%d is UNPREDICTABLE"), base);
9153 if ((mask & (mask - 1)) == 0)
9155 /* Single register transfers implemented as str/ldr. */
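/* For example (illustrative), "stmia r0!, {r8}" cannot use the 16-bit STM
   form, and since its register list has a single element it is emitted
   here as the equivalent T32 "str r8, [r0], #4" encoding instead of a
   real store-multiple.  */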
9156 if (writeback)
9158 if (inst.instruction & (1 << 23))
9159 inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
9160 else
9161 inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
9163 else
9165 if (inst.instruction & (1 << 23))
9166 inst.instruction = 0x00800000; /* ia -> [base] */
9167 else
9168 inst.instruction = 0x00000c04; /* db -> [base, #-4] */
9171 inst.instruction |= 0xf8400000;
9172 if (load)
9173 inst.instruction |= 0x00100000;
9175 mask = ffs (mask) - 1;
9176 mask <<= 12;
9178 else if (writeback)
9179 inst.instruction |= WRITE_BACK;
9181 inst.instruction |= mask;
9182 inst.instruction |= base << 16;
9185 static void
9186 do_t_ldmstm (void)
9188 /* This really doesn't seem worth it. */
9189 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
9190 _("expression too complex"));
9191 constraint (inst.operands[1].writeback,
9192 _("Thumb load/store multiple does not support {reglist}^"));
9194 if (unified_syntax)
9196 bfd_boolean narrow;
9197 unsigned mask;
9199 narrow = FALSE;
9200 /* See if we can use a 16-bit instruction. */
9201 if (inst.instruction < 0xffff /* not ldmdb/stmdb */
9202 && inst.size_req != 4
9203 && !(inst.operands[1].imm & ~0xff))
9205 mask = 1 << inst.operands[0].reg;
9207 if (inst.operands[0].reg <= 7
9208 && (inst.instruction == T_MNEM_stmia
9209 ? inst.operands[0].writeback
9210 : (inst.operands[0].writeback
9211 == !(inst.operands[1].imm & mask))))
9213 if (inst.instruction == T_MNEM_stmia
9214 && (inst.operands[1].imm & mask)
9215 && (inst.operands[1].imm & (mask - 1)))
9216 as_warn (_("value stored for r%d is UNPREDICTABLE"),
9217 inst.operands[0].reg);
9219 inst.instruction = THUMB_OP16 (inst.instruction);
9220 inst.instruction |= inst.operands[0].reg << 8;
9221 inst.instruction |= inst.operands[1].imm;
9222 narrow = TRUE;
9224 else if (inst.operands[0].reg == REG_SP
9225 && inst.operands[0].writeback)
9227 inst.instruction = THUMB_OP16 (inst.instruction == T_MNEM_stmia
9228 ? T_MNEM_push : T_MNEM_pop);
9229 inst.instruction |= inst.operands[1].imm;
9230 narrow = TRUE;
9234 if (!narrow)
9236 if (inst.instruction < 0xffff)
9237 inst.instruction = THUMB_OP32 (inst.instruction);
9239 encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
9240 inst.operands[0].writeback);
9243 else
9245 constraint (inst.operands[0].reg > 7
9246 || (inst.operands[1].imm & ~0xff), BAD_HIREG);
9247 constraint (inst.instruction != T_MNEM_ldmia
9248 && inst.instruction != T_MNEM_stmia,
9249 _("Thumb-2 instruction only valid in unified syntax"));
9250 if (inst.instruction == T_MNEM_stmia)
9252 if (!inst.operands[0].writeback)
9253 as_warn (_("this instruction will write back the base register"));
9254 if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
9255 && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
9256 as_warn (_("value stored for r%d is UNPREDICTABLE"),
9257 inst.operands[0].reg);
9259 else
9261 if (!inst.operands[0].writeback
9262 && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
9263 as_warn (_("this instruction will write back the base register"));
9264 else if (inst.operands[0].writeback
9265 && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
9266 as_warn (_("this instruction will not write back the base register"));
9269 inst.instruction = THUMB_OP16 (inst.instruction);
9270 inst.instruction |= inst.operands[0].reg << 8;
9271 inst.instruction |= inst.operands[1].imm;
9275 static void
9276 do_t_ldrex (void)
9278 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
9279 || inst.operands[1].postind || inst.operands[1].writeback
9280 || inst.operands[1].immisreg || inst.operands[1].shifted
9281 || inst.operands[1].negative,
9282 BAD_ADDR_MODE);
9284 inst.instruction |= inst.operands[0].reg << 12;
9285 inst.instruction |= inst.operands[1].reg << 16;
9286 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
9289 static void
9290 do_t_ldrexd (void)
9292 if (!inst.operands[1].present)
9294 constraint (inst.operands[0].reg == REG_LR,
9295 _("r14 not allowed as first register "
9296 "when second register is omitted"));
9297 inst.operands[1].reg = inst.operands[0].reg + 1;
9299 constraint (inst.operands[0].reg == inst.operands[1].reg,
9300 BAD_OVERLAP);
9302 inst.instruction |= inst.operands[0].reg << 12;
9303 inst.instruction |= inst.operands[1].reg << 8;
9304 inst.instruction |= inst.operands[2].reg << 16;
9307 static void
9308 do_t_ldst (void)
9310 unsigned long opcode;
9311 int Rn;
9313 opcode = inst.instruction;
9314 if (unified_syntax)
9316 if (!inst.operands[1].isreg)
9318 if (opcode <= 0xffff)
9319 inst.instruction = THUMB_OP32 (opcode);
9320 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
9321 return;
9323 if (inst.operands[1].isreg
9324 && !inst.operands[1].writeback
9325 && !inst.operands[1].shifted && !inst.operands[1].postind
9326 && !inst.operands[1].negative && inst.operands[0].reg <= 7
9327 && opcode <= 0xffff
9328 && inst.size_req != 4)
9330 /* Insn may have a 16-bit form. */
9331 Rn = inst.operands[1].reg;
9332 if (inst.operands[1].immisreg)
9334 inst.instruction = THUMB_OP16 (opcode);
9335 /* [Rn, Rik] */
9336 if (Rn <= 7 && inst.operands[1].imm <= 7)
9337 goto op16;
9339 else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
9340 && opcode != T_MNEM_ldrsb)
9341 || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
9342 || (Rn == REG_SP && opcode == T_MNEM_str))
9344 /* [Rn, #const] */
9345 if (Rn > 7)
9347 if (Rn == REG_PC)
9349 if (inst.reloc.pc_rel)
9350 opcode = T_MNEM_ldr_pc2;
9351 else
9352 opcode = T_MNEM_ldr_pc;
9354 else
9356 if (opcode == T_MNEM_ldr)
9357 opcode = T_MNEM_ldr_sp;
9358 else
9359 opcode = T_MNEM_str_sp;
9361 inst.instruction = inst.operands[0].reg << 8;
9363 else
9365 inst.instruction = inst.operands[0].reg;
9366 inst.instruction |= inst.operands[1].reg << 3;
9368 inst.instruction |= THUMB_OP16 (opcode);
9369 if (inst.size_req == 2)
9370 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
9371 else
9372 inst.relax = opcode;
9373 return;
9376 /* Definitely a 32-bit variant. */
9377 inst.instruction = THUMB_OP32 (opcode);
9378 inst.instruction |= inst.operands[0].reg << 12;
9379 encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
9380 return;
9383 constraint (inst.operands[0].reg > 7, BAD_HIREG);
9385 if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
9387 /* Only [Rn,Rm] is acceptable. */
9388 constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
9389 constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
9390 || inst.operands[1].postind || inst.operands[1].shifted
9391 || inst.operands[1].negative,
9392 _("Thumb does not support this addressing mode"));
9393 inst.instruction = THUMB_OP16 (inst.instruction);
9394 goto op16;
9397 inst.instruction = THUMB_OP16 (inst.instruction);
9398 if (!inst.operands[1].isreg)
9399 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
9400 return;
9402 constraint (!inst.operands[1].preind
9403 || inst.operands[1].shifted
9404 || inst.operands[1].writeback,
9405 _("Thumb does not support this addressing mode"));
9406 if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
9408 constraint (inst.instruction & 0x0600,
9409 _("byte or halfword not valid for base register"));
9410 constraint (inst.operands[1].reg == REG_PC
9411 && !(inst.instruction & THUMB_LOAD_BIT),
9412 _("r15 based store not allowed"));
9413 constraint (inst.operands[1].immisreg,
9414 _("invalid base register for register offset"));
9416 if (inst.operands[1].reg == REG_PC)
9417 inst.instruction = T_OPCODE_LDR_PC;
9418 else if (inst.instruction & THUMB_LOAD_BIT)
9419 inst.instruction = T_OPCODE_LDR_SP;
9420 else
9421 inst.instruction = T_OPCODE_STR_SP;
9423 inst.instruction |= inst.operands[0].reg << 8;
9424 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
9425 return;
9428 constraint (inst.operands[1].reg > 7, BAD_HIREG);
9429 if (!inst.operands[1].immisreg)
9431 /* Immediate offset. */
9432 inst.instruction |= inst.operands[0].reg;
9433 inst.instruction |= inst.operands[1].reg << 3;
9434 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
9435 return;
9438 /* Register offset. */
9439 constraint (inst.operands[1].imm > 7, BAD_HIREG);
9440 constraint (inst.operands[1].negative,
9441 _("Thumb does not support this addressing mode"));
9443 op16:
9444 switch (inst.instruction)
9446 case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
9447 case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
9448 case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
9449 case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
9450 case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
9451 case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
9452 case 0x5600 /* ldrsb */:
9453 case 0x5e00 /* ldrsh */: break;
9454 default: abort ();
9457 inst.instruction |= inst.operands[0].reg;
9458 inst.instruction |= inst.operands[1].reg << 3;
9459 inst.instruction |= inst.operands[1].imm << 6;
9462 static void
9463 do_t_ldstd (void)
9465 if (!inst.operands[1].present)
9467 inst.operands[1].reg = inst.operands[0].reg + 1;
9468 constraint (inst.operands[0].reg == REG_LR,
9469 _("r14 not allowed here"));
9471 inst.instruction |= inst.operands[0].reg << 12;
9472 inst.instruction |= inst.operands[1].reg << 8;
9473 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
9476 static void
9477 do_t_ldstt (void)
9479 inst.instruction |= inst.operands[0].reg << 12;
9480 encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
9483 static void
9484 do_t_mla (void)
9486 inst.instruction |= inst.operands[0].reg << 8;
9487 inst.instruction |= inst.operands[1].reg << 16;
9488 inst.instruction |= inst.operands[2].reg;
9489 inst.instruction |= inst.operands[3].reg << 12;
9492 static void
9493 do_t_mlal (void)
9495 inst.instruction |= inst.operands[0].reg << 12;
9496 inst.instruction |= inst.operands[1].reg << 8;
9497 inst.instruction |= inst.operands[2].reg << 16;
9498 inst.instruction |= inst.operands[3].reg;
9501 static void
9502 do_t_mov_cmp (void)
9504 if (unified_syntax)
9506 int r0off = (inst.instruction == T_MNEM_mov
9507 || inst.instruction == T_MNEM_movs) ? 8 : 16;
9508 unsigned long opcode;
9509 bfd_boolean narrow;
9510 bfd_boolean low_regs;
9512 low_regs = (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7);
9513 opcode = inst.instruction;
9514 if (current_it_mask)
9515 narrow = opcode != T_MNEM_movs;
9516 else
9517 narrow = opcode != T_MNEM_movs || low_regs;
9518 if (inst.size_req == 4
9519 || inst.operands[1].shifted)
9520 narrow = FALSE;
9522 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
9523 if (opcode == T_MNEM_movs && inst.operands[1].isreg
9524 && !inst.operands[1].shifted
9525 && inst.operands[0].reg == REG_PC
9526 && inst.operands[1].reg == REG_LR)
9528 inst.instruction = T2_SUBS_PC_LR;
9529 return;
9532 if (!inst.operands[1].isreg)
9534 /* Immediate operand. */
9535 if (current_it_mask == 0 && opcode == T_MNEM_mov)
9536 narrow = 0;
9537 if (low_regs && narrow)
9539 inst.instruction = THUMB_OP16 (opcode);
9540 inst.instruction |= inst.operands[0].reg << 8;
9541 if (inst.size_req == 2)
9542 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
9543 else
9544 inst.relax = opcode;
9546 else
9548 inst.instruction = THUMB_OP32 (inst.instruction);
9549 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9550 inst.instruction |= inst.operands[0].reg << r0off;
9551 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9554 else if (inst.operands[1].shifted && inst.operands[1].immisreg
9555 && (inst.instruction == T_MNEM_mov
9556 || inst.instruction == T_MNEM_movs))
9558 /* Register shifts are encoded as separate shift instructions. */
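/* For instance (illustrative), "movs r1, r1, lsl r2" outside an IT block
   becomes the 16-bit "lsls r1, r2", provided all three registers are low
   and the destination equals the shifted source; otherwise the 32-bit
   LSL/LSR/ASR/ROR register-shift form is produced.  */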
9559 bfd_boolean flags = (inst.instruction == T_MNEM_movs);
9561 if (current_it_mask)
9562 narrow = !flags;
9563 else
9564 narrow = flags;
9566 if (inst.size_req == 4)
9567 narrow = FALSE;
9569 if (!low_regs || inst.operands[1].imm > 7)
9570 narrow = FALSE;
9572 if (inst.operands[0].reg != inst.operands[1].reg)
9573 narrow = FALSE;
9575 switch (inst.operands[1].shift_kind)
9577 case SHIFT_LSL:
9578 opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
9579 break;
9580 case SHIFT_ASR:
9581 opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
9582 break;
9583 case SHIFT_LSR:
9584 opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
9585 break;
9586 case SHIFT_ROR:
9587 opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
9588 break;
9589 default:
9590 abort ();
9593 inst.instruction = opcode;
9594 if (narrow)
9596 inst.instruction |= inst.operands[0].reg;
9597 inst.instruction |= inst.operands[1].imm << 3;
9599 else
9601 if (flags)
9602 inst.instruction |= CONDS_BIT;
9604 inst.instruction |= inst.operands[0].reg << 8;
9605 inst.instruction |= inst.operands[1].reg << 16;
9606 inst.instruction |= inst.operands[1].imm;
9609 else if (!narrow)
9611 /* Some mov with immediate shift have narrow variants.
9612 Register shifts are handled above. */
9613 if (low_regs && inst.operands[1].shifted
9614 && (inst.instruction == T_MNEM_mov
9615 || inst.instruction == T_MNEM_movs))
9617 if (current_it_mask)
9618 narrow = (inst.instruction == T_MNEM_mov);
9619 else
9620 narrow = (inst.instruction == T_MNEM_movs);
9623 if (narrow)
9625 switch (inst.operands[1].shift_kind)
9627 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
9628 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
9629 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
9630 default: narrow = FALSE; break;
9634 if (narrow)
9636 inst.instruction |= inst.operands[0].reg;
9637 inst.instruction |= inst.operands[1].reg << 3;
9638 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
9640 else
9642 inst.instruction = THUMB_OP32 (inst.instruction);
9643 inst.instruction |= inst.operands[0].reg << r0off;
9644 encode_thumb32_shifted_operand (1);
9647 else
9648 switch (inst.instruction)
9650 case T_MNEM_mov:
9651 inst.instruction = T_OPCODE_MOV_HR;
9652 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
9653 inst.instruction |= (inst.operands[0].reg & 0x7);
9654 inst.instruction |= inst.operands[1].reg << 3;
9655 break;
9657 case T_MNEM_movs:
9658 /* We know we have low registers at this point.
9659 Generate ADD Rd, Rs, #0. */
9660 inst.instruction = T_OPCODE_ADD_I3;
9661 inst.instruction |= inst.operands[0].reg;
9662 inst.instruction |= inst.operands[1].reg << 3;
9663 break;
9665 case T_MNEM_cmp:
9666 if (low_regs)
9668 inst.instruction = T_OPCODE_CMP_LR;
9669 inst.instruction |= inst.operands[0].reg;
9670 inst.instruction |= inst.operands[1].reg << 3;
9672 else
9674 inst.instruction = T_OPCODE_CMP_HR;
9675 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
9676 inst.instruction |= (inst.operands[0].reg & 0x7);
9677 inst.instruction |= inst.operands[1].reg << 3;
9679 break;
9681 return;
9684 inst.instruction = THUMB_OP16 (inst.instruction);
9685 if (inst.operands[1].isreg)
9687 if (inst.operands[0].reg < 8 && inst.operands[1].reg < 8)
9689 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
9690 since a MOV instruction produces unpredictable results. */
9691 if (inst.instruction == T_OPCODE_MOV_I8)
9692 inst.instruction = T_OPCODE_ADD_I3;
9693 else
9694 inst.instruction = T_OPCODE_CMP_LR;
9696 inst.instruction |= inst.operands[0].reg;
9697 inst.instruction |= inst.operands[1].reg << 3;
9699 else
9701 if (inst.instruction == T_OPCODE_MOV_I8)
9702 inst.instruction = T_OPCODE_MOV_HR;
9703 else
9704 inst.instruction = T_OPCODE_CMP_HR;
9705 do_t_cpy ();
9708 else
9710 constraint (inst.operands[0].reg > 7,
9711 _("only lo regs allowed with immediate"));
9712 inst.instruction |= inst.operands[0].reg << 8;
9713 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
9717 static void
9718 do_t_mov16 (void)
9720 bfd_vma imm;
9721 bfd_boolean top;
9723 top = (inst.instruction & 0x00800000) != 0;
9724 if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
9726 constraint (top, _(":lower16: not allowed in this instruction"));
9727 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
9729 else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
9731 constraint (!top, _(":upper16: not allowed in this instruction"));
9732 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
9735 inst.instruction |= inst.operands[0].reg << 8;
9736 if (inst.reloc.type == BFD_RELOC_UNUSED)
9738 imm = inst.reloc.exp.X_add_number;
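/* The 16-bit value is scattered into the T32 imm4:i:imm3:imm8 fields,
   i.e. bits 19-16, bit 26, bits 14-12 and bits 7-0 of the instruction
   word, as done below.  */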
9739 inst.instruction |= (imm & 0xf000) << 4;
9740 inst.instruction |= (imm & 0x0800) << 15;
9741 inst.instruction |= (imm & 0x0700) << 4;
9742 inst.instruction |= (imm & 0x00ff);
9746 static void
9747 do_t_mvn_tst (void)
9749 if (unified_syntax)
9751 int r0off = (inst.instruction == T_MNEM_mvn
9752 || inst.instruction == T_MNEM_mvns) ? 8 : 16;
9753 bfd_boolean narrow;
9755 if (inst.size_req == 4
9756 || inst.instruction > 0xffff
9757 || inst.operands[1].shifted
9758 || inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
9759 narrow = FALSE;
9760 else if (inst.instruction == T_MNEM_cmn)
9761 narrow = TRUE;
9762 else if (THUMB_SETS_FLAGS (inst.instruction))
9763 narrow = (current_it_mask == 0);
9764 else
9765 narrow = (current_it_mask != 0);
9767 if (!inst.operands[1].isreg)
9769 /* For an immediate, we always generate a 32-bit opcode;
9770 section relaxation will shrink it later if possible. */
9771 if (inst.instruction < 0xffff)
9772 inst.instruction = THUMB_OP32 (inst.instruction);
9773 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9774 inst.instruction |= inst.operands[0].reg << r0off;
9775 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9777 else
9779 /* See if we can do this with a 16-bit instruction. */
9780 if (narrow)
9782 inst.instruction = THUMB_OP16 (inst.instruction);
9783 inst.instruction |= inst.operands[0].reg;
9784 inst.instruction |= inst.operands[1].reg << 3;
9786 else
9788 constraint (inst.operands[1].shifted
9789 && inst.operands[1].immisreg,
9790 _("shift must be constant"));
9791 if (inst.instruction < 0xffff)
9792 inst.instruction = THUMB_OP32 (inst.instruction);
9793 inst.instruction |= inst.operands[0].reg << r0off;
9794 encode_thumb32_shifted_operand (1);
9798 else
9800 constraint (inst.instruction > 0xffff
9801 || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
9802 constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
9803 _("unshifted register required"));
9804 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
9805 BAD_HIREG);
9807 inst.instruction = THUMB_OP16 (inst.instruction);
9808 inst.instruction |= inst.operands[0].reg;
9809 inst.instruction |= inst.operands[1].reg << 3;
9813 static void
9814 do_t_mrs (void)
9816 int flags;
9818 if (do_vfp_nsyn_mrs () == SUCCESS)
9819 return;
9821 flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
9822 if (flags == 0)
9824 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m),
9825 _("selected processor does not support "
9826 "requested special purpose register"));
9828 else
9830 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
9831 _("selected processor does not support "
9832 "requested special purpose register"));
9833 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
9834 constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
9835 _("'CPSR' or 'SPSR' expected"));
9838 inst.instruction |= inst.operands[0].reg << 8;
9839 inst.instruction |= (flags & SPSR_BIT) >> 2;
9840 inst.instruction |= inst.operands[1].imm & 0xff;
9843 static void
9844 do_t_msr (void)
9846 int flags;
9848 if (do_vfp_nsyn_msr () == SUCCESS)
9849 return;
9851 constraint (!inst.operands[1].isreg,
9852 _("Thumb encoding does not support an immediate here"));
9853 flags = inst.operands[0].imm;
9854 if (flags & ~0xff)
9856 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
9857 _("selected processor does not support "
9858 "requested special purpose register"));
9860 else
9862 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m),
9863 _("selected processor does not support "
9864 "requested special purpose register"));
9865 flags |= PSR_f;
9867 inst.instruction |= (flags & SPSR_BIT) >> 2;
9868 inst.instruction |= (flags & ~SPSR_BIT) >> 8;
9869 inst.instruction |= (flags & 0xff);
9870 inst.instruction |= inst.operands[1].reg << 16;
9873 static void
9874 do_t_mul (void)
9876 if (!inst.operands[2].present)
9877 inst.operands[2].reg = inst.operands[0].reg;
9879 /* There is no 32-bit MULS and no 16-bit MUL. */
9880 if (unified_syntax && inst.instruction == T_MNEM_mul)
9882 inst.instruction = THUMB_OP32 (inst.instruction);
9883 inst.instruction |= inst.operands[0].reg << 8;
9884 inst.instruction |= inst.operands[1].reg << 16;
9885 inst.instruction |= inst.operands[2].reg << 0;
9887 else
9889 constraint (!unified_syntax
9890 && inst.instruction == T_MNEM_muls, BAD_THUMB32);
9891 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
9892 BAD_HIREG);
9894 inst.instruction = THUMB_OP16 (inst.instruction);
9895 inst.instruction |= inst.operands[0].reg;
9897 if (inst.operands[0].reg == inst.operands[1].reg)
9898 inst.instruction |= inst.operands[2].reg << 3;
9899 else if (inst.operands[0].reg == inst.operands[2].reg)
9900 inst.instruction |= inst.operands[1].reg << 3;
9901 else
9902 constraint (1, _("dest must overlap one source register"));
9906 static void
9907 do_t_mull (void)
9909 inst.instruction |= inst.operands[0].reg << 12;
9910 inst.instruction |= inst.operands[1].reg << 8;
9911 inst.instruction |= inst.operands[2].reg << 16;
9912 inst.instruction |= inst.operands[3].reg;
9914 if (inst.operands[0].reg == inst.operands[1].reg)
9915 as_tsktsk (_("rdhi and rdlo must be different"));
9918 static void
9919 do_t_nop (void)
9921 if (unified_syntax)
9923 if (inst.size_req == 4 || inst.operands[0].imm > 15)
9925 inst.instruction = THUMB_OP32 (inst.instruction);
9926 inst.instruction |= inst.operands[0].imm;
9928 else
9930 inst.instruction = THUMB_OP16 (inst.instruction);
9931 inst.instruction |= inst.operands[0].imm << 4;
9934 else
9936 constraint (inst.operands[0].present,
9937 _("Thumb does not support NOP with hints"));
9938 inst.instruction = 0x46c0;
9942 static void
9943 do_t_neg (void)
9945 if (unified_syntax)
9947 bfd_boolean narrow;
9949 if (THUMB_SETS_FLAGS (inst.instruction))
9950 narrow = (current_it_mask == 0);
9951 else
9952 narrow = (current_it_mask != 0);
9953 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
9954 narrow = FALSE;
9955 if (inst.size_req == 4)
9956 narrow = FALSE;
9958 if (!narrow)
9960 inst.instruction = THUMB_OP32 (inst.instruction);
9961 inst.instruction |= inst.operands[0].reg << 8;
9962 inst.instruction |= inst.operands[1].reg << 16;
9964 else
9966 inst.instruction = THUMB_OP16 (inst.instruction);
9967 inst.instruction |= inst.operands[0].reg;
9968 inst.instruction |= inst.operands[1].reg << 3;
9971 else
9973 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
9974 BAD_HIREG);
9975 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
9977 inst.instruction = THUMB_OP16 (inst.instruction);
9978 inst.instruction |= inst.operands[0].reg;
9979 inst.instruction |= inst.operands[1].reg << 3;
9983 static void
9984 do_t_pkhbt (void)
9986 inst.instruction |= inst.operands[0].reg << 8;
9987 inst.instruction |= inst.operands[1].reg << 16;
9988 inst.instruction |= inst.operands[2].reg;
9989 if (inst.operands[3].present)
9991 unsigned int val = inst.reloc.exp.X_add_number;
9992 constraint (inst.reloc.exp.X_op != O_constant,
9993 _("expression too complex"));
9994 inst.instruction |= (val & 0x1c) << 10;
9995 inst.instruction |= (val & 0x03) << 6;
9999 static void
10000 do_t_pkhtb (void)
10002 if (!inst.operands[3].present)
10003 inst.instruction &= ~0x00000020;
10004 do_t_pkhbt ();
10007 static void
10008 do_t_pld (void)
10010 encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
10013 static void
10014 do_t_push_pop (void)
10016 unsigned mask;
10018 constraint (inst.operands[0].writeback,
10019 _("push/pop do not support {reglist}^"));
10020 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
10021 _("expression too complex"));
10023 mask = inst.operands[0].imm;
10024 if ((mask & ~0xff) == 0)
10025 inst.instruction = THUMB_OP16 (inst.instruction) | mask;
10026 else if ((inst.instruction == T_MNEM_push
10027 && (mask & ~0xff) == 1 << REG_LR)
10028 || (inst.instruction == T_MNEM_pop
10029 && (mask & ~0xff) == 1 << REG_PC))
10031 inst.instruction = THUMB_OP16 (inst.instruction);
10032 inst.instruction |= THUMB_PP_PC_LR;
10033 inst.instruction |= mask & 0xff;
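/* E.g. "push {r4-r7, lr}" and "pop {r0-r3, pc}" land here (illustrative):
   the only high register present is LR or PC respectively, so the 16-bit
   encoding with the extra PC/LR bit still applies.  */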
10035 else if (unified_syntax)
10037 inst.instruction = THUMB_OP32 (inst.instruction);
10038 encode_thumb2_ldmstm (13, mask, TRUE);
10040 else
10042 inst.error = _("invalid register list to push/pop instruction");
10043 return;
10047 static void
10048 do_t_rbit (void)
10050 inst.instruction |= inst.operands[0].reg << 8;
10051 inst.instruction |= inst.operands[1].reg << 16;
10052 inst.instruction |= inst.operands[1].reg;
10055 static void
10056 do_t_rev (void)
10058 if (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7
10059 && inst.size_req != 4)
10061 inst.instruction = THUMB_OP16 (inst.instruction);
10062 inst.instruction |= inst.operands[0].reg;
10063 inst.instruction |= inst.operands[1].reg << 3;
10065 else if (unified_syntax)
10067 inst.instruction = THUMB_OP32 (inst.instruction);
10068 inst.instruction |= inst.operands[0].reg << 8;
10069 inst.instruction |= inst.operands[1].reg << 16;
10070 inst.instruction |= inst.operands[1].reg;
10072 else
10073 inst.error = BAD_HIREG;
10076 static void
10077 do_t_rsb (void)
10079 int Rd, Rs;
10081 Rd = inst.operands[0].reg;
10082 Rs = (inst.operands[1].present
10083 ? inst.operands[1].reg /* Rd, Rs, foo */
10084 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
10086 inst.instruction |= Rd << 8;
10087 inst.instruction |= Rs << 16;
10088 if (!inst.operands[2].isreg)
10090 bfd_boolean narrow;
10092 if ((inst.instruction & 0x00100000) != 0)
10093 narrow = (current_it_mask == 0);
10094 else
10095 narrow = (current_it_mask != 0);
10097 if (Rd > 7 || Rs > 7)
10098 narrow = FALSE;
10100 if (inst.size_req == 4 || !unified_syntax)
10101 narrow = FALSE;
10103 if (inst.reloc.exp.X_op != O_constant
10104 || inst.reloc.exp.X_add_number != 0)
10105 narrow = FALSE;
10107 /* Turn rsb #0 into 16-bit neg. We should probably do this via
10108 relaxation, but it doesn't seem worth the hassle. */
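/* Example (illustrative): in unified syntax, "rsbs r0, r1, #0" written
   outside an IT block with low registers is emitted as the 16-bit
   "negs r0, r1" below.  */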
10109 if (narrow)
10111 inst.reloc.type = BFD_RELOC_UNUSED;
10112 inst.instruction = THUMB_OP16 (T_MNEM_negs);
10113 inst.instruction |= Rs << 3;
10114 inst.instruction |= Rd;
10116 else
10118 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
10119 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
10122 else
10123 encode_thumb32_shifted_operand (2);
10126 static void
10127 do_t_setend (void)
10129 constraint (current_it_mask, BAD_NOT_IT);
10130 if (inst.operands[0].imm)
10131 inst.instruction |= 0x8;
10134 static void
10135 do_t_shift (void)
10137 if (!inst.operands[1].present)
10138 inst.operands[1].reg = inst.operands[0].reg;
10140 if (unified_syntax)
10142 bfd_boolean narrow;
10143 int shift_kind;
10145 switch (inst.instruction)
10147 case T_MNEM_asr:
10148 case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
10149 case T_MNEM_lsl:
10150 case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
10151 case T_MNEM_lsr:
10152 case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
10153 case T_MNEM_ror:
10154 case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
10155 default: abort ();
10158 if (THUMB_SETS_FLAGS (inst.instruction))
10159 narrow = (current_it_mask == 0);
10160 else
10161 narrow = (current_it_mask != 0);
10162 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
10163 narrow = FALSE;
10164 if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
10165 narrow = FALSE;
10166 if (inst.operands[2].isreg
10167 && (inst.operands[1].reg != inst.operands[0].reg
10168 || inst.operands[2].reg > 7))
10169 narrow = FALSE;
10170 if (inst.size_req == 4)
10171 narrow = FALSE;
10173 if (!narrow)
10175 if (inst.operands[2].isreg)
10177 inst.instruction = THUMB_OP32 (inst.instruction);
10178 inst.instruction |= inst.operands[0].reg << 8;
10179 inst.instruction |= inst.operands[1].reg << 16;
10180 inst.instruction |= inst.operands[2].reg;
10182 else
10184 inst.operands[1].shifted = 1;
10185 inst.operands[1].shift_kind = shift_kind;
10186 inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
10187 ? T_MNEM_movs : T_MNEM_mov);
10188 inst.instruction |= inst.operands[0].reg << 8;
10189 encode_thumb32_shifted_operand (1);
10190 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
10191 inst.reloc.type = BFD_RELOC_UNUSED;
10194 else
10196 if (inst.operands[2].isreg)
10198 switch (shift_kind)
10200 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
10201 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
10202 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
10203 case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
10204 default: abort ();
10207 inst.instruction |= inst.operands[0].reg;
10208 inst.instruction |= inst.operands[2].reg << 3;
10210 else
10212 switch (shift_kind)
10214 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
10215 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
10216 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
10217 default: abort ();
10219 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
10220 inst.instruction |= inst.operands[0].reg;
10221 inst.instruction |= inst.operands[1].reg << 3;
10225 else
10227 constraint (inst.operands[0].reg > 7
10228 || inst.operands[1].reg > 7, BAD_HIREG);
10229 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
10231 if (inst.operands[2].isreg) /* Rd, {Rs,} Rn */
10233 constraint (inst.operands[2].reg > 7, BAD_HIREG);
10234 constraint (inst.operands[0].reg != inst.operands[1].reg,
10235 _("source1 and dest must be same register"));
10237 switch (inst.instruction)
10239 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
10240 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
10241 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
10242 case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
10243 default: abort ();
10246 inst.instruction |= inst.operands[0].reg;
10247 inst.instruction |= inst.operands[2].reg << 3;
10249 else
10251 switch (inst.instruction)
10253 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
10254 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
10255 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
10256 case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
10257 default: abort ();
10259 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
10260 inst.instruction |= inst.operands[0].reg;
10261 inst.instruction |= inst.operands[1].reg << 3;
10266 static void
10267 do_t_simd (void)
10269 inst.instruction |= inst.operands[0].reg << 8;
10270 inst.instruction |= inst.operands[1].reg << 16;
10271 inst.instruction |= inst.operands[2].reg;
10274 static void
10275 do_t_smc (void)
10277 unsigned int value = inst.reloc.exp.X_add_number;
10278 constraint (inst.reloc.exp.X_op != O_constant,
10279 _("expression too complex"));
10280 inst.reloc.type = BFD_RELOC_UNUSED;
10281 inst.instruction |= (value & 0xf000) >> 12;
10282 inst.instruction |= (value & 0x0ff0);
10283 inst.instruction |= (value & 0x000f) << 16;
10286 static void
10287 do_t_ssat (void)
10289 inst.instruction |= inst.operands[0].reg << 8;
10290 inst.instruction |= inst.operands[1].imm - 1;
10291 inst.instruction |= inst.operands[2].reg << 16;
10293 if (inst.operands[3].present)
10295 constraint (inst.reloc.exp.X_op != O_constant,
10296 _("expression too complex"));
10298 if (inst.reloc.exp.X_add_number != 0)
10300 if (inst.operands[3].shift_kind == SHIFT_ASR)
10301 inst.instruction |= 0x00200000; /* sh bit */
10302 inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
10303 inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
10305 inst.reloc.type = BFD_RELOC_UNUSED;
10309 static void
10310 do_t_ssat16 (void)
10312 inst.instruction |= inst.operands[0].reg << 8;
10313 inst.instruction |= inst.operands[1].imm - 1;
10314 inst.instruction |= inst.operands[2].reg << 16;
10317 static void
10318 do_t_strex (void)
10320 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
10321 || inst.operands[2].postind || inst.operands[2].writeback
10322 || inst.operands[2].immisreg || inst.operands[2].shifted
10323 || inst.operands[2].negative,
10324 BAD_ADDR_MODE);
10326 inst.instruction |= inst.operands[0].reg << 8;
10327 inst.instruction |= inst.operands[1].reg << 12;
10328 inst.instruction |= inst.operands[2].reg << 16;
10329 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
10332 static void
10333 do_t_strexd (void)
10335 if (!inst.operands[2].present)
10336 inst.operands[2].reg = inst.operands[1].reg + 1;
10338 constraint (inst.operands[0].reg == inst.operands[1].reg
10339 || inst.operands[0].reg == inst.operands[2].reg
10340 || inst.operands[0].reg == inst.operands[3].reg
10341 || inst.operands[1].reg == inst.operands[2].reg,
10342 BAD_OVERLAP);
10344 inst.instruction |= inst.operands[0].reg;
10345 inst.instruction |= inst.operands[1].reg << 12;
10346 inst.instruction |= inst.operands[2].reg << 8;
10347 inst.instruction |= inst.operands[3].reg << 16;
10350 static void
10351 do_t_sxtah (void)
10353 inst.instruction |= inst.operands[0].reg << 8;
10354 inst.instruction |= inst.operands[1].reg << 16;
10355 inst.instruction |= inst.operands[2].reg;
10356 inst.instruction |= inst.operands[3].imm << 4;
10359 static void
10360 do_t_sxth (void)
10362 if (inst.instruction <= 0xffff && inst.size_req != 4
10363 && inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7
10364 && (!inst.operands[2].present || inst.operands[2].imm == 0))
10366 inst.instruction = THUMB_OP16 (inst.instruction);
10367 inst.instruction |= inst.operands[0].reg;
10368 inst.instruction |= inst.operands[1].reg << 3;
10370 else if (unified_syntax)
10372 if (inst.instruction <= 0xffff)
10373 inst.instruction = THUMB_OP32 (inst.instruction);
10374 inst.instruction |= inst.operands[0].reg << 8;
10375 inst.instruction |= inst.operands[1].reg;
10376 inst.instruction |= inst.operands[2].imm << 4;
10378 else
10380 constraint (inst.operands[2].present && inst.operands[2].imm != 0,
10381 _("Thumb encoding does not support rotation"));
10382 constraint (1, BAD_HIREG);
10386 static void
10387 do_t_swi (void)
10389 inst.reloc.type = BFD_RELOC_ARM_SWI;
10392 static void
10393 do_t_tb (void)
10395 int half;
10397 half = (inst.instruction & 0x10) != 0;
10398 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
10399 constraint (inst.operands[0].immisreg,
10400 _("instruction requires register index"));
10401 constraint (inst.operands[0].imm == 15,
10402 _("PC is not a valid index register"));
10403 constraint (!half && inst.operands[0].shifted,
10404 _("instruction does not allow shifted index"));
10405 inst.instruction |= (inst.operands[0].reg << 16) | inst.operands[0].imm;
10408 static void
10409 do_t_usat (void)
10411 inst.instruction |= inst.operands[0].reg << 8;
10412 inst.instruction |= inst.operands[1].imm;
10413 inst.instruction |= inst.operands[2].reg << 16;
10415 if (inst.operands[3].present)
10417 constraint (inst.reloc.exp.X_op != O_constant,
10418 _("expression too complex"));
10419 if (inst.reloc.exp.X_add_number != 0)
10421 if (inst.operands[3].shift_kind == SHIFT_ASR)
10422 inst.instruction |= 0x00200000; /* sh bit */
10424 inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
10425 inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
10427 inst.reloc.type = BFD_RELOC_UNUSED;
10431 static void
10432 do_t_usat16 (void)
10434 inst.instruction |= inst.operands[0].reg << 8;
10435 inst.instruction |= inst.operands[1].imm;
10436 inst.instruction |= inst.operands[2].reg << 16;
10439 /* Neon instruction encoder helpers. */
10441 /* Encodings for the different types for various Neon opcodes. */
10443 /* An "invalid" code for the following tables. */
10444 #define N_INV -1u
10446 struct neon_tab_entry
10448 unsigned integer;
10449 unsigned float_or_poly;
10450 unsigned scalar_or_imm;
10453 /* Map overloaded Neon opcodes to their respective encodings. */
10454 #define NEON_ENC_TAB \
10455 X(vabd, 0x0000700, 0x1200d00, N_INV), \
10456 X(vmax, 0x0000600, 0x0000f00, N_INV), \
10457 X(vmin, 0x0000610, 0x0200f00, N_INV), \
10458 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
10459 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
10460 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
10461 X(vadd, 0x0000800, 0x0000d00, N_INV), \
10462 X(vsub, 0x1000800, 0x0200d00, N_INV), \
10463 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
10464 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
10465 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
10466 /* Register variants of the following two instructions are encoded as
10467 vcge / vcgt with the operands reversed. */ \
10468 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
10469 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
10470 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
10471 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
10472 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
10473 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
10474 X(vmlal, 0x0800800, N_INV, 0x0800240), \
10475 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
10476 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
10477 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
10478 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
10479 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
10480 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
10481 X(vshl, 0x0000400, N_INV, 0x0800510), \
10482 X(vqshl, 0x0000410, N_INV, 0x0800710), \
10483 X(vand, 0x0000110, N_INV, 0x0800030), \
10484 X(vbic, 0x0100110, N_INV, 0x0800030), \
10485 X(veor, 0x1000110, N_INV, N_INV), \
10486 X(vorn, 0x0300110, N_INV, 0x0800010), \
10487 X(vorr, 0x0200110, N_INV, 0x0800010), \
10488 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
10489 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
10490 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
10491 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
10492 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
10493 X(vst1, 0x0000000, 0x0800000, N_INV), \
10494 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
10495 X(vst2, 0x0000100, 0x0800100, N_INV), \
10496 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
10497 X(vst3, 0x0000200, 0x0800200, N_INV), \
10498 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
10499 X(vst4, 0x0000300, 0x0800300, N_INV), \
10500 X(vmovn, 0x1b20200, N_INV, N_INV), \
10501 X(vtrn, 0x1b20080, N_INV, N_INV), \
10502 X(vqmovn, 0x1b20200, N_INV, N_INV), \
10503 X(vqmovun, 0x1b20240, N_INV, N_INV), \
10504 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
10505 X(vnmla, 0xe000a40, 0xe000b40, N_INV), \
10506 X(vnmls, 0xe100a40, 0xe100b40, N_INV), \
10507 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
10508 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
10509 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
10510 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV)
10512 enum neon_opc
10514 #define X(OPC,I,F,S) N_MNEM_##OPC
10515 NEON_ENC_TAB
10516 #undef X
10519 static const struct neon_tab_entry neon_enc_tab[] =
10521 #define X(OPC,I,F,S) { (I), (F), (S) }
10522 NEON_ENC_TAB
10523 #undef X
10526 #define NEON_ENC_INTEGER(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
10527 #define NEON_ENC_ARMREG(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
10528 #define NEON_ENC_POLY(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
10529 #define NEON_ENC_FLOAT(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
10530 #define NEON_ENC_SCALAR(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
10531 #define NEON_ENC_IMMED(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
10532 #define NEON_ENC_INTERLV(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
10533 #define NEON_ENC_LANE(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
10534 #define NEON_ENC_DUP(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
10535 #define NEON_ENC_SINGLE(X) \
10536 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
10537 #define NEON_ENC_DOUBLE(X) \
10538 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
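/* Usage sketch (illustrative): for an overloaded mnemonic such as vadd,
   inst.instruction initially holds N_MNEM_vadd; NEON_ENC_INTEGER then
   yields the integer base encoding (0x0000800 from the table above) and
   NEON_ENC_FLOAT the floating-point one (0x0000d00), chosen according to
   the element type that was parsed.  */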
10540 /* Define shapes for instruction operands. The following mnemonic characters
10541 are used in this table:
10543 F - VFP S<n> register
10544 D - Neon D<n> register
10545 Q - Neon Q<n> register
10546 I - Immediate
10547 S - Scalar
10548 R - ARM register
10549 L - D<n> register list
10551 This table is used to generate various data:
10552 - enumerations of the form NS_DDR to be used as arguments to
10553 neon_select_shape.
10554 - a table classifying shapes into single, double, quad, mixed.
10555 - a table used to drive neon_select_shape. */
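/* For example (illustrative), the X(3, (D, D, I), DOUBLE) entry below
   expands via S3 to the enumerator NS_DDI, is classified as SC_DOUBLE in
   neon_shape_class, and yields { 3, { SE_D, SE_D, SE_I } } in
   neon_shape_tab; it describes a two-D-register-plus-immediate operand
   list.  */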
10557 #define NEON_SHAPE_DEF \
10558 X(3, (D, D, D), DOUBLE), \
10559 X(3, (Q, Q, Q), QUAD), \
10560 X(3, (D, D, I), DOUBLE), \
10561 X(3, (Q, Q, I), QUAD), \
10562 X(3, (D, D, S), DOUBLE), \
10563 X(3, (Q, Q, S), QUAD), \
10564 X(2, (D, D), DOUBLE), \
10565 X(2, (Q, Q), QUAD), \
10566 X(2, (D, S), DOUBLE), \
10567 X(2, (Q, S), QUAD), \
10568 X(2, (D, R), DOUBLE), \
10569 X(2, (Q, R), QUAD), \
10570 X(2, (D, I), DOUBLE), \
10571 X(2, (Q, I), QUAD), \
10572 X(3, (D, L, D), DOUBLE), \
10573 X(2, (D, Q), MIXED), \
10574 X(2, (Q, D), MIXED), \
10575 X(3, (D, Q, I), MIXED), \
10576 X(3, (Q, D, I), MIXED), \
10577 X(3, (Q, D, D), MIXED), \
10578 X(3, (D, Q, Q), MIXED), \
10579 X(3, (Q, Q, D), MIXED), \
10580 X(3, (Q, D, S), MIXED), \
10581 X(3, (D, Q, S), MIXED), \
10582 X(4, (D, D, D, I), DOUBLE), \
10583 X(4, (Q, Q, Q, I), QUAD), \
10584 X(2, (F, F), SINGLE), \
10585 X(3, (F, F, F), SINGLE), \
10586 X(2, (F, I), SINGLE), \
10587 X(2, (F, D), MIXED), \
10588 X(2, (D, F), MIXED), \
10589 X(3, (F, F, I), MIXED), \
10590 X(4, (R, R, F, F), SINGLE), \
10591 X(4, (F, F, R, R), SINGLE), \
10592 X(3, (D, R, R), DOUBLE), \
10593 X(3, (R, R, D), DOUBLE), \
10594 X(2, (S, R), SINGLE), \
10595 X(2, (R, S), SINGLE), \
10596 X(2, (F, R), SINGLE), \
10597 X(2, (R, F), SINGLE)
10599 #define S2(A,B) NS_##A##B
10600 #define S3(A,B,C) NS_##A##B##C
10601 #define S4(A,B,C,D) NS_##A##B##C##D
10603 #define X(N, L, C) S##N L
10605 enum neon_shape
10607 NEON_SHAPE_DEF,
10608 NS_NULL
10611 #undef X
10612 #undef S2
10613 #undef S3
10614 #undef S4
10616 enum neon_shape_class
10618 SC_SINGLE,
10619 SC_DOUBLE,
10620 SC_QUAD,
10621 SC_MIXED
10624 #define X(N, L, C) SC_##C
10626 static enum neon_shape_class neon_shape_class[] =
10628 NEON_SHAPE_DEF
10631 #undef X
10633 enum neon_shape_el
10635 SE_F,
10636 SE_D,
10637 SE_Q,
10638 SE_I,
10639 SE_S,
10640 SE_R,
10641 SE_L
10644 /* Register widths of above. */
10645 static unsigned neon_shape_el_size[] =
10647 32,
10648 64,
10649 128,
10650 0,
10651 32,
10652 32,
10653 0
10656 struct neon_shape_info
10658 unsigned els;
10659 enum neon_shape_el el[NEON_MAX_TYPE_ELS];
10662 #define S2(A,B) { SE_##A, SE_##B }
10663 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
10664 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
10666 #define X(N, L, C) { N, S##N L }
10668 static struct neon_shape_info neon_shape_tab[] =
10670 NEON_SHAPE_DEF
10673 #undef X
10674 #undef S2
10675 #undef S3
10676 #undef S4
10678 /* Bit masks used in type checking given instructions.
10679 'N_EQK' means the type must be the same as (or based on in some way) the key
10680 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
10681 set, various other bits can be set as well in order to modify the meaning of
10682 the type constraint. */
10684 enum neon_type_mask
10686 N_S8 = 0x0000001,
10687 N_S16 = 0x0000002,
10688 N_S32 = 0x0000004,
10689 N_S64 = 0x0000008,
10690 N_U8 = 0x0000010,
10691 N_U16 = 0x0000020,
10692 N_U32 = 0x0000040,
10693 N_U64 = 0x0000080,
10694 N_I8 = 0x0000100,
10695 N_I16 = 0x0000200,
10696 N_I32 = 0x0000400,
10697 N_I64 = 0x0000800,
10698 N_8 = 0x0001000,
10699 N_16 = 0x0002000,
10700 N_32 = 0x0004000,
10701 N_64 = 0x0008000,
10702 N_P8 = 0x0010000,
10703 N_P16 = 0x0020000,
10704 N_F16 = 0x0040000,
10705 N_F32 = 0x0080000,
10706 N_F64 = 0x0100000,
10707 N_KEY = 0x1000000, /* key element (main type specifier). */
10708 N_EQK = 0x2000000, /* given operand has the same type & size as the key. */
10709 N_VFP = 0x4000000, /* VFP mode: operand size must match register width. */
10710 N_DBL = 0x0000001, /* if N_EQK, this operand is twice the size. */
10711 N_HLF = 0x0000002, /* if N_EQK, this operand is half the size. */
10712 N_SGN = 0x0000004, /* if N_EQK, this operand is forced to be signed. */
10713 N_UNS = 0x0000008, /* if N_EQK, this operand is forced to be unsigned. */
10714 N_INT = 0x0000010, /* if N_EQK, this operand is forced to be integer. */
10715 N_FLT = 0x0000020, /* if N_EQK, this operand is forced to be float. */
10716 N_SIZ = 0x0000040, /* if N_EQK, this operand is forced to be size-only. */
10717 N_UTYP = 0,
10718 N_MAX_NONSPECIAL = N_F64
10721 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
10723 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
10724 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
10725 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
10726 #define N_SUF_32 (N_SU_32 | N_F32)
10727 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
10728 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F32)
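/* For instance, a call such as neon_check_type (3, rs, N_EQK, N_EQK,
   N_SU_32 | N_KEY) (used by several encoders below) marks operand 2 as the
   key and accepts .s8/.s16/.s32/.u8/.u16/.u32 there, while requiring
   operands 0 and 1 to have the same type and size as the key.  */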
10730 /* Pass this as the first type argument to neon_check_type to ignore types
10731 altogether. */
10732 #define N_IGNORE_TYPE (N_KEY | N_EQK)
10734 /* Select a "shape" for the current instruction (describing register types or
10735 sizes) from a list of alternatives. Return NS_NULL if the current instruction
10736 doesn't fit. For non-polymorphic shapes, checking is usually done as a
10737 function of operand parsing, so this function doesn't need to be called.
10738 Shapes should be listed in order of decreasing length. */
10740 static enum neon_shape
10741 neon_select_shape (enum neon_shape shape, ...)
10743 va_list ap;
10744 enum neon_shape first_shape = shape;
10746 /* Fix missing optional operands. FIXME: we don't know at this point how
10747 many arguments we should have, so this makes the assumption that we have
10748 > 1. This is true of all current Neon opcodes, I think, but may not be
10749 true in the future. */
10750 if (!inst.operands[1].present)
10751 inst.operands[1] = inst.operands[0];
10753 va_start (ap, shape);
10755 for (; shape != NS_NULL; shape = va_arg (ap, int))
10757 unsigned j;
10758 int matches = 1;
10760 for (j = 0; j < neon_shape_tab[shape].els; j++)
10762 if (!inst.operands[j].present)
10764 matches = 0;
10765 break;
10768 switch (neon_shape_tab[shape].el[j])
10770 case SE_F:
10771 if (!(inst.operands[j].isreg
10772 && inst.operands[j].isvec
10773 && inst.operands[j].issingle
10774 && !inst.operands[j].isquad))
10775 matches = 0;
10776 break;
10778 case SE_D:
10779 if (!(inst.operands[j].isreg
10780 && inst.operands[j].isvec
10781 && !inst.operands[j].isquad
10782 && !inst.operands[j].issingle))
10783 matches = 0;
10784 break;
10786 case SE_R:
10787 if (!(inst.operands[j].isreg
10788 && !inst.operands[j].isvec))
10789 matches = 0;
10790 break;
10792 case SE_Q:
10793 if (!(inst.operands[j].isreg
10794 && inst.operands[j].isvec
10795 && inst.operands[j].isquad
10796 && !inst.operands[j].issingle))
10797 matches = 0;
10798 break;
10800 case SE_I:
10801 if (!(!inst.operands[j].isreg
10802 && !inst.operands[j].isscalar))
10803 matches = 0;
10804 break;
10806 case SE_S:
10807 if (!(!inst.operands[j].isreg
10808 && inst.operands[j].isscalar))
10809 matches = 0;
10810 break;
10812 case SE_L:
10813 break;
10816 if (matches)
10817 break;
10820 va_end (ap);
10822 if (shape == NS_NULL && first_shape != NS_NULL)
10823 first_error (_("invalid instruction shape"));
10825 return shape;
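/* E.g. neon_select_shape (NS_DDD, NS_QQQ, NS_NULL) yields NS_QQQ for
   "vadd.i32 q0, q1, q2", NS_DDD for the D-register form, and NS_NULL
   (with an error reported) if the operands fit neither shape.  */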
10828 /* True if SHAPE is predominantly a quadword operation (most of the time, this
10829 means the Q bit should be set). */
10831 static int
10832 neon_quad (enum neon_shape shape)
10834 return neon_shape_class[shape] == SC_QUAD;
10837 static void
10838 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
10839 unsigned *g_size)
10841 /* Allow modification to be made to types which are constrained to be
10842 based on the key element, based on bits set alongside N_EQK. */
10843 if ((typebits & N_EQK) != 0)
10845 if ((typebits & N_HLF) != 0)
10846 *g_size /= 2;
10847 else if ((typebits & N_DBL) != 0)
10848 *g_size *= 2;
10849 if ((typebits & N_SGN) != 0)
10850 *g_type = NT_signed;
10851 else if ((typebits & N_UNS) != 0)
10852 *g_type = NT_unsigned;
10853 else if ((typebits & N_INT) != 0)
10854 *g_type = NT_integer;
10855 else if ((typebits & N_FLT) != 0)
10856 *g_type = NT_float;
10857 else if ((typebits & N_SIZ) != 0)
10858 *g_type = NT_untyped;
10862 /* Return a copy of KEY promoted according to the bits set in THISARG.
10863    KEY should be the "key" operand type, i.e. the single type specified in a
10864    Neon instruction when it is the only one given. */
10866 static struct neon_type_el
10867 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
10869 struct neon_type_el dest = *key;
10871 assert ((thisarg & N_EQK) != 0);
10873 neon_modify_type_size (thisarg, &dest.type, &dest.size);
10875 return dest;
10878 /* Convert Neon type and size into compact bitmask representation. */
10880 static enum neon_type_mask
10881 type_chk_of_el_type (enum neon_el_type type, unsigned size)
10883 switch (type)
10885 case NT_untyped:
10886 switch (size)
10888 case 8: return N_8;
10889 case 16: return N_16;
10890 case 32: return N_32;
10891 case 64: return N_64;
10892 default: ;
10894 break;
10896 case NT_integer:
10897 switch (size)
10899 case 8: return N_I8;
10900 case 16: return N_I16;
10901 case 32: return N_I32;
10902 case 64: return N_I64;
10903 default: ;
10905 break;
10907 case NT_float:
10908 switch (size)
10910 case 16: return N_F16;
10911 case 32: return N_F32;
10912 case 64: return N_F64;
10913 default: ;
10915 break;
10917 case NT_poly:
10918 switch (size)
10920 case 8: return N_P8;
10921 case 16: return N_P16;
10922 default: ;
10924 break;
10926 case NT_signed:
10927 switch (size)
10929 case 8: return N_S8;
10930 case 16: return N_S16;
10931 case 32: return N_S32;
10932 case 64: return N_S64;
10933 default: ;
10935 break;
10937 case NT_unsigned:
10938 switch (size)
10940 case 8: return N_U8;
10941 case 16: return N_U16;
10942 case 32: return N_U32;
10943 case 64: return N_U64;
10944 default: ;
10946 break;
10948 default: ;
10951 return N_UTYP;
10954 /* Convert compact Neon bitmask type representation to a type and size. Only
10955 handles the case where a single bit is set in the mask. */
10957 static int
10958 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
10959 enum neon_type_mask mask)
10961 if ((mask & N_EQK) != 0)
10962 return FAIL;
10964 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
10965 *size = 8;
10966 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_P16)) != 0)
10967 *size = 16;
10968 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
10969 *size = 32;
10970 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64)) != 0)
10971 *size = 64;
10972 else
10973 return FAIL;
10975 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
10976 *type = NT_signed;
10977 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
10978 *type = NT_unsigned;
10979 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
10980 *type = NT_integer;
10981 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
10982 *type = NT_untyped;
10983 else if ((mask & (N_P8 | N_P16)) != 0)
10984 *type = NT_poly;
10985 else if ((mask & (N_F32 | N_F64)) != 0)
10986 *type = NT_float;
10987 else
10988 return FAIL;
10990 return SUCCESS;
10993 /* Modify a bitmask of allowed types. This is only needed for type
10994 relaxation. */
10996 static unsigned
10997 modify_types_allowed (unsigned allowed, unsigned mods)
10999 unsigned size;
11000 enum neon_el_type type;
11001 unsigned destmask;
11002 int i;
11004 destmask = 0;
11006 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
11008 if (el_type_of_type_chk (&type, &size, allowed & i) == SUCCESS)
11010 neon_modify_type_size (mods, &type, &size);
11011 destmask |= type_chk_of_el_type (type, size);
11015 return destmask;
11018 /* Check type and return type classification.
11019 The manual states (paraphrase): If one datatype is given, it indicates the
11020 type given in:
11021 - the second operand, if there is one
11022 - the operand, if there is no second operand
11023 - the result, if there are no operands.
11024 This isn't quite good enough though, so we use a concept of a "key" datatype
11025 which is set on a per-instruction basis, which is the one which matters when
11026 only one data type is written.
11027 Note: this function has side-effects (e.g. filling in missing operands). All
11028 Neon instructions should call it before performing bit encoding. */
11030 static struct neon_type_el
11031 neon_check_type (unsigned els, enum neon_shape ns, ...)
11033 va_list ap;
11034 unsigned i, pass, key_el = 0;
11035 unsigned types[NEON_MAX_TYPE_ELS];
11036 enum neon_el_type k_type = NT_invtype;
11037 unsigned k_size = -1u;
11038 struct neon_type_el badtype = {NT_invtype, -1};
11039 unsigned key_allowed = 0;
11041 /* The optional register in a Neon instruction is always operand 1.
11042 Fill in the missing operand here, if it was omitted. */
11043 if (els > 1 && !inst.operands[1].present)
11044 inst.operands[1] = inst.operands[0];
11046 /* Suck up all the varargs. */
11047 va_start (ap, ns);
11048 for (i = 0; i < els; i++)
11050 unsigned thisarg = va_arg (ap, unsigned);
11051 if (thisarg == N_IGNORE_TYPE)
11053 va_end (ap);
11054 return badtype;
11056 types[i] = thisarg;
11057 if ((thisarg & N_KEY) != 0)
11058 key_el = i;
11060 va_end (ap);
11062 if (inst.vectype.elems > 0)
11063 for (i = 0; i < els; i++)
11064 if (inst.operands[i].vectype.type != NT_invtype)
11066 first_error (_("types specified in both the mnemonic and operands"));
11067 return badtype;
11070 /* Duplicate inst.vectype elements here as necessary.
11071 FIXME: No idea if this is exactly the same as the ARM assembler,
11072 particularly when an insn takes one register and one non-register
11073 operand. */
11074 if (inst.vectype.elems == 1 && els > 1)
11076 unsigned j;
11077 inst.vectype.elems = els;
11078 inst.vectype.el[key_el] = inst.vectype.el[0];
11079 for (j = 0; j < els; j++)
11080 if (j != key_el)
11081 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
11082 types[j]);
11084 else if (inst.vectype.elems == 0 && els > 0)
11086 unsigned j;
11087 /* No types were given after the mnemonic, so look for types specified
11088 after each operand. We allow some flexibility here; as long as the
11089 "key" operand has a type, we can infer the others. */
11090 for (j = 0; j < els; j++)
11091 if (inst.operands[j].vectype.type != NT_invtype)
11092 inst.vectype.el[j] = inst.operands[j].vectype;
11094 if (inst.operands[key_el].vectype.type != NT_invtype)
11096 for (j = 0; j < els; j++)
11097 if (inst.operands[j].vectype.type == NT_invtype)
11098 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
11099 types[j]);
11101 else
11103 first_error (_("operand types can't be inferred"));
11104 return badtype;
11107 else if (inst.vectype.elems != els)
11109 first_error (_("type specifier has the wrong number of parts"));
11110 return badtype;
11113 for (pass = 0; pass < 2; pass++)
11115 for (i = 0; i < els; i++)
11117 unsigned thisarg = types[i];
11118 unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
11119 ? modify_types_allowed (key_allowed, thisarg) : thisarg;
11120 enum neon_el_type g_type = inst.vectype.el[i].type;
11121 unsigned g_size = inst.vectype.el[i].size;
11123 /* Decay more-specific signed & unsigned types to sign-insensitive
11124 integer types if sign-specific variants are unavailable. */
11125 if ((g_type == NT_signed || g_type == NT_unsigned)
11126 && (types_allowed & N_SU_ALL) == 0)
11127 g_type = NT_integer;
11129 /* If only untyped args are allowed, decay any more specific types to
11130 them. Some instructions only care about signs for some element
11131 sizes, so handle that properly. */
11132 if ((g_size == 8 && (types_allowed & N_8) != 0)
11133 || (g_size == 16 && (types_allowed & N_16) != 0)
11134 || (g_size == 32 && (types_allowed & N_32) != 0)
11135 || (g_size == 64 && (types_allowed & N_64) != 0))
11136 g_type = NT_untyped;
11138 if (pass == 0)
11140 if ((thisarg & N_KEY) != 0)
11142 k_type = g_type;
11143 k_size = g_size;
11144 key_allowed = thisarg & ~N_KEY;
11147 else
11149 if ((thisarg & N_VFP) != 0)
11151 enum neon_shape_el regshape = neon_shape_tab[ns].el[i];
11152 unsigned regwidth = neon_shape_el_size[regshape], match;
11154 /* In VFP mode, operands must match register widths. If we
11155 have a key operand, use its width, else use the width of
11156 the current operand. */
11157 if (k_size != -1u)
11158 match = k_size;
11159 else
11160 match = g_size;
11162 if (regwidth != match)
11164 first_error (_("operand size must match register width"));
11165 return badtype;
11169 if ((thisarg & N_EQK) == 0)
11171 unsigned given_type = type_chk_of_el_type (g_type, g_size);
11173 if ((given_type & types_allowed) == 0)
11175 first_error (_("bad type in Neon instruction"));
11176 return badtype;
11179 else
11181 enum neon_el_type mod_k_type = k_type;
11182 unsigned mod_k_size = k_size;
11183 neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
11184 if (g_type != mod_k_type || g_size != mod_k_size)
11186 first_error (_("inconsistent types in Neon instruction"));
11187 return badtype;
11194 return inst.vectype.el[key_el];
11197 /* Neon-style VFP instruction forwarding. */
11199 /* Thumb VFP instructions have 0xE in the condition field. */
11201 static void
11202 do_vfp_cond_or_thumb (void)
11204 if (thumb_mode)
11205 inst.instruction |= 0xe0000000;
11206 else
11207 inst.instruction |= inst.cond << 28;
11210 /* Look up and encode a simple mnemonic, for use as a helper function for the
11211 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
11212 etc. It is assumed that operand parsing has already been done, and that the
11213 operands are in the form expected by the given opcode (this isn't necessarily
11214 the same as the form in which they were parsed, hence some massaging must
11215 take place before this function is called).
11216 Checks current arch version against that in the looked-up opcode. */
11218 static void
11219 do_vfp_nsyn_opcode (const char *opname)
11221 const struct asm_opcode *opcode;
11223 opcode = hash_find (arm_ops_hsh, opname);
11225 if (!opcode)
11226 abort ();
11228 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
11229 thumb_mode ? *opcode->tvariant : *opcode->avariant),
11230 _(BAD_FPU));
11232 if (thumb_mode)
11234 inst.instruction = opcode->tvalue;
11235 opcode->tencode ();
11237 else
11239 inst.instruction = (inst.cond << 28) | opcode->avalue;
11240 opcode->aencode ();
11244 static void
11245 do_vfp_nsyn_add_sub (enum neon_shape rs)
11247 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
11249 if (rs == NS_FFF)
11251 if (is_add)
11252 do_vfp_nsyn_opcode ("fadds");
11253 else
11254 do_vfp_nsyn_opcode ("fsubs");
11256 else
11258 if (is_add)
11259 do_vfp_nsyn_opcode ("faddd");
11260 else
11261 do_vfp_nsyn_opcode ("fsubd");
11265 /* Check operand types to see if this is a VFP instruction, and if so call
11266 PFN (). */
11268 static int
11269 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
11271 enum neon_shape rs;
11272 struct neon_type_el et;
11274 switch (args)
11276 case 2:
11277 rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
11278 et = neon_check_type (2, rs,
11279 N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
11280 break;
11282 case 3:
11283 rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
11284 et = neon_check_type (3, rs,
11285 N_EQK | N_VFP, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
11286 break;
11288 default:
11289 abort ();
11292 if (et.type != NT_invtype)
11294 pfn (rs);
11295 return SUCCESS;
11297 else
11298 inst.error = NULL;
11300 return FAIL;
11303 static void
11304 do_vfp_nsyn_mla_mls (enum neon_shape rs)
11306 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
11308 if (rs == NS_FFF)
11310 if (is_mla)
11311 do_vfp_nsyn_opcode ("fmacs");
11312 else
11313 do_vfp_nsyn_opcode ("fmscs");
11315 else
11317 if (is_mla)
11318 do_vfp_nsyn_opcode ("fmacd");
11319 else
11320 do_vfp_nsyn_opcode ("fmscd");
11324 static void
11325 do_vfp_nsyn_mul (enum neon_shape rs)
11327 if (rs == NS_FFF)
11328 do_vfp_nsyn_opcode ("fmuls");
11329 else
11330 do_vfp_nsyn_opcode ("fmuld");
11333 static void
11334 do_vfp_nsyn_abs_neg (enum neon_shape rs)
11336 int is_neg = (inst.instruction & 0x80) != 0;
11337 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_VFP | N_KEY);
11339 if (rs == NS_FF)
11341 if (is_neg)
11342 do_vfp_nsyn_opcode ("fnegs");
11343 else
11344 do_vfp_nsyn_opcode ("fabss");
11346 else
11348 if (is_neg)
11349 do_vfp_nsyn_opcode ("fnegd");
11350 else
11351 do_vfp_nsyn_opcode ("fabsd");
11355 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
11356 insns belong to Neon, and are handled elsewhere. */
11358 static void
11359 do_vfp_nsyn_ldm_stm (int is_dbmode)
11361 int is_ldm = (inst.instruction & (1 << 20)) != 0;
11362 if (is_ldm)
11364 if (is_dbmode)
11365 do_vfp_nsyn_opcode ("fldmdbs");
11366 else
11367 do_vfp_nsyn_opcode ("fldmias");
11369 else
11371 if (is_dbmode)
11372 do_vfp_nsyn_opcode ("fstmdbs");
11373 else
11374 do_vfp_nsyn_opcode ("fstmias");
11378 static void
11379 do_vfp_nsyn_sqrt (void)
11381 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
11382 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
11384 if (rs == NS_FF)
11385 do_vfp_nsyn_opcode ("fsqrts");
11386 else
11387 do_vfp_nsyn_opcode ("fsqrtd");
11390 static void
11391 do_vfp_nsyn_div (void)
11393 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
11394 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
11395 N_F32 | N_F64 | N_KEY | N_VFP);
11397 if (rs == NS_FFF)
11398 do_vfp_nsyn_opcode ("fdivs");
11399 else
11400 do_vfp_nsyn_opcode ("fdivd");
11403 static void
11404 do_vfp_nsyn_nmul (void)
11406 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
11407 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
11408 N_F32 | N_F64 | N_KEY | N_VFP);
11410 if (rs == NS_FFF)
11412 inst.instruction = NEON_ENC_SINGLE (inst.instruction);
11413 do_vfp_sp_dyadic ();
11415 else
11417 inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
11418 do_vfp_dp_rd_rn_rm ();
11420 do_vfp_cond_or_thumb ();
11423 static void
11424 do_vfp_nsyn_cmp (void)
11426 if (inst.operands[1].isreg)
11428 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
11429 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
11431 if (rs == NS_FF)
11433 inst.instruction = NEON_ENC_SINGLE (inst.instruction);
11434 do_vfp_sp_monadic ();
11436 else
11438 inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
11439 do_vfp_dp_rd_rm ();
11442 else
11444 enum neon_shape rs = neon_select_shape (NS_FI, NS_DI, NS_NULL);
11445 neon_check_type (2, rs, N_F32 | N_F64 | N_KEY | N_VFP, N_EQK);
11447 switch (inst.instruction & 0x0fffffff)
11449 case N_MNEM_vcmp:
11450 inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
11451 break;
11452 case N_MNEM_vcmpe:
11453 inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
11454 break;
11455 default:
11456 abort ();
11459 if (rs == NS_FI)
11461 inst.instruction = NEON_ENC_SINGLE (inst.instruction);
11462 do_vfp_sp_compare_z ();
11464 else
11466 inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
11467 do_vfp_dp_rd ();
11470 do_vfp_cond_or_thumb ();
11473 static void
11474 nsyn_insert_sp (void)
11476 inst.operands[1] = inst.operands[0];
11477 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
11478 inst.operands[0].reg = 13;
11479 inst.operands[0].isreg = 1;
11480 inst.operands[0].writeback = 1;
11481 inst.operands[0].present = 1;
11484 static void
11485 do_vfp_nsyn_push (void)
11487 nsyn_insert_sp ();
11488 if (inst.operands[1].issingle)
11489 do_vfp_nsyn_opcode ("fstmdbs");
11490 else
11491 do_vfp_nsyn_opcode ("fstmdbd");
11494 static void
11495 do_vfp_nsyn_pop (void)
11497 nsyn_insert_sp ();
11498 if (inst.operands[1].issingle)
11499 do_vfp_nsyn_opcode ("fldmias");
11500 else
11501 do_vfp_nsyn_opcode ("fldmiad");
11504 /* Fix up Neon data-processing instructions, ORing in the correct bits for
11505 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
11507 static unsigned
11508 neon_dp_fixup (unsigned i)
11510 if (thumb_mode)
11512 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
11513 if (i & (1 << 24))
11514 i |= 1 << 28;
11516 i &= ~(1 << 24);
11518 i |= 0xef000000;
11520 else
11521 i |= 0xf2000000;
11523 return i;
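/* E.g. an ARM-mode Neon data-processing instruction ends up with 0xf2 (or
   0xf3 when the U bit is set) in its top byte; the Thumb encoding of the
   same instruction gets 0xef or 0xff, with U moved from bit 24 to bit 28.  */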
11526 /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
11527 (0, 1, 2, 3). */
11529 static unsigned
11530 neon_logbits (unsigned x)
11532 return ffs (x) - 4;
11535 #define LOW4(R) ((R) & 0xf)
11536 #define HI1(R) (((R) >> 4) & 1)
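/* E.g. neon_logbits (32) is 2, since ffs (32) == 6; and for a register such
   as d17 (reg == 17), LOW4 gives 1 and HI1 gives 1, matching the split
   low-four-bits/high-bit register fields in the encodings below.  */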
11538 /* Encode insns with bit pattern:
11540 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
11541 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
11543 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
11544 different meaning for some instruction. */
11546 static void
11547 neon_three_same (int isquad, int ubit, int size)
11549 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11550 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11551 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
11552 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
11553 inst.instruction |= LOW4 (inst.operands[2].reg);
11554 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
11555 inst.instruction |= (isquad != 0) << 6;
11556 inst.instruction |= (ubit != 0) << 24;
11557 if (size != -1)
11558 inst.instruction |= neon_logbits (size) << 20;
11560 inst.instruction = neon_dp_fixup (inst.instruction);
11563 /* Encode instructions of the form:
11565 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
11566 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
11568 Don't write size if SIZE == -1. */
11570 static void
11571 neon_two_same (int qbit, int ubit, int size)
11573 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11574 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11575 inst.instruction |= LOW4 (inst.operands[1].reg);
11576 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11577 inst.instruction |= (qbit != 0) << 6;
11578 inst.instruction |= (ubit != 0) << 24;
11580 if (size != -1)
11581 inst.instruction |= neon_logbits (size) << 18;
11583 inst.instruction = neon_dp_fixup (inst.instruction);
11586 /* Neon instruction encoders, in approximate order of appearance. */
11588 static void
11589 do_neon_dyadic_i_su (void)
11591 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11592 struct neon_type_el et = neon_check_type (3, rs,
11593 N_EQK, N_EQK, N_SU_32 | N_KEY);
11594 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
11597 static void
11598 do_neon_dyadic_i64_su (void)
11600 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11601 struct neon_type_el et = neon_check_type (3, rs,
11602 N_EQK, N_EQK, N_SU_ALL | N_KEY);
11603 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
11606 static void
11607 neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
11608 unsigned immbits)
11610 unsigned size = et.size >> 3;
11611 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11612 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11613 inst.instruction |= LOW4 (inst.operands[1].reg);
11614 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11615 inst.instruction |= (isquad != 0) << 6;
11616 inst.instruction |= immbits << 16;
11617 inst.instruction |= (size >> 3) << 7;
11618 inst.instruction |= (size & 0x7) << 19;
11619 if (write_ubit)
11620 inst.instruction |= (uval != 0) << 24;
11622 inst.instruction = neon_dp_fixup (inst.instruction);
11625 static void
11626 do_neon_shl_imm (void)
11628 if (!inst.operands[2].isreg)
11630 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
11631 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
11632 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11633 neon_imm_shift (FALSE, 0, neon_quad (rs), et, inst.operands[2].imm);
11635 else
11637 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11638 struct neon_type_el et = neon_check_type (3, rs,
11639 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
11640 unsigned int tmp;
11642 /* VSHL/VQSHL 3-register variants have syntax such as:
11643 vshl.xx Dd, Dm, Dn
11644 whereas other 3-register operations encoded by neon_three_same have
11645 syntax like:
11646 vadd.xx Dd, Dn, Dm
11647 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
11648 here. */
11649 tmp = inst.operands[2].reg;
11650 inst.operands[2].reg = inst.operands[1].reg;
11651 inst.operands[1].reg = tmp;
11652 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11653 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
11657 static void
11658 do_neon_qshl_imm (void)
11660 if (!inst.operands[2].isreg)
11662 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
11663 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
11665 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11666 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
11667 inst.operands[2].imm);
11669 else
11671 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11672 struct neon_type_el et = neon_check_type (3, rs,
11673 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
11674 unsigned int tmp;
11676 /* See note in do_neon_shl_imm. */
11677 tmp = inst.operands[2].reg;
11678 inst.operands[2].reg = inst.operands[1].reg;
11679 inst.operands[1].reg = tmp;
11680 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11681 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
11685 static void
11686 do_neon_rshl (void)
11688 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11689 struct neon_type_el et = neon_check_type (3, rs,
11690 N_EQK, N_EQK, N_SU_ALL | N_KEY);
11691 unsigned int tmp;
11693 tmp = inst.operands[2].reg;
11694 inst.operands[2].reg = inst.operands[1].reg;
11695 inst.operands[1].reg = tmp;
11696 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
11699 static int
11700 neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
11702 /* Handle .I8 pseudo-instructions. */
11703 if (size == 8)
11705 /* Unfortunately, this will make everything apart from zero out-of-range.
11706 FIXME: is this the intended semantics? There doesn't seem much point in
11707 accepting .I8 if so. */
11708 immediate |= immediate << 8;
11709 size = 16;
11712 if (size >= 32)
11714 if (immediate == (immediate & 0x000000ff))
11716 *immbits = immediate;
11717 return 0x1;
11719 else if (immediate == (immediate & 0x0000ff00))
11721 *immbits = immediate >> 8;
11722 return 0x3;
11724 else if (immediate == (immediate & 0x00ff0000))
11726 *immbits = immediate >> 16;
11727 return 0x5;
11729 else if (immediate == (immediate & 0xff000000))
11731 *immbits = immediate >> 24;
11732 return 0x7;
11734 if ((immediate & 0xffff) != (immediate >> 16))
11735 goto bad_immediate;
11736 immediate &= 0xffff;
11739 if (immediate == (immediate & 0x000000ff))
11741 *immbits = immediate;
11742 return 0x9;
11744 else if (immediate == (immediate & 0x0000ff00))
11746 *immbits = immediate >> 8;
11747 return 0xb;
11750 bad_immediate:
11751 first_error (_("immediate value out of range"));
11752 return FAIL;
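/* For example, "vorr.i32 d0, #0x00ab0000" reaches the third case above:
   the function returns CMODE 0x5 and sets *IMMBITS to 0xab.  */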
11755 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
11756 A, B, C, D. */
11758 static int
11759 neon_bits_same_in_bytes (unsigned imm)
11761 return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
11762 && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
11763 && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
11764 && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
11767 /* For immediate of above form, return 0bABCD. */
11769 static unsigned
11770 neon_squash_bits (unsigned imm)
11772 return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
11773 | ((imm & 0x01000000) >> 21);
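/* E.g. 0x00ff00ff satisfies neon_bits_same_in_bytes and squashes to 0b0101,
   the per-byte pattern used for the 64-bit immediate form below.  */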
11776 /* Compress quarter-float representation to 0b...000 abcdefgh. */
11778 static unsigned
11779 neon_qfloat_bits (unsigned imm)
11781 return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
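/* As an illustration, the single-precision pattern for 1.0 (0x3f800000)
   compresses to 0x70 here, i.e. the abcdefgh immediate used by VMOV.F32.  */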
11784 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
11785 the instruction. *OP is passed as the initial value of the op field, and
11786 may be set to a different value depending on the constant (i.e.
11787 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
11788 MVN). If the immediate looks like a repeated pattern then also
11789 try smaller element sizes. */
11791 static int
11792 neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
11793 unsigned *immbits, int *op, int size,
11794 enum neon_el_type type)
11796 /* Only permit float immediates (including 0.0/-0.0) if the operand type is
11797 float. */
11798 if (type == NT_float && !float_p)
11799 return FAIL;
11801 if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
11803 if (size != 32 || *op == 1)
11804 return FAIL;
11805 *immbits = neon_qfloat_bits (immlo);
11806 return 0xf;
11809 if (size == 64)
11811 if (neon_bits_same_in_bytes (immhi)
11812 && neon_bits_same_in_bytes (immlo))
11814 if (*op == 1)
11815 return FAIL;
11816 *immbits = (neon_squash_bits (immhi) << 4)
11817 | neon_squash_bits (immlo);
11818 *op = 1;
11819 return 0xe;
11822 if (immhi != immlo)
11823 return FAIL;
11826 if (size >= 32)
11828 if (immlo == (immlo & 0x000000ff))
11830 *immbits = immlo;
11831 return 0x0;
11833 else if (immlo == (immlo & 0x0000ff00))
11835 *immbits = immlo >> 8;
11836 return 0x2;
11838 else if (immlo == (immlo & 0x00ff0000))
11840 *immbits = immlo >> 16;
11841 return 0x4;
11843 else if (immlo == (immlo & 0xff000000))
11845 *immbits = immlo >> 24;
11846 return 0x6;
11848 else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
11850 *immbits = (immlo >> 8) & 0xff;
11851 return 0xc;
11853 else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
11855 *immbits = (immlo >> 16) & 0xff;
11856 return 0xd;
11859 if ((immlo & 0xffff) != (immlo >> 16))
11860 return FAIL;
11861 immlo &= 0xffff;
11864 if (size >= 16)
11866 if (immlo == (immlo & 0x000000ff))
11868 *immbits = immlo;
11869 return 0x8;
11871 else if (immlo == (immlo & 0x0000ff00))
11873 *immbits = immlo >> 8;
11874 return 0xa;
11877 if ((immlo & 0xff) != (immlo >> 8))
11878 return FAIL;
11879 immlo &= 0xff;
11882 if (immlo == (immlo & 0x000000ff))
11884 /* Don't allow MVN with 8-bit immediate. */
11885 if (*op == 1)
11886 return FAIL;
11887 *immbits = immlo;
11888 return 0xe;
11891 return FAIL;
11894 /* Write immediate bits [7:0] to the following locations:
11896 |28/24|23 19|18 16|15 4|3 0|
11897 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
11899 This function is used by VMOV/VMVN/VORR/VBIC. */
11901 static void
11902 neon_write_immbits (unsigned immbits)
11904 inst.instruction |= immbits & 0xf;
11905 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
11906 inst.instruction |= ((immbits >> 7) & 0x1) << 24;
11909 /* Invert low-order SIZE bits of XHI:XLO. */
11911 static void
11912 neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
11914 unsigned immlo = xlo ? *xlo : 0;
11915 unsigned immhi = xhi ? *xhi : 0;
11917 switch (size)
11919 case 8:
11920 immlo = (~immlo) & 0xff;
11921 break;
11923 case 16:
11924 immlo = (~immlo) & 0xffff;
11925 break;
11927 case 64:
11928 immhi = (~immhi) & 0xffffffff;
11929 /* fall through. */
11931 case 32:
11932 immlo = (~immlo) & 0xffffffff;
11933 break;
11935 default:
11936 abort ();
11939 if (xlo)
11940 *xlo = immlo;
11942 if (xhi)
11943 *xhi = immhi;
11946 static void
11947 do_neon_logic (void)
11949 if (inst.operands[2].present && inst.operands[2].isreg)
11951 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11952 neon_check_type (3, rs, N_IGNORE_TYPE);
11953 /* U bit and size field were set as part of the bitmask. */
11954 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11955 neon_three_same (neon_quad (rs), 0, -1);
11957 else
11959 enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
11960 struct neon_type_el et = neon_check_type (2, rs,
11961 N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
11962 enum neon_opc opcode = inst.instruction & 0x0fffffff;
11963 unsigned immbits;
11964 int cmode;
11966 if (et.type == NT_invtype)
11967 return;
11969 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11971 immbits = inst.operands[1].imm;
11972 if (et.size == 64)
11974 /* .i64 is a pseudo-op, so the immediate must be a repeating
11975 pattern. */
11976 if (immbits != (inst.operands[1].regisimm ?
11977 inst.operands[1].reg : 0))
11979 /* Set immbits to an invalid constant. */
11980 immbits = 0xdeadbeef;
11984 switch (opcode)
11986 case N_MNEM_vbic:
11987 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
11988 break;
11990 case N_MNEM_vorr:
11991 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
11992 break;
11994 case N_MNEM_vand:
11995 /* Pseudo-instruction for VBIC. */
11996 neon_invert_size (&immbits, 0, et.size);
11997 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
11998 break;
12000 case N_MNEM_vorn:
12001 /* Pseudo-instruction for VORR. */
12002 neon_invert_size (&immbits, 0, et.size);
12003 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
12004 break;
12006 default:
12007 abort ();
12010 if (cmode == FAIL)
12011 return;
12013 inst.instruction |= neon_quad (rs) << 6;
12014 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12015 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12016 inst.instruction |= cmode << 8;
12017 neon_write_immbits (immbits);
12019 inst.instruction = neon_dp_fixup (inst.instruction);
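/* So, for instance, "vand.i32 d0, #0xffffff00" is accepted and assembled as
   the VBIC immediate form with the inverted constant 0xff (cmode 0x1).  */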
12023 static void
12024 do_neon_bitfield (void)
12026 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12027 neon_check_type (3, rs, N_IGNORE_TYPE);
12028 neon_three_same (neon_quad (rs), 0, -1);
12031 static void
12032 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
12033 unsigned destbits)
12035 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12036 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
12037 types | N_KEY);
12038 if (et.type == NT_float)
12040 inst.instruction = NEON_ENC_FLOAT (inst.instruction);
12041 neon_three_same (neon_quad (rs), 0, -1);
12043 else
12045 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12046 neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
12050 static void
12051 do_neon_dyadic_if_su (void)
12053 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
12056 static void
12057 do_neon_dyadic_if_su_d (void)
12059 /* This version only allows D registers, but that constraint is enforced during
12060 operand parsing so we don't need to do anything extra here. */
12061 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
12064 static void
12065 do_neon_dyadic_if_i_d (void)
12067 /* The "untyped" case can't happen. Do this to stop the "U" bit being
12068 affected if we specify unsigned args. */
12069 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
12072 enum vfp_or_neon_is_neon_bits
12074 NEON_CHECK_CC = 1,
12075 NEON_CHECK_ARCH = 2
12078 /* Call this function if an instruction which may have belonged to the VFP or
12079 Neon instruction sets, but turned out to be a Neon instruction (due to the
12080 operand types involved, etc.). We have to check and/or fix-up a couple of
12081 things:
12083 - Make sure the user hasn't attempted to make a Neon instruction
12084 conditional.
12085 - Alter the value in the condition code field if necessary.
12086 - Make sure that the arch supports Neon instructions.
12088 Which of these operations take place depends on bits from enum
12089 vfp_or_neon_is_neon_bits.
12091 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
12092 current instruction's condition is COND_ALWAYS, the condition field is
12093 changed to inst.uncond_value. This is necessary because instructions shared
12094 between VFP and Neon may be conditional for the VFP variants only, and the
12095 unconditional Neon version must have, e.g., 0xF in the condition field. */
12097 static int
12098 vfp_or_neon_is_neon (unsigned check)
12100 /* Conditions are always legal in Thumb mode (IT blocks). */
12101 if (!thumb_mode && (check & NEON_CHECK_CC))
12103 if (inst.cond != COND_ALWAYS)
12105 first_error (_(BAD_COND));
12106 return FAIL;
12108 if (inst.uncond_value != -1)
12109 inst.instruction |= inst.uncond_value << 28;
12112 if ((check & NEON_CHECK_ARCH)
12113 && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
12115 first_error (_(BAD_FPU));
12116 return FAIL;
12119 return SUCCESS;
12122 static void
12123 do_neon_addsub_if_i (void)
12125 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
12126 return;
12128 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12129 return;
12131 /* The "untyped" case can't happen. Do this to stop the "U" bit being
12132 affected if we specify unsigned args. */
12133 neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
12136 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
12137 result to be:
12138 V<op> A,B (A is operand 0, B is operand 2)
12139 to mean:
12140 V<op> A,B,A
12141 not:
12142 V<op> A,B,B
12143 so handle that case specially. */
12145 static void
12146 neon_exchange_operands (void)
12148 void *scratch = alloca (sizeof (inst.operands[0]));
12149 if (inst.operands[1].present)
12151 /* Swap operands[1] and operands[2]. */
12152 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
12153 inst.operands[1] = inst.operands[2];
12154 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
12156 else
12158 inst.operands[1] = inst.operands[2];
12159 inst.operands[2] = inst.operands[0];
12163 static void
12164 neon_compare (unsigned regtypes, unsigned immtypes, int invert)
12166 if (inst.operands[2].isreg)
12168 if (invert)
12169 neon_exchange_operands ();
12170 neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
12172 else
12174 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12175 struct neon_type_el et = neon_check_type (2, rs,
12176 N_EQK | N_SIZ, immtypes | N_KEY);
12178 inst.instruction = NEON_ENC_IMMED (inst.instruction);
12179 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12180 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12181 inst.instruction |= LOW4 (inst.operands[1].reg);
12182 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12183 inst.instruction |= neon_quad (rs) << 6;
12184 inst.instruction |= (et.type == NT_float) << 10;
12185 inst.instruction |= neon_logbits (et.size) << 18;
12187 inst.instruction = neon_dp_fixup (inst.instruction);
12191 static void
12192 do_neon_cmp (void)
12194 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
12197 static void
12198 do_neon_cmp_inv (void)
12200 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
12203 static void
12204 do_neon_ceq (void)
12206 neon_compare (N_IF_32, N_IF_32, FALSE);
12209 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
12210 scalars, which are encoded in 5 bits, M : Rm.
12211 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
12212 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
12213 index in M. */
12215 static unsigned
12216 neon_scalar_for_mul (unsigned scalar, unsigned elsize)
12218 unsigned regno = NEON_SCALAR_REG (scalar);
12219 unsigned elno = NEON_SCALAR_INDEX (scalar);
12221 switch (elsize)
12223 case 16:
12224 if (regno > 7 || elno > 3)
12225 goto bad_scalar;
12226 return regno | (elno << 3);
12228 case 32:
12229 if (regno > 15 || elno > 1)
12230 goto bad_scalar;
12231 return regno | (elno << 4);
12233 default:
12234 bad_scalar:
12235 first_error (_("scalar out of range for multiply instruction"));
12238 return 0;
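/* E.g. the 32-bit scalar d2[1] (as in "vmul.f32 d0, d1, d2[1]") has
   REGNO == 2 and ELNO == 1, giving the M:Rm encoding 0x12 (0b10010).  */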
12241 /* Encode multiply / multiply-accumulate scalar instructions. */
12243 static void
12244 neon_mul_mac (struct neon_type_el et, int ubit)
12246 unsigned scalar;
12248 /* Give a more helpful error message if we have an invalid type. */
12249 if (et.type == NT_invtype)
12250 return;
12252 scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
12253 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12254 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12255 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
12256 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
12257 inst.instruction |= LOW4 (scalar);
12258 inst.instruction |= HI1 (scalar) << 5;
12259 inst.instruction |= (et.type == NT_float) << 8;
12260 inst.instruction |= neon_logbits (et.size) << 20;
12261 inst.instruction |= (ubit != 0) << 24;
12263 inst.instruction = neon_dp_fixup (inst.instruction);
12266 static void
12267 do_neon_mac_maybe_scalar (void)
12269 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
12270 return;
12272 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12273 return;
12275 if (inst.operands[2].isscalar)
12277 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
12278 struct neon_type_el et = neon_check_type (3, rs,
12279 N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
12280 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
12281 neon_mul_mac (et, neon_quad (rs));
12283 else
12285 /* The "untyped" case can't happen. Do this to stop the "U" bit being
12286 affected if we specify unsigned args. */
12287 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
12291 static void
12292 do_neon_tst (void)
12294 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12295 struct neon_type_el et = neon_check_type (3, rs,
12296 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
12297 neon_three_same (neon_quad (rs), 0, et.size);
12300 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
12301 same types as the MAC equivalents. The polynomial type for this instruction
12302 is encoded the same as the integer type. */
12304 static void
12305 do_neon_mul (void)
12307 if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
12308 return;
12310 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12311 return;
12313 if (inst.operands[2].isscalar)
12314 do_neon_mac_maybe_scalar ();
12315 else
12316 neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
12319 static void
12320 do_neon_qdmulh (void)
12322 if (inst.operands[2].isscalar)
12324 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
12325 struct neon_type_el et = neon_check_type (3, rs,
12326 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
12327 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
12328 neon_mul_mac (et, neon_quad (rs));
12330 else
12332 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12333 struct neon_type_el et = neon_check_type (3, rs,
12334 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
12335 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12336 /* The U bit (rounding) comes from bit mask. */
12337 neon_three_same (neon_quad (rs), 0, et.size);
12341 static void
12342 do_neon_fcmp_absolute (void)
12344 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12345 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
12346 /* Size field comes from bit mask. */
12347 neon_three_same (neon_quad (rs), 1, -1);
12350 static void
12351 do_neon_fcmp_absolute_inv (void)
12353 neon_exchange_operands ();
12354 do_neon_fcmp_absolute ();
12357 static void
12358 do_neon_step (void)
12360 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12361 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
12362 neon_three_same (neon_quad (rs), 0, -1);
12365 static void
12366 do_neon_abs_neg (void)
12368 enum neon_shape rs;
12369 struct neon_type_el et;
12371 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
12372 return;
12374 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12375 return;
12377 rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
12378 et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);
12380 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12381 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12382 inst.instruction |= LOW4 (inst.operands[1].reg);
12383 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12384 inst.instruction |= neon_quad (rs) << 6;
12385 inst.instruction |= (et.type == NT_float) << 10;
12386 inst.instruction |= neon_logbits (et.size) << 18;
12388 inst.instruction = neon_dp_fixup (inst.instruction);
12391 static void
12392 do_neon_sli (void)
12394 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12395 struct neon_type_el et = neon_check_type (2, rs,
12396 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
12397 int imm = inst.operands[2].imm;
12398 constraint (imm < 0 || (unsigned)imm >= et.size,
12399 _("immediate out of range for insert"));
12400 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
12403 static void
12404 do_neon_sri (void)
12406 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12407 struct neon_type_el et = neon_check_type (2, rs,
12408 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
12409 int imm = inst.operands[2].imm;
12410 constraint (imm < 1 || (unsigned)imm > et.size,
12411 _("immediate out of range for insert"));
12412 neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
12415 static void
12416 do_neon_qshlu_imm (void)
12418 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12419 struct neon_type_el et = neon_check_type (2, rs,
12420 N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
12421 int imm = inst.operands[2].imm;
12422 constraint (imm < 0 || (unsigned)imm >= et.size,
12423 _("immediate out of range for shift"));
12424 /* Only encodes the 'U present' variant of the instruction.
12425 In this case, signed types have OP (bit 8) set to 0.
12426 Unsigned types have OP set to 1. */
12427 inst.instruction |= (et.type == NT_unsigned) << 8;
12428 /* The rest of the bits are the same as other immediate shifts. */
12429 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
12432 static void
12433 do_neon_qmovn (void)
12435 struct neon_type_el et = neon_check_type (2, NS_DQ,
12436 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
12437 /* Saturating move where operands can be signed or unsigned, and the
12438 destination has the same signedness. */
12439 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12440 if (et.type == NT_unsigned)
12441 inst.instruction |= 0xc0;
12442 else
12443 inst.instruction |= 0x80;
12444 neon_two_same (0, 1, et.size / 2);
12447 static void
12448 do_neon_qmovun (void)
12450 struct neon_type_el et = neon_check_type (2, NS_DQ,
12451 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
12452 /* Saturating move with unsigned results. Operands must be signed. */
12453 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12454 neon_two_same (0, 1, et.size / 2);
12457 static void
12458 do_neon_rshift_sat_narrow (void)
12460 /* FIXME: Types for narrowing. If operands are signed, results can be signed
12461 or unsigned. If operands are unsigned, results must also be unsigned. */
12462 struct neon_type_el et = neon_check_type (2, NS_DQI,
12463 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
12464 int imm = inst.operands[2].imm;
12465 /* This gets the bounds check, size encoding and immediate bits calculation
12466 right. */
12467 et.size /= 2;
12469 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
12470 VQMOVN.I<size> <Dd>, <Qm>. */
12471 if (imm == 0)
12473 inst.operands[2].present = 0;
12474 inst.instruction = N_MNEM_vqmovn;
12475 do_neon_qmovn ();
12476 return;
12479 constraint (imm < 1 || (unsigned)imm > et.size,
12480 _("immediate out of range"));
12481 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
12484 static void
12485 do_neon_rshift_sat_narrow_u (void)
12487 /* FIXME: Types for narrowing. If operands are signed, results can be signed
12488 or unsigned. If operands are unsigned, results must also be unsigned. */
12489 struct neon_type_el et = neon_check_type (2, NS_DQI,
12490 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
12491 int imm = inst.operands[2].imm;
12492 /* This gets the bounds check, size encoding and immediate bits calculation
12493 right. */
12494 et.size /= 2;
12496 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
12497 VQMOVUN.I<size> <Dd>, <Qm>. */
12498 if (imm == 0)
12500 inst.operands[2].present = 0;
12501 inst.instruction = N_MNEM_vqmovun;
12502 do_neon_qmovun ();
12503 return;
12506 constraint (imm < 1 || (unsigned)imm > et.size,
12507 _("immediate out of range"));
12508 /* FIXME: The manual is kind of unclear about what value U should have in
12509 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
12510 must be 1. */
12511 neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
12514 static void
12515 do_neon_movn (void)
12517 struct neon_type_el et = neon_check_type (2, NS_DQ,
12518 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
12519 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12520 neon_two_same (0, 1, et.size / 2);
12523 static void
12524 do_neon_rshift_narrow (void)
12526 struct neon_type_el et = neon_check_type (2, NS_DQI,
12527 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
12528 int imm = inst.operands[2].imm;
12529 /* This gets the bounds check, size encoding and immediate bits calculation
12530 right. */
12531 et.size /= 2;
12533 /* If immediate is zero then we are a pseudo-instruction for
12534 VMOVN.I<size> <Dd>, <Qm> */
12535 if (imm == 0)
12537 inst.operands[2].present = 0;
12538 inst.instruction = N_MNEM_vmovn;
12539 do_neon_movn ();
12540 return;
12543 constraint (imm < 1 || (unsigned)imm > et.size,
12544 _("immediate out of range for narrowing operation"));
12545 neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
12548 static void
12549 do_neon_shll (void)
12551 /* FIXME: Type checking when lengthening. */
12552 struct neon_type_el et = neon_check_type (2, NS_QDI,
12553 N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
12554 unsigned imm = inst.operands[2].imm;
12556 if (imm == et.size)
12558 /* Maximum shift variant. */
12559 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12560 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12561 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12562 inst.instruction |= LOW4 (inst.operands[1].reg);
12563 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12564 inst.instruction |= neon_logbits (et.size) << 18;
12566 inst.instruction = neon_dp_fixup (inst.instruction);
12568 else
12570 /* A more-specific type check for non-max versions. */
12571 et = neon_check_type (2, NS_QDI,
12572 N_EQK | N_DBL, N_SU_32 | N_KEY);
12573 inst.instruction = NEON_ENC_IMMED (inst.instruction);
12574 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
12578 /* Check the various types for the VCVT instruction, and return which version
12579 the current instruction is. */
12581 static int
12582 neon_cvt_flavour (enum neon_shape rs)
12584 #define CVT_VAR(C,X,Y) \
12585 et = neon_check_type (2, rs, whole_reg | (X), whole_reg | (Y)); \
12586 if (et.type != NT_invtype) \
12588 inst.error = NULL; \
12589 return (C); \
12591 struct neon_type_el et;
12592 unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
12593 || rs == NS_FF) ? N_VFP : 0;
12594 /* The instruction versions which take an immediate take one register
12595 argument, which is extended to the width of the full register. Thus the
12596 "source" and "destination" registers must have the same width. Hack that
12597 here by making the size equal to the key (wider, in this case) operand. */
12598 unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;
12600 CVT_VAR (0, N_S32, N_F32);
12601 CVT_VAR (1, N_U32, N_F32);
12602 CVT_VAR (2, N_F32, N_S32);
12603 CVT_VAR (3, N_F32, N_U32);
12604 /* Half-precision conversions. */
12605 CVT_VAR (4, N_F32, N_F16);
12606 CVT_VAR (5, N_F16, N_F32);
12608 whole_reg = N_VFP;
12610 /* VFP instructions. */
12611 CVT_VAR (6, N_F32, N_F64);
12612 CVT_VAR (7, N_F64, N_F32);
12613 CVT_VAR (8, N_S32, N_F64 | key);
12614 CVT_VAR (9, N_U32, N_F64 | key);
12615 CVT_VAR (10, N_F64 | key, N_S32);
12616 CVT_VAR (11, N_F64 | key, N_U32);
12617 /* VFP instructions with bitshift. */
12618 CVT_VAR (12, N_F32 | key, N_S16);
12619 CVT_VAR (13, N_F32 | key, N_U16);
12620 CVT_VAR (14, N_F64 | key, N_S16);
12621 CVT_VAR (15, N_F64 | key, N_U16);
12622 CVT_VAR (16, N_S16, N_F32 | key);
12623 CVT_VAR (17, N_U16, N_F32 | key);
12624 CVT_VAR (18, N_S16, N_F64 | key);
12625 CVT_VAR (19, N_U16, N_F64 | key);
12627 return -1;
12628 #undef CVT_VAR
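/* E.g. "vcvt.s32.f32 d0, d1" is flavour 0 (the first type argument is the
   destination), while "vcvt.f32.f16 q0, d1" matches the half-precision
   flavour 4 added above.  */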
12631 /* Neon-syntax VFP conversions. */
12633 static void
12634 do_vfp_nsyn_cvt (enum neon_shape rs, int flavour)
12636 const char *opname = 0;
12638 if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI)
12640 /* Conversions with immediate bitshift. */
12641 const char *enc[] =
12643 "ftosls",
12644 "ftouls",
12645 "fsltos",
12646 "fultos",
12647 NULL,
12648 NULL,
12649 NULL,
12650 NULL,
12651 "ftosld",
12652 "ftould",
12653 "fsltod",
12654 "fultod",
12655 "fshtos",
12656 "fuhtos",
12657 "fshtod",
12658 "fuhtod",
12659 "ftoshs",
12660 "ftouhs",
12661 "ftoshd",
12662 "ftouhd"
12665 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
12667 opname = enc[flavour];
12668 constraint (inst.operands[0].reg != inst.operands[1].reg,
12669 _("operands 0 and 1 must be the same register"));
12670 inst.operands[1] = inst.operands[2];
12671 memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
12674 else
12676 /* Conversions without bitshift. */
12677 const char *enc[] =
12679 "ftosis",
12680 "ftouis",
12681 "fsitos",
12682 "fuitos",
12683 NULL,
12684 NULL,
12685 "fcvtsd",
12686 "fcvtds",
12687 "ftosid",
12688 "ftouid",
12689 "fsitod",
12690 "fuitod"
12693 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
12694 opname = enc[flavour];
12697 if (opname)
12698 do_vfp_nsyn_opcode (opname);
12701 static void
12702 do_vfp_nsyn_cvtz (void)
12704 enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_NULL);
12705 int flavour = neon_cvt_flavour (rs);
12706 const char *enc[] =
12708 "ftosizs",
12709 "ftouizs",
12710 NULL,
12711 NULL,
12712 NULL,
12713 NULL,
12714 NULL,
12715 NULL,
12716 "ftosizd",
12717 "ftouizd"
12720 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
12721 do_vfp_nsyn_opcode (enc[flavour]);
12723 static void
12724 do_neon_cvt (void)
12726 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
12727 NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ, NS_NULL);
12728 int flavour = neon_cvt_flavour (rs);
12730 /* VFP rather than Neon conversions. */
12731 if (flavour >= 6)
12733 do_vfp_nsyn_cvt (rs, flavour);
12734 return;
12737 switch (rs)
12739 case NS_DDI:
12740 case NS_QQI:
12742 unsigned immbits;
12743 unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
12745 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12746 return;
12748 /* Fixed-point conversion with #0 immediate is encoded as an
12749 integer conversion. */
12750 if (inst.operands[2].present && inst.operands[2].imm == 0)
12751 goto int_encode;
12752 immbits = 32 - inst.operands[2].imm;
12753 inst.instruction = NEON_ENC_IMMED (inst.instruction);
12754 if (flavour != -1)
12755 inst.instruction |= enctab[flavour];
12756 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12757 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12758 inst.instruction |= LOW4 (inst.operands[1].reg);
12759 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12760 inst.instruction |= neon_quad (rs) << 6;
12761 inst.instruction |= 1 << 21;
12762 inst.instruction |= immbits << 16;
12764 inst.instruction = neon_dp_fixup (inst.instruction);
12766 break;
12768 case NS_DD:
12769 case NS_QQ:
12770 int_encode:
12772 unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };
12774 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12776 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12777 return;
12779 if (flavour != -1)
12780 inst.instruction |= enctab[flavour];
12782 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12783 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12784 inst.instruction |= LOW4 (inst.operands[1].reg);
12785 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12786 inst.instruction |= neon_quad (rs) << 6;
12787 inst.instruction |= 2 << 18;
12789 inst.instruction = neon_dp_fixup (inst.instruction);
12791 break;
12793 /* Half-precision conversions for Advanced SIMD -- Neon. */
12794 case NS_QD:
12795 case NS_DQ:
12797 if ((rs == NS_DQ)
12798 && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
12800 as_bad (_("operand size must match register width"));
12801 break;
12804 if ((rs == NS_QD)
12805 && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
12807 as_bad (_("operand size must match register width"));
12808 break;
12811 if (rs == NS_DQ)
12812 inst.instruction = 0x3b60600;
12813 else
12814 inst.instruction = 0x3b60700;
12816 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12817 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12818 inst.instruction |= LOW4 (inst.operands[1].reg);
12819 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12820 inst.instruction = neon_dp_fixup (inst.instruction);
12821 break;
12823 default:
12824 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
12825 do_vfp_nsyn_cvt (rs, flavour);
12829 static void
12830 do_neon_cvtb (void)
12832 inst.instruction = 0xeb20a40;
12834 /* The sizes are attached to the mnemonic. */
12835 if (inst.vectype.el[0].type != NT_invtype
12836 && inst.vectype.el[0].size == 16)
12837 inst.instruction |= 0x00010000;
12839 /* Programmer's syntax: the sizes are attached to the operands. */
12840 else if (inst.operands[0].vectype.type != NT_invtype
12841 && inst.operands[0].vectype.size == 16)
12842 inst.instruction |= 0x00010000;
12844 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
12845 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
12846 do_vfp_cond_or_thumb ();
12850 static void
12851 do_neon_cvtt (void)
12853 do_neon_cvtb ();
12854 inst.instruction |= 0x80;
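/* For illustration only (UAL syntax assumed), the scalar half-precision
   conversions encoded by do_neon_cvtb and do_neon_cvtt above are written,
   for example, as

	vcvtb.f32.f16	s0, s1	@ low halfword of s1 -> single in s0
	vcvtt.f16.f32	s0, s1	@ s1 -> half stored in the top halfword of s0

   A destination type of .f16 sets the 0x00010000 direction bit in
   do_neon_cvtb, and do_neon_cvtt additionally sets bit 7 to select the top
   halfword.  The vector forms (e.g. "vcvt.f32.f16 q0, d1") are handled by
   the NS_QD/NS_DQ cases of do_neon_cvt.  */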
12857 static void
12858 neon_move_immediate (void)
12860 enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
12861 struct neon_type_el et = neon_check_type (2, rs,
12862 N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
12863 unsigned immlo, immhi = 0, immbits;
12864 int op, cmode, float_p;
12866 constraint (et.type == NT_invtype,
12867 _("operand size must be specified for immediate VMOV"));
12869 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
12870 op = (inst.instruction & (1 << 5)) != 0;
12872 immlo = inst.operands[1].imm;
12873 if (inst.operands[1].regisimm)
12874 immhi = inst.operands[1].reg;
12876 constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
12877 _("immediate has bits set outside the operand size"));
12879 float_p = inst.operands[1].immisfloat;
12881 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
12882 et.size, et.type)) == FAIL)
12884 /* Invert relevant bits only. */
12885 neon_invert_size (&immlo, &immhi, et.size);
12886 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
12887 with one or the other; those cases are caught by
12888 neon_cmode_for_move_imm. */
12889 op = !op;
12890 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
12891 &op, et.size, et.type)) == FAIL)
12893 first_error (_("immediate out of range"));
12894 return;
12898 inst.instruction &= ~(1 << 5);
12899 inst.instruction |= op << 5;
12901 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12902 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12903 inst.instruction |= neon_quad (rs) << 6;
12904 inst.instruction |= cmode << 8;
12906 neon_write_immbits (immbits);
12909 static void
12910 do_neon_mvn (void)
12912 if (inst.operands[1].isreg)
12914 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
12916 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12917 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12918 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12919 inst.instruction |= LOW4 (inst.operands[1].reg);
12920 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12921 inst.instruction |= neon_quad (rs) << 6;
12923 else
12925 inst.instruction = NEON_ENC_IMMED (inst.instruction);
12926 neon_move_immediate ();
12929 inst.instruction = neon_dp_fixup (inst.instruction);
12932 /* Encode instructions of form:
12934 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
12935 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
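/* As a worked example of the layout above (illustrative only): for a long
   operation with Qd = q1 (D register number 2), Dn = d3, Dm = d4 and an
   unsigned 16-bit element type, neon_mixed_length below ORs in Rd = 0010
   at bits [15:12], Rn = 0011 at [19:16], Rm = 0100 at [3:0], U = 1 at bit
   24 and size = 01 at bits [21:20]; the D, N and M bits stay clear since
   all three register numbers are below 16.  */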
12937 static void
12938 neon_mixed_length (struct neon_type_el et, unsigned size)
12940 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12941 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12942 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
12943 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
12944 inst.instruction |= LOW4 (inst.operands[2].reg);
12945 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
12946 inst.instruction |= (et.type == NT_unsigned) << 24;
12947 inst.instruction |= neon_logbits (size) << 20;
12949 inst.instruction = neon_dp_fixup (inst.instruction);
12952 static void
12953 do_neon_dyadic_long (void)
12955 /* FIXME: Type checking for lengthening op. */
12956 struct neon_type_el et = neon_check_type (3, NS_QDD,
12957 N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
12958 neon_mixed_length (et, et.size);
12961 static void
12962 do_neon_abal (void)
12964 struct neon_type_el et = neon_check_type (3, NS_QDD,
12965 N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
12966 neon_mixed_length (et, et.size);
12969 static void
12970 neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
12972 if (inst.operands[2].isscalar)
12974 struct neon_type_el et = neon_check_type (3, NS_QDS,
12975 N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
12976 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
12977 neon_mul_mac (et, et.type == NT_unsigned);
12979 else
12981 struct neon_type_el et = neon_check_type (3, NS_QDD,
12982 N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
12983 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12984 neon_mixed_length (et, et.size);
12988 static void
12989 do_neon_mac_maybe_scalar_long (void)
12991 neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
12994 static void
12995 do_neon_dyadic_wide (void)
12997 struct neon_type_el et = neon_check_type (3, NS_QQD,
12998 N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
12999 neon_mixed_length (et, et.size);
13002 static void
13003 do_neon_dyadic_narrow (void)
13005 struct neon_type_el et = neon_check_type (3, NS_QDD,
13006 N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
13007 /* Operand sign is unimportant, and the U bit is part of the opcode,
13008 so force the operand type to integer. */
13009 et.type = NT_integer;
13010 neon_mixed_length (et, et.size / 2);
13013 static void
13014 do_neon_mul_sat_scalar_long (void)
13016 neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
13019 static void
13020 do_neon_vmull (void)
13022 if (inst.operands[2].isscalar)
13023 do_neon_mac_maybe_scalar_long ();
13024 else
13026 struct neon_type_el et = neon_check_type (3, NS_QDD,
13027 N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_KEY);
13028 if (et.type == NT_poly)
13029 inst.instruction = NEON_ENC_POLY (inst.instruction);
13030 else
13031 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
13032 /* For polynomial encoding, size field must be 0b00 and the U bit must be
13033 zero. Should be OK as-is. */
13034 neon_mixed_length (et, et.size);
13038 static void
13039 do_neon_ext (void)
13041 enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
13042 struct neon_type_el et = neon_check_type (3, rs,
13043 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
13044 unsigned imm = (inst.operands[3].imm * et.size) / 8;
13046 constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
13047 _("shift out of range"));
13048 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13049 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13050 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
13051 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
13052 inst.instruction |= LOW4 (inst.operands[2].reg);
13053 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
13054 inst.instruction |= neon_quad (rs) << 6;
13055 inst.instruction |= imm << 8;
13057 inst.instruction = neon_dp_fixup (inst.instruction);
13060 static void
13061 do_neon_rev (void)
13063 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13064 struct neon_type_el et = neon_check_type (2, rs,
13065 N_EQK, N_8 | N_16 | N_32 | N_KEY);
13066 unsigned op = (inst.instruction >> 7) & 3;
13067 /* N (width of reversed regions) is encoded as part of the bitmask. We
13068 extract it here to check the elements to be reversed are smaller.
13069 Otherwise we'd get a reserved instruction. */
13070 unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
13071 assert (elsize != 0);
13072 constraint (et.size >= elsize,
13073 _("elements must be smaller than reversal region"));
13074 neon_two_same (neon_quad (rs), 1, et.size);
13077 static void
13078 do_neon_dup (void)
13080 if (inst.operands[1].isscalar)
13082 enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
13083 struct neon_type_el et = neon_check_type (2, rs,
13084 N_EQK, N_8 | N_16 | N_32 | N_KEY);
13085 unsigned sizebits = et.size >> 3;
13086 unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
13087 int logsize = neon_logbits (et.size);
13088 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;
13090 if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
13091 return;
13093 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
13094 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13095 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13096 inst.instruction |= LOW4 (dm);
13097 inst.instruction |= HI1 (dm) << 5;
13098 inst.instruction |= neon_quad (rs) << 6;
13099 inst.instruction |= x << 17;
13100 inst.instruction |= sizebits << 16;
13102 inst.instruction = neon_dp_fixup (inst.instruction);
13104 else
13106 enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
13107 struct neon_type_el et = neon_check_type (2, rs,
13108 N_8 | N_16 | N_32 | N_KEY, N_EQK);
13109 /* Duplicate ARM register to lanes of vector. */
13110 inst.instruction = NEON_ENC_ARMREG (inst.instruction);
13111 switch (et.size)
13113 case 8: inst.instruction |= 0x400000; break;
13114 case 16: inst.instruction |= 0x000020; break;
13115 case 32: inst.instruction |= 0x000000; break;
13116 default: break;
13118 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
13119 inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
13120 inst.instruction |= HI1 (inst.operands[0].reg) << 7;
13121 inst.instruction |= neon_quad (rs) << 21;
13122 /* The encoding for this instruction is identical for the ARM and Thumb
13123 variants, except for the condition field. */
13124 do_vfp_cond_or_thumb ();
13128 /* VMOV has particularly many variations. It can be one of:
13129 0. VMOV<c><q> <Qd>, <Qm>
13130 1. VMOV<c><q> <Dd>, <Dm>
13131 (Register operations, which are VORR with Rm = Rn.)
13132 2. VMOV<c><q>.<dt> <Qd>, #<imm>
13133 3. VMOV<c><q>.<dt> <Dd>, #<imm>
13134 (Immediate loads.)
13135 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
13136 (ARM register to scalar.)
13137 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
13138 (Two ARM registers to vector.)
13139 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
13140 (Scalar to ARM register.)
13141 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
13142 (Vector to two ARM registers.)
13143 8. VMOV.F32 <Sd>, <Sm>
13144 9. VMOV.F64 <Dd>, <Dm>
13145 (VFP register moves.)
13146 10. VMOV.F32 <Sd>, #imm
13147 11. VMOV.F64 <Dd>, #imm
13148 (VFP float immediate load.)
13149 12. VMOV <Rd>, <Sm>
13150 (VFP single to ARM reg.)
13151 13. VMOV <Sd>, <Rm>
13152 (ARM reg to VFP single.)
13153 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
13154 (Two ARM regs to two VFP singles.)
13155 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
13156 (Two VFP singles to two ARM regs.)
13158 These cases can be disambiguated using neon_select_shape, except cases 1/9
13159 and 3/11 which depend on the operand type too.
13161 All the encoded bits are hardcoded by this function.
13163 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
13164 Cases 5, 7 may be used with VFPv2 and above.
13166 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
13167 can specify a type where it doesn't make sense to, and is ignored). */
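/* A few illustrative mappings onto the cases handled below: "vmov.f32 s0, s1"
   selects NS_FF and is emitted as fcpys (case 8); "vmov d0, r2, r3" selects
   NS_DRR and is emitted as fmdrr (case 5); "vmov.f64 d0, #1.0" selects NS_DI
   with an F64 type and is emitted as fconstd (case 11), while the same shape
   with an integer type falls through to the Neon immediate form (case 3).  */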
13169 static void
13170 do_neon_mov (void)
13172 enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
13173 NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR, NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
13174 NS_NULL);
13175 struct neon_type_el et;
13176 const char *ldconst = 0;
13178 switch (rs)
13180 case NS_DD: /* case 1/9. */
13181 et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
13182 /* It is not an error here if no type is given. */
13183 inst.error = NULL;
13184 if (et.type == NT_float && et.size == 64)
13186 do_vfp_nsyn_opcode ("fcpyd");
13187 break;
13189 /* fall through. */
13191 case NS_QQ: /* case 0/1. */
13193 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
13194 return;
13195 /* The architecture manual I have doesn't explicitly state which
13196 value the U bit should have for register->register moves, but
13197 the equivalent VORR instruction has U = 0, so do that. */
13198 inst.instruction = 0x0200110;
13199 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13200 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13201 inst.instruction |= LOW4 (inst.operands[1].reg);
13202 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
13203 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
13204 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
13205 inst.instruction |= neon_quad (rs) << 6;
13207 inst.instruction = neon_dp_fixup (inst.instruction);
13209 break;
13211 case NS_DI: /* case 3/11. */
13212 et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
13213 inst.error = NULL;
13214 if (et.type == NT_float && et.size == 64)
13216 /* case 11 (fconstd). */
13217 ldconst = "fconstd";
13218 goto encode_fconstd;
13220 /* fall through. */
13222 case NS_QI: /* case 2/3. */
13223 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
13224 return;
13225 inst.instruction = 0x0800010;
13226 neon_move_immediate ();
13227 inst.instruction = neon_dp_fixup (inst.instruction);
13228 break;
13230 case NS_SR: /* case 4. */
13232 unsigned bcdebits = 0;
13233 struct neon_type_el et = neon_check_type (2, NS_NULL,
13234 N_8 | N_16 | N_32 | N_KEY, N_EQK);
13235 int logsize = neon_logbits (et.size);
13236 unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
13237 unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);
13239 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
13240 _(BAD_FPU));
13241 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
13242 && et.size != 32, _(BAD_FPU));
13243 constraint (et.type == NT_invtype, _("bad type for scalar"));
13244 constraint (x >= 64 / et.size, _("scalar index out of range"));
13246 switch (et.size)
13248 case 8: bcdebits = 0x8; break;
13249 case 16: bcdebits = 0x1; break;
13250 case 32: bcdebits = 0x0; break;
13251 default: ;
13254 bcdebits |= x << logsize;
13256 inst.instruction = 0xe000b10;
13257 do_vfp_cond_or_thumb ();
13258 inst.instruction |= LOW4 (dn) << 16;
13259 inst.instruction |= HI1 (dn) << 7;
13260 inst.instruction |= inst.operands[1].reg << 12;
13261 inst.instruction |= (bcdebits & 3) << 5;
13262 inst.instruction |= (bcdebits >> 2) << 21;
13264 break;
13266 case NS_DRR: /* case 5 (fmdrr). */
13267 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
13268 _(BAD_FPU));
13270 inst.instruction = 0xc400b10;
13271 do_vfp_cond_or_thumb ();
13272 inst.instruction |= LOW4 (inst.operands[0].reg);
13273 inst.instruction |= HI1 (inst.operands[0].reg) << 5;
13274 inst.instruction |= inst.operands[1].reg << 12;
13275 inst.instruction |= inst.operands[2].reg << 16;
13276 break;
13278 case NS_RS: /* case 6. */
13280 struct neon_type_el et = neon_check_type (2, NS_NULL,
13281 N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
13282 unsigned logsize = neon_logbits (et.size);
13283 unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
13284 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
13285 unsigned abcdebits = 0;
13287 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
13288 _(BAD_FPU));
13289 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
13290 && et.size != 32, _(BAD_FPU));
13291 constraint (et.type == NT_invtype, _("bad type for scalar"));
13292 constraint (x >= 64 / et.size, _("scalar index out of range"));
13294 switch (et.size)
13296 case 8: abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
13297 case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
13298 case 32: abcdebits = 0x00; break;
13299 default: ;
13302 abcdebits |= x << logsize;
13303 inst.instruction = 0xe100b10;
13304 do_vfp_cond_or_thumb ();
13305 inst.instruction |= LOW4 (dn) << 16;
13306 inst.instruction |= HI1 (dn) << 7;
13307 inst.instruction |= inst.operands[0].reg << 12;
13308 inst.instruction |= (abcdebits & 3) << 5;
13309 inst.instruction |= (abcdebits >> 2) << 21;
13311 break;
13313 case NS_RRD: /* case 7 (fmrrd). */
13314 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
13315 _(BAD_FPU));
13317 inst.instruction = 0xc500b10;
13318 do_vfp_cond_or_thumb ();
13319 inst.instruction |= inst.operands[0].reg << 12;
13320 inst.instruction |= inst.operands[1].reg << 16;
13321 inst.instruction |= LOW4 (inst.operands[2].reg);
13322 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
13323 break;
13325 case NS_FF: /* case 8 (fcpys). */
13326 do_vfp_nsyn_opcode ("fcpys");
13327 break;
13329 case NS_FI: /* case 10 (fconsts). */
13330 ldconst = "fconsts";
13331 encode_fconstd:
13332 if (is_quarter_float (inst.operands[1].imm))
13334 inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
13335 do_vfp_nsyn_opcode (ldconst);
13337 else
13338 first_error (_("immediate out of range"));
13339 break;
13341 case NS_RF: /* case 12 (fmrs). */
13342 do_vfp_nsyn_opcode ("fmrs");
13343 break;
13345 case NS_FR: /* case 13 (fmsr). */
13346 do_vfp_nsyn_opcode ("fmsr");
13347 break;
13349 /* The encoders for the fmrrs and fmsrr instructions expect three operands
13350 (one of which is a list), but we have parsed four. Do some fiddling to
13351 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
13352 expect. */
13353 case NS_RRFF: /* case 14 (fmrrs). */
13354 constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
13355 _("VFP registers must be adjacent"));
13356 inst.operands[2].imm = 2;
13357 memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
13358 do_vfp_nsyn_opcode ("fmrrs");
13359 break;
13361 case NS_FFRR: /* case 15 (fmsrr). */
13362 constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
13363 _("VFP registers must be adjacent"));
13364 inst.operands[1] = inst.operands[2];
13365 inst.operands[2] = inst.operands[3];
13366 inst.operands[0].imm = 2;
13367 memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
13368 do_vfp_nsyn_opcode ("fmsrr");
13369 break;
13371 default:
13372 abort ();
13376 static void
13377 do_neon_rshift_round_imm (void)
13379 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
13380 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
13381 int imm = inst.operands[2].imm;
13383 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
13384 if (imm == 0)
13386 inst.operands[2].present = 0;
13387 do_neon_mov ();
13388 return;
13391 constraint (imm < 1 || (unsigned)imm > et.size,
13392 _("immediate out of range for shift"));
13393 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
13394 et.size - imm);
13397 static void
13398 do_neon_movl (void)
13400 struct neon_type_el et = neon_check_type (2, NS_QD,
13401 N_EQK | N_DBL, N_SU_32 | N_KEY);
13402 unsigned sizebits = et.size >> 3;
13403 inst.instruction |= sizebits << 19;
13404 neon_two_same (0, et.type == NT_unsigned, -1);
13407 static void
13408 do_neon_trn (void)
13410 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13411 struct neon_type_el et = neon_check_type (2, rs,
13412 N_EQK, N_8 | N_16 | N_32 | N_KEY);
13413 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
13414 neon_two_same (neon_quad (rs), 1, et.size);
13417 static void
13418 do_neon_zip_uzp (void)
13420 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13421 struct neon_type_el et = neon_check_type (2, rs,
13422 N_EQK, N_8 | N_16 | N_32 | N_KEY);
13423 if (rs == NS_DD && et.size == 32)
13425 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
13426 inst.instruction = N_MNEM_vtrn;
13427 do_neon_trn ();
13428 return;
13430 neon_two_same (neon_quad (rs), 1, et.size);
13433 static void
13434 do_neon_sat_abs_neg (void)
13436 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13437 struct neon_type_el et = neon_check_type (2, rs,
13438 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
13439 neon_two_same (neon_quad (rs), 1, et.size);
13442 static void
13443 do_neon_pair_long (void)
13445 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13446 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
13447 /* Unsigned is encoded in the OP field (bit 7) for these instructions. */
13448 inst.instruction |= (et.type == NT_unsigned) << 7;
13449 neon_two_same (neon_quad (rs), 1, et.size);
13452 static void
13453 do_neon_recip_est (void)
13455 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13456 struct neon_type_el et = neon_check_type (2, rs,
13457 N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
13458 inst.instruction |= (et.type == NT_float) << 8;
13459 neon_two_same (neon_quad (rs), 1, et.size);
13462 static void
13463 do_neon_cls (void)
13465 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13466 struct neon_type_el et = neon_check_type (2, rs,
13467 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
13468 neon_two_same (neon_quad (rs), 1, et.size);
13471 static void
13472 do_neon_clz (void)
13474 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13475 struct neon_type_el et = neon_check_type (2, rs,
13476 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
13477 neon_two_same (neon_quad (rs), 1, et.size);
13480 static void
13481 do_neon_cnt (void)
13483 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13484 struct neon_type_el et = neon_check_type (2, rs,
13485 N_EQK | N_INT, N_8 | N_KEY);
13486 neon_two_same (neon_quad (rs), 1, et.size);
13489 static void
13490 do_neon_swp (void)
13492 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13493 neon_two_same (neon_quad (rs), 1, -1);
13496 static void
13497 do_neon_tbl_tbx (void)
13499 unsigned listlenbits;
13500 neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
13502 if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
13504 first_error (_("bad list length for table lookup"));
13505 return;
13508 listlenbits = inst.operands[1].imm - 1;
13509 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13510 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13511 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
13512 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
13513 inst.instruction |= LOW4 (inst.operands[2].reg);
13514 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
13515 inst.instruction |= listlenbits << 8;
13517 inst.instruction = neon_dp_fixup (inst.instruction);
13520 static void
13521 do_neon_ldm_stm (void)
13523 /* P, U and L bits are part of bitmask. */
13524 int is_dbmode = (inst.instruction & (1 << 24)) != 0;
13525 unsigned offsetbits = inst.operands[1].imm * 2;
13527 if (inst.operands[1].issingle)
13529 do_vfp_nsyn_ldm_stm (is_dbmode);
13530 return;
13533 constraint (is_dbmode && !inst.operands[0].writeback,
13534 _("writeback (!) must be used for VLDMDB and VSTMDB"));
13536 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
13537 _("register list must contain at least 1 and at most 16 "
13538 "registers"));
13540 inst.instruction |= inst.operands[0].reg << 16;
13541 inst.instruction |= inst.operands[0].writeback << 21;
13542 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
13543 inst.instruction |= HI1 (inst.operands[1].reg) << 22;
13545 inst.instruction |= offsetbits;
13547 do_vfp_cond_or_thumb ();
13550 static void
13551 do_neon_ldr_str (void)
13553 int is_ldr = (inst.instruction & (1 << 20)) != 0;
13555 if (inst.operands[0].issingle)
13557 if (is_ldr)
13558 do_vfp_nsyn_opcode ("flds");
13559 else
13560 do_vfp_nsyn_opcode ("fsts");
13562 else
13564 if (is_ldr)
13565 do_vfp_nsyn_opcode ("fldd");
13566 else
13567 do_vfp_nsyn_opcode ("fstd");
13571 /* "interleave" version also handles non-interleaving register VLD1/VST1
13572 instructions. */
13574 static void
13575 do_neon_ld_st_interleave (void)
13577 struct neon_type_el et = neon_check_type (1, NS_NULL,
13578 N_8 | N_16 | N_32 | N_64);
13579 unsigned alignbits = 0;
13580 unsigned idx;
13581 /* The bits in this table go:
13582 0: register stride of one (0) or two (1)
13583 1,2: register list length, minus one (1, 2, 3, 4).
13584 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
13585 We use -1 for invalid entries. */
13586 const int typetable[] =
13588 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
13589 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
13590 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
13591 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
13593 int typebits;
13595 if (et.type == NT_invtype)
13596 return;
13598 if (inst.operands[1].immisalign)
13599 switch (inst.operands[1].imm >> 8)
13601 case 64: alignbits = 1; break;
13602 case 128:
13603 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
13604 goto bad_alignment;
13605 alignbits = 2;
13606 break;
13607 case 256:
13608 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
13609 goto bad_alignment;
13610 alignbits = 3;
13611 break;
13612 default:
13613 bad_alignment:
13614 first_error (_("bad alignment"));
13615 return;
13618 inst.instruction |= alignbits << 4;
13619 inst.instruction |= neon_logbits (et.size) << 6;
13621 /* Bits [4:6] of the immediate in a list specifier encode register stride
13622 (minus 1) in bit 4, and list length (minus 1) in bits [5:6]. We put the
13623 <n> of VLD<n>/VST<n>, minus one, in bits [9:8] of the initial bitmask. Suck it out here, look
13624 up the right value for "type" in a table based on this value and the given
13625 list style, then stick it back. */
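/* Worked example (illustrative): for "vld2.16 {d0,d1}, [r0]" bit 4 of the
   list specifier is 0 (stride one) and bits [5:6] hold 1 (two registers),
   while the VLD2 bitmask supplies <n> minus one = 1 in bits [9:8]; idx is
   therefore 2 | (1 << 3) = 10 and typetable[10] = 0x8, the "type" field
   for a two-register VLD2 with stride one.  */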
13626 idx = ((inst.operands[0].imm >> 4) & 7)
13627 | (((inst.instruction >> 8) & 3) << 3);
13629 typebits = typetable[idx];
13631 constraint (typebits == -1, _("bad list type for instruction"));
13633 inst.instruction &= ~0xf00;
13634 inst.instruction |= typebits << 8;
13637 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
13638 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
13639 otherwise. The variable arguments are a list of pairs of legal (size, align)
13640 values, terminated with -1. */
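/* For example (matching the uses below), a call such as

	neon_alignment_bit (et.size, align, &do_align, 16, 16, 32, 32, -1);

   accepts an explicit alignment of 16 bits for 16-bit elements and 32 bits
   for 32-bit elements, and rejects any other explicit alignment.  */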
13642 static int
13643 neon_alignment_bit (int size, int align, int *do_align, ...)
13645 va_list ap;
13646 int result = FAIL, thissize, thisalign;
13648 if (!inst.operands[1].immisalign)
13650 *do_align = 0;
13651 return SUCCESS;
13654 va_start (ap, do_align);
13658 thissize = va_arg (ap, int);
13659 if (thissize == -1)
13660 break;
13661 thisalign = va_arg (ap, int);
13663 if (size == thissize && align == thisalign)
13664 result = SUCCESS;
13666 while (result != SUCCESS);
13668 va_end (ap);
13670 if (result == SUCCESS)
13671 *do_align = 1;
13672 else
13673 first_error (_("unsupported alignment for instruction"));
13675 return result;
13678 static void
13679 do_neon_ld_st_lane (void)
13681 struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
13682 int align_good, do_align = 0;
13683 int logsize = neon_logbits (et.size);
13684 int align = inst.operands[1].imm >> 8;
13685 int n = (inst.instruction >> 8) & 3;
13686 int max_el = 64 / et.size;
13688 if (et.type == NT_invtype)
13689 return;
13691 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
13692 _("bad list length"));
13693 constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
13694 _("scalar index out of range"));
13695 constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
13696 && et.size == 8,
13697 _("stride of 2 unavailable when element size is 8"));
13699 switch (n)
13701 case 0: /* VLD1 / VST1. */
13702 align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
13703 32, 32, -1);
13704 if (align_good == FAIL)
13705 return;
13706 if (do_align)
13708 unsigned alignbits = 0;
13709 switch (et.size)
13711 case 16: alignbits = 0x1; break;
13712 case 32: alignbits = 0x3; break;
13713 default: ;
13715 inst.instruction |= alignbits << 4;
13717 break;
13719 case 1: /* VLD2 / VST2. */
13720 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
13721 32, 64, -1);
13722 if (align_good == FAIL)
13723 return;
13724 if (do_align)
13725 inst.instruction |= 1 << 4;
13726 break;
13728 case 2: /* VLD3 / VST3. */
13729 constraint (inst.operands[1].immisalign,
13730 _("can't use alignment with this instruction"));
13731 break;
13733 case 3: /* VLD4 / VST4. */
13734 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
13735 16, 64, 32, 64, 32, 128, -1);
13736 if (align_good == FAIL)
13737 return;
13738 if (do_align)
13740 unsigned alignbits = 0;
13741 switch (et.size)
13743 case 8: alignbits = 0x1; break;
13744 case 16: alignbits = 0x1; break;
13745 case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
13746 default: ;
13748 inst.instruction |= alignbits << 4;
13750 break;
13752 default: ;
13755 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
13756 if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
13757 inst.instruction |= 1 << (4 + logsize);
13759 inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
13760 inst.instruction |= logsize << 10;
13763 /* Encode single n-element structure to all lanes VLD<n> instructions. */
13765 static void
13766 do_neon_ld_dup (void)
13768 struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
13769 int align_good, do_align = 0;
13771 if (et.type == NT_invtype)
13772 return;
13774 switch ((inst.instruction >> 8) & 3)
13776 case 0: /* VLD1. */
13777 assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
13778 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
13779 &do_align, 16, 16, 32, 32, -1);
13780 if (align_good == FAIL)
13781 return;
13782 switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
13784 case 1: break;
13785 case 2: inst.instruction |= 1 << 5; break;
13786 default: first_error (_("bad list length")); return;
13788 inst.instruction |= neon_logbits (et.size) << 6;
13789 break;
13791 case 1: /* VLD2. */
13792 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
13793 &do_align, 8, 16, 16, 32, 32, 64, -1);
13794 if (align_good == FAIL)
13795 return;
13796 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
13797 _("bad list length"));
13798 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
13799 inst.instruction |= 1 << 5;
13800 inst.instruction |= neon_logbits (et.size) << 6;
13801 break;
13803 case 2: /* VLD3. */
13804 constraint (inst.operands[1].immisalign,
13805 _("can't use alignment with this instruction"));
13806 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
13807 _("bad list length"));
13808 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
13809 inst.instruction |= 1 << 5;
13810 inst.instruction |= neon_logbits (et.size) << 6;
13811 break;
13813 case 3: /* VLD4. */
13815 int align = inst.operands[1].imm >> 8;
13816 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
13817 16, 64, 32, 64, 32, 128, -1);
13818 if (align_good == FAIL)
13819 return;
13820 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
13821 _("bad list length"));
13822 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
13823 inst.instruction |= 1 << 5;
13824 if (et.size == 32 && align == 128)
13825 inst.instruction |= 0x3 << 6;
13826 else
13827 inst.instruction |= neon_logbits (et.size) << 6;
13829 break;
13831 default: ;
13834 inst.instruction |= do_align << 4;
13837 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
13838 apart from bits [11:4]). */
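/* For illustration (UAL syntax assumed): "vld2.16 {d0,d1}, [r0]" uses the
   interleave form, "vld2.16 {d0[2],d1[2]}, [r0]" the single-lane form, and
   "vld2.16 {d0[],d1[]}, [r0]" the all-lanes form (NEON_ALL_LANES, encoded
   by do_neon_ld_dup).  */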
13840 static void
13841 do_neon_ldx_stx (void)
13843 switch (NEON_LANE (inst.operands[0].imm))
13845 case NEON_INTERLEAVE_LANES:
13846 inst.instruction = NEON_ENC_INTERLV (inst.instruction);
13847 do_neon_ld_st_interleave ();
13848 break;
13850 case NEON_ALL_LANES:
13851 inst.instruction = NEON_ENC_DUP (inst.instruction);
13852 do_neon_ld_dup ();
13853 break;
13855 default:
13856 inst.instruction = NEON_ENC_LANE (inst.instruction);
13857 do_neon_ld_st_lane ();
13860 /* L bit comes from bit mask. */
13861 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13862 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13863 inst.instruction |= inst.operands[1].reg << 16;
13865 if (inst.operands[1].postind)
13867 int postreg = inst.operands[1].imm & 0xf;
13868 constraint (!inst.operands[1].immisreg,
13869 _("post-index must be a register"));
13870 constraint (postreg == 0xd || postreg == 0xf,
13871 _("bad register for post-index"));
13872 inst.instruction |= postreg;
13874 else if (inst.operands[1].writeback)
13876 inst.instruction |= 0xd;
13878 else
13879 inst.instruction |= 0xf;
13881 if (thumb_mode)
13882 inst.instruction |= 0xf9000000;
13883 else
13884 inst.instruction |= 0xf4000000;
13887 /* Overall per-instruction processing. */
13889 /* We need to be able to fix up arbitrary expressions in some statements.
13890 This is so that we can handle symbols that are an arbitrary distance from
13891 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
13892 which returns part of an address in a form which will be valid for
13893 a data instruction. We do this by pushing the expression into a symbol
13894 in the expr_section, and creating a fix for that. */
13896 static void
13897 fix_new_arm (fragS * frag,
13898 int where,
13899 short int size,
13900 expressionS * exp,
13901 int pc_rel,
13902 int reloc)
13904 fixS * new_fix;
13906 switch (exp->X_op)
13908 case O_constant:
13909 case O_symbol:
13910 case O_add:
13911 case O_subtract:
13912 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
13913 break;
13915 default:
13916 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
13917 pc_rel, reloc);
13918 break;
13921 /* Mark whether the fix is to a THUMB instruction, or an ARM
13922 instruction. */
13923 new_fix->tc_fix_data = thumb_mode;
13926 /* Create a frag for an instruction requiring relaxation. */
13927 static void
13928 output_relax_insn (void)
13930 char * to;
13931 symbolS *sym;
13932 int offset;
13934 /* The size of the instruction is unknown, so tie the debug info to the
13935 start of the instruction. */
13936 dwarf2_emit_insn (0);
13938 switch (inst.reloc.exp.X_op)
13940 case O_symbol:
13941 sym = inst.reloc.exp.X_add_symbol;
13942 offset = inst.reloc.exp.X_add_number;
13943 break;
13944 case O_constant:
13945 sym = NULL;
13946 offset = inst.reloc.exp.X_add_number;
13947 break;
13948 default:
13949 sym = make_expr_symbol (&inst.reloc.exp);
13950 offset = 0;
13951 break;
13953 to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
13954 inst.relax, sym, offset, NULL/*offset, opcode*/);
13955 md_number_to_chars (to, inst.instruction, THUMB_SIZE);
13958 /* Write a 32-bit thumb instruction to buf. */
13959 static void
13960 put_thumb32_insn (char * buf, unsigned long insn)
13962 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
13963 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
13966 static void
13967 output_inst (const char * str)
13969 char * to = NULL;
13971 if (inst.error)
13973 as_bad ("%s -- `%s'", inst.error, str);
13974 return;
13976 if (inst.relax)
13978 output_relax_insn ();
13979 return;
13981 if (inst.size == 0)
13982 return;
13984 to = frag_more (inst.size);
13986 if (thumb_mode && (inst.size > THUMB_SIZE))
13988 assert (inst.size == (2 * THUMB_SIZE));
13989 put_thumb32_insn (to, inst.instruction);
13991 else if (inst.size > INSN_SIZE)
13993 assert (inst.size == (2 * INSN_SIZE));
13994 md_number_to_chars (to, inst.instruction, INSN_SIZE);
13995 md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
13997 else
13998 md_number_to_chars (to, inst.instruction, inst.size);
14000 if (inst.reloc.type != BFD_RELOC_UNUSED)
14001 fix_new_arm (frag_now, to - frag_now->fr_literal,
14002 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
14003 inst.reloc.type);
14005 dwarf2_emit_insn (inst.size);
14008 /* Tag values used in struct asm_opcode's tag field. */
14009 enum opcode_tag
14011 OT_unconditional, /* Instruction cannot be conditionalized.
14012 The ARM condition field is still 0xE. */
14013 OT_unconditionalF, /* Instruction cannot be conditionalized
14014 and carries 0xF in its ARM condition field. */
14015 OT_csuffix, /* Instruction takes a conditional suffix. */
14016 OT_csuffixF, /* Some forms of the instruction take a conditional
14017 suffix, others place 0xF where the condition field
14018 would be. */
14019 OT_cinfix3, /* Instruction takes a conditional infix,
14020 beginning at character index 3. (In
14021 unified mode, it becomes a suffix.) */
14022 OT_cinfix3_deprecated, /* The same as OT_cinfix3. This is used for
14023 tsts, cmps, cmns, and teqs. */
14024 OT_cinfix3_legacy, /* Legacy instruction takes a conditional infix at
14025 character index 3, even in unified mode. Used for
14026 legacy instructions where suffix and infix forms
14027 may be ambiguous. */
14028 OT_csuf_or_in3, /* Instruction takes either a conditional
14029 suffix or an infix at character index 3. */
14030 OT_odd_infix_unc, /* This is the unconditional variant of an
14031 instruction that takes a conditional infix
14032 at an unusual position. In unified mode,
14033 this variant will accept a suffix. */
14034 OT_odd_infix_0 /* Values greater than or equal to OT_odd_infix_0
14035 are the conditional variants of instructions that
14036 take conditional infixes in unusual positions.
14037 The infix appears at character index
14038 (tag - OT_odd_infix_0). These are not accepted
14039 in unified mode. */
14042 /* Subroutine of md_assemble, responsible for looking up the primary
14043 opcode from the mnemonic the user wrote. STR points to the
14044 beginning of the mnemonic.
14046 This is not simply a hash table lookup, because of conditional
14047 variants. Most instructions have conditional variants, which are
14048 expressed with a _conditional affix_ to the mnemonic. If we were
14049 to encode each conditional variant as a literal string in the opcode
14050 table, it would have approximately 20,000 entries.
14052 Most mnemonics take this affix as a suffix, and in unified syntax,
14053 'most' is upgraded to 'all'. However, in the divided syntax, some
14054 instructions take the affix as an infix, notably the s-variants of
14055 the arithmetic instructions. Of those instructions, all but six
14056 have the infix appear after the third character of the mnemonic.
14058 Accordingly, the algorithm for looking up primary opcodes given
14059 an identifier is:
14061 1. Look up the identifier in the opcode table.
14062 If we find a match, go to step U.
14064 2. Look up the last two characters of the identifier in the
14065 conditions table. If we find a match, look up the first N-2
14066 characters of the identifier in the opcode table. If we
14067 find a match, go to step CE.
14069 3. Look up the fourth and fifth characters of the identifier in
14070 the conditions table. If we find a match, extract those
14071 characters from the identifier, and look up the remaining
14072 characters in the opcode table. If we find a match, go
14073 to step CM.
14075 4. Fail.
14077 U. Examine the tag field of the opcode structure, in case this is
14078 one of the six instructions with its conditional infix in an
14079 unusual place. If it is, the tag tells us where to find the
14080 infix; look it up in the conditions table and set inst.cond
14081 accordingly. Otherwise, this is an unconditional instruction.
14082 Again set inst.cond accordingly. Return the opcode structure.
14084 CE. Examine the tag field to make sure this is an instruction that
14085 should receive a conditional suffix. If it is not, fail.
14086 Otherwise, set inst.cond from the suffix we already looked up,
14087 and return the opcode structure.
14089 CM. Examine the tag field to make sure this is an instruction that
14090 should receive a conditional infix after the third character.
14091 If it is not, fail. Otherwise, undo the edits to the current
14092 line of input and proceed as for case CE. */
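/* Worked examples of the algorithm above (illustrative): "add" is found
   directly in step 1 and handled as case U; "addeq" fails step 1, but step
   2 finds the suffix "eq" and the base "add", giving case CE; "addeqs"
   (divided syntax) fails steps 1 and 2, but step 3 finds "eq" in the
   fourth and fifth characters and "adds" in the opcode table, giving
   case CM.  */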
14094 static const struct asm_opcode *
14095 opcode_lookup (char **str)
14097 char *end, *base;
14098 char *affix;
14099 const struct asm_opcode *opcode;
14100 const struct asm_cond *cond;
14101 char save[2];
14102 bfd_boolean neon_supported;
14104 neon_supported = ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1);
14106 /* Scan up to the end of the mnemonic, which must end in white space,
14107 '.' (in unified mode, or for Neon instructions), or end of string. */
14108 for (base = end = *str; *end != '\0'; end++)
14109 if (*end == ' ' || ((unified_syntax || neon_supported) && *end == '.'))
14110 break;
14112 if (end == base)
14113 return 0;
14115 /* Handle a possible width suffix and/or Neon type suffix. */
14116 if (end[0] == '.')
14118 int offset = 2;
14120 /* The .w and .n suffixes are only valid if the unified syntax is in
14121 use. */
14122 if (unified_syntax && end[1] == 'w')
14123 inst.size_req = 4;
14124 else if (unified_syntax && end[1] == 'n')
14125 inst.size_req = 2;
14126 else
14127 offset = 0;
14129 inst.vectype.elems = 0;
14131 *str = end + offset;
14133 if (end[offset] == '.')
14135 /* See if we have a Neon type suffix (possible in either unified or
14136 non-unified ARM syntax mode). */
14137 if (parse_neon_type (&inst.vectype, str) == FAIL)
14138 return 0;
14140 else if (end[offset] != '\0' && end[offset] != ' ')
14141 return 0;
14143 else
14144 *str = end;
14146 /* Look for unaffixed or special-case affixed mnemonic. */
14147 opcode = hash_find_n (arm_ops_hsh, base, end - base);
14148 if (opcode)
14150 /* step U */
14151 if (opcode->tag < OT_odd_infix_0)
14153 inst.cond = COND_ALWAYS;
14154 return opcode;
14157 if (unified_syntax)
14158 as_warn (_("conditional infixes are deprecated in unified syntax"));
14159 affix = base + (opcode->tag - OT_odd_infix_0);
14160 cond = hash_find_n (arm_cond_hsh, affix, 2);
14161 assert (cond);
14163 inst.cond = cond->value;
14164 return opcode;
14167 /* Cannot have a conditional suffix on a mnemonic of fewer than three
14168 characters (a one-character base plus the two-character suffix). */
14169 if (end - base < 3)
14170 return 0;
14172 /* Look for suffixed mnemonic. */
14173 affix = end - 2;
14174 cond = hash_find_n (arm_cond_hsh, affix, 2);
14175 opcode = hash_find_n (arm_ops_hsh, base, affix - base);
14176 if (opcode && cond)
14178 /* step CE */
14179 switch (opcode->tag)
14181 case OT_cinfix3_legacy:
14182 /* Ignore conditional suffixes matched on infix only mnemonics. */
14183 break;
14185 case OT_cinfix3:
14186 case OT_cinfix3_deprecated:
14187 case OT_odd_infix_unc:
14188 if (!unified_syntax)
14189 return 0;
14190 /* else fall through */
14192 case OT_csuffix:
14193 case OT_csuffixF:
14194 case OT_csuf_or_in3:
14195 inst.cond = cond->value;
14196 return opcode;
14198 case OT_unconditional:
14199 case OT_unconditionalF:
14200 if (thumb_mode)
14202 inst.cond = cond->value;
14204 else
14206 /* delayed diagnostic */
14207 inst.error = BAD_COND;
14208 inst.cond = COND_ALWAYS;
14210 return opcode;
14212 default:
14213 return 0;
14217 /* Cannot have a usual-position infix on a mnemonic of less than
14218 six characters (five would be a suffix). */
14219 if (end - base < 6)
14220 return 0;
14222 /* Look for infixed mnemonic in the usual position. */
14223 affix = base + 3;
14224 cond = hash_find_n (arm_cond_hsh, affix, 2);
14225 if (!cond)
14226 return 0;
14228 memcpy (save, affix, 2);
14229 memmove (affix, affix + 2, (end - affix) - 2);
14230 opcode = hash_find_n (arm_ops_hsh, base, (end - base) - 2);
14231 memmove (affix + 2, affix, (end - affix) - 2);
14232 memcpy (affix, save, 2);
14234 if (opcode
14235 && (opcode->tag == OT_cinfix3
14236 || opcode->tag == OT_cinfix3_deprecated
14237 || opcode->tag == OT_csuf_or_in3
14238 || opcode->tag == OT_cinfix3_legacy))
14240 /* step CM */
14241 if (unified_syntax
14242 && (opcode->tag == OT_cinfix3
14243 || opcode->tag == OT_cinfix3_deprecated))
14244 as_warn (_("conditional infixes are deprecated in unified syntax"));
14246 inst.cond = cond->value;
14247 return opcode;
14250 return 0;
14253 void
14254 md_assemble (char *str)
14256 char *p = str;
14257 const struct asm_opcode * opcode;
14259 /* Align the previous label if needed. */
14260 if (last_label_seen != NULL)
14262 symbol_set_frag (last_label_seen, frag_now);
14263 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
14264 S_SET_SEGMENT (last_label_seen, now_seg);
14267 memset (&inst, '\0', sizeof (inst));
14268 inst.reloc.type = BFD_RELOC_UNUSED;
14270 opcode = opcode_lookup (&p);
14271 if (!opcode)
14273 /* It wasn't an instruction, but it might be a register alias of
14274 the form alias .req reg, or a Neon .dn/.qn directive. */
14275 if (!create_register_alias (str, p)
14276 && !create_neon_reg_alias (str, p))
14277 as_bad (_("bad instruction `%s'"), str);
14279 return;
14282 if (opcode->tag == OT_cinfix3_deprecated)
14283 as_warn (_("s suffix on comparison instruction is deprecated"));
14285 /* The value which unconditional instructions should have in place of the
14286 condition field. */
14287 inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;
14289 if (thumb_mode)
14291 arm_feature_set variant;
14293 variant = cpu_variant;
14294 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
14295 if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
14296 ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
14297 /* Check that this instruction is supported for this CPU. */
14298 if (!opcode->tvariant
14299 || (thumb_mode == 1
14300 && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
14302 as_bad (_("selected processor does not support `%s'"), str);
14303 return;
14305 if (inst.cond != COND_ALWAYS && !unified_syntax
14306 && opcode->tencode != do_t_branch)
14308 as_bad (_("Thumb does not support conditional execution"));
14309 return;
14312 if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2) && !inst.size_req)
14314 /* Implicitly require narrow instructions on Thumb-1. This avoids
14315 relaxation accidentally introducing Thumb-2 instructions. */
14316 if (opcode->tencode != do_t_blx && opcode->tencode != do_t_branch23
14317 && !ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_msr))
14318 inst.size_req = 2;
14321 /* Check conditional suffixes. */
14322 if (current_it_mask)
14324 int cond;
14325 cond = current_cc ^ ((current_it_mask >> 4) & 1) ^ 1;
14326 current_it_mask <<= 1;
14327 current_it_mask &= 0x1f;
14328 /* The BKPT instruction is unconditional even in an IT block. */
14329 if (!inst.error
14330 && cond != inst.cond && opcode->tencode != do_t_bkpt)
14332 as_bad (_("incorrect condition in IT block"));
14333 return;
14336 else if (inst.cond != COND_ALWAYS && opcode->tencode != do_t_branch)
14338 as_bad (_("thumb conditional instruction not in IT block"));
14339 return;
14342 mapping_state (MAP_THUMB);
14343 inst.instruction = opcode->tvalue;
14345 if (!parse_operands (p, opcode->operands))
14346 opcode->tencode ();
14348 /* Clear current_it_mask at the end of an IT block. */
14349 if (current_it_mask == 0x10)
14350 current_it_mask = 0;
14352 if (!(inst.error || inst.relax))
14354 assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
14355 inst.size = (inst.instruction > 0xffff ? 4 : 2);
14356 if (inst.size_req && inst.size_req != inst.size)
14358 as_bad (_("cannot honor width suffix -- `%s'"), str);
14359 return;
14363 /* Something has gone badly wrong if we try to relax a fixed size
14364 instruction. */
14365 assert (inst.size_req == 0 || !inst.relax);
14367 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
14368 *opcode->tvariant);
14369 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
14370 set those bits when Thumb-2 32-bit instructions are seen, i.e. anything
14371 anything other than bl/blx and v6-M instructions.
14372 This is overly pessimistic for relaxable instructions. */
14373 if (((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
14374 || inst.relax)
14375 && !ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_msr))
14376 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
14377 arm_ext_v6t2);
14379 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
14381 bfd_boolean is_bx;
14383 /* bx is allowed on v5 cores, and sometimes on v4 cores. */
14384 is_bx = (opcode->aencode == do_bx);
14386 /* Check that this instruction is supported for this CPU. */
14387 if (!(is_bx && fix_v4bx)
14388 && !(opcode->avariant &&
14389 ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
14391 as_bad (_("selected processor does not support `%s'"), str);
14392 return;
14394 if (inst.size_req)
14396 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
14397 return;
14400 mapping_state (MAP_ARM);
14401 inst.instruction = opcode->avalue;
14402 if (opcode->tag == OT_unconditionalF)
14403 inst.instruction |= 0xF << 28;
14404 else
14405 inst.instruction |= inst.cond << 28;
14406 inst.size = INSN_SIZE;
14407 if (!parse_operands (p, opcode->operands))
14408 opcode->aencode ();
14409 /* Arm mode bx is marked as both v4T and v5 because it's still required
14410 on a hypothetical non-thumb v5 core. */
14411 if (is_bx)
14412 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
14413 else
14414 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
14415 *opcode->avariant);
14417 else
14419 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
14420 "-- `%s'"), str);
14421 return;
14423 output_inst (str);
14426 /* Various frobbings of labels and their addresses. */
14428 void
14429 arm_start_line_hook (void)
14431 last_label_seen = NULL;
14434 void
14435 arm_frob_label (symbolS * sym)
14437 last_label_seen = sym;
14439 ARM_SET_THUMB (sym, thumb_mode);
14441 #if defined OBJ_COFF || defined OBJ_ELF
14442 ARM_SET_INTERWORK (sym, support_interwork);
14443 #endif
14445 /* Note - do not allow local symbols (.Lxxx) to be labelled
14446 as Thumb functions. This is because these labels, whilst
14447 they exist inside Thumb code, are not the entry points for
14448 possible ARM->Thumb calls. Also, these labels can be used
14449 as part of a computed goto or switch statement. For example, gcc
14450 can generate code that looks like this:
14452 ldr r2, [pc, .Laaa]
14453 lsl r3, r3, #2
14454 ldr r2, [r3, r2]
14455 mov pc, r2
14457 .Lbbb: .word .Lxxx
14458 .Lccc: .word .Lyyy
14459 ..etc...
14460 .Laaa: .word .Lbbb
14462 The first instruction loads the address of the jump table.
14463 The second instruction converts a table index into a byte offset.
14464 The third instruction gets the jump address out of the table.
14465 The fourth instruction performs the jump.
14467 If the address stored at .Laaa is that of a symbol which has the
14468 Thumb_Func bit set, then the linker will arrange for this address
14469 to have the bottom bit set, which in turn would mean that the
14470 address computation performed by the third instruction would end
14471 up with the bottom bit set. Since the ARM is capable of unaligned
14472 word loads, the instruction would then load the incorrect address
14473 out of the jump table, and chaos would ensue. */
14474 if (label_is_thumb_function_name
14475 && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
14476 && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
14478 /* When the address of a Thumb function is taken the bottom
14479 bit of that address should be set. This will allow
14480 interworking between Arm and Thumb functions to work
14481 correctly. */
14483 THUMB_SET_FUNC (sym, 1);
14485 label_is_thumb_function_name = FALSE;
14488 dwarf2_emit_label (sym);
14492 arm_data_in_code (void)
14494 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
14496 *input_line_pointer = '/';
14497 input_line_pointer += 5;
14498 *input_line_pointer = 0;
14499 return 1;
14502 return 0;
14505 char *
14506 arm_canonicalize_symbol_name (char * name)
14508 int len;
14510 if (thumb_mode && (len = strlen (name)) > 5
14511 && streq (name + len - 5, "/data"))
14512 *(name + len - 5) = 0;
14514 return name;
14517 /* Table of all register names defined by default. The user can
14518 define additional names with .req. Note that all register names
14519 should appear in both upper and lowercase variants. Some registers
14520 also have mixed-case names. */
14522 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
14523 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
14524 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
14525 #define REGSET(p,t) \
14526 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
14527 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
14528 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
14529 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
14530 #define REGSETH(p,t) \
14531 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
14532 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
14533 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
14534 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
14535 #define REGSET2(p,t) \
14536 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
14537 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
14538 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
14539 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
14541 static const struct reg_entry reg_names[] =
14543 /* ARM integer registers. */
14544 REGSET(r, RN), REGSET(R, RN),
14546 /* ATPCS synonyms. */
14547 REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
14548 REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
14549 REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),
14551 REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
14552 REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
14553 REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),
14555 /* Well-known aliases. */
14556 REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
14557 REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),
14559 REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
14560 REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),
14562 /* Coprocessor numbers. */
14563 REGSET(p, CP), REGSET(P, CP),
14565 /* Coprocessor register numbers. The "cr" variants are for backward
14566 compatibility. */
14567 REGSET(c, CN), REGSET(C, CN),
14568 REGSET(cr, CN), REGSET(CR, CN),
14570 /* FPA registers. */
14571 REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
14572 REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),
14574 REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
14575 REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),
14577 /* VFP SP registers. */
14578 REGSET(s,VFS), REGSET(S,VFS),
14579 REGSETH(s,VFS), REGSETH(S,VFS),
14581 /* VFP DP Registers. */
14582 REGSET(d,VFD), REGSET(D,VFD),
14583 /* Extra Neon DP registers. */
14584 REGSETH(d,VFD), REGSETH(D,VFD),
14586 /* Neon QP registers. */
14587 REGSET2(q,NQ), REGSET2(Q,NQ),
14589 /* VFP control registers. */
14590 REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
14591 REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
14592 REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
14593 REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
14594 REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
14595 REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),
14597 /* Maverick DSP coprocessor registers. */
14598 REGSET(mvf,MVF), REGSET(mvd,MVD), REGSET(mvfx,MVFX), REGSET(mvdx,MVDX),
14599 REGSET(MVF,MVF), REGSET(MVD,MVD), REGSET(MVFX,MVFX), REGSET(MVDX,MVDX),
14601 REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
14602 REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
14603 REGDEF(dspsc,0,DSPSC),
14605 REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
14606 REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
14607 REGDEF(DSPSC,0,DSPSC),
14609 /* iWMMXt data registers - p0, c0-15. */
14610 REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),
14612 /* iWMMXt control registers - p1, c0-3. */
14613 REGDEF(wcid, 0,MMXWC), REGDEF(wCID, 0,MMXWC), REGDEF(WCID, 0,MMXWC),
14614 REGDEF(wcon, 1,MMXWC), REGDEF(wCon, 1,MMXWC), REGDEF(WCON, 1,MMXWC),
14615 REGDEF(wcssf, 2,MMXWC), REGDEF(wCSSF, 2,MMXWC), REGDEF(WCSSF, 2,MMXWC),
14616 REGDEF(wcasf, 3,MMXWC), REGDEF(wCASF, 3,MMXWC), REGDEF(WCASF, 3,MMXWC),
14618 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
14619 REGDEF(wcgr0, 8,MMXWCG), REGDEF(wCGR0, 8,MMXWCG), REGDEF(WCGR0, 8,MMXWCG),
14620 REGDEF(wcgr1, 9,MMXWCG), REGDEF(wCGR1, 9,MMXWCG), REGDEF(WCGR1, 9,MMXWCG),
14621 REGDEF(wcgr2,10,MMXWCG), REGDEF(wCGR2,10,MMXWCG), REGDEF(WCGR2,10,MMXWCG),
14622 REGDEF(wcgr3,11,MMXWCG), REGDEF(wCGR3,11,MMXWCG), REGDEF(WCGR3,11,MMXWCG),
14624 /* XScale accumulator registers. */
14625 REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
14626 };
14627 #undef REGDEF
14628 #undef REGNUM
14629 #undef REGSET
14631 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
14632 within psr_required_here. */
14633 static const struct asm_psr psrs[] =
14634 {
14635 /* Backward compatibility notation. Note that "all" is no longer
14636 truly all possible PSR bits. */
14637 {"all", PSR_c | PSR_f},
14638 {"flg", PSR_f},
14639 {"ctl", PSR_c},
14641 /* Individual flags. */
14642 {"f", PSR_f},
14643 {"c", PSR_c},
14644 {"x", PSR_x},
14645 {"s", PSR_s},
14646 /* Combinations of flags. */
14647 {"fs", PSR_f | PSR_s},
14648 {"fx", PSR_f | PSR_x},
14649 {"fc", PSR_f | PSR_c},
14650 {"sf", PSR_s | PSR_f},
14651 {"sx", PSR_s | PSR_x},
14652 {"sc", PSR_s | PSR_c},
14653 {"xf", PSR_x | PSR_f},
14654 {"xs", PSR_x | PSR_s},
14655 {"xc", PSR_x | PSR_c},
14656 {"cf", PSR_c | PSR_f},
14657 {"cs", PSR_c | PSR_s},
14658 {"cx", PSR_c | PSR_x},
14659 {"fsx", PSR_f | PSR_s | PSR_x},
14660 {"fsc", PSR_f | PSR_s | PSR_c},
14661 {"fxs", PSR_f | PSR_x | PSR_s},
14662 {"fxc", PSR_f | PSR_x | PSR_c},
14663 {"fcs", PSR_f | PSR_c | PSR_s},
14664 {"fcx", PSR_f | PSR_c | PSR_x},
14665 {"sfx", PSR_s | PSR_f | PSR_x},
14666 {"sfc", PSR_s | PSR_f | PSR_c},
14667 {"sxf", PSR_s | PSR_x | PSR_f},
14668 {"sxc", PSR_s | PSR_x | PSR_c},
14669 {"scf", PSR_s | PSR_c | PSR_f},
14670 {"scx", PSR_s | PSR_c | PSR_x},
14671 {"xfs", PSR_x | PSR_f | PSR_s},
14672 {"xfc", PSR_x | PSR_f | PSR_c},
14673 {"xsf", PSR_x | PSR_s | PSR_f},
14674 {"xsc", PSR_x | PSR_s | PSR_c},
14675 {"xcf", PSR_x | PSR_c | PSR_f},
14676 {"xcs", PSR_x | PSR_c | PSR_s},
14677 {"cfs", PSR_c | PSR_f | PSR_s},
14678 {"cfx", PSR_c | PSR_f | PSR_x},
14679 {"csf", PSR_c | PSR_s | PSR_f},
14680 {"csx", PSR_c | PSR_s | PSR_x},
14681 {"cxf", PSR_c | PSR_x | PSR_f},
14682 {"cxs", PSR_c | PSR_x | PSR_s},
14683 {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
14684 {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
14685 {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
14686 {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
14687 {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
14688 {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
14689 {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
14690 {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
14691 {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
14692 {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
14693 {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
14694 {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
14695 {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
14696 {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
14697 {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
14698 {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
14699 {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
14700 {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
14701 {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
14702 {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
14703 {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
14704 {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
14705 {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
14706 {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
14707 };
14709 /* Table of V7M psr names. */
14710 static const struct asm_psr v7m_psrs[] =
14711 {
14712 {"apsr", 0 }, {"APSR", 0 },
14713 {"iapsr", 1 }, {"IAPSR", 1 },
14714 {"eapsr", 2 }, {"EAPSR", 2 },
14715 {"psr", 3 }, {"PSR", 3 },
14716 {"xpsr", 3 }, {"XPSR", 3 }, {"xPSR", 3 },
14717 {"ipsr", 5 }, {"IPSR", 5 },
14718 {"epsr", 6 }, {"EPSR", 6 },
14719 {"iepsr", 7 }, {"IEPSR", 7 },
14720 {"msp", 8 }, {"MSP", 8 },
14721 {"psp", 9 }, {"PSP", 9 },
14722 {"primask", 16}, {"PRIMASK", 16},
14723 {"basepri", 17}, {"BASEPRI", 17},
14724 {"basepri_max", 18}, {"BASEPRI_MAX", 18},
14725 {"faultmask", 19}, {"FAULTMASK", 19},
14726 {"control", 20}, {"CONTROL", 20}
14727 };
14729 /* Table of all shift-in-operand names. */
14730 static const struct asm_shift_name shift_names [] =
14731 {
14732 { "asl", SHIFT_LSL }, { "ASL", SHIFT_LSL },
14733 { "lsl", SHIFT_LSL }, { "LSL", SHIFT_LSL },
14734 { "lsr", SHIFT_LSR }, { "LSR", SHIFT_LSR },
14735 { "asr", SHIFT_ASR }, { "ASR", SHIFT_ASR },
14736 { "ror", SHIFT_ROR }, { "ROR", SHIFT_ROR },
14737 { "rrx", SHIFT_RRX }, { "RRX", SHIFT_RRX }
14738 };
14740 /* Table of all explicit relocation names. */
14741 #ifdef OBJ_ELF
14742 static struct reloc_entry reloc_names[] =
14743 {
14744 { "got", BFD_RELOC_ARM_GOT32 }, { "GOT", BFD_RELOC_ARM_GOT32 },
14745 { "gotoff", BFD_RELOC_ARM_GOTOFF }, { "GOTOFF", BFD_RELOC_ARM_GOTOFF },
14746 { "plt", BFD_RELOC_ARM_PLT32 }, { "PLT", BFD_RELOC_ARM_PLT32 },
14747 { "target1", BFD_RELOC_ARM_TARGET1 }, { "TARGET1", BFD_RELOC_ARM_TARGET1 },
14748 { "target2", BFD_RELOC_ARM_TARGET2 }, { "TARGET2", BFD_RELOC_ARM_TARGET2 },
14749 { "sbrel", BFD_RELOC_ARM_SBREL32 }, { "SBREL", BFD_RELOC_ARM_SBREL32 },
14750 { "tlsgd", BFD_RELOC_ARM_TLS_GD32}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32},
14751 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32},
14752 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32},
14753 { "gottpoff",BFD_RELOC_ARM_TLS_IE32}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
14754 { "tpoff", BFD_RELOC_ARM_TLS_LE32}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32}
14755 };
14756 #endif
14758 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
14759 static const struct asm_cond conds[] =
14760 {
14761 {"eq", 0x0},
14762 {"ne", 0x1},
14763 {"cs", 0x2}, {"hs", 0x2},
14764 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
14765 {"mi", 0x4},
14766 {"pl", 0x5},
14767 {"vs", 0x6},
14768 {"vc", 0x7},
14769 {"hi", 0x8},
14770 {"ls", 0x9},
14771 {"ge", 0xa},
14772 {"lt", 0xb},
14773 {"gt", 0xc},
14774 {"le", 0xd},
14775 {"al", 0xe}
14776 };
14778 static struct asm_barrier_opt barrier_opt_names[] =
14779 {
14780 { "sy", 0xf },
14781 { "un", 0x7 },
14782 { "st", 0xe },
14783 { "unst", 0x6 }
14784 };
14786 /* Table of ARM-format instructions. */
14788 /* Macros for gluing together operand strings. N.B. In all cases
14789 other than OPS0, the trailing OP_stop comes from default
14790 zero-initialization of the unspecified elements of the array. */
14791 #define OPS0() { OP_stop, }
14792 #define OPS1(a) { OP_##a, }
14793 #define OPS2(a,b) { OP_##a,OP_##b, }
14794 #define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
14795 #define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
14796 #define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
14797 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
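/* For illustration: OPS3 (RR, oRR, SH) expands to { OP_RR, OP_oRR, OP_SH, };
   the unspecified trailing elements are zero-initialized, which is where the
   terminating OP_stop comes from for OPS1 through OPS6.  */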
14799 /* These macros abstract out the exact format of the mnemonic table and
14800 save some repeated characters. */
14802 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
14803 #define TxCE(mnem, op, top, nops, ops, ae, te) \
14804 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
14805 THUMB_VARIANT, do_##ae, do_##te }
14807 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
14808 a T_MNEM_xyz enumerator. */
14809 #define TCE(mnem, aop, top, nops, ops, ae, te) \
14810 TxCE(mnem, aop, 0x##top, nops, ops, ae, te)
14811 #define tCE(mnem, aop, top, nops, ops, ae, te) \
14812 TxCE(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
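/* For illustration: the entry tCE (adc, 0a00000, adc, 3, (RR, oRR, SH),
   arit, t_arit3c) below expands to
   { "adc", OPS3 (RR, oRR, SH), OT_csuffix, 0x0a00000, T_MNEM_adc,
     ARM_VARIANT, THUMB_VARIANT, do_arit, do_t_arit3c },
   whereas the TCE form would put a literal 0x-prefixed Thumb opcode where
   T_MNEM_adc appears.  */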
14814 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
14815 infix after the third character. */
14816 #define TxC3(mnem, op, top, nops, ops, ae, te) \
14817 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
14818 THUMB_VARIANT, do_##ae, do_##te }
14819 #define TxC3w(mnem, op, top, nops, ops, ae, te) \
14820 { #mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
14821 THUMB_VARIANT, do_##ae, do_##te }
14822 #define TC3(mnem, aop, top, nops, ops, ae, te) \
14823 TxC3(mnem, aop, 0x##top, nops, ops, ae, te)
14824 #define TC3w(mnem, aop, top, nops, ops, ae, te) \
14825 TxC3w(mnem, aop, 0x##top, nops, ops, ae, te)
14826 #define tC3(mnem, aop, top, nops, ops, ae, te) \
14827 TxC3(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
14828 #define tC3w(mnem, aop, top, nops, ops, ae, te) \
14829 TxC3w(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
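/* For illustration: an OT_cinfix3 entry such as tC3 (ldmia, ...) also matches
   the legacy conditional spelling "ldmeqia", with the condition spliced in
   after the third character, while an OT_csuffix entry such as tCE (add, ...)
   matches the suffixed spelling "addeq".  */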
14831 /* Mnemonic with a conditional infix in an unusual place. Each and every variant has to
14832 appear in the condition table. */
14833 #define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te) \
14834 { #m1 #m2 #m3, OPS##nops ops, sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
14835 0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }
14837 #define TxCM(m1, m2, op, top, nops, ops, ae, te) \
14838 TxCM_(m1, , m2, op, top, nops, ops, ae, te), \
14839 TxCM_(m1, eq, m2, op, top, nops, ops, ae, te), \
14840 TxCM_(m1, ne, m2, op, top, nops, ops, ae, te), \
14841 TxCM_(m1, cs, m2, op, top, nops, ops, ae, te), \
14842 TxCM_(m1, hs, m2, op, top, nops, ops, ae, te), \
14843 TxCM_(m1, cc, m2, op, top, nops, ops, ae, te), \
14844 TxCM_(m1, ul, m2, op, top, nops, ops, ae, te), \
14845 TxCM_(m1, lo, m2, op, top, nops, ops, ae, te), \
14846 TxCM_(m1, mi, m2, op, top, nops, ops, ae, te), \
14847 TxCM_(m1, pl, m2, op, top, nops, ops, ae, te), \
14848 TxCM_(m1, vs, m2, op, top, nops, ops, ae, te), \
14849 TxCM_(m1, vc, m2, op, top, nops, ops, ae, te), \
14850 TxCM_(m1, hi, m2, op, top, nops, ops, ae, te), \
14851 TxCM_(m1, ls, m2, op, top, nops, ops, ae, te), \
14852 TxCM_(m1, ge, m2, op, top, nops, ops, ae, te), \
14853 TxCM_(m1, lt, m2, op, top, nops, ops, ae, te), \
14854 TxCM_(m1, gt, m2, op, top, nops, ops, ae, te), \
14855 TxCM_(m1, le, m2, op, top, nops, ops, ae, te), \
14856 TxCM_(m1, al, m2, op, top, nops, ops, ae, te)
14858 #define TCM(m1,m2, aop, top, nops, ops, ae, te) \
14859 TxCM(m1,m2, aop, 0x##top, nops, ops, ae, te)
14860 #define tCM(m1,m2, aop, top, nops, ops, ae, te) \
14861 TxCM(m1,m2, aop, T_MNEM_##top, nops, ops, ae, te)
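/* For illustration: tCM (ld,sh, ...) used later in the table emits one row
   for "ldsh" and one per condition ("ldeqsh", "ldnesh", ..., "ldalsh"),
   since an infix in this odd position cannot be recognised generically and
   every conditional variant needs its own entry.  */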
14863 /* Mnemonic that cannot be conditionalized. The ARM condition-code
14864 field is still 0xE. Many of the Thumb variants can be executed
14865 conditionally, so this is checked separately. */
14866 #define TUE(mnem, op, top, nops, ops, ae, te) \
14867 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
14868 THUMB_VARIANT, do_##ae, do_##te }
14870 /* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
14871 condition code field. */
14872 #define TUF(mnem, op, top, nops, ops, ae, te) \
14873 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
14874 THUMB_VARIANT, do_##ae, do_##te }
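/* For illustration: TUE (bkpt, 1200070, ...) keeps 0xE in the ARM condition
   field (bits 31-28) of the assembled instruction, while a TUF entry such as
   pld has 0xF placed there instead, marking it as one of the unconditional
   encodings.  */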
14876 /* ARM-only variants of all the above. */
14877 #define CE(mnem, op, nops, ops, ae) \
14878 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
14880 #define C3(mnem, op, nops, ops, ae) \
14881 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
14883 /* Legacy mnemonics that always have conditional infix after the third
14884 character. */
14885 #define CL(mnem, op, nops, ops, ae) \
14886 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
14887 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
14889 /* Coprocessor instructions. Isomorphic between Arm and Thumb-2. */
14890 #define cCE(mnem, op, nops, ops, ae) \
14891 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
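/* For illustration: cCE (wfs, e200110, ...) stores 0xe200110 as the ARM
   opcode and 0xee200110 for Thumb-2, the identical bit pattern behind the
   fixed leading 0xE, which is what "isomorphic" means here.  */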
14893 /* Legacy coprocessor instructions where conditional infix and conditional
14894 suffix are ambiguous. For consistency this includes all FPA instructions,
14895 not just the potentially ambiguous ones. */
14896 #define cCL(mnem, op, nops, ops, ae) \
14897 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
14898 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
14900 /* Coprocessor, takes either a suffix or a position-3 infix
14901 (for an FPA corner case). */
14902 #define C3E(mnem, op, nops, ops, ae) \
14903 { #mnem, OPS##nops ops, OT_csuf_or_in3, \
14904 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
14906 #define xCM_(m1, m2, m3, op, nops, ops, ae) \
14907 { #m1 #m2 #m3, OPS##nops ops, \
14908 sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
14909 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
14911 #define CM(m1, m2, op, nops, ops, ae) \
14912 xCM_(m1, , m2, op, nops, ops, ae), \
14913 xCM_(m1, eq, m2, op, nops, ops, ae), \
14914 xCM_(m1, ne, m2, op, nops, ops, ae), \
14915 xCM_(m1, cs, m2, op, nops, ops, ae), \
14916 xCM_(m1, hs, m2, op, nops, ops, ae), \
14917 xCM_(m1, cc, m2, op, nops, ops, ae), \
14918 xCM_(m1, ul, m2, op, nops, ops, ae), \
14919 xCM_(m1, lo, m2, op, nops, ops, ae), \
14920 xCM_(m1, mi, m2, op, nops, ops, ae), \
14921 xCM_(m1, pl, m2, op, nops, ops, ae), \
14922 xCM_(m1, vs, m2, op, nops, ops, ae), \
14923 xCM_(m1, vc, m2, op, nops, ops, ae), \
14924 xCM_(m1, hi, m2, op, nops, ops, ae), \
14925 xCM_(m1, ls, m2, op, nops, ops, ae), \
14926 xCM_(m1, ge, m2, op, nops, ops, ae), \
14927 xCM_(m1, lt, m2, op, nops, ops, ae), \
14928 xCM_(m1, gt, m2, op, nops, ops, ae), \
14929 xCM_(m1, le, m2, op, nops, ops, ae), \
14930 xCM_(m1, al, m2, op, nops, ops, ae)
14932 #define UE(mnem, op, nops, ops, ae) \
14933 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
14935 #define UF(mnem, op, nops, ops, ae) \
14936 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
14938 /* Neon data-processing. ARM versions are unconditional with cond=0xf.
14939 The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
14940 use the same encoding function for each. */
14941 #define NUF(mnem, op, nops, ops, enc) \
14942 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
14943 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
14945 /* Neon data processing, version which indirects through neon_enc_tab for
14946 the various overloaded versions of opcodes. */
14947 #define nUF(mnem, op, nops, ops, enc) \
14948 { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM_##op, N_MNEM_##op, \
14949 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
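/* For illustration: a NUF entry stores the same literal opcode in both the
   ARM and Thumb slots and leaves the bit 24/28 difference noted above to the
   shared encoding function, while an nUF entry stores an N_MNEM_xyz tag in
   both slots so the actual bits can be looked up through neon_enc_tab once
   the overloaded form is known.  */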
14951 /* Neon insn with conditional suffix for the ARM version, non-overloaded
14952 version. */
14953 #define NCE_tag(mnem, op, nops, ops, enc, tag) \
14954 { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
14955 THUMB_VARIANT, do_##enc, do_##enc }
14957 #define NCE(mnem, op, nops, ops, enc) \
14958 NCE_tag(mnem, op, nops, ops, enc, OT_csuffix)
14960 #define NCEF(mnem, op, nops, ops, enc) \
14961 NCE_tag(mnem, op, nops, ops, enc, OT_csuffixF)
14963 /* Neon insn with conditional suffix for the ARM version, overloaded types. */
14964 #define nCE_tag(mnem, op, nops, ops, enc, tag) \
14965 { #mnem, OPS##nops ops, tag, N_MNEM_##op, N_MNEM_##op, \
14966 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
14968 #define nCE(mnem, op, nops, ops, enc) \
14969 nCE_tag(mnem, op, nops, ops, enc, OT_csuffix)
14971 #define nCEF(mnem, op, nops, ops, enc) \
14972 nCE_tag(mnem, op, nops, ops, enc, OT_csuffixF)
14974 #define do_0 0
14976 /* Thumb-only, unconditional. */
14977 #define UT(mnem, op, nops, ops, te) TUE(mnem, 0, op, nops, ops, 0, te)
14979 static const struct asm_opcode insns[] =
14980 {
14981 #define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions. */
14982 #define THUMB_VARIANT &arm_ext_v4t
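/* For illustration: ARM_VARIANT and THUMB_VARIANT are ordinary macros, so
   each entry below captures whichever pair is in force when it is expanded;
   the #undef / #define pairs further down simply switch the architecture
   gate for the following group of mnemonics.  */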
14983 tCE(and, 0000000, and, 3, (RR, oRR, SH), arit, t_arit3c),
14984 tC3(ands, 0100000, ands, 3, (RR, oRR, SH), arit, t_arit3c),
14985 tCE(eor, 0200000, eor, 3, (RR, oRR, SH), arit, t_arit3c),
14986 tC3(eors, 0300000, eors, 3, (RR, oRR, SH), arit, t_arit3c),
14987 tCE(sub, 0400000, sub, 3, (RR, oRR, SH), arit, t_add_sub),
14988 tC3(subs, 0500000, subs, 3, (RR, oRR, SH), arit, t_add_sub),
14989 tCE(add, 0800000, add, 3, (RR, oRR, SHG), arit, t_add_sub),
14990 tC3(adds, 0900000, adds, 3, (RR, oRR, SHG), arit, t_add_sub),
14991 tCE(adc, 0a00000, adc, 3, (RR, oRR, SH), arit, t_arit3c),
14992 tC3(adcs, 0b00000, adcs, 3, (RR, oRR, SH), arit, t_arit3c),
14993 tCE(sbc, 0c00000, sbc, 3, (RR, oRR, SH), arit, t_arit3),
14994 tC3(sbcs, 0d00000, sbcs, 3, (RR, oRR, SH), arit, t_arit3),
14995 tCE(orr, 1800000, orr, 3, (RR, oRR, SH), arit, t_arit3c),
14996 tC3(orrs, 1900000, orrs, 3, (RR, oRR, SH), arit, t_arit3c),
14997 tCE(bic, 1c00000, bic, 3, (RR, oRR, SH), arit, t_arit3),
14998 tC3(bics, 1d00000, bics, 3, (RR, oRR, SH), arit, t_arit3),
15000 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
15001 for setting PSR flag bits. They are obsolete in V6 and do not
15002 have Thumb equivalents. */
15003 tCE(tst, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst),
15004 tC3w(tsts, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst),
15005 CL(tstp, 110f000, 2, (RR, SH), cmp),
15006 tCE(cmp, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp),
15007 tC3w(cmps, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp),
15008 CL(cmpp, 150f000, 2, (RR, SH), cmp),
15009 tCE(cmn, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst),
15010 tC3w(cmns, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst),
15011 CL(cmnp, 170f000, 2, (RR, SH), cmp),
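/* For illustration: the p-variants above differ from the plain opcodes only
   in carrying 0xf in the Rd field (110f000 versus 1100000 for tstp/tst),
   which is how those older cores were told to copy the result flags into
   the PSR.  */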
15013 tCE(mov, 1a00000, mov, 2, (RR, SH), mov, t_mov_cmp),
15014 tC3(movs, 1b00000, movs, 2, (RR, SH), mov, t_mov_cmp),
15015 tCE(mvn, 1e00000, mvn, 2, (RR, SH), mov, t_mvn_tst),
15016 tC3(mvns, 1f00000, mvns, 2, (RR, SH), mov, t_mvn_tst),
15018 tCE(ldr, 4100000, ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
15019 tC3(ldrb, 4500000, ldrb, 2, (RR, ADDRGLDR),ldst, t_ldst),
15020 tCE(str, 4000000, str, 2, (RR, ADDRGLDR),ldst, t_ldst),
15021 tC3(strb, 4400000, strb, 2, (RR, ADDRGLDR),ldst, t_ldst),
15023 tCE(stm, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
15024 tC3(stmia, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
15025 tC3(stmea, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
15026 tCE(ldm, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
15027 tC3(ldmia, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
15028 tC3(ldmfd, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
15030 TCE(swi, f000000, df00, 1, (EXPi), swi, t_swi),
15031 TCE(svc, f000000, df00, 1, (EXPi), swi, t_swi),
15032 tCE(b, a000000, b, 1, (EXPr), branch, t_branch),
15033 TCE(bl, b000000, f000f800, 1, (EXPr), bl, t_branch23),
15035 /* Pseudo ops. */
15036 tCE(adr, 28f0000, adr, 2, (RR, EXP), adr, t_adr),
15037 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
15038 tCE(nop, 1a00000, nop, 1, (oI255c), nop, t_nop),
15040 /* Thumb-compatibility pseudo ops. */
15041 tCE(lsl, 1a00000, lsl, 3, (RR, oRR, SH), shift, t_shift),
15042 tC3(lsls, 1b00000, lsls, 3, (RR, oRR, SH), shift, t_shift),
15043 tCE(lsr, 1a00020, lsr, 3, (RR, oRR, SH), shift, t_shift),
15044 tC3(lsrs, 1b00020, lsrs, 3, (RR, oRR, SH), shift, t_shift),
15045 tCE(asr, 1a00040, asr, 3, (RR, oRR, SH), shift, t_shift),
15046 tC3(asrs, 1b00040, asrs, 3, (RR, oRR, SH), shift, t_shift),
15047 tCE(ror, 1a00060, ror, 3, (RR, oRR, SH), shift, t_shift),
15048 tC3(rors, 1b00060, rors, 3, (RR, oRR, SH), shift, t_shift),
15049 tCE(neg, 2600000, neg, 2, (RR, RR), rd_rn, t_neg),
15050 tC3(negs, 2700000, negs, 2, (RR, RR), rd_rn, t_neg),
15051 tCE(push, 92d0000, push, 1, (REGLST), push_pop, t_push_pop),
15052 tCE(pop, 8bd0000, pop, 1, (REGLST), push_pop, t_push_pop),
15054 /* These may simplify to neg. */
15055 TCE(rsb, 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
15056 TC3(rsbs, 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
15058 #undef THUMB_VARIANT
15059 #define THUMB_VARIANT &arm_ext_v6
15060 TCE(cpy, 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
15062 /* V1 instructions with no Thumb analogue prior to V6T2. */
15063 #undef THUMB_VARIANT
15064 #define THUMB_VARIANT &arm_ext_v6t2
15065 TCE(teq, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
15066 TC3w(teqs, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
15067 CL(teqp, 130f000, 2, (RR, SH), cmp),
15069 TC3(ldrt, 4300000, f8500e00, 2, (RR, ADDR), ldstt, t_ldstt),
15070 TC3(ldrbt, 4700000, f8100e00, 2, (RR, ADDR), ldstt, t_ldstt),
15071 TC3(strt, 4200000, f8400e00, 2, (RR, ADDR), ldstt, t_ldstt),
15072 TC3(strbt, 4600000, f8000e00, 2, (RR, ADDR), ldstt, t_ldstt),
15074 TC3(stmdb, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
15075 TC3(stmfd, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
15077 TC3(ldmdb, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
15078 TC3(ldmea, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
15080 /* V1 instructions with no Thumb analogue at all. */
15081 CE(rsc, 0e00000, 3, (RR, oRR, SH), arit),
15082 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
15084 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
15085 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
15086 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
15087 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
15088 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
15089 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
15090 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
15091 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
15093 #undef ARM_VARIANT
15094 #define ARM_VARIANT &arm_ext_v2 /* ARM 2 - multiplies. */
15095 #undef THUMB_VARIANT
15096 #define THUMB_VARIANT &arm_ext_v4t
15097 tCE(mul, 0000090, mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
15098 tC3(muls, 0100090, muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
15100 #undef THUMB_VARIANT
15101 #define THUMB_VARIANT &arm_ext_v6t2
15102 TCE(mla, 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
15103 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
15105 /* Generic coprocessor instructions. */
15106 TCE(cdp, e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
15107 TCE(ldc, c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
15108 TC3(ldcl, c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
15109 TCE(stc, c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
15110 TC3(stcl, c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
15111 TCE(mcr, e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
15112 TCE(mrc, e100010, ee100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
15114 #undef ARM_VARIANT
15115 #define ARM_VARIANT &arm_ext_v2s /* ARM 3 - swp instructions. */
15116 CE(swp, 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
15117 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
15119 #undef ARM_VARIANT
15120 #define ARM_VARIANT &arm_ext_v3 /* ARM 6 Status register instructions. */
15121 #undef THUMB_VARIANT
15122 #define THUMB_VARIANT &arm_ext_msr
15123 TCE(mrs, 10f0000, f3ef8000, 2, (APSR_RR, RVC_PSR), mrs, t_mrs),
15124 TCE(msr, 120f000, f3808000, 2, (RVC_PSR, RR_EXi), msr, t_msr),
15126 #undef ARM_VARIANT
15127 #define ARM_VARIANT &arm_ext_v3m /* ARM 7M long multiplies. */
15128 #undef THUMB_VARIANT
15129 #define THUMB_VARIANT &arm_ext_v6t2
15130 TCE(smull, 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
15131 CM(smull,s, 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
15132 TCE(umull, 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
15133 CM(umull,s, 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
15134 TCE(smlal, 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
15135 CM(smlal,s, 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
15136 TCE(umlal, 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
15137 CM(umlal,s, 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
15139 #undef ARM_VARIANT
15140 #define ARM_VARIANT &arm_ext_v4 /* ARM Architecture 4. */
15141 #undef THUMB_VARIANT
15142 #define THUMB_VARIANT &arm_ext_v4t
15143 tC3(ldrh, 01000b0, ldrh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
15144 tC3(strh, 00000b0, strh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
15145 tC3(ldrsh, 01000f0, ldrsh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
15146 tC3(ldrsb, 01000d0, ldrsb, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
15147 tCM(ld,sh, 01000f0, ldrsh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
15148 tCM(ld,sb, 01000d0, ldrsb, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
15150 #undef ARM_VARIANT
15151 #define ARM_VARIANT &arm_ext_v4t_5
15152 /* ARM Architecture 4T. */
15153 /* Note: bx (and blx) are required on V5, even if the processor does
15154 not support Thumb. */
15155 TCE(bx, 12fff10, 4700, 1, (RR), bx, t_bx),
15157 #undef ARM_VARIANT
15158 #define ARM_VARIANT &arm_ext_v5 /* ARM Architecture 5T. */
15159 #undef THUMB_VARIANT
15160 #define THUMB_VARIANT &arm_ext_v5t
15161 /* Note: blx has 2 variants; the .value coded here is for
15162 BLX(2). Only this variant has conditional execution. */
15163 TCE(blx, 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
15164 TUE(bkpt, 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
15166 #undef THUMB_VARIANT
15167 #define THUMB_VARIANT &arm_ext_v6t2
15168 TCE(clz, 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
15169 TUF(ldc2, c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
15170 TUF(ldc2l, c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
15171 TUF(stc2, c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
15172 TUF(stc2l, c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
15173 TUF(cdp2, e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
15174 TUF(mcr2, e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
15175 TUF(mrc2, e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
15177 #undef ARM_VARIANT
15178 #define ARM_VARIANT &arm_ext_v5exp /* ARM Architecture 5TExP. */
15179 TCE(smlabb, 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
15180 TCE(smlatb, 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
15181 TCE(smlabt, 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
15182 TCE(smlatt, 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
15184 TCE(smlawb, 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
15185 TCE(smlawt, 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
15187 TCE(smlalbb, 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
15188 TCE(smlaltb, 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
15189 TCE(smlalbt, 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
15190 TCE(smlaltt, 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
15192 TCE(smulbb, 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15193 TCE(smultb, 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15194 TCE(smulbt, 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15195 TCE(smultt, 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15197 TCE(smulwb, 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15198 TCE(smulwt, 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15200 TCE(qadd, 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
15201 TCE(qdadd, 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
15202 TCE(qsub, 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
15203 TCE(qdsub, 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
15205 #undef ARM_VARIANT
15206 #define ARM_VARIANT &arm_ext_v5e /* ARM Architecture 5TE. */
15207 TUF(pld, 450f000, f810f000, 1, (ADDR), pld, t_pld),
15208 TC3(ldrd, 00000d0, e8500000, 3, (RRnpc, oRRnpc, ADDRGLDRS), ldrd, t_ldstd),
15209 TC3(strd, 00000f0, e8400000, 3, (RRnpc, oRRnpc, ADDRGLDRS), ldrd, t_ldstd),
15211 TCE(mcrr, c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
15212 TCE(mrrc, c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
15214 #undef ARM_VARIANT
15215 #define ARM_VARIANT &arm_ext_v5j /* ARM Architecture 5TEJ. */
15216 TCE(bxj, 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
15218 #undef ARM_VARIANT
15219 #define ARM_VARIANT &arm_ext_v6 /* ARM V6. */
15220 #undef THUMB_VARIANT
15221 #define THUMB_VARIANT &arm_ext_v6
15222 TUF(cpsie, 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
15223 TUF(cpsid, 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
15224 tCE(rev, 6bf0f30, rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
15225 tCE(rev16, 6bf0fb0, rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
15226 tCE(revsh, 6ff0fb0, revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
15227 tCE(sxth, 6bf0070, sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
15228 tCE(uxth, 6ff0070, uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
15229 tCE(sxtb, 6af0070, sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
15230 tCE(uxtb, 6ef0070, uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
15231 TUF(setend, 1010000, b650, 1, (ENDI), setend, t_setend),
15233 #undef THUMB_VARIANT
15234 #define THUMB_VARIANT &arm_ext_v6t2
15235 TCE(ldrex, 1900f9f, e8500f00, 2, (RRnpc, ADDR), ldrex, t_ldrex),
15236 TCE(strex, 1800f90, e8400000, 3, (RRnpc, RRnpc, ADDR), strex, t_strex),
15237 TUF(mcrr2, c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
15238 TUF(mrrc2, c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
15240 TCE(ssat, 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
15241 TCE(usat, 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
15243 /* ARM V6 not included in V7M (e.g. integer SIMD). */
15244 #undef THUMB_VARIANT
15245 #define THUMB_VARIANT &arm_ext_v6_notm
15246 TUF(cps, 1020000, f3af8100, 1, (I31b), imm0, t_cps),
15247 TCE(pkhbt, 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
15248 TCE(pkhtb, 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
15249 TCE(qadd16, 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15250 TCE(qadd8, 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15251 TCE(qaddsubx, 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15252 TCE(qsub16, 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15253 TCE(qsub8, 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15254 TCE(qsubaddx, 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15255 TCE(sadd16, 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15256 TCE(sadd8, 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15257 TCE(saddsubx, 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15258 TCE(shadd16, 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15259 TCE(shadd8, 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15260 TCE(shaddsubx, 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15261 TCE(shsub16, 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15262 TCE(shsub8, 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15263 TCE(shsubaddx, 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15264 TCE(ssub16, 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15265 TCE(ssub8, 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15266 TCE(ssubaddx, 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15267 TCE(uadd16, 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15268 TCE(uadd8, 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15269 TCE(uaddsubx, 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15270 TCE(uhadd16, 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15271 TCE(uhadd8, 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15272 TCE(uhaddsubx, 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15273 TCE(uhsub16, 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15274 TCE(uhsub8, 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15275 TCE(uhsubaddx, 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15276 TCE(uqadd16, 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15277 TCE(uqadd8, 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15278 TCE(uqaddsubx, 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15279 TCE(uqsub16, 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15280 TCE(uqsub8, 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15281 TCE(uqsubaddx, 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15282 TCE(usub16, 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15283 TCE(usub8, 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15284 TCE(usubaddx, 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15285 TUF(rfeia, 8900a00, e990c000, 1, (RRw), rfe, rfe),
15286 UF(rfeib, 9900a00, 1, (RRw), rfe),
15287 UF(rfeda, 8100a00, 1, (RRw), rfe),
15288 TUF(rfedb, 9100a00, e810c000, 1, (RRw), rfe, rfe),
15289 TUF(rfefd, 8900a00, e990c000, 1, (RRw), rfe, rfe),
15290 UF(rfefa, 9900a00, 1, (RRw), rfe),
15291 UF(rfeea, 8100a00, 1, (RRw), rfe),
15292 TUF(rfeed, 9100a00, e810c000, 1, (RRw), rfe, rfe),
15293 TCE(sxtah, 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
15294 TCE(sxtab16, 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
15295 TCE(sxtab, 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
15296 TCE(sxtb16, 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
15297 TCE(uxtah, 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
15298 TCE(uxtab16, 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
15299 TCE(uxtab, 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
15300 TCE(uxtb16, 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
15301 TCE(sel, 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15302 TCE(smlad, 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15303 TCE(smladx, 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15304 TCE(smlald, 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
15305 TCE(smlaldx, 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
15306 TCE(smlsd, 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15307 TCE(smlsdx, 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15308 TCE(smlsld, 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
15309 TCE(smlsldx, 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
15310 TCE(smmla, 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15311 TCE(smmlar, 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15312 TCE(smmls, 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15313 TCE(smmlsr, 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15314 TCE(smmul, 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15315 TCE(smmulr, 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15316 TCE(smuad, 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15317 TCE(smuadx, 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15318 TCE(smusd, 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15319 TCE(smusdx, 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15320 TUF(srsia, 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
15321 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
15322 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
15323 TUF(srsdb, 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
15324 TCE(ssat16, 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
15325 TCE(umaal, 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
15326 TCE(usad8, 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15327 TCE(usada8, 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15328 TCE(usat16, 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
15330 #undef ARM_VARIANT
15331 #define ARM_VARIANT &arm_ext_v6k
15332 #undef THUMB_VARIANT
15333 #define THUMB_VARIANT &arm_ext_v6k
15334 tCE(yield, 320f001, yield, 0, (), noargs, t_hint),
15335 tCE(wfe, 320f002, wfe, 0, (), noargs, t_hint),
15336 tCE(wfi, 320f003, wfi, 0, (), noargs, t_hint),
15337 tCE(sev, 320f004, sev, 0, (), noargs, t_hint),
15339 #undef THUMB_VARIANT
15340 #define THUMB_VARIANT &arm_ext_v6_notm
15341 TCE(ldrexd, 1b00f9f, e8d0007f, 3, (RRnpc, oRRnpc, RRnpcb), ldrexd, t_ldrexd),
15342 TCE(strexd, 1a00f90, e8c00070, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb), strexd, t_strexd),
15344 #undef THUMB_VARIANT
15345 #define THUMB_VARIANT &arm_ext_v6t2
15346 TCE(ldrexb, 1d00f9f, e8d00f4f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
15347 TCE(ldrexh, 1f00f9f, e8d00f5f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
15348 TCE(strexb, 1c00f90, e8c00f40, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
15349 TCE(strexh, 1e00f90, e8c00f50, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
15350 TUF(clrex, 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
15352 #undef ARM_VARIANT
15353 #define ARM_VARIANT &arm_ext_v6z
15354 TCE(smc, 1600070, f7f08000, 1, (EXPi), smc, t_smc),
15356 #undef ARM_VARIANT
15357 #define ARM_VARIANT &arm_ext_v6t2
15358 TCE(bfc, 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
15359 TCE(bfi, 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
15360 TCE(sbfx, 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
15361 TCE(ubfx, 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
15363 TCE(mls, 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
15364 TCE(movw, 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
15365 TCE(movt, 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
15366 TCE(rbit, 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
15368 TC3(ldrht, 03000b0, f8300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
15369 TC3(ldrsht, 03000f0, f9300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
15370 TC3(ldrsbt, 03000d0, f9100e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
15371 TC3(strht, 02000b0, f8200e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
15373 UT(cbnz, b900, 2, (RR, EXP), t_cbz),
15374 UT(cbz, b100, 2, (RR, EXP), t_cbz),
15375 /* ARM does not really have an IT instruction, so always allow it. */
15376 #undef ARM_VARIANT
15377 #define ARM_VARIANT &arm_ext_v1
15378 TUE(it, 0, bf08, 1, (COND), it, t_it),
15379 TUE(itt, 0, bf0c, 1, (COND), it, t_it),
15380 TUE(ite, 0, bf04, 1, (COND), it, t_it),
15381 TUE(ittt, 0, bf0e, 1, (COND), it, t_it),
15382 TUE(itet, 0, bf06, 1, (COND), it, t_it),
15383 TUE(itte, 0, bf0a, 1, (COND), it, t_it),
15384 TUE(itee, 0, bf02, 1, (COND), it, t_it),
15385 TUE(itttt, 0, bf0f, 1, (COND), it, t_it),
15386 TUE(itett, 0, bf07, 1, (COND), it, t_it),
15387 TUE(ittet, 0, bf0b, 1, (COND), it, t_it),
15388 TUE(iteet, 0, bf03, 1, (COND), it, t_it),
15389 TUE(ittte, 0, bf0d, 1, (COND), it, t_it),
15390 TUE(itete, 0, bf05, 1, (COND), it, t_it),
15391 TUE(ittee, 0, bf09, 1, (COND), it, t_it),
15392 TUE(iteee, 0, bf01, 1, (COND), it, t_it),
15394 /* Thumb2 only instructions. */
15395 #undef ARM_VARIANT
15396 #define ARM_VARIANT NULL
15398 TCE(addw, 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
15399 TCE(subw, 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
15400 TCE(tbb, 0, e8d0f000, 1, (TB), 0, t_tb),
15401 TCE(tbh, 0, e8d0f010, 1, (TB), 0, t_tb),
15403 /* Thumb-2 hardware division instructions (R and M profiles only). */
15404 #undef THUMB_VARIANT
15405 #define THUMB_VARIANT &arm_ext_div
15406 TCE(sdiv, 0, fb90f0f0, 3, (RR, oRR, RR), 0, t_div),
15407 TCE(udiv, 0, fbb0f0f0, 3, (RR, oRR, RR), 0, t_div),
15409 /* ARM V6M/V7 instructions. */
15410 #undef ARM_VARIANT
15411 #define ARM_VARIANT &arm_ext_barrier
15412 #undef THUMB_VARIANT
15413 #define THUMB_VARIANT &arm_ext_barrier
15414 TUF(dmb, 57ff050, f3bf8f50, 1, (oBARRIER), barrier, t_barrier),
15415 TUF(dsb, 57ff040, f3bf8f40, 1, (oBARRIER), barrier, t_barrier),
15416 TUF(isb, 57ff060, f3bf8f60, 1, (oBARRIER), barrier, t_barrier),
15418 /* ARM V7 instructions. */
15419 #undef ARM_VARIANT
15420 #define ARM_VARIANT &arm_ext_v7
15421 #undef THUMB_VARIANT
15422 #define THUMB_VARIANT &arm_ext_v7
15423 TUF(pli, 450f000, f910f000, 1, (ADDR), pli, t_pld),
15424 TCE(dbg, 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
15426 #undef ARM_VARIANT
15427 #define ARM_VARIANT &fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
15428 cCE(wfs, e200110, 1, (RR), rd),
15429 cCE(rfs, e300110, 1, (RR), rd),
15430 cCE(wfc, e400110, 1, (RR), rd),
15431 cCE(rfc, e500110, 1, (RR), rd),
15433 cCL(ldfs, c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
15434 cCL(ldfd, c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
15435 cCL(ldfe, c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
15436 cCL(ldfp, c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
15438 cCL(stfs, c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
15439 cCL(stfd, c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
15440 cCL(stfe, c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
15441 cCL(stfp, c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
15443 cCL(mvfs, e008100, 2, (RF, RF_IF), rd_rm),
15444 cCL(mvfsp, e008120, 2, (RF, RF_IF), rd_rm),
15445 cCL(mvfsm, e008140, 2, (RF, RF_IF), rd_rm),
15446 cCL(mvfsz, e008160, 2, (RF, RF_IF), rd_rm),
15447 cCL(mvfd, e008180, 2, (RF, RF_IF), rd_rm),
15448 cCL(mvfdp, e0081a0, 2, (RF, RF_IF), rd_rm),
15449 cCL(mvfdm, e0081c0, 2, (RF, RF_IF), rd_rm),
15450 cCL(mvfdz, e0081e0, 2, (RF, RF_IF), rd_rm),
15451 cCL(mvfe, e088100, 2, (RF, RF_IF), rd_rm),
15452 cCL(mvfep, e088120, 2, (RF, RF_IF), rd_rm),
15453 cCL(mvfem, e088140, 2, (RF, RF_IF), rd_rm),
15454 cCL(mvfez, e088160, 2, (RF, RF_IF), rd_rm),
15456 cCL(mnfs, e108100, 2, (RF, RF_IF), rd_rm),
15457 cCL(mnfsp, e108120, 2, (RF, RF_IF), rd_rm),
15458 cCL(mnfsm, e108140, 2, (RF, RF_IF), rd_rm),
15459 cCL(mnfsz, e108160, 2, (RF, RF_IF), rd_rm),
15460 cCL(mnfd, e108180, 2, (RF, RF_IF), rd_rm),
15461 cCL(mnfdp, e1081a0, 2, (RF, RF_IF), rd_rm),
15462 cCL(mnfdm, e1081c0, 2, (RF, RF_IF), rd_rm),
15463 cCL(mnfdz, e1081e0, 2, (RF, RF_IF), rd_rm),
15464 cCL(mnfe, e188100, 2, (RF, RF_IF), rd_rm),
15465 cCL(mnfep, e188120, 2, (RF, RF_IF), rd_rm),
15466 cCL(mnfem, e188140, 2, (RF, RF_IF), rd_rm),
15467 cCL(mnfez, e188160, 2, (RF, RF_IF), rd_rm),
15469 cCL(abss, e208100, 2, (RF, RF_IF), rd_rm),
15470 cCL(abssp, e208120, 2, (RF, RF_IF), rd_rm),
15471 cCL(abssm, e208140, 2, (RF, RF_IF), rd_rm),
15472 cCL(abssz, e208160, 2, (RF, RF_IF), rd_rm),
15473 cCL(absd, e208180, 2, (RF, RF_IF), rd_rm),
15474 cCL(absdp, e2081a0, 2, (RF, RF_IF), rd_rm),
15475 cCL(absdm, e2081c0, 2, (RF, RF_IF), rd_rm),
15476 cCL(absdz, e2081e0, 2, (RF, RF_IF), rd_rm),
15477 cCL(abse, e288100, 2, (RF, RF_IF), rd_rm),
15478 cCL(absep, e288120, 2, (RF, RF_IF), rd_rm),
15479 cCL(absem, e288140, 2, (RF, RF_IF), rd_rm),
15480 cCL(absez, e288160, 2, (RF, RF_IF), rd_rm),
15482 cCL(rnds, e308100, 2, (RF, RF_IF), rd_rm),
15483 cCL(rndsp, e308120, 2, (RF, RF_IF), rd_rm),
15484 cCL(rndsm, e308140, 2, (RF, RF_IF), rd_rm),
15485 cCL(rndsz, e308160, 2, (RF, RF_IF), rd_rm),
15486 cCL(rndd, e308180, 2, (RF, RF_IF), rd_rm),
15487 cCL(rnddp, e3081a0, 2, (RF, RF_IF), rd_rm),
15488 cCL(rnddm, e3081c0, 2, (RF, RF_IF), rd_rm),
15489 cCL(rnddz, e3081e0, 2, (RF, RF_IF), rd_rm),
15490 cCL(rnde, e388100, 2, (RF, RF_IF), rd_rm),
15491 cCL(rndep, e388120, 2, (RF, RF_IF), rd_rm),
15492 cCL(rndem, e388140, 2, (RF, RF_IF), rd_rm),
15493 cCL(rndez, e388160, 2, (RF, RF_IF), rd_rm),
15495 cCL(sqts, e408100, 2, (RF, RF_IF), rd_rm),
15496 cCL(sqtsp, e408120, 2, (RF, RF_IF), rd_rm),
15497 cCL(sqtsm, e408140, 2, (RF, RF_IF), rd_rm),
15498 cCL(sqtsz, e408160, 2, (RF, RF_IF), rd_rm),
15499 cCL(sqtd, e408180, 2, (RF, RF_IF), rd_rm),
15500 cCL(sqtdp, e4081a0, 2, (RF, RF_IF), rd_rm),
15501 cCL(sqtdm, e4081c0, 2, (RF, RF_IF), rd_rm),
15502 cCL(sqtdz, e4081e0, 2, (RF, RF_IF), rd_rm),
15503 cCL(sqte, e488100, 2, (RF, RF_IF), rd_rm),
15504 cCL(sqtep, e488120, 2, (RF, RF_IF), rd_rm),
15505 cCL(sqtem, e488140, 2, (RF, RF_IF), rd_rm),
15506 cCL(sqtez, e488160, 2, (RF, RF_IF), rd_rm),
15508 cCL(logs, e508100, 2, (RF, RF_IF), rd_rm),
15509 cCL(logsp, e508120, 2, (RF, RF_IF), rd_rm),
15510 cCL(logsm, e508140, 2, (RF, RF_IF), rd_rm),
15511 cCL(logsz, e508160, 2, (RF, RF_IF), rd_rm),
15512 cCL(logd, e508180, 2, (RF, RF_IF), rd_rm),
15513 cCL(logdp, e5081a0, 2, (RF, RF_IF), rd_rm),
15514 cCL(logdm, e5081c0, 2, (RF, RF_IF), rd_rm),
15515 cCL(logdz, e5081e0, 2, (RF, RF_IF), rd_rm),
15516 cCL(loge, e588100, 2, (RF, RF_IF), rd_rm),
15517 cCL(logep, e588120, 2, (RF, RF_IF), rd_rm),
15518 cCL(logem, e588140, 2, (RF, RF_IF), rd_rm),
15519 cCL(logez, e588160, 2, (RF, RF_IF), rd_rm),
15521 cCL(lgns, e608100, 2, (RF, RF_IF), rd_rm),
15522 cCL(lgnsp, e608120, 2, (RF, RF_IF), rd_rm),
15523 cCL(lgnsm, e608140, 2, (RF, RF_IF), rd_rm),
15524 cCL(lgnsz, e608160, 2, (RF, RF_IF), rd_rm),
15525 cCL(lgnd, e608180, 2, (RF, RF_IF), rd_rm),
15526 cCL(lgndp, e6081a0, 2, (RF, RF_IF), rd_rm),
15527 cCL(lgndm, e6081c0, 2, (RF, RF_IF), rd_rm),
15528 cCL(lgndz, e6081e0, 2, (RF, RF_IF), rd_rm),
15529 cCL(lgne, e688100, 2, (RF, RF_IF), rd_rm),
15530 cCL(lgnep, e688120, 2, (RF, RF_IF), rd_rm),
15531 cCL(lgnem, e688140, 2, (RF, RF_IF), rd_rm),
15532 cCL(lgnez, e688160, 2, (RF, RF_IF), rd_rm),
15534 cCL(exps, e708100, 2, (RF, RF_IF), rd_rm),
15535 cCL(expsp, e708120, 2, (RF, RF_IF), rd_rm),
15536 cCL(expsm, e708140, 2, (RF, RF_IF), rd_rm),
15537 cCL(expsz, e708160, 2, (RF, RF_IF), rd_rm),
15538 cCL(expd, e708180, 2, (RF, RF_IF), rd_rm),
15539 cCL(expdp, e7081a0, 2, (RF, RF_IF), rd_rm),
15540 cCL(expdm, e7081c0, 2, (RF, RF_IF), rd_rm),
15541 cCL(expdz, e7081e0, 2, (RF, RF_IF), rd_rm),
15542 cCL(expe, e788100, 2, (RF, RF_IF), rd_rm),
15543 cCL(expep, e788120, 2, (RF, RF_IF), rd_rm),
15544 cCL(expem, e788140, 2, (RF, RF_IF), rd_rm),
15545 cCL(expez, e788160, 2, (RF, RF_IF), rd_rm),
15547 cCL(sins, e808100, 2, (RF, RF_IF), rd_rm),
15548 cCL(sinsp, e808120, 2, (RF, RF_IF), rd_rm),
15549 cCL(sinsm, e808140, 2, (RF, RF_IF), rd_rm),
15550 cCL(sinsz, e808160, 2, (RF, RF_IF), rd_rm),
15551 cCL(sind, e808180, 2, (RF, RF_IF), rd_rm),
15552 cCL(sindp, e8081a0, 2, (RF, RF_IF), rd_rm),
15553 cCL(sindm, e8081c0, 2, (RF, RF_IF), rd_rm),
15554 cCL(sindz, e8081e0, 2, (RF, RF_IF), rd_rm),
15555 cCL(sine, e888100, 2, (RF, RF_IF), rd_rm),
15556 cCL(sinep, e888120, 2, (RF, RF_IF), rd_rm),
15557 cCL(sinem, e888140, 2, (RF, RF_IF), rd_rm),
15558 cCL(sinez, e888160, 2, (RF, RF_IF), rd_rm),
15560 cCL(coss, e908100, 2, (RF, RF_IF), rd_rm),
15561 cCL(cossp, e908120, 2, (RF, RF_IF), rd_rm),
15562 cCL(cossm, e908140, 2, (RF, RF_IF), rd_rm),
15563 cCL(cossz, e908160, 2, (RF, RF_IF), rd_rm),
15564 cCL(cosd, e908180, 2, (RF, RF_IF), rd_rm),
15565 cCL(cosdp, e9081a0, 2, (RF, RF_IF), rd_rm),
15566 cCL(cosdm, e9081c0, 2, (RF, RF_IF), rd_rm),
15567 cCL(cosdz, e9081e0, 2, (RF, RF_IF), rd_rm),
15568 cCL(cose, e988100, 2, (RF, RF_IF), rd_rm),
15569 cCL(cosep, e988120, 2, (RF, RF_IF), rd_rm),
15570 cCL(cosem, e988140, 2, (RF, RF_IF), rd_rm),
15571 cCL(cosez, e988160, 2, (RF, RF_IF), rd_rm),
15573 cCL(tans, ea08100, 2, (RF, RF_IF), rd_rm),
15574 cCL(tansp, ea08120, 2, (RF, RF_IF), rd_rm),
15575 cCL(tansm, ea08140, 2, (RF, RF_IF), rd_rm),
15576 cCL(tansz, ea08160, 2, (RF, RF_IF), rd_rm),
15577 cCL(tand, ea08180, 2, (RF, RF_IF), rd_rm),
15578 cCL(tandp, ea081a0, 2, (RF, RF_IF), rd_rm),
15579 cCL(tandm, ea081c0, 2, (RF, RF_IF), rd_rm),
15580 cCL(tandz, ea081e0, 2, (RF, RF_IF), rd_rm),
15581 cCL(tane, ea88100, 2, (RF, RF_IF), rd_rm),
15582 cCL(tanep, ea88120, 2, (RF, RF_IF), rd_rm),
15583 cCL(tanem, ea88140, 2, (RF, RF_IF), rd_rm),
15584 cCL(tanez, ea88160, 2, (RF, RF_IF), rd_rm),
15586 cCL(asns, eb08100, 2, (RF, RF_IF), rd_rm),
15587 cCL(asnsp, eb08120, 2, (RF, RF_IF), rd_rm),
15588 cCL(asnsm, eb08140, 2, (RF, RF_IF), rd_rm),
15589 cCL(asnsz, eb08160, 2, (RF, RF_IF), rd_rm),
15590 cCL(asnd, eb08180, 2, (RF, RF_IF), rd_rm),
15591 cCL(asndp, eb081a0, 2, (RF, RF_IF), rd_rm),
15592 cCL(asndm, eb081c0, 2, (RF, RF_IF), rd_rm),
15593 cCL(asndz, eb081e0, 2, (RF, RF_IF), rd_rm),
15594 cCL(asne, eb88100, 2, (RF, RF_IF), rd_rm),
15595 cCL(asnep, eb88120, 2, (RF, RF_IF), rd_rm),
15596 cCL(asnem, eb88140, 2, (RF, RF_IF), rd_rm),
15597 cCL(asnez, eb88160, 2, (RF, RF_IF), rd_rm),
15599 cCL(acss, ec08100, 2, (RF, RF_IF), rd_rm),
15600 cCL(acssp, ec08120, 2, (RF, RF_IF), rd_rm),
15601 cCL(acssm, ec08140, 2, (RF, RF_IF), rd_rm),
15602 cCL(acssz, ec08160, 2, (RF, RF_IF), rd_rm),
15603 cCL(acsd, ec08180, 2, (RF, RF_IF), rd_rm),
15604 cCL(acsdp, ec081a0, 2, (RF, RF_IF), rd_rm),
15605 cCL(acsdm, ec081c0, 2, (RF, RF_IF), rd_rm),
15606 cCL(acsdz, ec081e0, 2, (RF, RF_IF), rd_rm),
15607 cCL(acse, ec88100, 2, (RF, RF_IF), rd_rm),
15608 cCL(acsep, ec88120, 2, (RF, RF_IF), rd_rm),
15609 cCL(acsem, ec88140, 2, (RF, RF_IF), rd_rm),
15610 cCL(acsez, ec88160, 2, (RF, RF_IF), rd_rm),
15612 cCL(atns, ed08100, 2, (RF, RF_IF), rd_rm),
15613 cCL(atnsp, ed08120, 2, (RF, RF_IF), rd_rm),
15614 cCL(atnsm, ed08140, 2, (RF, RF_IF), rd_rm),
15615 cCL(atnsz, ed08160, 2, (RF, RF_IF), rd_rm),
15616 cCL(atnd, ed08180, 2, (RF, RF_IF), rd_rm),
15617 cCL(atndp, ed081a0, 2, (RF, RF_IF), rd_rm),
15618 cCL(atndm, ed081c0, 2, (RF, RF_IF), rd_rm),
15619 cCL(atndz, ed081e0, 2, (RF, RF_IF), rd_rm),
15620 cCL(atne, ed88100, 2, (RF, RF_IF), rd_rm),
15621 cCL(atnep, ed88120, 2, (RF, RF_IF), rd_rm),
15622 cCL(atnem, ed88140, 2, (RF, RF_IF), rd_rm),
15623 cCL(atnez, ed88160, 2, (RF, RF_IF), rd_rm),
15625 cCL(urds, ee08100, 2, (RF, RF_IF), rd_rm),
15626 cCL(urdsp, ee08120, 2, (RF, RF_IF), rd_rm),
15627 cCL(urdsm, ee08140, 2, (RF, RF_IF), rd_rm),
15628 cCL(urdsz, ee08160, 2, (RF, RF_IF), rd_rm),
15629 cCL(urdd, ee08180, 2, (RF, RF_IF), rd_rm),
15630 cCL(urddp, ee081a0, 2, (RF, RF_IF), rd_rm),
15631 cCL(urddm, ee081c0, 2, (RF, RF_IF), rd_rm),
15632 cCL(urddz, ee081e0, 2, (RF, RF_IF), rd_rm),
15633 cCL(urde, ee88100, 2, (RF, RF_IF), rd_rm),
15634 cCL(urdep, ee88120, 2, (RF, RF_IF), rd_rm),
15635 cCL(urdem, ee88140, 2, (RF, RF_IF), rd_rm),
15636 cCL(urdez, ee88160, 2, (RF, RF_IF), rd_rm),
15638 cCL(nrms, ef08100, 2, (RF, RF_IF), rd_rm),
15639 cCL(nrmsp, ef08120, 2, (RF, RF_IF), rd_rm),
15640 cCL(nrmsm, ef08140, 2, (RF, RF_IF), rd_rm),
15641 cCL(nrmsz, ef08160, 2, (RF, RF_IF), rd_rm),
15642 cCL(nrmd, ef08180, 2, (RF, RF_IF), rd_rm),
15643 cCL(nrmdp, ef081a0, 2, (RF, RF_IF), rd_rm),
15644 cCL(nrmdm, ef081c0, 2, (RF, RF_IF), rd_rm),
15645 cCL(nrmdz, ef081e0, 2, (RF, RF_IF), rd_rm),
15646 cCL(nrme, ef88100, 2, (RF, RF_IF), rd_rm),
15647 cCL(nrmep, ef88120, 2, (RF, RF_IF), rd_rm),
15648 cCL(nrmem, ef88140, 2, (RF, RF_IF), rd_rm),
15649 cCL(nrmez, ef88160, 2, (RF, RF_IF), rd_rm),
15651 cCL(adfs, e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
15652 cCL(adfsp, e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
15653 cCL(adfsm, e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
15654 cCL(adfsz, e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
15655 cCL(adfd, e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
15656 cCL(adfdp, e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15657 cCL(adfdm, e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15658 cCL(adfdz, e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15659 cCL(adfe, e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
15660 cCL(adfep, e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
15661 cCL(adfem, e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
15662 cCL(adfez, e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
15664 cCL(sufs, e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
15665 cCL(sufsp, e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
15666 cCL(sufsm, e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
15667 cCL(sufsz, e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
15668 cCL(sufd, e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
15669 cCL(sufdp, e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15670 cCL(sufdm, e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15671 cCL(sufdz, e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15672 cCL(sufe, e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
15673 cCL(sufep, e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
15674 cCL(sufem, e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
15675 cCL(sufez, e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
15677 cCL(rsfs, e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
15678 cCL(rsfsp, e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
15679 cCL(rsfsm, e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
15680 cCL(rsfsz, e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
15681 cCL(rsfd, e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
15682 cCL(rsfdp, e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15683 cCL(rsfdm, e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15684 cCL(rsfdz, e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15685 cCL(rsfe, e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
15686 cCL(rsfep, e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
15687 cCL(rsfem, e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
15688 cCL(rsfez, e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
15690 cCL(mufs, e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
15691 cCL(mufsp, e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
15692 cCL(mufsm, e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
15693 cCL(mufsz, e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
15694 cCL(mufd, e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
15695 cCL(mufdp, e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15696 cCL(mufdm, e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15697 cCL(mufdz, e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15698 cCL(mufe, e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
15699 cCL(mufep, e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
15700 cCL(mufem, e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
15701 cCL(mufez, e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
15703 cCL(dvfs, e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
15704 cCL(dvfsp, e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
15705 cCL(dvfsm, e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
15706 cCL(dvfsz, e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
15707 cCL(dvfd, e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
15708 cCL(dvfdp, e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15709 cCL(dvfdm, e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15710 cCL(dvfdz, e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15711 cCL(dvfe, e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
15712 cCL(dvfep, e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
15713 cCL(dvfem, e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
15714 cCL(dvfez, e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
15716 cCL(rdfs, e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
15717 cCL(rdfsp, e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
15718 cCL(rdfsm, e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
15719 cCL(rdfsz, e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
15720 cCL(rdfd, e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
15721 cCL(rdfdp, e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15722 cCL(rdfdm, e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15723 cCL(rdfdz, e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15724 cCL(rdfe, e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
15725 cCL(rdfep, e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
15726 cCL(rdfem, e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
15727 cCL(rdfez, e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
15729 cCL(pows, e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
15730 cCL(powsp, e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
15731 cCL(powsm, e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
15732 cCL(powsz, e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
15733 cCL(powd, e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
15734 cCL(powdp, e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15735 cCL(powdm, e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15736 cCL(powdz, e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15737 cCL(powe, e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
15738 cCL(powep, e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
15739 cCL(powem, e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
15740 cCL(powez, e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
15742 cCL(rpws, e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
15743 cCL(rpwsp, e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
15744 cCL(rpwsm, e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
15745 cCL(rpwsz, e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
15746 cCL(rpwd, e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
15747 cCL(rpwdp, e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15748 cCL(rpwdm, e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15749 cCL(rpwdz, e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15750 cCL(rpwe, e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
15751 cCL(rpwep, e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
15752 cCL(rpwem, e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
15753 cCL(rpwez, e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
15755 cCL(rmfs, e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
15756 cCL(rmfsp, e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
15757 cCL(rmfsm, e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
15758 cCL(rmfsz, e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
15759 cCL(rmfd, e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
15760 cCL(rmfdp, e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15761 cCL(rmfdm, e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15762 cCL(rmfdz, e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15763 cCL(rmfe, e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
15764 cCL(rmfep, e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
15765 cCL(rmfem, e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
15766 cCL(rmfez, e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
15768 cCL(fmls, e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
15769 cCL(fmlsp, e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
15770 cCL(fmlsm, e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
15771 cCL(fmlsz, e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
15772 cCL(fmld, e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
15773 cCL(fmldp, e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15774 cCL(fmldm, e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15775 cCL(fmldz, e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15776 cCL(fmle, e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
15777 cCL(fmlep, e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
15778 cCL(fmlem, e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
15779 cCL(fmlez, e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
15781 cCL(fdvs, ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
15782 cCL(fdvsp, ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
15783 cCL(fdvsm, ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
15784 cCL(fdvsz, ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
15785 cCL(fdvd, ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
15786 cCL(fdvdp, ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15787 cCL(fdvdm, ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15788 cCL(fdvdz, ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15789 cCL(fdve, ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
15790 cCL(fdvep, ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
15791 cCL(fdvem, ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
15792 cCL(fdvez, ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
15794 cCL(frds, eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
15795 cCL(frdsp, eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
15796 cCL(frdsm, eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
15797 cCL(frdsz, eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
15798 cCL(frdd, eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
15799 cCL(frddp, eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15800 cCL(frddm, eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15801 cCL(frddz, eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15802 cCL(frde, eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
15803 cCL(frdep, eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
15804 cCL(frdem, eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
15805 cCL(frdez, eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
15807 cCL(pols, ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
15808 cCL(polsp, ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
15809 cCL(polsm, ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
15810 cCL(polsz, ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
15811 cCL(pold, ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
15812 cCL(poldp, ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15813 cCL(poldm, ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15814 cCL(poldz, ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15815 cCL(pole, ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
15816 cCL(polep, ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
15817 cCL(polem, ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
15818 cCL(polez, ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
15820 cCE(cmf, e90f110, 2, (RF, RF_IF), fpa_cmp),
15821 C3E(cmfe, ed0f110, 2, (RF, RF_IF), fpa_cmp),
15822 cCE(cnf, eb0f110, 2, (RF, RF_IF), fpa_cmp),
15823 C3E(cnfe, ef0f110, 2, (RF, RF_IF), fpa_cmp),
15825 cCL(flts, e000110, 2, (RF, RR), rn_rd),
15826 cCL(fltsp, e000130, 2, (RF, RR), rn_rd),
15827 cCL(fltsm, e000150, 2, (RF, RR), rn_rd),
15828 cCL(fltsz, e000170, 2, (RF, RR), rn_rd),
15829 cCL(fltd, e000190, 2, (RF, RR), rn_rd),
15830 cCL(fltdp, e0001b0, 2, (RF, RR), rn_rd),
15831 cCL(fltdm, e0001d0, 2, (RF, RR), rn_rd),
15832 cCL(fltdz, e0001f0, 2, (RF, RR), rn_rd),
15833 cCL(flte, e080110, 2, (RF, RR), rn_rd),
15834 cCL(fltep, e080130, 2, (RF, RR), rn_rd),
15835 cCL(fltem, e080150, 2, (RF, RR), rn_rd),
15836 cCL(fltez, e080170, 2, (RF, RR), rn_rd),
15838 /* The implementation of the FIX instruction is broken on some
15839 assemblers, in that it accepts a precision specifier as well as a
15840 rounding specifier, despite the fact that this is meaningless.
15841 To be more compatible, we accept it as well, though of course it
15842 does not set any bits. */
15843 cCE(fix, e100110, 2, (RR, RF), rd_rm),
15844 cCL(fixp, e100130, 2, (RR, RF), rd_rm),
15845 cCL(fixm, e100150, 2, (RR, RF), rd_rm),
15846 cCL(fixz, e100170, 2, (RR, RF), rd_rm),
15847 cCL(fixsp, e100130, 2, (RR, RF), rd_rm),
15848 cCL(fixsm, e100150, 2, (RR, RF), rd_rm),
15849 cCL(fixsz, e100170, 2, (RR, RF), rd_rm),
15850 cCL(fixdp, e100130, 2, (RR, RF), rd_rm),
15851 cCL(fixdm, e100150, 2, (RR, RF), rd_rm),
15852 cCL(fixdz, e100170, 2, (RR, RF), rd_rm),
15853 cCL(fixep, e100130, 2, (RR, RF), rd_rm),
15854 cCL(fixem, e100150, 2, (RR, RF), rd_rm),
15855 cCL(fixez, e100170, 2, (RR, RF), rd_rm),
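/* Note that, as the encodings above show, the precision letter in these
   variants contributes nothing: fixp, fixsp, fixdp and fixep all share the
   e100130 pattern (and likewise for the m and z forms); only the rounding
   suffix selects different opcode bits.  */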
15857 /* Instructions that were new with the real FPA, call them V2. */
15858 #undef ARM_VARIANT
15859 #define ARM_VARIANT &fpu_fpa_ext_v2
15860 cCE(lfm, c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15861 cCL(lfmfd, c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15862 cCL(lfmea, d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15863 cCE(sfm, c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15864 cCL(sfmfd, d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15865 cCL(sfmea, c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15867 #undef ARM_VARIANT
15868 #define ARM_VARIANT &fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
15869 /* Moves and type conversions. */
15870 cCE(fcpys, eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
15871 cCE(fmrs, e100a10, 2, (RR, RVS), vfp_reg_from_sp),
15872 cCE(fmsr, e000a10, 2, (RVS, RR), vfp_sp_from_reg),
15873 cCE(fmstat, ef1fa10, 0, (), noargs),
15874 cCE(fsitos, eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
15875 cCE(fuitos, eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
15876 cCE(ftosis, ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
15877 cCE(ftosizs, ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
15878 cCE(ftouis, ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
15879 cCE(ftouizs, ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
15880 cCE(fmrx, ef00a10, 2, (RR, RVC), rd_rn),
15881 cCE(fmxr, ee00a10, 2, (RVC, RR), rn_rd),
15883 /* Memory operations. */
15884 cCE(flds, d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
15885 cCE(fsts, d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
15886 cCE(fldmias, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
15887 cCE(fldmfds, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
15888 cCE(fldmdbs, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
15889 cCE(fldmeas, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
15890 cCE(fldmiax, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
15891 cCE(fldmfdx, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
15892 cCE(fldmdbx, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
15893 cCE(fldmeax, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
15894 cCE(fstmias, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
15895 cCE(fstmeas, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
15896 cCE(fstmdbs, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
15897 cCE(fstmfds, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
15898 cCE(fstmiax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
15899 cCE(fstmeax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
15900 cCE(fstmdbx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
15901 cCE(fstmfdx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
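/* The duplicated encodings above are deliberate: for loads the "full
   descending" (fd) mnemonics alias increment-after (ia) and the "empty
   ascending" (ea) mnemonics alias decrement-before (db), while for stores
   the pairing is reversed (ea aliases ia, fd aliases db).  */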
15903 /* Monadic operations. */
15904 cCE(fabss, eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
15905 cCE(fnegs, eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
15906 cCE(fsqrts, eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
15908 /* Dyadic operations. */
15909 cCE(fadds, e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15910 cCE(fsubs, e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15911 cCE(fmuls, e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15912 cCE(fdivs, e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15913 cCE(fmacs, e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15914 cCE(fmscs, e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15915 cCE(fnmuls, e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15916 cCE(fnmacs, e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15917 cCE(fnmscs, e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15919 /* Comparisons. */
15920 cCE(fcmps, eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
15921 cCE(fcmpzs, eb50a40, 1, (RVS), vfp_sp_compare_z),
15922 cCE(fcmpes, eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
15923 cCE(fcmpezs, eb50ac0, 1, (RVS), vfp_sp_compare_z),
15925 #undef ARM_VARIANT
15926 #define ARM_VARIANT &fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
15927 /* Moves and type conversions. */
15928 cCE(fcpyd, eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
15929 cCE(fcvtds, eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
15930 cCE(fcvtsd, eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
15931 cCE(fmdhr, e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
15932 cCE(fmdlr, e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
15933 cCE(fmrdh, e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
15934 cCE(fmrdl, e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
15935 cCE(fsitod, eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
15936 cCE(fuitod, eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
15937 cCE(ftosid, ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
15938 cCE(ftosizd, ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
15939 cCE(ftouid, ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
15940 cCE(ftouizd, ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
15942 /* Memory operations. */
15943 cCE(fldd, d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
15944 cCE(fstd, d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
15945 cCE(fldmiad, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
15946 cCE(fldmfdd, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
15947 cCE(fldmdbd, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
15948 cCE(fldmead, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
15949 cCE(fstmiad, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
15950 cCE(fstmead, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
15951 cCE(fstmdbd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
15952 cCE(fstmfdd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
15954 /* Monadic operations. */
15955 cCE(fabsd, eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
15956 cCE(fnegd, eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
15957 cCE(fsqrtd, eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
15959 /* Dyadic operations. */
15960 cCE(faddd, e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15961 cCE(fsubd, e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15962 cCE(fmuld, e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15963 cCE(fdivd, e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15964 cCE(fmacd, e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15965 cCE(fmscd, e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15966 cCE(fnmuld, e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15967 cCE(fnmacd, e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15968 cCE(fnmscd, e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15970 /* Comparisons. */
15971 cCE(fcmpd, eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
15972 cCE(fcmpzd, eb50b40, 1, (RVD), vfp_dp_rd),
15973 cCE(fcmped, eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
15974 cCE(fcmpezd, eb50bc0, 1, (RVD), vfp_dp_rd),
15976 #undef ARM_VARIANT
15977 #define ARM_VARIANT &fpu_vfp_ext_v2
15978 cCE(fmsrr, c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
15979 cCE(fmrrs, c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
15980 cCE(fmdrr, c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
15981 cCE(fmrrd, c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
15983 /* Instructions which may belong to either the Neon or VFP instruction sets.
15984 Individual encoder functions perform additional architecture checks. */
15985 #undef ARM_VARIANT
15986 #define ARM_VARIANT &fpu_vfp_ext_v1xd
15987 #undef THUMB_VARIANT
15988 #define THUMB_VARIANT &fpu_vfp_ext_v1xd
15989 /* These mnemonics are unique to VFP. */
15990 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
15991 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
15992 nCE(vnmul, vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
15993 nCE(vnmla, vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
15994 nCE(vnmls, vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
15995 nCE(vcmp, vcmp, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
15996 nCE(vcmpe, vcmpe, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
15997 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
15998 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
15999 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
16001 /* Mnemonics shared by Neon and VFP. */
16002 nCEF(vmul, vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
16003 nCEF(vmla, vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
16004 nCEF(vmls, vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
16006 nCEF(vadd, vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
16007 nCEF(vsub, vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
16009 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
16010 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
16012 NCE(vldm, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm),
16013 NCE(vldmia, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm),
16014 NCE(vldmdb, d100b00, 2, (RRw, VRSDLST), neon_ldm_stm),
16015 NCE(vstm, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm),
16016 NCE(vstmia, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm),
16017 NCE(vstmdb, d000b00, 2, (RRw, VRSDLST), neon_ldm_stm),
16018 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
16019 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
16021 nCEF(vcvt, vcvt, 3, (RNSDQ, RNSDQ, oI32b), neon_cvt),
16022 nCEF(vcvtb, vcvt, 2, (RVS, RVS), neon_cvtb),
16023 nCEF(vcvtt, vcvt, 2, (RVS, RVS), neon_cvtt),
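/* vcvtb and vcvtt are the half-precision conversion mnemonics added by this
   patch: they convert between a half-precision value held in the bottom
   (vcvtb) or top (vcvtt) halfword of a single-precision register and a full
   single-precision value, hence the (RVS, RVS) operands.  */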
16026 /* NOTE: All VMOV encoding is special-cased! */
16027 NCE(vmov, 0, 1, (VMOV), neon_mov),
16028 NCE(vmovq, 0, 1, (VMOV), neon_mov),
16030 #undef THUMB_VARIANT
16031 #define THUMB_VARIANT &fpu_neon_ext_v1
16032 #undef ARM_VARIANT
16033 #define ARM_VARIANT &fpu_neon_ext_v1
16034 /* Data processing with three registers of the same length. */
16035 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
16036 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
16037 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
16038 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
16039 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
16040 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
16041 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
16042 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
16043 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
16044 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
16045 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
16046 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
16047 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
16048 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
16049 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
16050 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
16051 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
16052 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
16053 /* If not immediate, fall back to neon_dyadic_i64_su.
16054 shl_imm should accept I8 I16 I32 I64,
16055 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
16056 nUF(vshl, vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
16057 nUF(vshlq, vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
16058 nUF(vqshl, vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
16059 nUF(vqshlq, vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
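/* For example (illustrative only): "vshl.i32 d0, d1, #3" takes the immediate
   path through neon_shl_imm, while "vshl.s32 d0, d1, d2" has no immediate and
   is treated as a register shift via the fallback described above.  */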
16060 /* Logic ops, types optional & ignored. */
16061 nUF(vand, vand, 2, (RNDQ, NILO), neon_logic),
16062 nUF(vandq, vand, 2, (RNQ, NILO), neon_logic),
16063 nUF(vbic, vbic, 2, (RNDQ, NILO), neon_logic),
16064 nUF(vbicq, vbic, 2, (RNQ, NILO), neon_logic),
16065 nUF(vorr, vorr, 2, (RNDQ, NILO), neon_logic),
16066 nUF(vorrq, vorr, 2, (RNQ, NILO), neon_logic),
16067 nUF(vorn, vorn, 2, (RNDQ, NILO), neon_logic),
16068 nUF(vornq, vorn, 2, (RNQ, NILO), neon_logic),
16069 nUF(veor, veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
16070 nUF(veorq, veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
16071 /* Bitfield ops, untyped. */
16072 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
16073 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
16074 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
16075 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
16076 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
16077 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
16078 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
16079 nUF(vabd, vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
16080 nUF(vabdq, vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
16081 nUF(vmax, vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
16082 nUF(vmaxq, vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
16083 nUF(vmin, vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
16084 nUF(vminq, vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
16085 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
16086 back to neon_dyadic_if_su. */
16087 nUF(vcge, vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
16088 nUF(vcgeq, vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
16089 nUF(vcgt, vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
16090 nUF(vcgtq, vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
16091 nUF(vclt, vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
16092 nUF(vcltq, vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
16093 nUF(vcle, vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
16094 nUF(vcleq, vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
16095 /* Comparison. Type I8 I16 I32 F32. */
16096 nUF(vceq, vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
16097 nUF(vceqq, vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
16098 /* As above, D registers only. */
16099 nUF(vpmax, vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
16100 nUF(vpmin, vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
16101 /* Int and float variants, signedness unimportant. */
16102 nUF(vmlaq, vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
16103 nUF(vmlsq, vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
16104 nUF(vpadd, vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
16105 /* Add/sub take types I8 I16 I32 I64 F32. */
16106 nUF(vaddq, vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
16107 nUF(vsubq, vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
16108 /* vtst takes sizes 8, 16, 32. */
16109 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
16110 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
16111 /* VMUL takes I8 I16 I32 F32 P8. */
16112 nUF(vmulq, vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
16113 /* VQD{R}MULH takes S16 S32. */
16114 nUF(vqdmulh, vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
16115 nUF(vqdmulhq, vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
16116 nUF(vqrdmulh, vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
16117 nUF(vqrdmulhq, vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
16118 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
16119 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
16120 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
16121 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
16122 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
16123 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
16124 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
16125 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
16126 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
16127 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
16128 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
16129 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
16131 /* Two address, int/float. Types S8 S16 S32 F32. */
16132 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
16133 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
16135 /* Data processing with two registers and a shift amount. */
16136 /* Right shifts, and variants with rounding.
16137 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
16138 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
16139 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
16140 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
16141 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
16142 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
16143 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
16144 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
16145 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
16146 /* Shift and insert. Sizes accepted 8 16 32 64. */
16147 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
16148 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
16149 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
16150 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
16151 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
16152 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
16153 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
16154 /* Right shift immediate, saturating & narrowing, with rounding variants.
16155 Types accepted S16 S32 S64 U16 U32 U64. */
16156 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
16157 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
16158 /* As above, unsigned. Types accepted S16 S32 S64. */
16159 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
16160 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
16161 /* Right shift narrowing. Types accepted I16 I32 I64. */
16162 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
16163 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
16164 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
16165 nUF(vshll, vshll, 3, (RNQ, RND, I32), neon_shll),
16166 /* CVT with optional immediate for fixed-point variant. */
16167 nUF(vcvtq, vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
16169 nUF(vmvn, vmvn, 2, (RNDQ, RNDQ_IMVNb), neon_mvn),
16170 nUF(vmvnq, vmvn, 2, (RNQ, RNDQ_IMVNb), neon_mvn),
16172 /* Data processing, three registers of different lengths. */
16173 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
16174 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
16175 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
16176 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
16177 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
16178 /* If not scalar, fall back to neon_dyadic_long.
16179 Vector types as above, scalar types S16 S32 U16 U32. */
16180 nUF(vmlal, vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
16181 nUF(vmlsl, vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
16182 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
16183 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
16184 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
16185 /* Dyadic, narrowing insns. Types I16 I32 I64. */
16186 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
16187 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
16188 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
16189 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
16190 /* Saturating doubling multiplies. Types S16 S32. */
16191 nUF(vqdmlal, vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
16192 nUF(vqdmlsl, vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
16193 nUF(vqdmull, vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
16194 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
16195 S16 S32 U16 U32. */
16196 nUF(vmull, vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
16198 /* Extract. Size 8. */
16199 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
16200 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
16202 /* Two registers, miscellaneous. */
16203 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
16204 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
16205 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
16206 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
16207 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
16208 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
16209 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
16210 /* Vector replicate. Sizes 8 16 32. */
16211 nCE(vdup, vdup, 2, (RNDQ, RR_RNSC), neon_dup),
16212 nCE(vdupq, vdup, 2, (RNQ, RR_RNSC), neon_dup),
16213 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
16214 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
16215 /* VMOVN. Types I16 I32 I64. */
16216 nUF(vmovn, vmovn, 2, (RND, RNQ), neon_movn),
16217 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
16218 nUF(vqmovn, vqmovn, 2, (RND, RNQ), neon_qmovn),
16219 /* VQMOVUN. Types S16 S32 S64. */
16220 nUF(vqmovun, vqmovun, 2, (RND, RNQ), neon_qmovun),
16221 /* VZIP / VUZP. Sizes 8 16 32. */
16222 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
16223 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
16224 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
16225 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
16226 /* VQABS / VQNEG. Types S8 S16 S32. */
16227 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
16228 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
16229 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
16230 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
16231 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
16232 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
16233 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
16234 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
16235 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
16236 /* Reciprocal estimates. Types U32 F32. */
16237 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
16238 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
16239 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
16240 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
16241 /* VCLS. Types S8 S16 S32. */
16242 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
16243 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
16244 /* VCLZ. Types I8 I16 I32. */
16245 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
16246 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
16247 /* VCNT. Size 8. */
16248 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
16249 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
16250 /* Two address, untyped. */
16251 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
16252 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
16253 /* VTRN. Sizes 8 16 32. */
16254 nUF(vtrn, vtrn, 2, (RNDQ, RNDQ), neon_trn),
16255 nUF(vtrnq, vtrn, 2, (RNQ, RNQ), neon_trn),
16257 /* Table lookup. Size 8. */
16258 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
16259 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
16261 #undef THUMB_VARIANT
16262 #define THUMB_VARIANT &fpu_vfp_v3_or_neon_ext
16263 #undef ARM_VARIANT
16264 #define ARM_VARIANT &fpu_vfp_v3_or_neon_ext
16265 /* Neon element/structure load/store. */
16266 nUF(vld1, vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
16267 nUF(vst1, vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
16268 nUF(vld2, vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
16269 nUF(vst2, vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
16270 nUF(vld3, vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
16271 nUF(vst3, vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
16272 nUF(vld4, vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
16273 nUF(vst4, vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
16275 #undef THUMB_VARIANT
16276 #define THUMB_VARIANT &fpu_vfp_ext_v3
16277 #undef ARM_VARIANT
16278 #define ARM_VARIANT &fpu_vfp_ext_v3
16279 cCE(fconsts, eb00a00, 2, (RVS, I255), vfp_sp_const),
16280 cCE(fconstd, eb00b00, 2, (RVD, I255), vfp_dp_const),
16281 cCE(fshtos, eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
16282 cCE(fshtod, eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
16283 cCE(fsltos, eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
16284 cCE(fsltod, eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
16285 cCE(fuhtos, ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
16286 cCE(fuhtod, ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
16287 cCE(fultos, ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
16288 cCE(fultod, ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
16289 cCE(ftoshs, ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
16290 cCE(ftoshd, ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
16291 cCE(ftosls, ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
16292 cCE(ftosld, ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
16293 cCE(ftouhs, ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
16294 cCE(ftouhd, ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
16295 cCE(ftouls, ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
16296 cCE(ftould, ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
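/* The immediate in these fixed-point conversions is the number of fraction
   bits; for example "fsltos s0, #16" (an illustrative use, not taken from
   this file) converts a signed 32-bit fixed-point value with 16 fraction
   bits to single precision.  The halfword forms therefore take an I16z
   operand and the word forms an I32 operand.  */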
16298 #undef THUMB_VARIANT
16299 #undef ARM_VARIANT
16300 #define ARM_VARIANT &arm_cext_xscale /* Intel XScale extensions. */
16301 cCE(mia, e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
16302 cCE(miaph, e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
16303 cCE(miabb, e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
16304 cCE(miabt, e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
16305 cCE(miatb, e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
16306 cCE(miatt, e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
16307 cCE(mar, c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
16308 cCE(mra, c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
16310 #undef ARM_VARIANT
16311 #define ARM_VARIANT &arm_cext_iwmmxt /* Intel Wireless MMX technology. */
16312 cCE(tandcb, e13f130, 1, (RR), iwmmxt_tandorc),
16313 cCE(tandch, e53f130, 1, (RR), iwmmxt_tandorc),
16314 cCE(tandcw, e93f130, 1, (RR), iwmmxt_tandorc),
16315 cCE(tbcstb, e400010, 2, (RIWR, RR), rn_rd),
16316 cCE(tbcsth, e400050, 2, (RIWR, RR), rn_rd),
16317 cCE(tbcstw, e400090, 2, (RIWR, RR), rn_rd),
16318 cCE(textrcb, e130170, 2, (RR, I7), iwmmxt_textrc),
16319 cCE(textrch, e530170, 2, (RR, I7), iwmmxt_textrc),
16320 cCE(textrcw, e930170, 2, (RR, I7), iwmmxt_textrc),
16321 cCE(textrmub, e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
16322 cCE(textrmuh, e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
16323 cCE(textrmuw, e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
16324 cCE(textrmsb, e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
16325 cCE(textrmsh, e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
16326 cCE(textrmsw, e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
16327 cCE(tinsrb, e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
16328 cCE(tinsrh, e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
16329 cCE(tinsrw, e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
16330 cCE(tmcr, e000110, 2, (RIWC_RIWG, RR), rn_rd),
16331 cCE(tmcrr, c400000, 3, (RIWR, RR, RR), rm_rd_rn),
16332 cCE(tmia, e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
16333 cCE(tmiaph, e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
16334 cCE(tmiabb, e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
16335 cCE(tmiabt, e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
16336 cCE(tmiatb, e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
16337 cCE(tmiatt, e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
16338 cCE(tmovmskb, e100030, 2, (RR, RIWR), rd_rn),
16339 cCE(tmovmskh, e500030, 2, (RR, RIWR), rd_rn),
16340 cCE(tmovmskw, e900030, 2, (RR, RIWR), rd_rn),
16341 cCE(tmrc, e100110, 2, (RR, RIWC_RIWG), rd_rn),
16342 cCE(tmrrc, c500000, 3, (RR, RR, RIWR), rd_rn_rm),
16343 cCE(torcb, e13f150, 1, (RR), iwmmxt_tandorc),
16344 cCE(torch, e53f150, 1, (RR), iwmmxt_tandorc),
16345 cCE(torcw, e93f150, 1, (RR), iwmmxt_tandorc),
16346 cCE(waccb, e0001c0, 2, (RIWR, RIWR), rd_rn),
16347 cCE(wacch, e4001c0, 2, (RIWR, RIWR), rd_rn),
16348 cCE(waccw, e8001c0, 2, (RIWR, RIWR), rd_rn),
16349 cCE(waddbss, e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16350 cCE(waddb, e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16351 cCE(waddbus, e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16352 cCE(waddhss, e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16353 cCE(waddh, e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16354 cCE(waddhus, e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16355 cCE(waddwss, eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16356 cCE(waddw, e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16357 cCE(waddwus, e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16358 cCE(waligni, e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
16359 cCE(walignr0, e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16360 cCE(walignr1, e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16361 cCE(walignr2, ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16362 cCE(walignr3, eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16363 cCE(wand, e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16364 cCE(wandn, e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16365 cCE(wavg2b, e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16366 cCE(wavg2br, e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16367 cCE(wavg2h, ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16368 cCE(wavg2hr, ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16369 cCE(wcmpeqb, e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16370 cCE(wcmpeqh, e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16371 cCE(wcmpeqw, e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16372 cCE(wcmpgtub, e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16373 cCE(wcmpgtuh, e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16374 cCE(wcmpgtuw, e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16375 cCE(wcmpgtsb, e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16376 cCE(wcmpgtsh, e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16377 cCE(wcmpgtsw, eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16378 cCE(wldrb, c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
16379 cCE(wldrh, c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
16380 cCE(wldrw, c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
16381 cCE(wldrd, c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
16382 cCE(wmacs, e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16383 cCE(wmacsz, e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16384 cCE(wmacu, e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16385 cCE(wmacuz, e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16386 cCE(wmadds, ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16387 cCE(wmaddu, e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16388 cCE(wmaxsb, e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16389 cCE(wmaxsh, e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16390 cCE(wmaxsw, ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16391 cCE(wmaxub, e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16392 cCE(wmaxuh, e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16393 cCE(wmaxuw, e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16394 cCE(wminsb, e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16395 cCE(wminsh, e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16396 cCE(wminsw, eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16397 cCE(wminub, e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16398 cCE(wminuh, e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16399 cCE(wminuw, e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16400 cCE(wmov, e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
16401 cCE(wmulsm, e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16402 cCE(wmulsl, e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16403 cCE(wmulum, e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16404 cCE(wmulul, e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16405 cCE(wor, e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16406 cCE(wpackhss, e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16407 cCE(wpackhus, e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16408 cCE(wpackwss, eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16409 cCE(wpackwus, e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16410 cCE(wpackdss, ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16411 cCE(wpackdus, ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16412 cCE(wrorh, e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16413 cCE(wrorhg, e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16414 cCE(wrorw, eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16415 cCE(wrorwg, eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16416 cCE(wrord, ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16417 cCE(wrordg, ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16418 cCE(wsadb, e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16419 cCE(wsadbz, e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16420 cCE(wsadh, e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16421 cCE(wsadhz, e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16422 cCE(wshufh, e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
16423 cCE(wsllh, e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16424 cCE(wsllhg, e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16425 cCE(wsllw, e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16426 cCE(wsllwg, e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16427 cCE(wslld, ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16428 cCE(wslldg, ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16429 cCE(wsrah, e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16430 cCE(wsrahg, e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16431 cCE(wsraw, e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16432 cCE(wsrawg, e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16433 cCE(wsrad, ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16434 cCE(wsradg, ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16435 cCE(wsrlh, e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16436 cCE(wsrlhg, e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16437 cCE(wsrlw, ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16438 cCE(wsrlwg, ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16439 cCE(wsrld, ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16440 cCE(wsrldg, ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16441 cCE(wstrb, c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
16442 cCE(wstrh, c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
16443 cCE(wstrw, c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
16444 cCE(wstrd, c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
16445 cCE(wsubbss, e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16446 cCE(wsubb, e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16447 cCE(wsubbus, e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16448 cCE(wsubhss, e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16449 cCE(wsubh, e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16450 cCE(wsubhus, e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16451 cCE(wsubwss, eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16452 cCE(wsubw, e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16453 cCE(wsubwus, e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16454 cCE(wunpckehub,e0000c0, 2, (RIWR, RIWR), rd_rn),
16455 cCE(wunpckehuh,e4000c0, 2, (RIWR, RIWR), rd_rn),
16456 cCE(wunpckehuw,e8000c0, 2, (RIWR, RIWR), rd_rn),
16457 cCE(wunpckehsb,e2000c0, 2, (RIWR, RIWR), rd_rn),
16458 cCE(wunpckehsh,e6000c0, 2, (RIWR, RIWR), rd_rn),
16459 cCE(wunpckehsw,ea000c0, 2, (RIWR, RIWR), rd_rn),
16460 cCE(wunpckihb, e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16461 cCE(wunpckihh, e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16462 cCE(wunpckihw, e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16463 cCE(wunpckelub,e0000e0, 2, (RIWR, RIWR), rd_rn),
16464 cCE(wunpckeluh,e4000e0, 2, (RIWR, RIWR), rd_rn),
16465 cCE(wunpckeluw,e8000e0, 2, (RIWR, RIWR), rd_rn),
16466 cCE(wunpckelsb,e2000e0, 2, (RIWR, RIWR), rd_rn),
16467 cCE(wunpckelsh,e6000e0, 2, (RIWR, RIWR), rd_rn),
16468 cCE(wunpckelsw,ea000e0, 2, (RIWR, RIWR), rd_rn),
16469 cCE(wunpckilb, e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16470 cCE(wunpckilh, e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16471 cCE(wunpckilw, e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16472 cCE(wxor, e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16473 cCE(wzero, e300000, 1, (RIWR), iwmmxt_wzero),
16475 #undef ARM_VARIANT
16476 #define ARM_VARIANT &arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
16477 cCE(torvscb, e13f190, 1, (RR), iwmmxt_tandorc),
16478 cCE(torvsch, e53f190, 1, (RR), iwmmxt_tandorc),
16479 cCE(torvscw, e93f190, 1, (RR), iwmmxt_tandorc),
16480 cCE(wabsb, e2001c0, 2, (RIWR, RIWR), rd_rn),
16481 cCE(wabsh, e6001c0, 2, (RIWR, RIWR), rd_rn),
16482 cCE(wabsw, ea001c0, 2, (RIWR, RIWR), rd_rn),
16483 cCE(wabsdiffb, e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16484 cCE(wabsdiffh, e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16485 cCE(wabsdiffw, e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16486 cCE(waddbhusl, e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16487 cCE(waddbhusm, e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16488 cCE(waddhc, e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16489 cCE(waddwc, ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16490 cCE(waddsubhx, ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16491 cCE(wavg4, e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16492 cCE(wavg4r, e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16493 cCE(wmaddsn, ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16494 cCE(wmaddsx, eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16495 cCE(wmaddun, ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16496 cCE(wmaddux, e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16497 cCE(wmerge, e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
16498 cCE(wmiabb, e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16499 cCE(wmiabt, e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16500 cCE(wmiatb, e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16501 cCE(wmiatt, e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16502 cCE(wmiabbn, e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16503 cCE(wmiabtn, e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16504 cCE(wmiatbn, e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16505 cCE(wmiattn, e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16506 cCE(wmiawbb, e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16507 cCE(wmiawbt, e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16508 cCE(wmiawtb, ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16509 cCE(wmiawtt, eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16510 cCE(wmiawbbn, ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16511 cCE(wmiawbtn, ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16512 cCE(wmiawtbn, ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16513 cCE(wmiawttn, ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16514 cCE(wmulsmr, ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16515 cCE(wmulumr, ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16516 cCE(wmulwumr, ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16517 cCE(wmulwsmr, ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16518 cCE(wmulwum, ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16519 cCE(wmulwsm, ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16520 cCE(wmulwl, eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16521 cCE(wqmiabb, e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16522 cCE(wqmiabt, e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16523 cCE(wqmiatb, ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16524 cCE(wqmiatt, eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16525 cCE(wqmiabbn, ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16526 cCE(wqmiabtn, ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16527 cCE(wqmiatbn, ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16528 cCE(wqmiattn, ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16529 cCE(wqmulm, e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16530 cCE(wqmulmr, e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16531 cCE(wqmulwm, ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16532 cCE(wqmulwmr, ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16533 cCE(wsubaddhx, ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16535 #undef ARM_VARIANT
16536 #define ARM_VARIANT &arm_cext_maverick /* Cirrus Maverick instructions. */
16537 cCE(cfldrs, c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
16538 cCE(cfldrd, c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
16539 cCE(cfldr32, c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
16540 cCE(cfldr64, c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
16541 cCE(cfstrs, c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
16542 cCE(cfstrd, c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
16543 cCE(cfstr32, c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
16544 cCE(cfstr64, c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
16545 cCE(cfmvsr, e000450, 2, (RMF, RR), rn_rd),
16546 cCE(cfmvrs, e100450, 2, (RR, RMF), rd_rn),
16547 cCE(cfmvdlr, e000410, 2, (RMD, RR), rn_rd),
16548 cCE(cfmvrdl, e100410, 2, (RR, RMD), rd_rn),
16549 cCE(cfmvdhr, e000430, 2, (RMD, RR), rn_rd),
16550 cCE(cfmvrdh, e100430, 2, (RR, RMD), rd_rn),
16551 cCE(cfmv64lr, e000510, 2, (RMDX, RR), rn_rd),
16552 cCE(cfmvr64l, e100510, 2, (RR, RMDX), rd_rn),
16553 cCE(cfmv64hr, e000530, 2, (RMDX, RR), rn_rd),
16554 cCE(cfmvr64h, e100530, 2, (RR, RMDX), rd_rn),
16555 cCE(cfmval32, e200440, 2, (RMAX, RMFX), rd_rn),
16556 cCE(cfmv32al, e100440, 2, (RMFX, RMAX), rd_rn),
16557 cCE(cfmvam32, e200460, 2, (RMAX, RMFX), rd_rn),
16558 cCE(cfmv32am, e100460, 2, (RMFX, RMAX), rd_rn),
16559 cCE(cfmvah32, e200480, 2, (RMAX, RMFX), rd_rn),
16560 cCE(cfmv32ah, e100480, 2, (RMFX, RMAX), rd_rn),
16561 cCE(cfmva32, e2004a0, 2, (RMAX, RMFX), rd_rn),
16562 cCE(cfmv32a, e1004a0, 2, (RMFX, RMAX), rd_rn),
16563 cCE(cfmva64, e2004c0, 2, (RMAX, RMDX), rd_rn),
16564 cCE(cfmv64a, e1004c0, 2, (RMDX, RMAX), rd_rn),
16565 cCE(cfmvsc32, e2004e0, 2, (RMDS, RMDX), mav_dspsc),
16566 cCE(cfmv32sc, e1004e0, 2, (RMDX, RMDS), rd),
16567 cCE(cfcpys, e000400, 2, (RMF, RMF), rd_rn),
16568 cCE(cfcpyd, e000420, 2, (RMD, RMD), rd_rn),
16569 cCE(cfcvtsd, e000460, 2, (RMD, RMF), rd_rn),
16570 cCE(cfcvtds, e000440, 2, (RMF, RMD), rd_rn),
16571 cCE(cfcvt32s, e000480, 2, (RMF, RMFX), rd_rn),
16572 cCE(cfcvt32d, e0004a0, 2, (RMD, RMFX), rd_rn),
16573 cCE(cfcvt64s, e0004c0, 2, (RMF, RMDX), rd_rn),
16574 cCE(cfcvt64d, e0004e0, 2, (RMD, RMDX), rd_rn),
16575 cCE(cfcvts32, e100580, 2, (RMFX, RMF), rd_rn),
16576 cCE(cfcvtd32, e1005a0, 2, (RMFX, RMD), rd_rn),
16577 cCE(cftruncs32,e1005c0, 2, (RMFX, RMF), rd_rn),
16578 cCE(cftruncd32,e1005e0, 2, (RMFX, RMD), rd_rn),
16579 cCE(cfrshl32, e000550, 3, (RMFX, RMFX, RR), mav_triple),
16580 cCE(cfrshl64, e000570, 3, (RMDX, RMDX, RR), mav_triple),
16581 cCE(cfsh32, e000500, 3, (RMFX, RMFX, I63s), mav_shift),
16582 cCE(cfsh64, e200500, 3, (RMDX, RMDX, I63s), mav_shift),
16583 cCE(cfcmps, e100490, 3, (RR, RMF, RMF), rd_rn_rm),
16584 cCE(cfcmpd, e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
16585 cCE(cfcmp32, e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
16586 cCE(cfcmp64, e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
16587 cCE(cfabss, e300400, 2, (RMF, RMF), rd_rn),
16588 cCE(cfabsd, e300420, 2, (RMD, RMD), rd_rn),
16589 cCE(cfnegs, e300440, 2, (RMF, RMF), rd_rn),
16590 cCE(cfnegd, e300460, 2, (RMD, RMD), rd_rn),
16591 cCE(cfadds, e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
16592 cCE(cfaddd, e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
16593 cCE(cfsubs, e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
16594 cCE(cfsubd, e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
16595 cCE(cfmuls, e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
16596 cCE(cfmuld, e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
16597 cCE(cfabs32, e300500, 2, (RMFX, RMFX), rd_rn),
16598 cCE(cfabs64, e300520, 2, (RMDX, RMDX), rd_rn),
16599 cCE(cfneg32, e300540, 2, (RMFX, RMFX), rd_rn),
16600 cCE(cfneg64, e300560, 2, (RMDX, RMDX), rd_rn),
16601 cCE(cfadd32, e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
16602 cCE(cfadd64, e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
16603 cCE(cfsub32, e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
16604 cCE(cfsub64, e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
16605 cCE(cfmul32, e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
16606 cCE(cfmul64, e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
16607 cCE(cfmac32, e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
16608 cCE(cfmsc32, e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
16609 cCE(cfmadd32, e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
16610 cCE(cfmsub32, e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
16611 cCE(cfmadda32, e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
16612 cCE(cfmsuba32, e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
16614 #undef ARM_VARIANT
16615 #undef THUMB_VARIANT
16616 #undef TCE
16617 #undef TCM
16618 #undef TUE
16619 #undef TUF
16620 #undef TCC
16621 #undef cCE
16622 #undef cCL
16623 #undef C3E
16624 #undef CE
16625 #undef CM
16626 #undef UE
16627 #undef UF
16628 #undef UT
16629 #undef NUF
16630 #undef nUF
16631 #undef NCE
16632 #undef nCE
16633 #undef OPS0
16634 #undef OPS1
16635 #undef OPS2
16636 #undef OPS3
16637 #undef OPS4
16638 #undef OPS5
16639 #undef OPS6
16640 #undef do_0
16642 /* MD interface: bits in the object file. */
16644 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
16645 for use in the a.out file, and store them in the array pointed to by buf.
16646 This knows about the endianness of the target machine and does
16647 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte),
16648 2 (short) and 4 (long). Floating-point numbers are put out as a series of
16649 LITTLENUMS (shorts, here at least). */
16651 void
16652 md_number_to_chars (char * buf, valueT val, int n)
16653 {
16654 if (target_big_endian)
16655 number_to_chars_bigendian (buf, val, n);
16656 else
16657 number_to_chars_littleendian (buf, val, n);
16658 }
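/* A minimal usage sketch (not part of the original file):

     char buf[4];
     md_number_to_chars (buf, 0xe1a00000, 4);

   leaves buf holding 00 00 a0 e1 on a little-endian target and
   e1 a0 00 00 on a big-endian one -- the same byte patterns used for the
   ARM no-op padding in arm_handle_align below.  */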
16660 static valueT
16661 md_chars_to_number (char * buf, int n)
16662 {
16663 valueT result = 0;
16664 unsigned char * where = (unsigned char *) buf;
16666 if (target_big_endian)
16667 {
16668 while (n--)
16669 {
16670 result <<= 8;
16671 result |= (*where++ & 255);
16672 }
16673 }
16674 else
16675 {
16676 while (n--)
16677 {
16678 result <<= 8;
16679 result |= (where[n] & 255);
16680 }
16681 }
16683 return result;
16684 }
16686 /* MD interface: Sections. */
16688 /* Estimate the size of a frag before relaxing. Assume everything fits in
16689 2 bytes. */
16691 int
16692 md_estimate_size_before_relax (fragS * fragp,
16693 segT segtype ATTRIBUTE_UNUSED)
16694 {
16695 fragp->fr_var = 2;
16696 return 2;
16697 }
16699 /* Convert a machine dependent frag. */
16701 void
16702 md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
16704 unsigned long insn;
16705 unsigned long old_op;
16706 char *buf;
16707 expressionS exp;
16708 fixS *fixp;
16709 int reloc_type;
16710 int pc_rel;
16711 int opcode;
16713 buf = fragp->fr_literal + fragp->fr_fix;
16715 old_op = bfd_get_16(abfd, buf);
16716 if (fragp->fr_symbol)
16718 exp.X_op = O_symbol;
16719 exp.X_add_symbol = fragp->fr_symbol;
16721 else
16723 exp.X_op = O_constant;
16725 exp.X_add_number = fragp->fr_offset;
16726 opcode = fragp->fr_subtype;
16727 switch (opcode)
16729 case T_MNEM_ldr_pc:
16730 case T_MNEM_ldr_pc2:
16731 case T_MNEM_ldr_sp:
16732 case T_MNEM_str_sp:
16733 case T_MNEM_ldr:
16734 case T_MNEM_ldrb:
16735 case T_MNEM_ldrh:
16736 case T_MNEM_str:
16737 case T_MNEM_strb:
16738 case T_MNEM_strh:
16739 if (fragp->fr_var == 4)
16741 insn = THUMB_OP32 (opcode);
16742 if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
16744 insn |= (old_op & 0x700) << 4;
16746 else
16748 insn |= (old_op & 7) << 12;
16749 insn |= (old_op & 0x38) << 13;
16751 insn |= 0x00000c00;
16752 put_thumb32_insn (buf, insn);
16753 reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
16755 else
16757 reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
16759 pc_rel = (opcode == T_MNEM_ldr_pc2);
16760 break;
16761 case T_MNEM_adr:
16762 if (fragp->fr_var == 4)
16764 insn = THUMB_OP32 (opcode);
16765 insn |= (old_op & 0xf0) << 4;
16766 put_thumb32_insn (buf, insn);
16767 reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
16769 else
16771 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
16772 exp.X_add_number -= 4;
16774 pc_rel = 1;
16775 break;
16776 case T_MNEM_mov:
16777 case T_MNEM_movs:
16778 case T_MNEM_cmp:
16779 case T_MNEM_cmn:
16780 if (fragp->fr_var == 4)
16782 int r0off = (opcode == T_MNEM_mov
16783 || opcode == T_MNEM_movs) ? 0 : 8;
16784 insn = THUMB_OP32 (opcode);
16785 insn = (insn & 0xe1ffffff) | 0x10000000;
16786 insn |= (old_op & 0x700) << r0off;
16787 put_thumb32_insn (buf, insn);
16788 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
16790 else
16792 reloc_type = BFD_RELOC_ARM_THUMB_IMM;
16794 pc_rel = 0;
16795 break;
16796 case T_MNEM_b:
16797 if (fragp->fr_var == 4)
16799 insn = THUMB_OP32(opcode);
16800 put_thumb32_insn (buf, insn);
16801 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
16803 else
16804 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
16805 pc_rel = 1;
16806 break;
16807 case T_MNEM_bcond:
16808 if (fragp->fr_var == 4)
16810 insn = THUMB_OP32(opcode);
16811 insn |= (old_op & 0xf00) << 14;
16812 put_thumb32_insn (buf, insn);
16813 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
16815 else
16816 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
16817 pc_rel = 1;
16818 break;
16819 case T_MNEM_add_sp:
16820 case T_MNEM_add_pc:
16821 case T_MNEM_inc_sp:
16822 case T_MNEM_dec_sp:
16823 if (fragp->fr_var == 4)
16825 /* ??? Choose between add and addw. */
16826 insn = THUMB_OP32 (opcode);
16827 insn |= (old_op & 0xf0) << 4;
16828 put_thumb32_insn (buf, insn);
16829 if (opcode == T_MNEM_add_pc)
16830 reloc_type = BFD_RELOC_ARM_T32_IMM12;
16831 else
16832 reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
16834 else
16835 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
16836 pc_rel = 0;
16837 break;
16839 case T_MNEM_addi:
16840 case T_MNEM_addis:
16841 case T_MNEM_subi:
16842 case T_MNEM_subis:
16843 if (fragp->fr_var == 4)
16845 insn = THUMB_OP32 (opcode);
16846 insn |= (old_op & 0xf0) << 4;
16847 insn |= (old_op & 0xf) << 16;
16848 put_thumb32_insn (buf, insn);
16849 if (insn & (1 << 20))
16850 reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
16851 else
16852 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
16854 else
16855 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
16856 pc_rel = 0;
16857 break;
16858 default:
16859 abort ();
16861 fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
16862 reloc_type);
16863 fixp->fx_file = fragp->fr_file;
16864 fixp->fx_line = fragp->fr_line;
16865 fragp->fr_fix += fragp->fr_var;
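/* Worked example of the conversion above: a conditional branch (T_MNEM_bcond)
   that must be widened has its condition field (old_op & 0xf00) shifted up by
   14 into the 32-bit opcode and gets a BFD_RELOC_THUMB_PCREL_BRANCH20 fixup;
   if it stays narrow it keeps BFD_RELOC_THUMB_PCREL_BRANCH9 instead.  */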
16868 /* Return the size of a relaxable immediate operand instruction.
16869 SHIFT and SIZE specify the form of the allowable immediate. */
16870 static int
16871 relax_immediate (fragS *fragp, int size, int shift)
16872 {
16873 offsetT offset;
16874 offsetT mask;
16875 offsetT low;
16877 /* ??? Should be able to do better than this. */
16878 if (fragp->fr_symbol)
16879 return 4;
16881 low = (1 << shift) - 1;
16882 mask = (1 << (shift + size)) - (1 << shift);
16883 offset = fragp->fr_offset;
16884 /* Force misaligned offsets to 32-bit variant. */
16885 if (offset & low)
16886 return 4;
16887 if (offset & ~mask)
16888 return 4;
16889 return 2;
16890 }
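/* Worked example: for a narrow ldr/str, arm_relax_frag below calls this with
   size = 5 and shift = 2, so low is 3 and mask is 0x7c; only word-aligned
   constant offsets in the range 0..124 keep the 16-bit encoding, and symbolic
   or out-of-range offsets force the 32-bit form.  */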
16892 /* Get the address of a symbol during relaxation. */
16893 static addressT
16894 relaxed_symbol_addr (fragS *fragp, long stretch)
16896 fragS *sym_frag;
16897 addressT addr;
16898 symbolS *sym;
16900 sym = fragp->fr_symbol;
16901 sym_frag = symbol_get_frag (sym);
16902 know (S_GET_SEGMENT (sym) != absolute_section
16903 || sym_frag == &zero_address_frag);
16904 addr = S_GET_VALUE (sym) + fragp->fr_offset;
16906 /* If frag has yet to be reached on this pass, assume it will
16907 move by STRETCH just as we did. If this is not so, it will
16908 be because some frag in between grows, and that will force
16909 another pass. */
16911 if (stretch != 0
16912 && sym_frag->relax_marker != fragp->relax_marker)
16914 fragS *f;
16916 /* Adjust stretch for any alignment frag. Note that if we have
16917 been expanding the earlier code, the symbol may be
16918 defined in what appears to be an earlier frag. FIXME:
16919 This doesn't handle the fr_subtype field, which specifies
16920 a maximum number of bytes to skip when doing an
16921 alignment. */
16922 for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
16924 if (f->fr_type == rs_align || f->fr_type == rs_align_code)
16926 if (stretch < 0)
16927 stretch = - ((- stretch)
16928 & ~ ((1 << (int) f->fr_offset) - 1));
16929 else
16930 stretch &= ~ ((1 << (int) f->fr_offset) - 1);
16931 if (stretch == 0)
16932 break;
16935 if (f != NULL)
16936 addr += stretch;
16939 return addr;
16942 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
16943 load. */
16944 static int
16945 relax_adr (fragS *fragp, asection *sec, long stretch)
16946 {
16947 addressT addr;
16948 offsetT val;
16950 /* Assume worst case for symbols not known to be in the same section. */
16951 if (!S_IS_DEFINED (fragp->fr_symbol)
16952 || sec != S_GET_SEGMENT (fragp->fr_symbol))
16953 return 4;
16955 val = relaxed_symbol_addr (fragp, stretch);
16956 addr = fragp->fr_address + fragp->fr_fix;
16957 addr = (addr + 4) & ~3;
16958 /* Force misaligned targets to 32-bit variant. */
16959 if (val & 3)
16960 return 4;
16961 val -= addr;
16962 if (val < 0 || val > 1020)
16963 return 4;
16964 return 2;
16965 }
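/* In other words, the 16-bit form survives only when the target is
   word-aligned and lies 0..1020 bytes after the word-aligned PC value
   computed above ((address + 4) & ~3); an undefined or out-of-section
   symbol always costs 4 bytes.  */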
16967 /* Return the size of a relaxable add/sub immediate instruction. */
16968 static int
16969 relax_addsub (fragS *fragp, asection *sec)
16970 {
16971 char *buf;
16972 int op;
16974 buf = fragp->fr_literal + fragp->fr_fix;
16975 op = bfd_get_16(sec->owner, buf);
16976 if ((op & 0xf) == ((op >> 4) & 0xf))
16977 return relax_immediate (fragp, 8, 0);
16978 else
16979 return relax_immediate (fragp, 3, 0);
16980 }
16983 /* Return the size of a relaxable branch instruction. BITS is the
16984 size of the offset field in the narrow instruction. */
16986 static int
16987 relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
16988 {
16989 addressT addr;
16990 offsetT val;
16991 offsetT limit;
16993 /* Assume worst case for symbols not known to be in the same section. */
16994 if (!S_IS_DEFINED (fragp->fr_symbol)
16995 || sec != S_GET_SEGMENT (fragp->fr_symbol))
16996 return 4;
16998 val = relaxed_symbol_addr (fragp, stretch);
16999 addr = fragp->fr_address + fragp->fr_fix + 4;
17000 val -= addr;
17002 /* Offset is a signed value *2 */
17003 limit = 1 << bits;
17004 if (val >= limit || val < -limit)
17005 return 4;
17006 return 2;
17007 }
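/* With bits = 11 (unconditional b, see arm_relax_frag) the narrow form
   reaches roughly +/-2 KB from the PC; with bits = 8 (conditional branches)
   the reach is roughly +/-256 bytes.  Anything further away, or a target in
   another section, needs the 32-bit encoding.  */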
17010 /* Relax a machine dependent frag. This returns the amount by which
17011 the current size of the frag should change. */
17013 int
17014 arm_relax_frag (asection *sec, fragS *fragp, long stretch)
17016 int oldsize;
17017 int newsize;
17019 oldsize = fragp->fr_var;
17020 switch (fragp->fr_subtype)
17022 case T_MNEM_ldr_pc2:
17023 newsize = relax_adr (fragp, sec, stretch);
17024 break;
17025 case T_MNEM_ldr_pc:
17026 case T_MNEM_ldr_sp:
17027 case T_MNEM_str_sp:
17028 newsize = relax_immediate (fragp, 8, 2);
17029 break;
17030 case T_MNEM_ldr:
17031 case T_MNEM_str:
17032 newsize = relax_immediate (fragp, 5, 2);
17033 break;
17034 case T_MNEM_ldrh:
17035 case T_MNEM_strh:
17036 newsize = relax_immediate (fragp, 5, 1);
17037 break;
17038 case T_MNEM_ldrb:
17039 case T_MNEM_strb:
17040 newsize = relax_immediate (fragp, 5, 0);
17041 break;
17042 case T_MNEM_adr:
17043 newsize = relax_adr (fragp, sec, stretch);
17044 break;
17045 case T_MNEM_mov:
17046 case T_MNEM_movs:
17047 case T_MNEM_cmp:
17048 case T_MNEM_cmn:
17049 newsize = relax_immediate (fragp, 8, 0);
17050 break;
17051 case T_MNEM_b:
17052 newsize = relax_branch (fragp, sec, 11, stretch);
17053 break;
17054 case T_MNEM_bcond:
17055 newsize = relax_branch (fragp, sec, 8, stretch);
17056 break;
17057 case T_MNEM_add_sp:
17058 case T_MNEM_add_pc:
17059 newsize = relax_immediate (fragp, 8, 2);
17060 break;
17061 case T_MNEM_inc_sp:
17062 case T_MNEM_dec_sp:
17063 newsize = relax_immediate (fragp, 7, 2);
17064 break;
17065 case T_MNEM_addi:
17066 case T_MNEM_addis:
17067 case T_MNEM_subi:
17068 case T_MNEM_subis:
17069 newsize = relax_addsub (fragp, sec);
17070 break;
17071 default:
17072 abort ();
17075 fragp->fr_var = newsize;
17076 /* Freeze wide instructions that are at or before the same location as
17077 in the previous pass. This avoids infinite loops.
17078 Don't freeze them unconditionally because targets may be artificially
17079 misaligned by the expansion of preceding frags. */
17080 if (stretch <= 0 && newsize > 2)
17082 md_convert_frag (sec->owner, sec, fragp);
17083 frag_wane (fragp);
17086 return newsize - oldsize;
17089 /* Round up a section size to the appropriate boundary. */
17091 valueT
17092 md_section_align (segT segment ATTRIBUTE_UNUSED,
17093 valueT size)
17095 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
17096 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
17098 /* For a.out, force the section size to be aligned. If we don't do
17099 this, BFD will align it for us, but it will not write out the
17100 final bytes of the section. This may be a bug in BFD, but it is
17101 easier to fix it here since that is how the other a.out targets
17102 work. */
17103 int align;
17105 align = bfd_get_section_alignment (stdoutput, segment);
17106 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
17108 #endif
17110 return size;
17113 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
17114 of an rs_align_code fragment. */
17116 void
17117 arm_handle_align (fragS * fragP)
17119 static char const arm_noop[4] = { 0x00, 0x00, 0xa0, 0xe1 };
17120 static char const thumb_noop[2] = { 0xc0, 0x46 };
17121 static char const arm_bigend_noop[4] = { 0xe1, 0xa0, 0x00, 0x00 };
17122 static char const thumb_bigend_noop[2] = { 0x46, 0xc0 };
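/* Stored byte-by-byte: the ARM no-op is 0xe1a00000 (mov r0, r0) and the
   Thumb no-op is 0x46c0 (mov r8, r8), each given in both little- and
   big-endian byte order. */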
17124 int bytes, fix, noop_size;
17125 char * p;
17126 const char * noop;
17128 if (fragP->fr_type != rs_align_code)
17129 return;
17131 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
17132 p = fragP->fr_literal + fragP->fr_fix;
17133 fix = 0;
17135 if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
17136 bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
17138 if (fragP->tc_frag_data)
17140 if (target_big_endian)
17141 noop = thumb_bigend_noop;
17142 else
17143 noop = thumb_noop;
17144 noop_size = sizeof (thumb_noop);
17146 else
17148 if (target_big_endian)
17149 noop = arm_bigend_noop;
17150 else
17151 noop = arm_noop;
17152 noop_size = sizeof (arm_noop);
17155 if (bytes & (noop_size - 1))
17157 fix = bytes & (noop_size - 1);
17158 memset (p, 0, fix);
17159 p += fix;
17160 bytes -= fix;
17163 while (bytes >= noop_size)
17165 memcpy (p, noop, noop_size);
17166 p += noop_size;
17167 bytes -= noop_size;
17168 fix += noop_size;
17171 fragP->fr_fix += fix;
17172 fragP->fr_var = noop_size;
17175 /* Called from md_do_align. Used to create an alignment
17176 frag in a code section. */
17178 void
17179 arm_frag_align_code (int n, int max)
17181 char * p;
17183 /* We assume that there will never be a requirement
17184 to support alignments greater than 32 bytes. */
17185 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
17186 as_fatal (_("alignments greater than 32 bytes not supported in .text sections."));
17188 p = frag_var (rs_align_code,
17189 MAX_MEM_FOR_RS_ALIGN_CODE,
17190 1,
17191 (relax_substateT) max,
17192 (symbolS *) NULL,
17193 (offsetT) n,
17194 (char *) NULL);
17195 *p = 0;
17198 /* Perform target specific initialisation of a frag. */
17200 void
17201 arm_init_frag (fragS * fragP)
17203 /* Record whether this frag is in an ARM or a THUMB area. */
17204 fragP->tc_frag_data = thumb_mode;
17207 #ifdef OBJ_ELF
17208 /* When we change sections we need to issue a new mapping symbol. */
17210 void
17211 arm_elf_change_section (void)
17213 flagword flags;
17214 segment_info_type *seginfo;
17216 /* Link an unlinked unwind index table section to the .text section. */
17217 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
17218 && elf_linked_to_section (now_seg) == NULL)
17219 elf_linked_to_section (now_seg) = text_section;
17221 if (!SEG_NORMAL (now_seg))
17222 return;
17224 flags = bfd_get_section_flags (stdoutput, now_seg);
17226 /* We can ignore sections that only contain debug info. */
17227 if ((flags & SEC_ALLOC) == 0)
17228 return;
17230 seginfo = seg_info (now_seg);
17231 mapstate = seginfo->tc_segment_info_data.mapstate;
17232 marked_pr_dependency = seginfo->tc_segment_info_data.marked_pr_dependency;
17236 arm_elf_section_type (const char * str, size_t len)
17238 if (len == 5 && strncmp (str, "exidx", 5) == 0)
17239 return SHT_ARM_EXIDX;
17241 return -1;
17244 /* Code to deal with unwinding tables. */
17246 static void add_unwind_adjustsp (offsetT);
17248 /* Generate any deferred unwind frame offset. */
17250 static void
17251 flush_pending_unwind (void)
17253 offsetT offset;
17255 offset = unwind.pending_offset;
17256 unwind.pending_offset = 0;
17257 if (offset != 0)
17258 add_unwind_adjustsp (offset);
17261 /* Add an opcode to this list for this function. Two-byte opcodes should
17262 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
17263 order. */
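/* Building the list in reverse lets create_unwind_entry pack the opcodes
   most-significant-byte first simply by walking the array backwards. */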
17265 static void
17266 add_unwind_opcode (valueT op, int length)
17268 /* Add any deferred stack adjustment. */
17269 if (unwind.pending_offset)
17270 flush_pending_unwind ();
17272 unwind.sp_restored = 0;
17274 if (unwind.opcode_count + length > unwind.opcode_alloc)
17276 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
17277 if (unwind.opcodes)
17278 unwind.opcodes = xrealloc (unwind.opcodes,
17279 unwind.opcode_alloc);
17280 else
17281 unwind.opcodes = xmalloc (unwind.opcode_alloc);
17283 while (length > 0)
17285 length--;
17286 unwind.opcodes[unwind.opcode_count] = op & 0xff;
17287 op >>= 8;
17288 unwind.opcode_count++;
17292 /* Add unwind opcodes to adjust the stack pointer. */
17294 static void
17295 add_unwind_adjustsp (offsetT offset)
17297 valueT op;
17299 if (offset > 0x200)
17301 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
17302 char bytes[5];
17303 int n;
17304 valueT o;
17306 /* Long form: 0xb2, uleb128. */
17307 /* This might not fit in a word so add the individual bytes,
17308 remembering the list is built in reverse order. */
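/* For example, an adjustment of 0x208 gives o = (0x208 - 0x204) >> 2 = 1,
   whose uleb128 encoding is the single byte 0x01; after the reversal the
   emitted stream is 0xb2 0x01, meaning vsp += 0x204 + (1 << 2) = 0x208. */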
17309 o = (valueT) ((offset - 0x204) >> 2);
17310 if (o == 0)
17311 add_unwind_opcode (0, 1);
17313 /* Calculate the uleb128 encoding of the offset. */
17314 n = 0;
17315 while (o)
17317 bytes[n] = o & 0x7f;
17318 o >>= 7;
17319 if (o)
17320 bytes[n] |= 0x80;
17321 n++;
17323 /* Add the insn. */
17324 for (; n; n--)
17325 add_unwind_opcode (bytes[n - 1], 1);
17326 add_unwind_opcode (0xb2, 1);
17328 else if (offset > 0x100)
17330 /* Two short opcodes. */
17331 add_unwind_opcode (0x3f, 1);
17332 op = (offset - 0x104) >> 2;
17333 add_unwind_opcode (op, 1);
17335 else if (offset > 0)
17337 /* Short opcode. */
17338 op = (offset - 4) >> 2;
17339 add_unwind_opcode (op, 1);
17341 else if (offset < 0)
17343 offset = -offset;
17344 while (offset > 0x100)
17346 add_unwind_opcode (0x7f, 1);
17347 offset -= 0x100;
17349 op = ((offset - 4) >> 2) | 0x40;
17350 add_unwind_opcode (op, 1);
17354 /* Finish the list of unwind opcodes for this function. */
17355 static void
17356 finish_unwind_opcodes (void)
17358 valueT op;
17360 if (unwind.fp_used)
17362 /* Adjust sp as necessary. */
17363 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
17364 flush_pending_unwind ();
17366 /* Emit the opcode that restores sp from the frame pointer register. */
17367 op = 0x90 | unwind.fp_reg;
17368 add_unwind_opcode (op, 1);
17370 else
17371 flush_pending_unwind ();
17375 /* Start an exception table entry. If idx is nonzero this is an index table
17376 entry. */
17378 static void
17379 start_unwind_section (const segT text_seg, int idx)
17381 const char * text_name;
17382 const char * prefix;
17383 const char * prefix_once;
17384 const char * group_name;
17385 size_t prefix_len;
17386 size_t text_len;
17387 char * sec_name;
17388 size_t sec_name_len;
17389 int type;
17390 int flags;
17391 int linkonce;
17393 if (idx)
17395 prefix = ELF_STRING_ARM_unwind;
17396 prefix_once = ELF_STRING_ARM_unwind_once;
17397 type = SHT_ARM_EXIDX;
17399 else
17401 prefix = ELF_STRING_ARM_unwind_info;
17402 prefix_once = ELF_STRING_ARM_unwind_info_once;
17403 type = SHT_PROGBITS;
17406 text_name = segment_name (text_seg);
17407 if (streq (text_name, ".text"))
17408 text_name = "";
17410 if (strncmp (text_name, ".gnu.linkonce.t.",
17411 strlen (".gnu.linkonce.t.")) == 0)
17413 prefix = prefix_once;
17414 text_name += strlen (".gnu.linkonce.t.");
17417 prefix_len = strlen (prefix);
17418 text_len = strlen (text_name);
17419 sec_name_len = prefix_len + text_len;
17420 sec_name = xmalloc (sec_name_len + 1);
17421 memcpy (sec_name, prefix, prefix_len);
17422 memcpy (sec_name + prefix_len, text_name, text_len);
17423 sec_name[prefix_len + text_len] = '\0';
17425 flags = SHF_ALLOC;
17426 linkonce = 0;
17427 group_name = 0;
17429 /* Handle COMDAT group. */
17430 if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
17432 group_name = elf_group_name (text_seg);
17433 if (group_name == NULL)
17435 as_bad (_("Group section `%s' has no group signature"),
17436 segment_name (text_seg));
17437 ignore_rest_of_line ();
17438 return;
17440 flags |= SHF_GROUP;
17441 linkonce = 1;
17444 obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);
17446 /* Set the section link for index tables. */
17447 if (idx)
17448 elf_linked_to_section (now_seg) = text_seg;
17452 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
17453 personality routine data. Returns zero, or the index table value for
17454 an inline entry. */
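/* For personality routine 0 with at most three opcodes and no extra data,
   the whole entry is returned inline: 0x80 in the top byte, the opcodes
   packed below it, and any spare bytes filled with the "finish" opcode
   0xb0. */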
17456 static valueT
17457 create_unwind_entry (int have_data)
17459 int size;
17460 addressT where;
17461 char *ptr;
17462 /* The current word of data. */
17463 valueT data;
17464 /* The number of bytes left in this word. */
17465 int n;
17467 finish_unwind_opcodes ();
17469 /* Remember the current text section. */
17470 unwind.saved_seg = now_seg;
17471 unwind.saved_subseg = now_subseg;
17473 start_unwind_section (now_seg, 0);
17475 if (unwind.personality_routine == NULL)
17477 if (unwind.personality_index == -2)
17479 if (have_data)
17480 as_bad (_("handlerdata in cantunwind frame"));
17481 return 1; /* EXIDX_CANTUNWIND. */
17484 /* Use a default personality routine if none is specified. */
17485 if (unwind.personality_index == -1)
17487 if (unwind.opcode_count > 3)
17488 unwind.personality_index = 1;
17489 else
17490 unwind.personality_index = 0;
17493 /* Space for the personality routine entry. */
17494 if (unwind.personality_index == 0)
17496 if (unwind.opcode_count > 3)
17497 as_bad (_("too many unwind opcodes for personality routine 0"));
17499 if (!have_data)
17501 /* All the data is inline in the index table. */
17502 data = 0x80;
17503 n = 3;
17504 while (unwind.opcode_count > 0)
17506 unwind.opcode_count--;
17507 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
17508 n--;
17511 /* Pad with "finish" opcodes. */
17512 while (n--)
17513 data = (data << 8) | 0xb0;
17515 return data;
17517 size = 0;
17519 else
17520 /* We get two opcodes "free" in the first word. */
17521 size = unwind.opcode_count - 2;
17523 else
17524 /* An extra byte is required for the opcode count. */
17525 size = unwind.opcode_count + 1;
17527 size = (size + 3) >> 2;
17528 if (size > 0xff)
17529 as_bad (_("too many unwind opcodes"));
17531 frag_align (2, 0, 0);
17532 record_alignment (now_seg, 2);
17533 unwind.table_entry = expr_build_dot ();
17535 /* Allocate the table entry. */
17536 ptr = frag_more ((size << 2) + 4);
17537 where = frag_now_fix () - ((size << 2) + 4);
17539 switch (unwind.personality_index)
17541 case -1:
17542 /* ??? Should this be a PLT generating relocation? */
17543 /* Custom personality routine. */
17544 fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
17545 BFD_RELOC_ARM_PREL31);
17547 where += 4;
17548 ptr += 4;
17550 /* Set the first byte to the number of additional words. */
17551 data = size - 1;
17552 n = 3;
17553 break;
17555 /* ABI defined personality routines. */
17556 case 0:
17557 /* Three opcode bytes are packed into the first word. */
17558 data = 0x80;
17559 n = 3;
17560 break;
17562 case 1:
17563 case 2:
17564 /* The size and first two opcode bytes go in the first word. */
17565 data = ((0x80 + unwind.personality_index) << 8) | size;
17566 n = 2;
17567 break;
17569 default:
17570 /* Should never happen. */
17571 abort ();
17574 /* Pack the opcodes into words (MSB first), reversing the list at the same
17575 time. */
17576 while (unwind.opcode_count > 0)
17578 if (n == 0)
17580 md_number_to_chars (ptr, data, 4);
17581 ptr += 4;
17582 n = 4;
17583 data = 0;
17585 unwind.opcode_count--;
17586 n--;
17587 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
17590 /* Finish off the last word. */
17591 if (n < 4)
17593 /* Pad with "finish" opcodes. */
17594 while (n--)
17595 data = (data << 8) | 0xb0;
17597 md_number_to_chars (ptr, data, 4);
17600 if (!have_data)
17602 /* Add an empty descriptor if there is no user-specified data. */
17603 ptr = frag_more (4);
17604 md_number_to_chars (ptr, 0, 4);
17607 return 0;
17611 /* Initialize the DWARF-2 unwind information for this procedure. */
17613 void
17614 tc_arm_frame_initial_instructions (void)
17616 cfi_add_CFA_def_cfa (REG_SP, 0);
17618 #endif /* OBJ_ELF */
17620 /* Convert REGNAME to a DWARF-2 register number. */
17623 tc_arm_regname_to_dw2regnum (char *regname)
17625 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
17627 if (reg == FAIL)
17628 return -1;
17630 return reg;
17633 #ifdef TE_PE
17634 void
17635 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
17637 expressionS expr;
17639 expr.X_op = O_secrel;
17640 expr.X_add_symbol = symbol;
17641 expr.X_add_number = 0;
17642 emit_expr (&expr, size);
17644 #endif
17646 /* MD interface: Symbol and relocation handling. */
17648 /* Return the address within the segment that a PC-relative fixup is
17649 relative to. For ARM, PC-relative fixups applied to instructions
17650 are generally relative to the location of the fixup plus 8 bytes.
17651 Thumb branches are offset by 4, and Thumb loads relative to PC
17652 require special handling. */
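/* For example, a Thumb B instruction whose fixup lies at address A is
   encoded relative to A + 4, while an ARM B at the same spot is relative
   to A + 8; the switch below selects the appropriate bias for each
   relocation type. */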
17654 long
17655 md_pcrel_from_section (fixS * fixP, segT seg)
17657 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
17659 /* If this is pc-relative and we are going to emit a relocation
17660 then we just want to put out any pipeline compensation that the linker
17661 will need. Otherwise we want to use the calculated base.
17662 For WinCE we skip the bias for externals as well, since this
17663 is how the MS ARM-CE assembler behaves and we want to be compatible. */
17664 if (fixP->fx_pcrel
17665 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
17666 || (arm_force_relocation (fixP)
17667 #ifdef TE_WINCE
17668 && !S_IS_EXTERNAL (fixP->fx_addsy)
17669 #endif
17671 base = 0;
17673 switch (fixP->fx_r_type)
17675 /* PC relative addressing on the Thumb is slightly odd as the
17676 bottom two bits of the PC are forced to zero for the
17677 calculation. This happens *after* application of the
17678 pipeline offset. However, Thumb adrl already adjusts for
17679 this, so we need not do it again. */
17680 case BFD_RELOC_ARM_THUMB_ADD:
17681 return base & ~3;
17683 case BFD_RELOC_ARM_THUMB_OFFSET:
17684 case BFD_RELOC_ARM_T32_OFFSET_IMM:
17685 case BFD_RELOC_ARM_T32_ADD_PC12:
17686 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
17687 return (base + 4) & ~3;
17689 /* Thumb branches are simply offset by +4. */
17690 case BFD_RELOC_THUMB_PCREL_BRANCH7:
17691 case BFD_RELOC_THUMB_PCREL_BRANCH9:
17692 case BFD_RELOC_THUMB_PCREL_BRANCH12:
17693 case BFD_RELOC_THUMB_PCREL_BRANCH20:
17694 case BFD_RELOC_THUMB_PCREL_BRANCH23:
17695 case BFD_RELOC_THUMB_PCREL_BRANCH25:
17696 case BFD_RELOC_THUMB_PCREL_BLX:
17697 return base + 4;
17699 /* ARM mode branches are offset by +8. However, the Windows CE
17700 loader expects the relocation not to take this into account. */
17701 case BFD_RELOC_ARM_PCREL_BRANCH:
17702 case BFD_RELOC_ARM_PCREL_CALL:
17703 case BFD_RELOC_ARM_PCREL_JUMP:
17704 case BFD_RELOC_ARM_PCREL_BLX:
17705 case BFD_RELOC_ARM_PLT32:
17706 #ifdef TE_WINCE
17707 /* When a fixup is handled immediately (because we have already
17708 discovered the value of the symbol, or the address of the frag involved),
17709 we must account for the +8 offset ourselves, as the OS loader will never see the reloc;
17710 see fixup_segment() in write.c.
17711 The S_IS_EXTERNAL test handles the case of global symbols:
17712 those need the calculated base, not just the pipeline compensation the linker will apply. */
17713 if (fixP->fx_pcrel
17714 && fixP->fx_addsy != NULL
17715 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
17716 && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
17717 return base + 8;
17718 return base;
17719 #else
17720 return base + 8;
17721 #endif
17723 /* ARM mode loads relative to PC are also offset by +8. Unlike
17724 branches, the Windows CE loader *does* expect the relocation
17725 to take this into account. */
17726 case BFD_RELOC_ARM_OFFSET_IMM:
17727 case BFD_RELOC_ARM_OFFSET_IMM8:
17728 case BFD_RELOC_ARM_HWLITERAL:
17729 case BFD_RELOC_ARM_LITERAL:
17730 case BFD_RELOC_ARM_CP_OFF_IMM:
17731 return base + 8;
17734 /* Other PC-relative relocations are un-offset. */
17735 default:
17736 return base;
17740 /* Under ELF we need to provide a default value for the _GLOBAL_OFFSET_TABLE_ symbol.
17741 Otherwise we have no need to default the values of symbols. */
17743 symbolS *
17744 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
17746 #ifdef OBJ_ELF
17747 if (name[0] == '_' && name[1] == 'G'
17748 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
17750 if (!GOT_symbol)
17752 if (symbol_find (name))
17753 as_bad (_("GOT already in the symbol table"));
17755 GOT_symbol = symbol_new (name, undefined_section,
17756 (valueT) 0, & zero_address_frag);
17759 return GOT_symbol;
17761 #endif
17763 return 0;
17766 /* Subroutine of md_apply_fix. Check to see if an immediate can be
17767 computed as two separate immediate values, added together. We
17768 already know that this value cannot be computed by just one ARM
17769 instruction. */
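/* For example, 0x10001 is not a valid single rotated 8-bit immediate, but
   it splits into 0x00000001 plus 0x00010000, each of which is; the low
   part is returned and the high part is stored through HIGHPART. */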
17771 static unsigned int
17772 validate_immediate_twopart (unsigned int val,
17773 unsigned int * highpart)
17775 unsigned int a;
17776 unsigned int i;
17778 for (i = 0; i < 32; i += 2)
17779 if (((a = rotate_left (val, i)) & 0xff) != 0)
17781 if (a & 0xff00)
17783 if (a & ~ 0xffff)
17784 continue;
17785 * highpart = (a >> 8) | ((i + 24) << 7);
17787 else if (a & 0xff0000)
17789 if (a & 0xff000000)
17790 continue;
17791 * highpart = (a >> 16) | ((i + 16) << 7);
17793 else
17795 assert (a & 0xff000000);
17796 * highpart = (a >> 24) | ((i + 8) << 7);
17799 return (a & 0xff) | (i << 7);
17802 return FAIL;
17805 static int
17806 validate_offset_imm (unsigned int val, int hwse)
17808 if ((hwse && val > 255) || val > 4095)
17809 return FAIL;
17810 return val;
17813 /* Subroutine of md_apply_fix. Do those data_ops which can take a
17814 negative immediate constant by altering the instruction. A bit of
17815 a hack really.
17816 MOV <-> MVN
17817 AND <-> BIC
17818 ADC <-> SBC
17819 by inverting the second operand, and
17820 ADD <-> SUB
17821 CMP <-> CMN
17822 by negating the second operand. */
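/* So, for instance, an "add r0, r1, #-4" whose constant only becomes known
   at fixup time is rewritten here as "sub r0, r1, #4". */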
17824 static int
17825 negate_data_op (unsigned long * instruction,
17826 unsigned long value)
17828 int op, new_inst;
17829 unsigned long negated, inverted;
17831 negated = encode_arm_immediate (-value);
17832 inverted = encode_arm_immediate (~value);
17834 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
17835 switch (op)
17837 /* First negates. */
17838 case OPCODE_SUB: /* ADD <-> SUB */
17839 new_inst = OPCODE_ADD;
17840 value = negated;
17841 break;
17843 case OPCODE_ADD:
17844 new_inst = OPCODE_SUB;
17845 value = negated;
17846 break;
17848 case OPCODE_CMP: /* CMP <-> CMN */
17849 new_inst = OPCODE_CMN;
17850 value = negated;
17851 break;
17853 case OPCODE_CMN:
17854 new_inst = OPCODE_CMP;
17855 value = negated;
17856 break;
17858 /* Now Inverted ops. */
17859 case OPCODE_MOV: /* MOV <-> MVN */
17860 new_inst = OPCODE_MVN;
17861 value = inverted;
17862 break;
17864 case OPCODE_MVN:
17865 new_inst = OPCODE_MOV;
17866 value = inverted;
17867 break;
17869 case OPCODE_AND: /* AND <-> BIC */
17870 new_inst = OPCODE_BIC;
17871 value = inverted;
17872 break;
17874 case OPCODE_BIC:
17875 new_inst = OPCODE_AND;
17876 value = inverted;
17877 break;
17879 case OPCODE_ADC: /* ADC <-> SBC */
17880 new_inst = OPCODE_SBC;
17881 value = inverted;
17882 break;
17884 case OPCODE_SBC:
17885 new_inst = OPCODE_ADC;
17886 value = inverted;
17887 break;
17889 /* We cannot do anything. */
17890 default:
17891 return FAIL;
17894 if (value == (unsigned) FAIL)
17895 return FAIL;
17897 *instruction &= OPCODE_MASK;
17898 *instruction |= new_inst << DATA_OP_SHIFT;
17899 return value;
17902 /* Like negate_data_op, but for Thumb-2. */
17904 static unsigned int
17905 thumb32_negate_data_op (offsetT *instruction, unsigned int value)
17907 int op, new_inst;
17908 int rd;
17909 unsigned int negated, inverted;
17911 negated = encode_thumb32_immediate (-value);
17912 inverted = encode_thumb32_immediate (~value);
17914 rd = (*instruction >> 8) & 0xf;
17915 op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
17916 switch (op)
17918 /* ADD <-> SUB. Includes CMP <-> CMN. */
17919 case T2_OPCODE_SUB:
17920 new_inst = T2_OPCODE_ADD;
17921 value = negated;
17922 break;
17924 case T2_OPCODE_ADD:
17925 new_inst = T2_OPCODE_SUB;
17926 value = negated;
17927 break;
17929 /* ORR <-> ORN. Includes MOV <-> MVN. */
17930 case T2_OPCODE_ORR:
17931 new_inst = T2_OPCODE_ORN;
17932 value = inverted;
17933 break;
17935 case T2_OPCODE_ORN:
17936 new_inst = T2_OPCODE_ORR;
17937 value = inverted;
17938 break;
17940 /* AND <-> BIC. TST has no inverted equivalent. */
17941 case T2_OPCODE_AND:
17942 new_inst = T2_OPCODE_BIC;
17943 if (rd == 15)
17944 value = FAIL;
17945 else
17946 value = inverted;
17947 break;
17949 case T2_OPCODE_BIC:
17950 new_inst = T2_OPCODE_AND;
17951 value = inverted;
17952 break;
17954 /* ADC <-> SBC */
17955 case T2_OPCODE_ADC:
17956 new_inst = T2_OPCODE_SBC;
17957 value = inverted;
17958 break;
17960 case T2_OPCODE_SBC:
17961 new_inst = T2_OPCODE_ADC;
17962 value = inverted;
17963 break;
17965 /* We cannot do anything. */
17966 default:
17967 return FAIL;
17970 if (value == (unsigned int)FAIL)
17971 return FAIL;
17973 *instruction &= T2_OPCODE_MASK;
17974 *instruction |= new_inst << T2_DATA_OP_SHIFT;
17975 return value;
17978 /* Read a 32-bit thumb instruction from buf. */
17979 static unsigned long
17980 get_thumb32_insn (char * buf)
17982 unsigned long insn;
17983 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
17984 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
17986 return insn;
17990 /* We usually want to set the low bit on the address of thumb function
17991 symbols. In particular .word foo - . should have the low bit set.
17992 Generic code tries to fold the difference of two symbols to
17993 a constant. Prevent this and force a relocation when the first symbol
17994 is a Thumb function. */
17996 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
17998 if (op == O_subtract
17999 && l->X_op == O_symbol
18000 && r->X_op == O_symbol
18001 && THUMB_IS_FUNC (l->X_add_symbol))
18003 l->X_op = O_subtract;
18004 l->X_op_symbol = r->X_add_symbol;
18005 l->X_add_number -= r->X_add_number;
18006 return 1;
18008 /* Process as normal. */
18009 return 0;
18012 void
18013 md_apply_fix (fixS * fixP,
18014 valueT * valP,
18015 segT seg)
18017 offsetT value = * valP;
18018 offsetT newval;
18019 unsigned int newimm;
18020 unsigned long temp;
18021 int sign;
18022 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
18024 assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
18026 /* Note whether this will delete the relocation. */
18028 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
18029 fixP->fx_done = 1;
18031 /* On a 64-bit host, silently truncate 'value' to 32 bits for
18032 consistency with the behaviour on 32-bit hosts. Remember value
18033 for emit_reloc. */
18034 value &= 0xffffffff;
18035 value ^= 0x80000000;
18036 value -= 0x80000000;
18038 *valP = value;
18039 fixP->fx_addnumber = value;
18041 /* Same treatment for fixP->fx_offset. */
18042 fixP->fx_offset &= 0xffffffff;
18043 fixP->fx_offset ^= 0x80000000;
18044 fixP->fx_offset -= 0x80000000;
18046 switch (fixP->fx_r_type)
18048 case BFD_RELOC_NONE:
18049 /* This will need to go in the object file. */
18050 fixP->fx_done = 0;
18051 break;
18053 case BFD_RELOC_ARM_IMMEDIATE:
18054 /* We claim that this fixup has been processed here,
18055 even if in fact we generate an error because we do
18056 not have a reloc for it, so tc_gen_reloc will reject it. */
18057 fixP->fx_done = 1;
18059 if (fixP->fx_addsy
18060 && ! S_IS_DEFINED (fixP->fx_addsy))
18062 as_bad_where (fixP->fx_file, fixP->fx_line,
18063 _("undefined symbol %s used as an immediate value"),
18064 S_GET_NAME (fixP->fx_addsy));
18065 break;
18068 newimm = encode_arm_immediate (value);
18069 temp = md_chars_to_number (buf, INSN_SIZE);
18071 /* If the instruction will fail, see if we can fix things up by
18072 changing the opcode. */
18073 if (newimm == (unsigned int) FAIL
18074 && (newimm = negate_data_op (&temp, value)) == (unsigned int) FAIL)
18076 as_bad_where (fixP->fx_file, fixP->fx_line,
18077 _("invalid constant (%lx) after fixup"),
18078 (unsigned long) value);
18079 break;
18082 newimm |= (temp & 0xfffff000);
18083 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
18084 break;
18086 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
18088 unsigned int highpart = 0;
18089 unsigned int newinsn = 0xe1a00000; /* nop. */
18091 newimm = encode_arm_immediate (value);
18092 temp = md_chars_to_number (buf, INSN_SIZE);
18094 /* If the instruction will fail, see if we can fix things up by
18095 changing the opcode. */
18096 if (newimm == (unsigned int) FAIL
18097 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
18099 /* No ? OK - try using two ADD instructions to generate
18100 the value. */
18101 newimm = validate_immediate_twopart (value, & highpart);
18103 /* Yes - then make sure that the second instruction is
18104 also an add. */
18105 if (newimm != (unsigned int) FAIL)
18106 newinsn = temp;
18107 /* Still No ? Try using a negated value. */
18108 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
18109 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
18110 /* Otherwise - give up. */
18111 else
18113 as_bad_where (fixP->fx_file, fixP->fx_line,
18114 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
18115 (long) value);
18116 break;
18119 /* Replace the first operand in the 2nd instruction (which
18120 is the PC) with the destination register. We have
18121 already added in the PC in the first instruction and we
18122 do not want to do it again. */
18123 newinsn &= ~ 0xf0000;
18124 newinsn |= ((newinsn & 0x0f000) << 4);
18127 newimm |= (temp & 0xfffff000);
18128 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
18130 highpart |= (newinsn & 0xfffff000);
18131 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
18133 break;
18135 case BFD_RELOC_ARM_OFFSET_IMM:
18136 if (!fixP->fx_done && seg->use_rela_p)
18137 value = 0;
18139 case BFD_RELOC_ARM_LITERAL:
18140 sign = value >= 0;
18142 if (value < 0)
18143 value = - value;
18145 if (validate_offset_imm (value, 0) == FAIL)
18147 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
18148 as_bad_where (fixP->fx_file, fixP->fx_line,
18149 _("invalid literal constant: pool needs to be closer"));
18150 else
18151 as_bad_where (fixP->fx_file, fixP->fx_line,
18152 _("bad immediate value for offset (%ld)"),
18153 (long) value);
18154 break;
18157 newval = md_chars_to_number (buf, INSN_SIZE);
18158 newval &= 0xff7ff000;
18159 newval |= value | (sign ? INDEX_UP : 0);
18160 md_number_to_chars (buf, newval, INSN_SIZE);
18161 break;
18163 case BFD_RELOC_ARM_OFFSET_IMM8:
18164 case BFD_RELOC_ARM_HWLITERAL:
18165 sign = value >= 0;
18167 if (value < 0)
18168 value = - value;
18170 if (validate_offset_imm (value, 1) == FAIL)
18172 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
18173 as_bad_where (fixP->fx_file, fixP->fx_line,
18174 _("invalid literal constant: pool needs to be closer"));
18175 else
18176 as_bad (_("bad immediate value for 8-bit offset (%ld)"),
18177 (long) value);
18178 break;
18181 newval = md_chars_to_number (buf, INSN_SIZE);
18182 newval &= 0xff7ff0f0;
18183 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
18184 md_number_to_chars (buf, newval, INSN_SIZE);
18185 break;
18187 case BFD_RELOC_ARM_T32_OFFSET_U8:
18188 if (value < 0 || value > 1020 || value % 4 != 0)
18189 as_bad_where (fixP->fx_file, fixP->fx_line,
18190 _("bad immediate value for offset (%ld)"), (long) value);
18191 value /= 4;
18193 newval = md_chars_to_number (buf+2, THUMB_SIZE);
18194 newval |= value;
18195 md_number_to_chars (buf+2, newval, THUMB_SIZE);
18196 break;
18198 case BFD_RELOC_ARM_T32_OFFSET_IMM:
18199 /* This is a complicated relocation used for all varieties of Thumb32
18200 load/store instruction with immediate offset:
18202 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
18203 *4, optional writeback(W)
18204 (doubleword load/store)
18206 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
18207 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
18208 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
18209 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
18210 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
18212 Uppercase letters indicate bits that are already encoded at
18213 this point. Lowercase letters are our problem. For the
18214 second block of instructions, the secondary opcode nybble
18215 (bits 8..11) is present, and bit 23 is zero, even if this is
18216 a PC-relative operation. */
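/* The two halfwords are combined into one 32-bit value below so that the
   whole encoding can be examined at once; it is split back into halfwords
   when it is written out at the end of this case. */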
18217 newval = md_chars_to_number (buf, THUMB_SIZE);
18218 newval <<= 16;
18219 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
18221 if ((newval & 0xf0000000) == 0xe0000000)
18223 /* Doubleword load/store: 8-bit offset, scaled by 4. */
18224 if (value >= 0)
18225 newval |= (1 << 23);
18226 else
18227 value = -value;
18228 if (value % 4 != 0)
18230 as_bad_where (fixP->fx_file, fixP->fx_line,
18231 _("offset not a multiple of 4"));
18232 break;
18234 value /= 4;
18235 if (value > 0xff)
18237 as_bad_where (fixP->fx_file, fixP->fx_line,
18238 _("offset out of range"));
18239 break;
18241 newval &= ~0xff;
18243 else if ((newval & 0x000f0000) == 0x000f0000)
18245 /* PC-relative, 12-bit offset. */
18246 if (value >= 0)
18247 newval |= (1 << 23);
18248 else
18249 value = -value;
18250 if (value > 0xfff)
18252 as_bad_where (fixP->fx_file, fixP->fx_line,
18253 _("offset out of range"));
18254 break;
18256 newval &= ~0xfff;
18258 else if ((newval & 0x00000100) == 0x00000100)
18260 /* Writeback: 8-bit, +/- offset. */
18261 if (value >= 0)
18262 newval |= (1 << 9);
18263 else
18264 value = -value;
18265 if (value > 0xff)
18267 as_bad_where (fixP->fx_file, fixP->fx_line,
18268 _("offset out of range"));
18269 break;
18271 newval &= ~0xff;
18273 else if ((newval & 0x00000f00) == 0x00000e00)
18275 /* T-instruction: positive 8-bit offset. */
18276 if (value < 0 || value > 0xff)
18278 as_bad_where (fixP->fx_file, fixP->fx_line,
18279 _("offset out of range"));
18280 break;
18282 newval &= ~0xff;
18283 newval |= value;
18285 else
18287 /* Positive 12-bit or negative 8-bit offset. */
18288 int limit;
18289 if (value >= 0)
18291 newval |= (1 << 23);
18292 limit = 0xfff;
18294 else
18296 value = -value;
18297 limit = 0xff;
18299 if (value > limit)
18301 as_bad_where (fixP->fx_file, fixP->fx_line,
18302 _("offset out of range"));
18303 break;
18305 newval &= ~limit;
18308 newval |= value;
18309 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
18310 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
18311 break;
18313 case BFD_RELOC_ARM_SHIFT_IMM:
18314 newval = md_chars_to_number (buf, INSN_SIZE);
18315 if (((unsigned long) value) > 32
18316 || (value == 32
18317 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
18319 as_bad_where (fixP->fx_file, fixP->fx_line,
18320 _("shift expression is too large"));
18321 break;
18324 if (value == 0)
18325 /* Shifts of zero must be done as lsl. */
18326 newval &= ~0x60;
18327 else if (value == 32)
18328 value = 0;
18329 newval &= 0xfffff07f;
18330 newval |= (value & 0x1f) << 7;
18331 md_number_to_chars (buf, newval, INSN_SIZE);
18332 break;
18334 case BFD_RELOC_ARM_T32_IMMEDIATE:
18335 case BFD_RELOC_ARM_T32_ADD_IMM:
18336 case BFD_RELOC_ARM_T32_IMM12:
18337 case BFD_RELOC_ARM_T32_ADD_PC12:
18338 /* We claim that this fixup has been processed here,
18339 even if in fact we generate an error because we do
18340 not have a reloc for it, so tc_gen_reloc will reject it. */
18341 fixP->fx_done = 1;
18343 if (fixP->fx_addsy
18344 && ! S_IS_DEFINED (fixP->fx_addsy))
18346 as_bad_where (fixP->fx_file, fixP->fx_line,
18347 _("undefined symbol %s used as an immediate value"),
18348 S_GET_NAME (fixP->fx_addsy));
18349 break;
18352 newval = md_chars_to_number (buf, THUMB_SIZE);
18353 newval <<= 16;
18354 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
18356 newimm = FAIL;
18357 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
18358 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
18360 newimm = encode_thumb32_immediate (value);
18361 if (newimm == (unsigned int) FAIL)
18362 newimm = thumb32_negate_data_op (&newval, value);
18364 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE
18365 && newimm == (unsigned int) FAIL)
18367 /* Turn add/sub into addw/subw. */
18368 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
18369 newval = (newval & 0xfeffffff) | 0x02000000;
18371 /* 12 bit immediate for addw/subw. */
18372 if (value < 0)
18374 value = -value;
18375 newval ^= 0x00a00000;
18377 if (value > 0xfff)
18378 newimm = (unsigned int) FAIL;
18379 else
18380 newimm = value;
18383 if (newimm == (unsigned int)FAIL)
18385 as_bad_where (fixP->fx_file, fixP->fx_line,
18386 _("invalid constant (%lx) after fixup"),
18387 (unsigned long) value);
18388 break;
18391 newval |= (newimm & 0x800) << 15;
18392 newval |= (newimm & 0x700) << 4;
18393 newval |= (newimm & 0x0ff);
18395 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
18396 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
18397 break;
18399 case BFD_RELOC_ARM_SMC:
18400 if (((unsigned long) value) > 0xffff)
18401 as_bad_where (fixP->fx_file, fixP->fx_line,
18402 _("invalid smc expression"));
18403 newval = md_chars_to_number (buf, INSN_SIZE);
18404 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
18405 md_number_to_chars (buf, newval, INSN_SIZE);
18406 break;
18408 case BFD_RELOC_ARM_SWI:
18409 if (fixP->tc_fix_data != 0)
18411 if (((unsigned long) value) > 0xff)
18412 as_bad_where (fixP->fx_file, fixP->fx_line,
18413 _("invalid swi expression"));
18414 newval = md_chars_to_number (buf, THUMB_SIZE);
18415 newval |= value;
18416 md_number_to_chars (buf, newval, THUMB_SIZE);
18418 else
18420 if (((unsigned long) value) > 0x00ffffff)
18421 as_bad_where (fixP->fx_file, fixP->fx_line,
18422 _("invalid swi expression"));
18423 newval = md_chars_to_number (buf, INSN_SIZE);
18424 newval |= value;
18425 md_number_to_chars (buf, newval, INSN_SIZE);
18427 break;
18429 case BFD_RELOC_ARM_MULTI:
18430 if (((unsigned long) value) > 0xffff)
18431 as_bad_where (fixP->fx_file, fixP->fx_line,
18432 _("invalid expression in load/store multiple"));
18433 newval = value | md_chars_to_number (buf, INSN_SIZE);
18434 md_number_to_chars (buf, newval, INSN_SIZE);
18435 break;
18437 #ifdef OBJ_ELF
18438 case BFD_RELOC_ARM_PCREL_CALL:
18439 newval = md_chars_to_number (buf, INSN_SIZE);
18440 if ((newval & 0xf0000000) == 0xf0000000)
18441 temp = 1;
18442 else
18443 temp = 3;
18444 goto arm_branch_common;
18446 case BFD_RELOC_ARM_PCREL_JUMP:
18447 case BFD_RELOC_ARM_PLT32:
18448 #endif
18449 case BFD_RELOC_ARM_PCREL_BRANCH:
18450 temp = 3;
18451 goto arm_branch_common;
18453 case BFD_RELOC_ARM_PCREL_BLX:
18454 temp = 1;
18455 arm_branch_common:
18456 /* We are going to store the value (shifted right by two) in the
18457 instruction, in a 24-bit signed field. Bits 25 through 31 must be either
18458 all clear or all set, and bit 0 must be clear. For B/BL bit 1 must
18459 also be clear. */
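/* Since the field holds a word offset, the reachable range is roughly
   +/-32 MB from the base address. */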
18460 if (value & temp)
18461 as_bad_where (fixP->fx_file, fixP->fx_line,
18462 _("misaligned branch destination"));
18463 if ((value & (offsetT)0xfe000000) != (offsetT)0
18464 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
18465 as_bad_where (fixP->fx_file, fixP->fx_line,
18466 _("branch out of range"));
18468 if (fixP->fx_done || !seg->use_rela_p)
18470 newval = md_chars_to_number (buf, INSN_SIZE);
18471 newval |= (value >> 2) & 0x00ffffff;
18472 /* Set the H bit on BLX instructions. */
18473 if (temp == 1)
18475 if (value & 2)
18476 newval |= 0x01000000;
18477 else
18478 newval &= ~0x01000000;
18480 md_number_to_chars (buf, newval, INSN_SIZE);
18482 break;
18484 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
18485 /* CBZ can only branch forward. */
18487 /* Attempts to use CBZ to branch to the next instruction
18488 (which, strictly speaking, are prohibited) will be turned into
18489 no-ops.
18491 FIXME: It may be better to remove the instruction completely and
18492 perform relaxation. */
18493 if (value == -2)
18495 newval = md_chars_to_number (buf, THUMB_SIZE);
18496 newval = 0xbf00; /* NOP encoding T1 */
18497 md_number_to_chars (buf, newval, THUMB_SIZE);
18499 else
18501 if (value & ~0x7e)
18502 as_bad_where (fixP->fx_file, fixP->fx_line,
18503 _("branch out of range"));
18505 if (fixP->fx_done || !seg->use_rela_p)
18507 newval = md_chars_to_number (buf, THUMB_SIZE);
18508 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
18509 md_number_to_chars (buf, newval, THUMB_SIZE);
18512 break;
18514 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
18515 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
18516 as_bad_where (fixP->fx_file, fixP->fx_line,
18517 _("branch out of range"));
18519 if (fixP->fx_done || !seg->use_rela_p)
18521 newval = md_chars_to_number (buf, THUMB_SIZE);
18522 newval |= (value & 0x1ff) >> 1;
18523 md_number_to_chars (buf, newval, THUMB_SIZE);
18525 break;
18527 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
18528 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
18529 as_bad_where (fixP->fx_file, fixP->fx_line,
18530 _("branch out of range"));
18532 if (fixP->fx_done || !seg->use_rela_p)
18534 newval = md_chars_to_number (buf, THUMB_SIZE);
18535 newval |= (value & 0xfff) >> 1;
18536 md_number_to_chars (buf, newval, THUMB_SIZE);
18538 break;
18540 case BFD_RELOC_THUMB_PCREL_BRANCH20:
18541 if ((value & ~0x1fffff) && ((value & ~0x1fffff) != ~0x1fffff))
18542 as_bad_where (fixP->fx_file, fixP->fx_line,
18543 _("conditional branch out of range"));
18545 if (fixP->fx_done || !seg->use_rela_p)
18547 offsetT newval2;
18548 addressT S, J1, J2, lo, hi;
18550 S = (value & 0x00100000) >> 20;
18551 J2 = (value & 0x00080000) >> 19;
18552 J1 = (value & 0x00040000) >> 18;
18553 hi = (value & 0x0003f000) >> 12;
18554 lo = (value & 0x00000ffe) >> 1;
18556 newval = md_chars_to_number (buf, THUMB_SIZE);
18557 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
18558 newval |= (S << 10) | hi;
18559 newval2 |= (J1 << 13) | (J2 << 11) | lo;
18560 md_number_to_chars (buf, newval, THUMB_SIZE);
18561 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
18563 break;
18565 case BFD_RELOC_THUMB_PCREL_BLX:
18566 case BFD_RELOC_THUMB_PCREL_BRANCH23:
18567 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
18568 as_bad_where (fixP->fx_file, fixP->fx_line,
18569 _("branch out of range"));
18571 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
18572 /* For a BLX instruction, make sure that the relocation is rounded up
18573 to a word boundary. This follows the semantics of the instruction
18574 which specifies that bit 1 of the target address will come from bit
18575 1 of the base address. */
18576 value = (value + 1) & ~ 1;
18578 if (fixP->fx_done || !seg->use_rela_p)
18580 offsetT newval2;
18582 newval = md_chars_to_number (buf, THUMB_SIZE);
18583 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
18584 newval |= (value & 0x7fffff) >> 12;
18585 newval2 |= (value & 0xfff) >> 1;
18586 md_number_to_chars (buf, newval, THUMB_SIZE);
18587 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
18589 break;
18591 case BFD_RELOC_THUMB_PCREL_BRANCH25:
18592 if ((value & ~0x1ffffff) && ((value & ~0x1ffffff) != ~0x1ffffff))
18593 as_bad_where (fixP->fx_file, fixP->fx_line,
18594 _("branch out of range"));
18596 if (fixP->fx_done || !seg->use_rela_p)
18598 offsetT newval2;
18599 addressT S, I1, I2, lo, hi;
18601 S = (value & 0x01000000) >> 24;
18602 I1 = (value & 0x00800000) >> 23;
18603 I2 = (value & 0x00400000) >> 22;
18604 hi = (value & 0x003ff000) >> 12;
18605 lo = (value & 0x00000ffe) >> 1;
18607 I1 = !(I1 ^ S);
18608 I2 = !(I2 ^ S);
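/* In the wide branch encoding the J1/J2 bits hold NOT(I1 EOR S) and
   NOT(I2 EOR S); the two statements above perform that conversion before
   the bits are merged into the second halfword. */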
18610 newval = md_chars_to_number (buf, THUMB_SIZE);
18611 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
18612 newval |= (S << 10) | hi;
18613 newval2 |= (I1 << 13) | (I2 << 11) | lo;
18614 md_number_to_chars (buf, newval, THUMB_SIZE);
18615 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
18617 break;
18619 case BFD_RELOC_8:
18620 if (fixP->fx_done || !seg->use_rela_p)
18621 md_number_to_chars (buf, value, 1);
18622 break;
18624 case BFD_RELOC_16:
18625 if (fixP->fx_done || !seg->use_rela_p)
18626 md_number_to_chars (buf, value, 2);
18627 break;
18629 #ifdef OBJ_ELF
18630 case BFD_RELOC_ARM_TLS_GD32:
18631 case BFD_RELOC_ARM_TLS_LE32:
18632 case BFD_RELOC_ARM_TLS_IE32:
18633 case BFD_RELOC_ARM_TLS_LDM32:
18634 case BFD_RELOC_ARM_TLS_LDO32:
18635 S_SET_THREAD_LOCAL (fixP->fx_addsy);
18636 /* fall through */
18638 case BFD_RELOC_ARM_GOT32:
18639 case BFD_RELOC_ARM_GOTOFF:
18640 case BFD_RELOC_ARM_TARGET2:
18641 if (fixP->fx_done || !seg->use_rela_p)
18642 md_number_to_chars (buf, 0, 4);
18643 break;
18644 #endif
18646 case BFD_RELOC_RVA:
18647 case BFD_RELOC_32:
18648 case BFD_RELOC_ARM_TARGET1:
18649 case BFD_RELOC_ARM_ROSEGREL32:
18650 case BFD_RELOC_ARM_SBREL32:
18651 case BFD_RELOC_32_PCREL:
18652 #ifdef TE_PE
18653 case BFD_RELOC_32_SECREL:
18654 #endif
18655 if (fixP->fx_done || !seg->use_rela_p)
18656 #ifdef TE_WINCE
18657 /* For WinCE we only do this for pcrel fixups. */
18658 if (fixP->fx_done || fixP->fx_pcrel)
18659 #endif
18660 md_number_to_chars (buf, value, 4);
18661 break;
18663 #ifdef OBJ_ELF
18664 case BFD_RELOC_ARM_PREL31:
18665 if (fixP->fx_done || !seg->use_rela_p)
18667 newval = md_chars_to_number (buf, 4) & 0x80000000;
18668 if ((value ^ (value >> 1)) & 0x40000000)
18670 as_bad_where (fixP->fx_file, fixP->fx_line,
18671 _("rel31 relocation overflow"));
18673 newval |= value & 0x7fffffff;
18674 md_number_to_chars (buf, newval, 4);
18676 break;
18677 #endif
18679 case BFD_RELOC_ARM_CP_OFF_IMM:
18680 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
18681 if (value < -1023 || value > 1023 || (value & 3))
18682 as_bad_where (fixP->fx_file, fixP->fx_line,
18683 _("co-processor offset out of range"));
18684 cp_off_common:
18685 sign = value >= 0;
18686 if (value < 0)
18687 value = -value;
18688 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
18689 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
18690 newval = md_chars_to_number (buf, INSN_SIZE);
18691 else
18692 newval = get_thumb32_insn (buf);
18693 newval &= 0xff7fff00;
18694 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
18695 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
18696 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
18697 md_number_to_chars (buf, newval, INSN_SIZE);
18698 else
18699 put_thumb32_insn (buf, newval);
18700 break;
18702 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
18703 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
18704 if (value < -255 || value > 255)
18705 as_bad_where (fixP->fx_file, fixP->fx_line,
18706 _("co-processor offset out of range"));
18707 value *= 4;
18708 goto cp_off_common;
18710 case BFD_RELOC_ARM_THUMB_OFFSET:
18711 newval = md_chars_to_number (buf, THUMB_SIZE);
18712 /* Exactly what range applies, and where the offset is inserted,
18713 depends on the type of instruction; we can establish this from the
18714 top 4 bits. */
18715 switch (newval >> 12)
18717 case 4: /* PC load. */
18718 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
18719 forced to zero for these loads; md_pcrel_from has already
18720 compensated for this. */
18721 if (value & 3)
18722 as_bad_where (fixP->fx_file, fixP->fx_line,
18723 _("invalid offset, target not word aligned (0x%08lX)"),
18724 (((unsigned long) fixP->fx_frag->fr_address
18725 + (unsigned long) fixP->fx_where) & ~3)
18726 + (unsigned long) value);
18728 if (value & ~0x3fc)
18729 as_bad_where (fixP->fx_file, fixP->fx_line,
18730 _("invalid offset, value too big (0x%08lX)"),
18731 (long) value);
18733 newval |= value >> 2;
18734 break;
18736 case 9: /* SP load/store. */
18737 if (value & ~0x3fc)
18738 as_bad_where (fixP->fx_file, fixP->fx_line,
18739 _("invalid offset, value too big (0x%08lX)"),
18740 (long) value);
18741 newval |= value >> 2;
18742 break;
18744 case 6: /* Word load/store. */
18745 if (value & ~0x7c)
18746 as_bad_where (fixP->fx_file, fixP->fx_line,
18747 _("invalid offset, value too big (0x%08lX)"),
18748 (long) value);
18749 newval |= value << 4; /* 6 - 2. */
18750 break;
18752 case 7: /* Byte load/store. */
18753 if (value & ~0x1f)
18754 as_bad_where (fixP->fx_file, fixP->fx_line,
18755 _("invalid offset, value too big (0x%08lX)"),
18756 (long) value);
18757 newval |= value << 6;
18758 break;
18760 case 8: /* Halfword load/store. */
18761 if (value & ~0x3e)
18762 as_bad_where (fixP->fx_file, fixP->fx_line,
18763 _("invalid offset, value too big (0x%08lX)"),
18764 (long) value);
18765 newval |= value << 5; /* 6 - 1. */
18766 break;
18768 default:
18769 as_bad_where (fixP->fx_file, fixP->fx_line,
18770 "Unable to process relocation for thumb opcode: %lx",
18771 (unsigned long) newval);
18772 break;
18774 md_number_to_chars (buf, newval, THUMB_SIZE);
18775 break;
18777 case BFD_RELOC_ARM_THUMB_ADD:
18778 /* This is a complicated relocation, since we use it for all of
18779 the following immediate relocations:
18781 3bit ADD/SUB
18782 8bit ADD/SUB
18783 9bit ADD/SUB SP word-aligned
18784 10bit ADD PC/SP word-aligned
18786 The type of instruction being processed is encoded in the
18787 instruction field:
18789 0x8000 SUB
18790 0x00F0 Rd
18791 0x000F Rs
18792 */
18793 newval = md_chars_to_number (buf, THUMB_SIZE);
18795 int rd = (newval >> 4) & 0xf;
18796 int rs = newval & 0xf;
18797 int subtract = !!(newval & 0x8000);
18799 /* Check for HI regs, only very restricted cases allowed:
18800 Adjusting SP, and using PC or SP to get an address. */
18801 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
18802 || (rs > 7 && rs != REG_SP && rs != REG_PC))
18803 as_bad_where (fixP->fx_file, fixP->fx_line,
18804 _("invalid Hi register with immediate"));
18806 /* If value is negative, choose the opposite instruction. */
18807 if (value < 0)
18809 value = -value;
18810 subtract = !subtract;
18811 if (value < 0)
18812 as_bad_where (fixP->fx_file, fixP->fx_line,
18813 _("immediate value out of range"));
18816 if (rd == REG_SP)
18818 if (value & ~0x1fc)
18819 as_bad_where (fixP->fx_file, fixP->fx_line,
18820 _("invalid immediate for stack address calculation"));
18821 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
18822 newval |= value >> 2;
18824 else if (rs == REG_PC || rs == REG_SP)
18826 if (subtract || value & ~0x3fc)
18827 as_bad_where (fixP->fx_file, fixP->fx_line,
18828 _("invalid immediate for address calculation (value = 0x%08lX)"),
18829 (unsigned long) value);
18830 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
18831 newval |= rd << 8;
18832 newval |= value >> 2;
18834 else if (rs == rd)
18836 if (value & ~0xff)
18837 as_bad_where (fixP->fx_file, fixP->fx_line,
18838 _("immediate value out of range"));
18839 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
18840 newval |= (rd << 8) | value;
18842 else
18844 if (value & ~0x7)
18845 as_bad_where (fixP->fx_file, fixP->fx_line,
18846 _("immediate value out of range"));
18847 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
18848 newval |= rd | (rs << 3) | (value << 6);
18851 md_number_to_chars (buf, newval, THUMB_SIZE);
18852 break;
18854 case BFD_RELOC_ARM_THUMB_IMM:
18855 newval = md_chars_to_number (buf, THUMB_SIZE);
18856 if (value < 0 || value > 255)
18857 as_bad_where (fixP->fx_file, fixP->fx_line,
18858 _("invalid immediate: %ld is out of range"),
18859 (long) value);
18860 newval |= value;
18861 md_number_to_chars (buf, newval, THUMB_SIZE);
18862 break;
18864 case BFD_RELOC_ARM_THUMB_SHIFT:
18865 /* 5bit shift value (0..32). LSL cannot take 32. */
18866 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
18867 temp = newval & 0xf800;
18868 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
18869 as_bad_where (fixP->fx_file, fixP->fx_line,
18870 _("invalid shift value: %ld"), (long) value);
18871 /* Shifts of zero must be encoded as LSL. */
18872 if (value == 0)
18873 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
18874 /* Shifts of 32 are encoded as zero. */
18875 else if (value == 32)
18876 value = 0;
18877 newval |= value << 6;
18878 md_number_to_chars (buf, newval, THUMB_SIZE);
18879 break;
18881 case BFD_RELOC_VTABLE_INHERIT:
18882 case BFD_RELOC_VTABLE_ENTRY:
18883 fixP->fx_done = 0;
18884 return;
18886 case BFD_RELOC_ARM_MOVW:
18887 case BFD_RELOC_ARM_MOVT:
18888 case BFD_RELOC_ARM_THUMB_MOVW:
18889 case BFD_RELOC_ARM_THUMB_MOVT:
18890 if (fixP->fx_done || !seg->use_rela_p)
18892 /* REL format relocations are limited to a 16-bit addend. */
18893 if (!fixP->fx_done)
18895 if (value < -0x8000 || value > 0x7fff)
18896 as_bad_where (fixP->fx_file, fixP->fx_line,
18897 _("offset out of range"));
18899 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
18900 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
18902 value >>= 16;
18905 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
18906 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
18908 newval = get_thumb32_insn (buf);
18909 newval &= 0xfbf08f00;
18910 newval |= (value & 0xf000) << 4;
18911 newval |= (value & 0x0800) << 15;
18912 newval |= (value & 0x0700) << 4;
18913 newval |= (value & 0x00ff);
18914 put_thumb32_insn (buf, newval);
18916 else
18918 newval = md_chars_to_number (buf, 4);
18919 newval &= 0xfff0f000;
18920 newval |= value & 0x0fff;
18921 newval |= (value & 0xf000) << 4;
18922 md_number_to_chars (buf, newval, 4);
18925 return;
18927 case BFD_RELOC_ARM_ALU_PC_G0_NC:
18928 case BFD_RELOC_ARM_ALU_PC_G0:
18929 case BFD_RELOC_ARM_ALU_PC_G1_NC:
18930 case BFD_RELOC_ARM_ALU_PC_G1:
18931 case BFD_RELOC_ARM_ALU_PC_G2:
18932 case BFD_RELOC_ARM_ALU_SB_G0_NC:
18933 case BFD_RELOC_ARM_ALU_SB_G0:
18934 case BFD_RELOC_ARM_ALU_SB_G1_NC:
18935 case BFD_RELOC_ARM_ALU_SB_G1:
18936 case BFD_RELOC_ARM_ALU_SB_G2:
18937 assert (!fixP->fx_done);
18938 if (!seg->use_rela_p)
18940 bfd_vma insn;
18941 bfd_vma encoded_addend;
18942 bfd_vma addend_abs = abs (value);
18944 /* Check that the absolute value of the addend can be
18945 expressed as an 8-bit constant plus a rotation. */
18946 encoded_addend = encode_arm_immediate (addend_abs);
18947 if (encoded_addend == (unsigned int) FAIL)
18948 as_bad_where (fixP->fx_file, fixP->fx_line,
18949 _("the offset 0x%08lX is not representable"),
18950 (unsigned long) addend_abs);
18952 /* Extract the instruction. */
18953 insn = md_chars_to_number (buf, INSN_SIZE);
18955 /* If the addend is positive, use an ADD instruction.
18956 Otherwise use a SUB. Take care not to destroy the S bit. */
18957 insn &= 0xff1fffff;
18958 if (value < 0)
18959 insn |= 1 << 22;
18960 else
18961 insn |= 1 << 23;
18963 /* Place the encoded addend into the first 12 bits of the
18964 instruction. */
18965 insn &= 0xfffff000;
18966 insn |= encoded_addend;
18968 /* Update the instruction. */
18969 md_number_to_chars (buf, insn, INSN_SIZE);
18971 break;
18973 case BFD_RELOC_ARM_LDR_PC_G0:
18974 case BFD_RELOC_ARM_LDR_PC_G1:
18975 case BFD_RELOC_ARM_LDR_PC_G2:
18976 case BFD_RELOC_ARM_LDR_SB_G0:
18977 case BFD_RELOC_ARM_LDR_SB_G1:
18978 case BFD_RELOC_ARM_LDR_SB_G2:
18979 assert (!fixP->fx_done);
18980 if (!seg->use_rela_p)
18982 bfd_vma insn;
18983 bfd_vma addend_abs = abs (value);
18985 /* Check that the absolute value of the addend can be
18986 encoded in 12 bits. */
18987 if (addend_abs >= 0x1000)
18988 as_bad_where (fixP->fx_file, fixP->fx_line,
18989 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
18990 (unsigned long) addend_abs);
18992 /* Extract the instruction. */
18993 insn = md_chars_to_number (buf, INSN_SIZE);
18995 /* If the addend is negative, clear bit 23 of the instruction.
18996 Otherwise set it. */
18997 if (value < 0)
18998 insn &= ~(1 << 23);
18999 else
19000 insn |= 1 << 23;
19002 /* Place the absolute value of the addend into the first 12 bits
19003 of the instruction. */
19004 insn &= 0xfffff000;
19005 insn |= addend_abs;
19007 /* Update the instruction. */
19008 md_number_to_chars (buf, insn, INSN_SIZE);
19010 break;
19012 case BFD_RELOC_ARM_LDRS_PC_G0:
19013 case BFD_RELOC_ARM_LDRS_PC_G1:
19014 case BFD_RELOC_ARM_LDRS_PC_G2:
19015 case BFD_RELOC_ARM_LDRS_SB_G0:
19016 case BFD_RELOC_ARM_LDRS_SB_G1:
19017 case BFD_RELOC_ARM_LDRS_SB_G2:
19018 assert (!fixP->fx_done);
19019 if (!seg->use_rela_p)
19021 bfd_vma insn;
19022 bfd_vma addend_abs = abs (value);
19024 /* Check that the absolute value of the addend can be
19025 encoded in 8 bits. */
19026 if (addend_abs >= 0x100)
19027 as_bad_where (fixP->fx_file, fixP->fx_line,
19028 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
19029 (unsigned long) addend_abs);
19031 /* Extract the instruction. */
19032 insn = md_chars_to_number (buf, INSN_SIZE);
19034 /* If the addend is negative, clear bit 23 of the instruction.
19035 Otherwise set it. */
19036 if (value < 0)
19037 insn &= ~(1 << 23);
19038 else
19039 insn |= 1 << 23;
19041 /* Place the first four bits of the absolute value of the addend
19042 into the first 4 bits of the instruction, and the remaining
19043 four into bits 8 .. 11. */
19044 insn &= 0xfffff0f0;
19045 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
19047 /* Update the instruction. */
19048 md_number_to_chars (buf, insn, INSN_SIZE);
19050 break;
19052 case BFD_RELOC_ARM_LDC_PC_G0:
19053 case BFD_RELOC_ARM_LDC_PC_G1:
19054 case BFD_RELOC_ARM_LDC_PC_G2:
19055 case BFD_RELOC_ARM_LDC_SB_G0:
19056 case BFD_RELOC_ARM_LDC_SB_G1:
19057 case BFD_RELOC_ARM_LDC_SB_G2:
19058 assert (!fixP->fx_done);
19059 if (!seg->use_rela_p)
19061 bfd_vma insn;
19062 bfd_vma addend_abs = abs (value);
19064 /* Check that the absolute value of the addend is a multiple of
19065 four and, when divided by four, fits in 8 bits. */
19066 if (addend_abs & 0x3)
19067 as_bad_where (fixP->fx_file, fixP->fx_line,
19068 _("bad offset 0x%08lX (must be word-aligned)"),
19069 (unsigned long) addend_abs);
19071 if ((addend_abs >> 2) > 0xff)
19072 as_bad_where (fixP->fx_file, fixP->fx_line,
19073 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
19074 (unsigned long) addend_abs);
19076 /* Extract the instruction. */
19077 insn = md_chars_to_number (buf, INSN_SIZE);
19079 /* If the addend is negative, clear bit 23 of the instruction.
19080 Otherwise set it. */
19081 if (value < 0)
19082 insn &= ~(1 << 23);
19083 else
19084 insn |= 1 << 23;
19086 /* Place the addend (divided by four) into the first eight
19087 bits of the instruction. */
19088 insn &= 0xfffffff0;
19089 insn |= addend_abs >> 2;
19091 /* Update the instruction. */
19092 md_number_to_chars (buf, insn, INSN_SIZE);
19094 break;
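/* Illustrative worked example (not from the original source): for an
   addend of -0x1f8, addend_abs is 0x1f8, which is word-aligned and whose
   quarter (0x7e) fits in 8 bits; bit 23 is cleared and 0x7e becomes the
   offset byte of the coprocessor load/store instruction.  */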
19096 case BFD_RELOC_ARM_V4BX:
19097 /* This will need to go in the object file. */
19098 fixP->fx_done = 0;
19099 break;
19101 case BFD_RELOC_UNUSED:
19102 default:
19103 as_bad_where (fixP->fx_file, fixP->fx_line,
19104 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
19108 /* Translate internal representation of relocation info to BFD target
19109 format. */
19111 arelent *
19112 tc_gen_reloc (asection *section, fixS *fixp)
19114 arelent * reloc;
19115 bfd_reloc_code_real_type code;
19117 reloc = xmalloc (sizeof (arelent));
19119 reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
19120 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
19121 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
19123 if (fixp->fx_pcrel)
19125 if (section->use_rela_p)
19126 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
19127 else
19128 fixp->fx_offset = reloc->address;
19130 reloc->addend = fixp->fx_offset;
19132 switch (fixp->fx_r_type)
19134 case BFD_RELOC_8:
19135 if (fixp->fx_pcrel)
19137 code = BFD_RELOC_8_PCREL;
19138 break;
19141 case BFD_RELOC_16:
19142 if (fixp->fx_pcrel)
19144 code = BFD_RELOC_16_PCREL;
19145 break;
19148 case BFD_RELOC_32:
19149 if (fixp->fx_pcrel)
19151 code = BFD_RELOC_32_PCREL;
19152 break;
19155 case BFD_RELOC_ARM_MOVW:
19156 if (fixp->fx_pcrel)
19158 code = BFD_RELOC_ARM_MOVW_PCREL;
19159 break;
19162 case BFD_RELOC_ARM_MOVT:
19163 if (fixp->fx_pcrel)
19165 code = BFD_RELOC_ARM_MOVT_PCREL;
19166 break;
19169 case BFD_RELOC_ARM_THUMB_MOVW:
19170 if (fixp->fx_pcrel)
19172 code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
19173 break;
19176 case BFD_RELOC_ARM_THUMB_MOVT:
19177 if (fixp->fx_pcrel)
19179 code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
19180 break;
19183 case BFD_RELOC_NONE:
19184 case BFD_RELOC_ARM_PCREL_BRANCH:
19185 case BFD_RELOC_ARM_PCREL_BLX:
19186 case BFD_RELOC_RVA:
19187 case BFD_RELOC_THUMB_PCREL_BRANCH7:
19188 case BFD_RELOC_THUMB_PCREL_BRANCH9:
19189 case BFD_RELOC_THUMB_PCREL_BRANCH12:
19190 case BFD_RELOC_THUMB_PCREL_BRANCH20:
19191 case BFD_RELOC_THUMB_PCREL_BRANCH23:
19192 case BFD_RELOC_THUMB_PCREL_BRANCH25:
19193 case BFD_RELOC_THUMB_PCREL_BLX:
19194 case BFD_RELOC_VTABLE_ENTRY:
19195 case BFD_RELOC_VTABLE_INHERIT:
19196 #ifdef TE_PE
19197 case BFD_RELOC_32_SECREL:
19198 #endif
19199 code = fixp->fx_r_type;
19200 break;
19202 case BFD_RELOC_ARM_LITERAL:
19203 case BFD_RELOC_ARM_HWLITERAL:
19204 /* If this is called then a literal has
19205 been referenced across a section boundary. */
19206 as_bad_where (fixp->fx_file, fixp->fx_line,
19207 _("literal referenced across section boundary"));
19208 return NULL;
19210 #ifdef OBJ_ELF
19211 case BFD_RELOC_ARM_GOT32:
19212 case BFD_RELOC_ARM_GOTOFF:
19213 case BFD_RELOC_ARM_PLT32:
19214 case BFD_RELOC_ARM_TARGET1:
19215 case BFD_RELOC_ARM_ROSEGREL32:
19216 case BFD_RELOC_ARM_SBREL32:
19217 case BFD_RELOC_ARM_PREL31:
19218 case BFD_RELOC_ARM_TARGET2:
19219 case BFD_RELOC_ARM_TLS_LE32:
19220 case BFD_RELOC_ARM_TLS_LDO32:
19221 case BFD_RELOC_ARM_PCREL_CALL:
19222 case BFD_RELOC_ARM_PCREL_JUMP:
19223 case BFD_RELOC_ARM_ALU_PC_G0_NC:
19224 case BFD_RELOC_ARM_ALU_PC_G0:
19225 case BFD_RELOC_ARM_ALU_PC_G1_NC:
19226 case BFD_RELOC_ARM_ALU_PC_G1:
19227 case BFD_RELOC_ARM_ALU_PC_G2:
19228 case BFD_RELOC_ARM_LDR_PC_G0:
19229 case BFD_RELOC_ARM_LDR_PC_G1:
19230 case BFD_RELOC_ARM_LDR_PC_G2:
19231 case BFD_RELOC_ARM_LDRS_PC_G0:
19232 case BFD_RELOC_ARM_LDRS_PC_G1:
19233 case BFD_RELOC_ARM_LDRS_PC_G2:
19234 case BFD_RELOC_ARM_LDC_PC_G0:
19235 case BFD_RELOC_ARM_LDC_PC_G1:
19236 case BFD_RELOC_ARM_LDC_PC_G2:
19237 case BFD_RELOC_ARM_ALU_SB_G0_NC:
19238 case BFD_RELOC_ARM_ALU_SB_G0:
19239 case BFD_RELOC_ARM_ALU_SB_G1_NC:
19240 case BFD_RELOC_ARM_ALU_SB_G1:
19241 case BFD_RELOC_ARM_ALU_SB_G2:
19242 case BFD_RELOC_ARM_LDR_SB_G0:
19243 case BFD_RELOC_ARM_LDR_SB_G1:
19244 case BFD_RELOC_ARM_LDR_SB_G2:
19245 case BFD_RELOC_ARM_LDRS_SB_G0:
19246 case BFD_RELOC_ARM_LDRS_SB_G1:
19247 case BFD_RELOC_ARM_LDRS_SB_G2:
19248 case BFD_RELOC_ARM_LDC_SB_G0:
19249 case BFD_RELOC_ARM_LDC_SB_G1:
19250 case BFD_RELOC_ARM_LDC_SB_G2:
19251 case BFD_RELOC_ARM_V4BX:
19252 code = fixp->fx_r_type;
19253 break;
19255 case BFD_RELOC_ARM_TLS_GD32:
19256 case BFD_RELOC_ARM_TLS_IE32:
19257 case BFD_RELOC_ARM_TLS_LDM32:
19258 /* BFD will include the symbol's address in the addend.
19259 But we don't want that, so subtract it out again here. */
19260 if (!S_IS_COMMON (fixp->fx_addsy))
19261 reloc->addend -= (*reloc->sym_ptr_ptr)->value;
19262 code = fixp->fx_r_type;
19263 break;
19264 #endif
19266 case BFD_RELOC_ARM_IMMEDIATE:
19267 as_bad_where (fixp->fx_file, fixp->fx_line,
19268 _("internal relocation (type: IMMEDIATE) not fixed up"));
19269 return NULL;
19271 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
19272 as_bad_where (fixp->fx_file, fixp->fx_line,
19273 _("ADRL used for a symbol not defined in the same file"));
19274 return NULL;
19276 case BFD_RELOC_ARM_OFFSET_IMM:
19277 if (section->use_rela_p)
19279 code = fixp->fx_r_type;
19280 break;
19283 if (fixp->fx_addsy != NULL
19284 && !S_IS_DEFINED (fixp->fx_addsy)
19285 && S_IS_LOCAL (fixp->fx_addsy))
19287 as_bad_where (fixp->fx_file, fixp->fx_line,
19288 _("undefined local label `%s'"),
19289 S_GET_NAME (fixp->fx_addsy));
19290 return NULL;
19293 as_bad_where (fixp->fx_file, fixp->fx_line,
19294 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
19295 return NULL;
19297 default:
19299 char * type;
19301 switch (fixp->fx_r_type)
19303 case BFD_RELOC_NONE: type = "NONE"; break;
19304 case BFD_RELOC_ARM_OFFSET_IMM8: type = "OFFSET_IMM8"; break;
19305 case BFD_RELOC_ARM_SHIFT_IMM: type = "SHIFT_IMM"; break;
19306 case BFD_RELOC_ARM_SMC: type = "SMC"; break;
19307 case BFD_RELOC_ARM_SWI: type = "SWI"; break;
19308 case BFD_RELOC_ARM_MULTI: type = "MULTI"; break;
19309 case BFD_RELOC_ARM_CP_OFF_IMM: type = "CP_OFF_IMM"; break;
19310 case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
19311 case BFD_RELOC_ARM_THUMB_ADD: type = "THUMB_ADD"; break;
19312 case BFD_RELOC_ARM_THUMB_SHIFT: type = "THUMB_SHIFT"; break;
19313 case BFD_RELOC_ARM_THUMB_IMM: type = "THUMB_IMM"; break;
19314 case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
19315 default: type = _("<unknown>"); break;
19317 as_bad_where (fixp->fx_file, fixp->fx_line,
19318 _("cannot represent %s relocation in this object file format"),
19319 type);
19320 return NULL;
19324 #ifdef OBJ_ELF
19325 if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
19326 && GOT_symbol
19327 && fixp->fx_addsy == GOT_symbol)
19329 code = BFD_RELOC_ARM_GOTPC;
19330 reloc->addend = fixp->fx_offset = reloc->address;
19332 #endif
19334 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
19336 if (reloc->howto == NULL)
19338 as_bad_where (fixp->fx_file, fixp->fx_line,
19339 _("cannot represent %s relocation in this object file format"),
19340 bfd_get_reloc_code_name (code));
19341 return NULL;
19344 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
19345 vtable entry to be used in the relocation's section offset. */
19346 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
19347 reloc->address = fixp->fx_offset;
19349 return reloc;
19352 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
19354 void
19355 cons_fix_new_arm (fragS * frag,
19356 int where,
19357 int size,
19358 expressionS * exp)
19360 bfd_reloc_code_real_type type;
19361 int pcrel = 0;
19363 /* Pick a reloc.
19364 FIXME: @@ Should look at CPU word size. */
19365 switch (size)
19367 case 1:
19368 type = BFD_RELOC_8;
19369 break;
19370 case 2:
19371 type = BFD_RELOC_16;
19372 break;
19373 case 4:
19374 default:
19375 type = BFD_RELOC_32;
19376 break;
19377 case 8:
19378 type = BFD_RELOC_64;
19379 break;
19382 #ifdef TE_PE
19383 if (exp->X_op == O_secrel)
19385 exp->X_op = O_symbol;
19386 type = BFD_RELOC_32_SECREL;
19388 #endif
19390 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
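/* Illustrative mapping (not from the original source): a ".byte sym"
   data directive (size 1) picks BFD_RELOC_8, ".short"/".hword" (size 2)
   picks BFD_RELOC_16, ".word"/".long" (size 4) picks BFD_RELOC_32 and
   ".quad" (size 8) picks BFD_RELOC_64, as selected by the switch above.  */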
19393 #if defined OBJ_COFF || defined OBJ_ELF
19394 void
19395 arm_validate_fix (fixS * fixP)
19397 /* If the destination of the branch is a defined symbol which does not have
19398 the THUMB_FUNC attribute, then we must be calling a function which has
19399 the (interfacearm) attribute. We look for the Thumb entry point to that
19400 function and change the branch to refer to that function instead. */
19401 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
19402 && fixP->fx_addsy != NULL
19403 && S_IS_DEFINED (fixP->fx_addsy)
19404 && ! THUMB_IS_FUNC (fixP->fx_addsy))
19406 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
19409 #endif
19412 arm_force_relocation (struct fix * fixp)
19414 #if defined (OBJ_COFF) && defined (TE_PE)
19415 if (fixp->fx_r_type == BFD_RELOC_RVA)
19416 return 1;
19417 #endif
19419 /* Resolve these relocations even if the symbol is extern or weak. */
19420 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
19421 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
19422 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
19423 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
19424 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
19425 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
19426 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12)
19427 return 0;
19429 /* Always leave these relocations for the linker. */
19430 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
19431 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
19432 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
19433 return 1;
19435 /* Always generate relocations against function symbols. */
19436 if (fixp->fx_r_type == BFD_RELOC_32
19437 && fixp->fx_addsy
19438 && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
19439 return 1;
19441 return generic_force_reloc (fixp);
19444 #if defined (OBJ_ELF) || defined (OBJ_COFF)
19445 /* Relocations against function names must be left unadjusted,
19446 so that the linker can use this information to generate interworking
19447 stubs. The MIPS version of this function
19448 also prevents relocations that are mips-16 specific, but I do not
19449 know why it does this.
19451 FIXME:
19452 There is one other problem that ought to be addressed here, but
19453 which currently is not: Taking the address of a label (rather
19454 than a function) and then later jumping to that address. Such
19455 addresses also ought to have their bottom bit set (assuming that
19456 they reside in Thumb code), but at the moment they will not. */
19458 bfd_boolean
19459 arm_fix_adjustable (fixS * fixP)
19461 if (fixP->fx_addsy == NULL)
19462 return 1;
19464 /* Preserve relocations against symbols with function type. */
19465 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
19466 return 0;
19468 if (THUMB_IS_FUNC (fixP->fx_addsy)
19469 && fixP->fx_subsy == NULL)
19470 return 0;
19472 /* We need the symbol name for the VTABLE entries. */
19473 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
19474 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
19475 return 0;
19477 /* Don't allow symbols to be discarded on GOT related relocs. */
19478 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
19479 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
19480 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
19481 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
19482 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
19483 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
19484 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
19485 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
19486 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
19487 return 0;
19489 /* Similarly for group relocations. */
19490 if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
19491 && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
19492 || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
19493 return 0;
19495 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
19496 if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
19497 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
19498 || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
19499 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
19500 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
19501 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
19502 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
19503 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
19504 return 0;
19506 return 1;
19508 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
19510 #ifdef OBJ_ELF
19512 const char *
19513 elf32_arm_target_format (void)
19515 #ifdef TE_SYMBIAN
19516 return (target_big_endian
19517 ? "elf32-bigarm-symbian"
19518 : "elf32-littlearm-symbian");
19519 #elif defined (TE_VXWORKS)
19520 return (target_big_endian
19521 ? "elf32-bigarm-vxworks"
19522 : "elf32-littlearm-vxworks");
19523 #else
19524 if (target_big_endian)
19525 return "elf32-bigarm";
19526 else
19527 return "elf32-littlearm";
19528 #endif
19531 void
19532 armelf_frob_symbol (symbolS * symp,
19533 int * puntp)
19535 elf_frob_symbol (symp, puntp);
19537 #endif
19539 /* MD interface: Finalization. */
19541 /* A good place to do this, although this was probably not intended
19542 for this kind of use. We need to dump the literal pool before
19543 references are made to a null symbol pointer. */
19545 void
19546 arm_cleanup (void)
19548 literal_pool * pool;
19550 for (pool = list_of_pools; pool; pool = pool->next)
19552 /* Put it at the end of the relevant section. */
19553 subseg_set (pool->section, pool->sub_section);
19554 #ifdef OBJ_ELF
19555 arm_elf_change_section ();
19556 #endif
19557 s_ltorg (0);
19561 /* Adjust the symbol table. This marks Thumb symbols as distinct from
19562 ARM ones. */
19564 void
19565 arm_adjust_symtab (void)
19567 #ifdef OBJ_COFF
19568 symbolS * sym;
19570 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
19572 if (ARM_IS_THUMB (sym))
19574 if (THUMB_IS_FUNC (sym))
19576 /* Mark the symbol as a Thumb function. */
19577 if ( S_GET_STORAGE_CLASS (sym) == C_STAT
19578 || S_GET_STORAGE_CLASS (sym) == C_LABEL) /* This can happen! */
19579 S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);
19581 else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
19582 S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
19583 else
19584 as_bad (_("%s: unexpected function type: %d"),
19585 S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
19587 else switch (S_GET_STORAGE_CLASS (sym))
19589 case C_EXT:
19590 S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
19591 break;
19592 case C_STAT:
19593 S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
19594 break;
19595 case C_LABEL:
19596 S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
19597 break;
19598 default:
19599 /* Do nothing. */
19600 break;
19604 if (ARM_IS_INTERWORK (sym))
19605 coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
19607 #endif
19608 #ifdef OBJ_ELF
19609 symbolS * sym;
19610 char bind;
19612 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
19614 if (ARM_IS_THUMB (sym))
19616 elf_symbol_type * elf_sym;
19618 elf_sym = elf_symbol (symbol_get_bfdsym (sym));
19619 bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);
19621 if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
19622 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
19624 /* If it's a .thumb_func, declare it as such,
19625 otherwise tag the label as .code 16. */
19626 if (THUMB_IS_FUNC (sym))
19627 elf_sym->internal_elf_sym.st_info =
19628 ELF_ST_INFO (bind, STT_ARM_TFUNC);
19629 else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
19630 elf_sym->internal_elf_sym.st_info =
19631 ELF_ST_INFO (bind, STT_ARM_16BIT);
19635 #endif
19638 /* MD interface: Initialization. */
19640 static void
19641 set_constant_flonums (void)
19643 int i;
19645 for (i = 0; i < NUM_FLOAT_VALS; i++)
19646 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
19647 abort ();
19650 /* Auto-select Thumb mode if it's the only available instruction set for the
19651 given architecture. */
19653 static void
19654 autoselect_thumb_from_cpu_variant (void)
19656 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
19657 opcode_select (16);
19660 void
19661 md_begin (void)
19663 unsigned mach;
19664 unsigned int i;
19666 if ( (arm_ops_hsh = hash_new ()) == NULL
19667 || (arm_cond_hsh = hash_new ()) == NULL
19668 || (arm_shift_hsh = hash_new ()) == NULL
19669 || (arm_psr_hsh = hash_new ()) == NULL
19670 || (arm_v7m_psr_hsh = hash_new ()) == NULL
19671 || (arm_reg_hsh = hash_new ()) == NULL
19672 || (arm_reloc_hsh = hash_new ()) == NULL
19673 || (arm_barrier_opt_hsh = hash_new ()) == NULL)
19674 as_fatal (_("virtual memory exhausted"));
19676 for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
19677 hash_insert (arm_ops_hsh, insns[i].template, (void *) (insns + i));
19678 for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
19679 hash_insert (arm_cond_hsh, conds[i].template, (void *) (conds + i));
19680 for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
19681 hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
19682 for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
19683 hash_insert (arm_psr_hsh, psrs[i].template, (void *) (psrs + i));
19684 for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
19685 hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template, (void *) (v7m_psrs + i));
19686 for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
19687 hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
19688 for (i = 0;
19689 i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
19690 i++)
19691 hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template,
19692 (void *) (barrier_opt_names + i));
19693 #ifdef OBJ_ELF
19694 for (i = 0; i < sizeof (reloc_names) / sizeof (struct reloc_entry); i++)
19695 hash_insert (arm_reloc_hsh, reloc_names[i].name, (void *) (reloc_names + i));
19696 #endif
19698 set_constant_flonums ();
19700 /* Set the cpu variant based on the command-line options. We prefer
19701 -mcpu= over -march= if both are set (as for GCC); and we prefer
19702 -mfpu= over any other way of setting the floating point unit.
19703 Use of legacy options together with new options is faulted. */
19704 if (legacy_cpu)
19706 if (mcpu_cpu_opt || march_cpu_opt)
19707 as_bad (_("use of old and new-style options to set CPU type"));
19709 mcpu_cpu_opt = legacy_cpu;
19711 else if (!mcpu_cpu_opt)
19712 mcpu_cpu_opt = march_cpu_opt;
19714 if (legacy_fpu)
19716 if (mfpu_opt)
19717 as_bad (_("use of old and new-style options to set FPU type"));
19719 mfpu_opt = legacy_fpu;
19721 else if (!mfpu_opt)
19723 #if !(defined (TE_LINUX) || defined (TE_NetBSD) || defined (TE_VXWORKS))
19724 /* Some environments specify a default FPU. If they don't, infer it
19725 from the processor. */
19726 if (mcpu_fpu_opt)
19727 mfpu_opt = mcpu_fpu_opt;
19728 else
19729 mfpu_opt = march_fpu_opt;
19730 #else
19731 mfpu_opt = &fpu_default;
19732 #endif
19735 if (!mfpu_opt)
19737 if (mcpu_cpu_opt != NULL)
19738 mfpu_opt = &fpu_default;
19739 else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
19740 mfpu_opt = &fpu_arch_vfp_v2;
19741 else
19742 mfpu_opt = &fpu_arch_fpa;
19745 #ifdef CPU_DEFAULT
19746 if (!mcpu_cpu_opt)
19748 mcpu_cpu_opt = &cpu_default;
19749 selected_cpu = cpu_default;
19751 #else
19752 if (mcpu_cpu_opt)
19753 selected_cpu = *mcpu_cpu_opt;
19754 else
19755 mcpu_cpu_opt = &arm_arch_any;
19756 #endif
19758 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
19760 autoselect_thumb_from_cpu_variant ();
19762 arm_arch_used = thumb_arch_used = arm_arch_none;
19764 #if defined OBJ_COFF || defined OBJ_ELF
19766 unsigned int flags = 0;
19768 #if defined OBJ_ELF
19769 flags = meabi_flags;
19771 switch (meabi_flags)
19773 case EF_ARM_EABI_UNKNOWN:
19774 #endif
19775 /* Set the flags in the private structure. */
19776 if (uses_apcs_26) flags |= F_APCS26;
19777 if (support_interwork) flags |= F_INTERWORK;
19778 if (uses_apcs_float) flags |= F_APCS_FLOAT;
19779 if (pic_code) flags |= F_PIC;
19780 if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
19781 flags |= F_SOFT_FLOAT;
19783 switch (mfloat_abi_opt)
19785 case ARM_FLOAT_ABI_SOFT:
19786 case ARM_FLOAT_ABI_SOFTFP:
19787 flags |= F_SOFT_FLOAT;
19788 break;
19790 case ARM_FLOAT_ABI_HARD:
19791 if (flags & F_SOFT_FLOAT)
19792 as_bad (_("hard-float conflicts with specified fpu"));
19793 break;
19796 /* Using pure-endian doubles (even if soft-float). */
19797 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
19798 flags |= F_VFP_FLOAT;
19800 #if defined OBJ_ELF
19801 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
19802 flags |= EF_ARM_MAVERICK_FLOAT;
19803 break;
19805 case EF_ARM_EABI_VER4:
19806 case EF_ARM_EABI_VER5:
19807 /* No additional flags to set. */
19808 break;
19810 default:
19811 abort ();
19813 #endif
19814 bfd_set_private_flags (stdoutput, flags);
19816 /* We have run out of flags in the COFF header to encode the
19817 status of ATPCS support, so instead we create a dummy,
19818 empty, debug section called .arm.atpcs. */
19819 if (atpcs)
19821 asection * sec;
19823 sec = bfd_make_section (stdoutput, ".arm.atpcs");
19825 if (sec != NULL)
19827 bfd_set_section_flags
19828 (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
19829 bfd_set_section_size (stdoutput, sec, 0);
19830 bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
19834 #endif
19836 /* Record the CPU type as well. */
19837 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
19838 mach = bfd_mach_arm_iWMMXt2;
19839 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
19840 mach = bfd_mach_arm_iWMMXt;
19841 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
19842 mach = bfd_mach_arm_XScale;
19843 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
19844 mach = bfd_mach_arm_ep9312;
19845 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
19846 mach = bfd_mach_arm_5TE;
19847 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
19849 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
19850 mach = bfd_mach_arm_5T;
19851 else
19852 mach = bfd_mach_arm_5;
19854 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
19856 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
19857 mach = bfd_mach_arm_4T;
19858 else
19859 mach = bfd_mach_arm_4;
19861 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
19862 mach = bfd_mach_arm_3M;
19863 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
19864 mach = bfd_mach_arm_3;
19865 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
19866 mach = bfd_mach_arm_2a;
19867 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
19868 mach = bfd_mach_arm_2;
19869 else
19870 mach = bfd_mach_arm_unknown;
19872 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
19875 /* Command line processing. */
19877 /* md_parse_option
19878 Invocation line includes a switch not recognized by the base assembler.
19879 See if it's a processor-specific option.
19881 This routine is somewhat complicated by the need for backwards
19882 compatibility (since older releases of gcc can't be changed).
19883 The new options try to make the interface as compatible as
19884 possible with GCC.
19886 New options (supported) are:
19888 -mcpu=<cpu name> Assemble for selected processor
19889 -march=<architecture name> Assemble for selected architecture
19890 -mfpu=<fpu architecture> Assemble for selected FPU.
19891 -EB/-mbig-endian Big-endian
19892 -EL/-mlittle-endian Little-endian
19893 -k Generate PIC code
19894 -mthumb Start in Thumb mode
19895 -mthumb-interwork Code supports ARM/Thumb interworking
19897 For now we will also provide support for:
19899 -mapcs-32 32-bit Program counter
19900 -mapcs-26 26-bit Program counter
19901 -mapcs-float Floats passed in FP registers
19902 -mapcs-reentrant Reentrant code
19903 -matpcs
19904 (sometime these will probably be replaced with -mapcs=<list of options>
19905 and -matpcs=<list of options>)
19907 The remaining options are only supported for backwards compatibility.
19908 Cpu variants, the arm part is optional:
19909 -m[arm]1 Currently not supported.
19910 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
19911 -m[arm]3 Arm 3 processor
19912 -m[arm]6[xx], Arm 6 processors
19913 -m[arm]7[xx][t][[d]m] Arm 7 processors
19914 -m[arm]8[10] Arm 8 processors
19915 -m[arm]9[20][tdmi] Arm 9 processors
19916 -mstrongarm[110[0]] StrongARM processors
19917 -mxscale XScale processors
19918 -m[arm]v[2345[t[e]]] Arm architectures
19919 -mall All (except the ARM1)
19920 FP variants:
19921 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
19922 -mfpe-old (No float load/store multiples)
19923 -mvfpxd VFP Single precision
19924 -mvfp All VFP
19925 -mno-fpu Disable all floating point instructions
19927 The following CPU names are recognized:
19928 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
19929 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
19930 arm700i, arm710, arm710t, arm720, arm720t, arm740t, arm710c,
19931 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
19932 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
19933 arm10t, arm10e, arm1020t, arm1020e, arm10200e,
19934 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
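/* Illustrative invocations (not from the original source), using options
   documented above:

       as -mcpu=arm7tdmi -mthumb-interwork -o out.o in.s
       as -march=armv5te -mfpu=vfp -mfloat-abi=softfp -o out.o in.s
       as -mbig-endian -mcpu=xscale -o out.o in.s

   The exact set of accepted names is defined by the arm_cpus, arm_archs
   and arm_fpus tables below.  */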
19938 const char * md_shortopts = "m:k";
19940 #ifdef ARM_BI_ENDIAN
19941 #define OPTION_EB (OPTION_MD_BASE + 0)
19942 #define OPTION_EL (OPTION_MD_BASE + 1)
19943 #else
19944 #if TARGET_BYTES_BIG_ENDIAN
19945 #define OPTION_EB (OPTION_MD_BASE + 0)
19946 #else
19947 #define OPTION_EL (OPTION_MD_BASE + 1)
19948 #endif
19949 #endif
19950 #define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
19952 struct option md_longopts[] =
19954 #ifdef OPTION_EB
19955 {"EB", no_argument, NULL, OPTION_EB},
19956 #endif
19957 #ifdef OPTION_EL
19958 {"EL", no_argument, NULL, OPTION_EL},
19959 #endif
19960 {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
19961 {NULL, no_argument, NULL, 0}
19964 size_t md_longopts_size = sizeof (md_longopts);
19966 struct arm_option_table
19968 char *option; /* Option name to match. */
19969 char *help; /* Help information. */
19970 int *var; /* Variable to change. */
19971 int value; /* What to change it to. */
19972 char *deprecated; /* If non-null, print this message. */
19975 struct arm_option_table arm_opts[] =
19977 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
19978 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
19979 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
19980 &support_interwork, 1, NULL},
19981 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
19982 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
19983 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
19984 1, NULL},
19985 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
19986 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
19987 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
19988 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
19989 NULL},
19991 /* These are recognized by the assembler, but have no effect on code. */
19992 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
19993 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
19994 {NULL, NULL, NULL, 0, NULL}
19997 struct arm_legacy_option_table
19999 char *option; /* Option name to match. */
20000 const arm_feature_set **var; /* Variable to change. */
20001 const arm_feature_set value; /* What to change it to. */
20002 char *deprecated; /* If non-null, print this message. */
20005 const struct arm_legacy_option_table arm_legacy_opts[] =
20007 /* DON'T add any new processors to this list -- we want the whole list
20008 to go away... Add them to the processors table instead. */
20009 {"marm1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
20010 {"m1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
20011 {"marm2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
20012 {"m2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
20013 {"marm250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
20014 {"m250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
20015 {"marm3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
20016 {"m3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
20017 {"marm6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
20018 {"m6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
20019 {"marm600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
20020 {"m600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
20021 {"marm610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
20022 {"m610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
20023 {"marm620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
20024 {"m620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
20025 {"marm7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
20026 {"m7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
20027 {"marm70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
20028 {"m70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
20029 {"marm700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
20030 {"m700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
20031 {"marm700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
20032 {"m700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
20033 {"marm710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
20034 {"m710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
20035 {"marm710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
20036 {"m710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
20037 {"marm720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
20038 {"m720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
20039 {"marm7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
20040 {"m7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
20041 {"marm7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
20042 {"m7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
20043 {"marm7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
20044 {"m7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
20045 {"marm7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
20046 {"m7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
20047 {"marm7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
20048 {"m7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
20049 {"marm7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
20050 {"m7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
20051 {"marm7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
20052 {"m7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
20053 {"marm7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
20054 {"m7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
20055 {"marm7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
20056 {"m7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
20057 {"marm7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
20058 {"m7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
20059 {"marm710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
20060 {"m710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
20061 {"marm720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
20062 {"m720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
20063 {"marm740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
20064 {"m740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
20065 {"marm8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
20066 {"m8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
20067 {"marm810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
20068 {"m810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
20069 {"marm9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
20070 {"m9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
20071 {"marm9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
20072 {"m9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
20073 {"marm920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
20074 {"m920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
20075 {"marm940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
20076 {"m940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
20077 {"mstrongarm", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=strongarm")},
20078 {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
20079 N_("use -mcpu=strongarm110")},
20080 {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
20081 N_("use -mcpu=strongarm1100")},
20082 {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
20083 N_("use -mcpu=strongarm1110")},
20084 {"mxscale", &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
20085 {"miwmmxt", &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
20086 {"mall", &legacy_cpu, ARM_ANY, N_("use -mcpu=all")},
20088 /* Architecture variants -- don't add any more to this list either. */
20089 {"mv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
20090 {"marmv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
20091 {"mv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
20092 {"marmv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
20093 {"mv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
20094 {"marmv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
20095 {"mv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
20096 {"marmv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
20097 {"mv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
20098 {"marmv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
20099 {"mv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
20100 {"marmv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
20101 {"mv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
20102 {"marmv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
20103 {"mv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
20104 {"marmv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
20105 {"mv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
20106 {"marmv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
20108 /* Floating point variants -- don't add any more to this list either. */
20109 {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
20110 {"mfpa10", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
20111 {"mfpa11", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
20112 {"mno-fpu", &legacy_fpu, ARM_ARCH_NONE,
20113 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
20115 {NULL, NULL, ARM_ARCH_NONE, NULL}
20118 struct arm_cpu_option_table
20120 char *name;
20121 const arm_feature_set value;
20122 /* For some CPUs we assume an FPU unless the user explicitly sets
20123 -mfpu=... */
20124 const arm_feature_set default_fpu;
20125 /* The canonical name of the CPU, or NULL to use NAME converted to upper
20126 case. */
20127 const char *canonical_name;
20130 /* This list should, at a minimum, contain all the cpu names
20131 recognized by GCC. */
20132 static const struct arm_cpu_option_table arm_cpus[] =
20134 {"all", ARM_ANY, FPU_ARCH_FPA, NULL},
20135 {"arm1", ARM_ARCH_V1, FPU_ARCH_FPA, NULL},
20136 {"arm2", ARM_ARCH_V2, FPU_ARCH_FPA, NULL},
20137 {"arm250", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
20138 {"arm3", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
20139 {"arm6", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20140 {"arm60", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20141 {"arm600", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20142 {"arm610", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20143 {"arm620", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20144 {"arm7", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20145 {"arm7m", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
20146 {"arm7d", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20147 {"arm7dm", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
20148 {"arm7di", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20149 {"arm7dmi", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
20150 {"arm70", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20151 {"arm700", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20152 {"arm700i", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20153 {"arm710", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20154 {"arm710t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
20155 {"arm720", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20156 {"arm720t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
20157 {"arm740t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
20158 {"arm710c", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20159 {"arm7100", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20160 {"arm7500", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20161 {"arm7500fe", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20162 {"arm7t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
20163 {"arm7tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
20164 {"arm7tdmi-s", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
20165 {"arm8", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
20166 {"arm810", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
20167 {"strongarm", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
20168 {"strongarm1", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
20169 {"strongarm110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
20170 {"strongarm1100", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
20171 {"strongarm1110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
20172 {"arm9", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
20173 {"arm920", ARM_ARCH_V4T, FPU_ARCH_FPA, "ARM920T"},
20174 {"arm920t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
20175 {"arm922t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
20176 {"arm940t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
20177 {"arm9tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
20178 {"fa526", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
20179 {"fa626", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
20180 /* For V5 or later processors we default to using VFP; but the user
20181 should really set the FPU type explicitly. */
20182 {"arm9e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
20183 {"arm9e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
20184 {"arm926ej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
20185 {"arm926ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
20186 {"arm926ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
20187 {"arm946e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
20188 {"arm946e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM946E-S"},
20189 {"arm946e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
20190 {"arm966e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
20191 {"arm966e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM966E-S"},
20192 {"arm966e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
20193 {"arm968e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
20194 {"arm10t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
20195 {"arm10tdmi", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
20196 {"arm10e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
20197 {"arm1020", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM1020E"},
20198 {"arm1020t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
20199 {"arm1020e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
20200 {"arm1022e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
20201 {"arm1026ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM1026EJ-S"},
20202 {"arm1026ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
20203 {"fa626te", ARM_ARCH_V5TE, FPU_NONE, NULL},
20204 {"fa726te", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
20205 {"arm1136js", ARM_ARCH_V6, FPU_NONE, "ARM1136J-S"},
20206 {"arm1136j-s", ARM_ARCH_V6, FPU_NONE, NULL},
20207 {"arm1136jfs", ARM_ARCH_V6, FPU_ARCH_VFP_V2, "ARM1136JF-S"},
20208 {"arm1136jf-s", ARM_ARCH_V6, FPU_ARCH_VFP_V2, NULL},
20209 {"mpcore", ARM_ARCH_V6K, FPU_ARCH_VFP_V2, NULL},
20210 {"mpcorenovfp", ARM_ARCH_V6K, FPU_NONE, NULL},
20211 {"arm1156t2-s", ARM_ARCH_V6T2, FPU_NONE, NULL},
20212 {"arm1156t2f-s", ARM_ARCH_V6T2, FPU_ARCH_VFP_V2, NULL},
20213 {"arm1176jz-s", ARM_ARCH_V6ZK, FPU_NONE, NULL},
20214 {"arm1176jzf-s", ARM_ARCH_V6ZK, FPU_ARCH_VFP_V2, NULL},
20215 {"cortex-a8", ARM_ARCH_V7A, ARM_FEATURE(0, FPU_VFP_V3
20216 | FPU_NEON_EXT_V1),
20217 NULL},
20218 {"cortex-a9", ARM_ARCH_V7A, ARM_FEATURE(0, FPU_VFP_V3
20219 | FPU_NEON_EXT_V1),
20220 NULL},
20221 {"cortex-r4", ARM_ARCH_V7R, FPU_NONE, NULL},
20222 {"cortex-m3", ARM_ARCH_V7M, FPU_NONE, NULL},
20223 {"cortex-m1", ARM_ARCH_V6M, FPU_NONE, NULL},
20224 /* ??? XSCALE is really an architecture. */
20225 {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
20226 /* ??? iwmmxt is not a processor. */
20227 {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL},
20228 {"iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL},
20229 {"i80200", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
20230 /* Maverick */
20231 {"ep9312", ARM_FEATURE(ARM_AEXT_V4T, ARM_CEXT_MAVERICK), FPU_ARCH_MAVERICK, "ARM920T"},
20232 {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL}
20235 struct arm_arch_option_table
20237 char *name;
20238 const arm_feature_set value;
20239 const arm_feature_set default_fpu;
20242 /* This list should, at a minimum, contain all the architecture names
20243 recognized by GCC. */
20244 static const struct arm_arch_option_table arm_archs[] =
20246 {"all", ARM_ANY, FPU_ARCH_FPA},
20247 {"armv1", ARM_ARCH_V1, FPU_ARCH_FPA},
20248 {"armv2", ARM_ARCH_V2, FPU_ARCH_FPA},
20249 {"armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA},
20250 {"armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA},
20251 {"armv3", ARM_ARCH_V3, FPU_ARCH_FPA},
20252 {"armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA},
20253 {"armv4", ARM_ARCH_V4, FPU_ARCH_FPA},
20254 {"armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA},
20255 {"armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA},
20256 {"armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA},
20257 {"armv5", ARM_ARCH_V5, FPU_ARCH_VFP},
20258 {"armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP},
20259 {"armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP},
20260 {"armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP},
20261 {"armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP},
20262 {"armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP},
20263 {"armv6", ARM_ARCH_V6, FPU_ARCH_VFP},
20264 {"armv6j", ARM_ARCH_V6, FPU_ARCH_VFP},
20265 {"armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP},
20266 {"armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP},
20267 {"armv6zk", ARM_ARCH_V6ZK, FPU_ARCH_VFP},
20268 {"armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP},
20269 {"armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP},
20270 {"armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP},
20271 {"armv6zkt2", ARM_ARCH_V6ZKT2, FPU_ARCH_VFP},
20272 {"armv6-m", ARM_ARCH_V6M, FPU_ARCH_VFP},
20273 {"armv7", ARM_ARCH_V7, FPU_ARCH_VFP},
20274 /* The official spelling of the ARMv7 profile variants is the dashed form.
20275 Accept the non-dashed form for compatibility with old toolchains. */
20276 {"armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP},
20277 {"armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP},
20278 {"armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP},
20279 {"armv7-a", ARM_ARCH_V7A, FPU_ARCH_VFP},
20280 {"armv7-r", ARM_ARCH_V7R, FPU_ARCH_VFP},
20281 {"armv7-m", ARM_ARCH_V7M, FPU_ARCH_VFP},
20282 {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP},
20283 {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP},
20284 {"iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP},
20285 {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE}
20288 /* ISA extensions in the co-processor space. */
20289 struct arm_option_cpu_value_table
20291 char *name;
20292 const arm_feature_set value;
20295 static const struct arm_option_cpu_value_table arm_extensions[] =
20297 {"maverick", ARM_FEATURE (0, ARM_CEXT_MAVERICK)},
20298 {"xscale", ARM_FEATURE (0, ARM_CEXT_XSCALE)},
20299 {"iwmmxt", ARM_FEATURE (0, ARM_CEXT_IWMMXT)},
20300 {"iwmmxt2", ARM_FEATURE (0, ARM_CEXT_IWMMXT2)},
20301 {NULL, ARM_ARCH_NONE}
20304 /* This list should, at a minimum, contain all the fpu names
20305 recognized by GCC. */
20306 static const struct arm_option_cpu_value_table arm_fpus[] =
20308 {"softfpa", FPU_NONE},
20309 {"fpe", FPU_ARCH_FPE},
20310 {"fpe2", FPU_ARCH_FPE},
20311 {"fpe3", FPU_ARCH_FPA}, /* Third release supports LFM/SFM. */
20312 {"fpa", FPU_ARCH_FPA},
20313 {"fpa10", FPU_ARCH_FPA},
20314 {"fpa11", FPU_ARCH_FPA},
20315 {"arm7500fe", FPU_ARCH_FPA},
20316 {"softvfp", FPU_ARCH_VFP},
20317 {"softvfp+vfp", FPU_ARCH_VFP_V2},
20318 {"vfp", FPU_ARCH_VFP_V2},
20319 {"vfp9", FPU_ARCH_VFP_V2},
20320 {"vfp3", FPU_ARCH_VFP_V3}, /* For backwards compatbility. */
20321 {"vfp10", FPU_ARCH_VFP_V2},
20322 {"vfp10-r0", FPU_ARCH_VFP_V1},
20323 {"vfpxd", FPU_ARCH_VFP_V1xD},
20324 {"vfpv2", FPU_ARCH_VFP_V2},
20325 {"vfpv3", FPU_ARCH_VFP_V3},
20326 {"vfpv3-d16", FPU_ARCH_VFP_V3D16},
20327 {"arm1020t", FPU_ARCH_VFP_V1},
20328 {"arm1020e", FPU_ARCH_VFP_V2},
20329 {"arm1136jfs", FPU_ARCH_VFP_V2},
20330 {"arm1136jf-s", FPU_ARCH_VFP_V2},
20331 {"maverick", FPU_ARCH_MAVERICK},
20332 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1},
20333 {"neon-fp16", FPU_ARCH_NEON_FP16},
20334 {NULL, ARM_ARCH_NONE}
20337 struct arm_option_value_table
20339 char *name;
20340 long value;
20343 static const struct arm_option_value_table arm_float_abis[] =
20345 {"hard", ARM_FLOAT_ABI_HARD},
20346 {"softfp", ARM_FLOAT_ABI_SOFTFP},
20347 {"soft", ARM_FLOAT_ABI_SOFT},
20348 {NULL, 0}
20351 #ifdef OBJ_ELF
20352 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
20353 static const struct arm_option_value_table arm_eabis[] =
20355 {"gnu", EF_ARM_EABI_UNKNOWN},
20356 {"4", EF_ARM_EABI_VER4},
20357 {"5", EF_ARM_EABI_VER5},
20358 {NULL, 0}
20360 #endif
20362 struct arm_long_option_table
20364 char * option; /* Substring to match. */
20365 char * help; /* Help information. */
20366 int (* func) (char * subopt); /* Function to decode sub-option. */
20367 char * deprecated; /* If non-null, print this message. */
20370 static int
20371 arm_parse_extension (char * str, const arm_feature_set **opt_p)
20373 arm_feature_set *ext_set = xmalloc (sizeof (arm_feature_set));
20375 /* Copy the feature set, so that we can modify it. */
20376 *ext_set = **opt_p;
20377 *opt_p = ext_set;
20379 while (str != NULL && *str != 0)
20381 const struct arm_option_cpu_value_table * opt;
20382 char * ext;
20383 int optlen;
20385 if (*str != '+')
20387 as_bad (_("invalid architectural extension"));
20388 return 0;
20391 str++;
20392 ext = strchr (str, '+');
20394 if (ext != NULL)
20395 optlen = ext - str;
20396 else
20397 optlen = strlen (str);
20399 if (optlen == 0)
20401 as_bad (_("missing architectural extension"));
20402 return 0;
20405 for (opt = arm_extensions; opt->name != NULL; opt++)
20406 if (strncmp (opt->name, str, optlen) == 0)
20408 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
20409 break;
20412 if (opt->name == NULL)
20414 as_bad (_("unknown architectural extension `%s'"), str);
20415 return 0;
20418 str = ext;
20421 return 1;
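/* Illustrative parse (not from the original source): given
   "-mcpu=xscale+iwmmxt", arm_parse_cpu below matches "xscale" in the
   arm_cpus table and then hands "+iwmmxt" to arm_parse_extension above,
   which merges the ARM_CEXT_IWMMXT feature bits from arm_extensions into
   a copy of the selected CPU's feature set.  */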
20424 static int
20425 arm_parse_cpu (char * str)
20427 const struct arm_cpu_option_table * opt;
20428 char * ext = strchr (str, '+');
20429 int optlen;
20431 if (ext != NULL)
20432 optlen = ext - str;
20433 else
20434 optlen = strlen (str);
20436 if (optlen == 0)
20438 as_bad (_("missing cpu name `%s'"), str);
20439 return 0;
20442 for (opt = arm_cpus; opt->name != NULL; opt++)
20443 if (strncmp (opt->name, str, optlen) == 0)
20445 mcpu_cpu_opt = &opt->value;
20446 mcpu_fpu_opt = &opt->default_fpu;
20447 if (opt->canonical_name)
20448 strcpy (selected_cpu_name, opt->canonical_name);
20449 else
20451 int i;
20452 for (i = 0; i < optlen; i++)
20453 selected_cpu_name[i] = TOUPPER (opt->name[i]);
20454 selected_cpu_name[i] = 0;
20457 if (ext != NULL)
20458 return arm_parse_extension (ext, &mcpu_cpu_opt);
20460 return 1;
20463 as_bad (_("unknown cpu `%s'"), str);
20464 return 0;
20467 static int
20468 arm_parse_arch (char * str)
20470 const struct arm_arch_option_table *opt;
20471 char *ext = strchr (str, '+');
20472 int optlen;
20474 if (ext != NULL)
20475 optlen = ext - str;
20476 else
20477 optlen = strlen (str);
20479 if (optlen == 0)
20481 as_bad (_("missing architecture name `%s'"), str);
20482 return 0;
20485 for (opt = arm_archs; opt->name != NULL; opt++)
20486 if (streq (opt->name, str))
20488 march_cpu_opt = &opt->value;
20489 march_fpu_opt = &opt->default_fpu;
20490 strcpy (selected_cpu_name, opt->name);
20492 if (ext != NULL)
20493 return arm_parse_extension (ext, &march_cpu_opt);
20495 return 1;
20498 as_bad (_("unknown architecture `%s'\n"), str);
20499 return 0;
20502 static int
20503 arm_parse_fpu (char * str)
20505 const struct arm_option_cpu_value_table * opt;
20507 for (opt = arm_fpus; opt->name != NULL; opt++)
20508 if (streq (opt->name, str))
20510 mfpu_opt = &opt->value;
20511 return 1;
20514 as_bad (_("unknown floating point format `%s'\n"), str);
20515 return 0;
20518 static int
20519 arm_parse_float_abi (char * str)
20521 const struct arm_option_value_table * opt;
20523 for (opt = arm_float_abis; opt->name != NULL; opt++)
20524 if (streq (opt->name, str))
20526 mfloat_abi_opt = opt->value;
20527 return 1;
20530 as_bad (_("unknown floating point abi `%s'\n"), str);
20531 return 0;
20534 #ifdef OBJ_ELF
20535 static int
20536 arm_parse_eabi (char * str)
20538 const struct arm_option_value_table *opt;
20540 for (opt = arm_eabis; opt->name != NULL; opt++)
20541 if (streq (opt->name, str))
20543 meabi_flags = opt->value;
20544 return 1;
20546 as_bad (_("unknown EABI `%s'\n"), str);
20547 return 0;
20549 #endif
20551 struct arm_long_option_table arm_long_opts[] =
20553 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
20554 arm_parse_cpu, NULL},
20555 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
20556 arm_parse_arch, NULL},
20557 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
20558 arm_parse_fpu, NULL},
20559 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
20560 arm_parse_float_abi, NULL},
20561 #ifdef OBJ_ELF
20562 {"meabi=", N_("<ver>\t\t assemble for eabi version <ver>"),
20563 arm_parse_eabi, NULL},
20564 #endif
20565 {NULL, NULL, 0, NULL}
20569 md_parse_option (int c, char * arg)
20571 struct arm_option_table *opt;
20572 const struct arm_legacy_option_table *fopt;
20573 struct arm_long_option_table *lopt;
20575 switch (c)
20577 #ifdef OPTION_EB
20578 case OPTION_EB:
20579 target_big_endian = 1;
20580 break;
20581 #endif
20583 #ifdef OPTION_EL
20584 case OPTION_EL:
20585 target_big_endian = 0;
20586 break;
20587 #endif
20589 case OPTION_FIX_V4BX:
20590 fix_v4bx = TRUE;
20591 break;
20593 case 'a':
20594 /* Listing option. Just ignore these, we don't support additional
20595 ones. */
20596 return 0;
20598 default:
20599 for (opt = arm_opts; opt->option != NULL; opt++)
20601 if (c == opt->option[0]
20602 && ((arg == NULL && opt->option[1] == 0)
20603 || streq (arg, opt->option + 1)))
20605 #if WARN_DEPRECATED
20606 /* If the option is deprecated, tell the user. */
20607 if (opt->deprecated != NULL)
20608 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
20609 arg ? arg : "", _(opt->deprecated));
20610 #endif
20612 if (opt->var != NULL)
20613 *opt->var = opt->value;
20615 return 1;
20619 for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
20621 if (c == fopt->option[0]
20622 && ((arg == NULL && fopt->option[1] == 0)
20623 || streq (arg, fopt->option + 1)))
20625 #if WARN_DEPRECATED
20626 /* If the option is deprecated, tell the user. */
20627 if (fopt->deprecated != NULL)
20628 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
20629 arg ? arg : "", _(fopt->deprecated));
20630 #endif
20632 if (fopt->var != NULL)
20633 *fopt->var = &fopt->value;
20635 return 1;
20639 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
20641 /* These options are expected to have an argument. */
20642 if (c == lopt->option[0]
20643 && arg != NULL
20644 && strncmp (arg, lopt->option + 1,
20645 strlen (lopt->option + 1)) == 0)
20647 #if WARN_DEPRECATED
20648 /* If the option is deprecated, tell the user. */
20649 if (lopt->deprecated != NULL)
20650 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
20651 _(lopt->deprecated));
20652 #endif
20654 /* Call the sub-option parser. */
20655 return lopt->func (arg + strlen (lopt->option) - 1);
20659 return 0;
20662 return 1;
20665 void
20666 md_show_usage (FILE * fp)
20668 struct arm_option_table *opt;
20669 struct arm_long_option_table *lopt;
20671 fprintf (fp, _(" ARM-specific assembler options:\n"));
20673 for (opt = arm_opts; opt->option != NULL; opt++)
20674 if (opt->help != NULL)
20675 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
20677 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
20678 if (lopt->help != NULL)
20679 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
20681 #ifdef OPTION_EB
20682 fprintf (fp, _("\
20683 -EB assemble code for a big-endian cpu\n"));
20684 #endif
20686 #ifdef OPTION_EL
20687 fprintf (fp, _("\
20688 -EL assemble code for a little-endian cpu\n"));
20689 #endif
20691 fprintf (fp, _("\
20692 --fix-v4bx Allow BX in ARMv4 code\n"));
20696 #ifdef OBJ_ELF
20697 typedef struct
20699 int val;
20700 arm_feature_set flags;
20701 } cpu_arch_ver_table;
20703 /* Mapping from CPU features to EABI CPU arch values. The table must be
20704 sorted with the fewest features first. */
20705 static const cpu_arch_ver_table cpu_arch_ver[] =
20707 {1, ARM_ARCH_V4},
20708 {2, ARM_ARCH_V4T},
20709 {3, ARM_ARCH_V5},
20710 {4, ARM_ARCH_V5TE},
20711 {5, ARM_ARCH_V5TEJ},
20712 {6, ARM_ARCH_V6},
20713 {7, ARM_ARCH_V6Z},
20714 {9, ARM_ARCH_V6K},
20715 {9, ARM_ARCH_V6M},
20716 {8, ARM_ARCH_V6T2},
20717 {10, ARM_ARCH_V7A},
20718 {10, ARM_ARCH_V7R},
20719 {10, ARM_ARCH_V7M},
20720 {0, ARM_ARCH_NONE}
20723 /* Set the public EABI object attributes. */
20724 static void
20725 aeabi_set_public_attributes (void)
20727 int arch;
20728 arm_feature_set flags;
20729 arm_feature_set tmp;
20730 const cpu_arch_ver_table *p;
20732 /* Choose the architecture based on the capabilities of the requested cpu
20733 (if any) and/or the instructions actually used. */
20734 ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
20735 ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
20736 ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
20737 /* Allow the user to override the reported architecture. */
20738 if (object_arch)
20740 ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
20741 ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
20744 tmp = flags;
20745 arch = 0;
20746 for (p = cpu_arch_ver; p->val; p++)
20748 if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
20750 arch = p->val;
20751 ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
20755 /* Tag_CPU_name. */
20756 if (selected_cpu_name[0])
20758 char *p;
20760 p = selected_cpu_name;
20761 if (strncmp (p, "armv", 4) == 0)
20763 int i;
20765 p += 4;
20766 for (i = 0; p[i]; i++)
20767 p[i] = TOUPPER (p[i]);
20769 bfd_elf_add_proc_attr_string (stdoutput, 5, p);
20771 /* Tag_CPU_arch. */
20772 bfd_elf_add_proc_attr_int (stdoutput, 6, arch);
20773 /* Tag_CPU_arch_profile. */
20774 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
20775 bfd_elf_add_proc_attr_int (stdoutput, 7, 'A');
20776 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
20777 bfd_elf_add_proc_attr_int (stdoutput, 7, 'R');
20778 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_m))
20779 bfd_elf_add_proc_attr_int (stdoutput, 7, 'M');
20780 /* Tag_ARM_ISA_use. */
20781 if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_full))
20782 bfd_elf_add_proc_attr_int (stdoutput, 8, 1);
20783 /* Tag_THUMB_ISA_use. */
20784 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_full))
20785 bfd_elf_add_proc_attr_int (stdoutput, 9,
20786 ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2) ? 2 : 1);
20787 /* Tag_VFP_arch. */
20788 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_d32)
20789 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_d32))
20790 bfd_elf_add_proc_attr_int (stdoutput, 10, 4);
20791 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v3)
20792 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v3))
20793 bfd_elf_add_proc_attr_int (stdoutput, 10, 3);
20794 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v2)
20795 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v2))
20796 bfd_elf_add_proc_attr_int (stdoutput, 10, 2);
20797 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1)
20798 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1)
20799 || ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1xd)
20800 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1xd))
20801 bfd_elf_add_proc_attr_int (stdoutput, 10, 1);
20802 /* Tag_WMMX_arch. */
20803 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_cext_iwmmxt)
20804 || ARM_CPU_HAS_FEATURE (arm_arch_used, arm_cext_iwmmxt))
20805 bfd_elf_add_proc_attr_int (stdoutput, 11, 1);
20806 /* Tag_NEON_arch. */
20807 if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
20808 bfd_elf_add_proc_attr_int (stdoutput, 12, 1);
20809 /* Tag_NEON_FP16_arch. */
20810 if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_fp16))
20811 bfd_elf_add_proc_attr_int (stdoutput, 36, 1);
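/* Illustrative result (not from the original source): assembling plain
   ARM/VFP code with -mcpu=arm1136jf-s would typically emit Tag_CPU_name
   "ARM1136JF-S", Tag_CPU_arch 6 (ARMv6) and Tag_VFP_arch 2 (VFPv2), plus
   Tag_ARM_ISA_use/Tag_THUMB_ISA_use for whichever instruction sets were
   actually used; "readelf -A" shows the resulting .ARM.attributes.  */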
20814 /* Add the default contents for the .ARM.attributes section. */
20815 void
20816 arm_md_end (void)
20818 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
20819 return;
20821 aeabi_set_public_attributes ();
20823 #endif /* OBJ_ELF */
20826 /* Parse a .cpu directive. */
20828 static void
20829 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
20831 const struct arm_cpu_option_table *opt;
20832 char *name;
20833 char saved_char;
20835 name = input_line_pointer;
20836 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
20837 input_line_pointer++;
20838 saved_char = *input_line_pointer;
20839 *input_line_pointer = 0;
20841 /* Skip the first "all" entry. */
20842 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
20843 if (streq (opt->name, name))
20845 mcpu_cpu_opt = &opt->value;
20846 selected_cpu = opt->value;
20847 if (opt->canonical_name)
20848 strcpy (selected_cpu_name, opt->canonical_name);
20849 else
20851 int i;
20852 for (i = 0; opt->name[i]; i++)
20853 selected_cpu_name[i] = TOUPPER (opt->name[i]);
20854 selected_cpu_name[i] = 0;
20856 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
20857 *input_line_pointer = saved_char;
20858 demand_empty_rest_of_line ();
20859 return;
20861 as_bad (_("unknown cpu `%s'"), name);
20862 *input_line_pointer = saved_char;
20863 ignore_rest_of_line ();
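/* Illustrative directive usage (not from the original source): a source
   file may switch targets mid-stream, e.g.

       .cpu arm926ej-s
       .fpu softvfp+vfp

   which selects the ARM926EJ-S feature set here and VFPv2 via the .fpu
   handler further below, mirroring the -mcpu=/-mfpu= command-line
   options.  */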
20867 /* Parse a .arch directive. */
20869 static void
20870 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
20872 const struct arm_arch_option_table *opt;
20873 char saved_char;
20874 char *name;
20876 name = input_line_pointer;
20877 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
20878 input_line_pointer++;
20879 saved_char = *input_line_pointer;
20880 *input_line_pointer = 0;
20882 /* Skip the first "all" entry. */
20883 for (opt = arm_archs + 1; opt->name != NULL; opt++)
20884 if (streq (opt->name, name))
20886 mcpu_cpu_opt = &opt->value;
20887 selected_cpu = opt->value;
20888 strcpy (selected_cpu_name, opt->name);
20889 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
20890 *input_line_pointer = saved_char;
20891 demand_empty_rest_of_line ();
20892 return;
20895 as_bad (_("unknown architecture `%s'\n"), name);
20896 *input_line_pointer = saved_char;
20897 ignore_rest_of_line ();
20901 /* Parse a .object_arch directive. */
20903 static void
20904 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
20906 const struct arm_arch_option_table *opt;
20907 char saved_char;
20908 char *name;
20910 name = input_line_pointer;
20911 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
20912 input_line_pointer++;
20913 saved_char = *input_line_pointer;
20914 *input_line_pointer = 0;
20916 /* Skip the first "all" entry. */
20917 for (opt = arm_archs + 1; opt->name != NULL; opt++)
20918 if (streq (opt->name, name))
20920 object_arch = &opt->value;
20921 *input_line_pointer = saved_char;
20922 demand_empty_rest_of_line ();
20923 return;
20926 as_bad (_("unknown architecture `%s'\n"), name);
20927 *input_line_pointer = saved_char;
20928 ignore_rest_of_line ();
20932 /* Parse a .fpu directive. */
20934 static void
20935 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
20937 const struct arm_option_cpu_value_table *opt;
20938 char saved_char;
20939 char *name;
20941 name = input_line_pointer;
20942 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
20943 input_line_pointer++;
20944 saved_char = *input_line_pointer;
20945 *input_line_pointer = 0;
20947 for (opt = arm_fpus; opt->name != NULL; opt++)
20948 if (streq (opt->name, name))
20950 mfpu_opt = &opt->value;
20951 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
20952 *input_line_pointer = saved_char;
20953 demand_empty_rest_of_line ();
20954 return;
20957 as_bad (_("unknown floating point format `%s'\n"), name);
20958 *input_line_pointer = saved_char;
20959 ignore_rest_of_line ();
20962 /* Copy symbol information. */
20963 void
20964 arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
20966 ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);