4 #include <mach-o/arm/reloc.h>
12 #include "atof-ieee.h"
13 #include "input-scrub.h"
15 #include "dwarf2dbg.h"
17 #include "opcode/arm.h"
/* HACK standing in for safe-ctype.h: the cast to unsigned char keeps
   negative plain-char values from invoking undefined behavior in isalnum().  */
#define ISALNUM(xXx) (isalnum((unsigned char)(xXx)))
22 * These are the default cputype and cpusubtype for the arm architecture.
24 const cpu_type_t md_cputype
= CPU_TYPE_ARM
;
25 cpu_subtype_t md_cpusubtype
= CPU_SUBTYPE_ARM_V4T
;
27 /* This is the byte sex for the arm architecture */
28 const enum byte_sex md_target_byte_sex
= LITTLE_ENDIAN_BYTE_SEX
;
/* These characters start a comment anywhere on the line */
const char md_comment_chars[] = "@";

/* These characters only start a comment at the beginning of a line */
const char md_line_comment_chars[] = "#";

/*
 * These characters can be used to separate mantissa decimal digits from
 * exponent decimal digits in floating point numbers.
 */
const char md_EXP_CHARS[] = "eE";

/*
 * The characters after a leading 0 that means this number is a floating point
 * constant as in 0f123.456 or 0d1.234E-12 (see md_EXP_CHARS above).
 */
const char md_FLT_CHARS[] = "dDfF";
48 /* HACK here to forward declare this md_* routine, only in the ARM assembler */
49 symbolS
*md_undefined_symbol (char * name ATTRIBUTE_UNUSED
);
51 /* HACKS for bfd_* and BFD_RELOC_* These would come from bfd/reloc.c */
52 typedef int bfd_reloc_code_real_type
;
54 #define BFD_RELOC_UNUSED 0
56 BFD_RELOC_ARM_IMMEDIATE
= NO_RELOC
+1,
57 BFD_RELOC_ARM_ADRL_IMMEDIATE
,
58 BFD_RELOC_ARM_OFFSET_IMM
,
59 BFD_RELOC_ARM_SHIFT_IMM
,
63 BFD_RELOC_ARM_LITERAL
,
64 BFD_RELOC_ARM_OFFSET_IMM8
,
65 BFD_RELOC_ARM_HWLITERAL
,
66 BFD_RELOC_ARM_THUMB_ADD
,
67 BFD_RELOC_ARM_THUMB_IMM
,
68 BFD_RELOC_ARM_THUMB_SHIFT
,
69 BFD_RELOC_ARM_THUMB_OFFSET
,
70 BFD_RELOC_THUMB_PCREL_BRANCH9
,
71 BFD_RELOC_THUMB_PCREL_BRANCH12
,
72 BFD_RELOC_THUMB_PCREL_BLX
,
73 BFD_RELOC_ARM_PCREL_BLX
,
74 BFD_RELOC_ARM_CP_OFF_IMM
,
75 BFD_RELOC_ARM_CP_OFF_IMM_S2
,
76 BFD_RELOC_ARM_ALU_PC_G0
,
77 BFD_RELOC_ARM_ALU_PC_G0_NC
,
78 BFD_RELOC_ARM_ALU_PC_G1
,
79 BFD_RELOC_ARM_ALU_PC_G1_NC
,
80 BFD_RELOC_ARM_ALU_PC_G2
,
81 BFD_RELOC_ARM_ALU_SB_G0
,
82 BFD_RELOC_ARM_ALU_SB_G0_NC
,
83 BFD_RELOC_ARM_ALU_SB_G1
,
84 BFD_RELOC_ARM_ALU_SB_G1_NC
,
85 BFD_RELOC_ARM_ALU_SB_G2
,
86 BFD_RELOC_ARM_LDC_PC_G0
,
87 BFD_RELOC_ARM_LDC_PC_G1
,
88 BFD_RELOC_ARM_LDC_PC_G2
,
89 BFD_RELOC_ARM_LDC_SB_G0
,
90 BFD_RELOC_ARM_LDC_SB_G1
,
91 BFD_RELOC_ARM_LDC_SB_G2
,
92 BFD_RELOC_ARM_LDRS_PC_G0
,
93 BFD_RELOC_ARM_LDRS_PC_G1
,
94 BFD_RELOC_ARM_LDRS_PC_G2
,
95 BFD_RELOC_ARM_LDRS_SB_G0
,
96 BFD_RELOC_ARM_LDRS_SB_G1
,
97 BFD_RELOC_ARM_LDRS_SB_G2
,
98 BFD_RELOC_ARM_LDR_PC_G0
,
99 BFD_RELOC_ARM_LDR_PC_G1
,
100 BFD_RELOC_ARM_LDR_PC_G2
,
101 BFD_RELOC_ARM_LDR_SB_G0
,
102 BFD_RELOC_ARM_LDR_SB_G1
,
103 BFD_RELOC_ARM_LDR_SB_G2
,
106 BFD_RELOC_ARM_T32_CP_OFF_IMM
,
109 BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
,
110 BFD_RELOC_ARM_T32_OFFSET_IMM
,
111 BFD_RELOC_ARM_T32_IMM12
,
112 BFD_RELOC_ARM_T32_IMMEDIATE
,
113 BFD_RELOC_ARM_T32_ADD_IMM
,
114 BFD_RELOC_ARM_T32_ADD_PC12
,
115 BFD_RELOC_THUMB_PCREL_BRANCH25
,
116 BFD_RELOC_THUMB_PCREL_BRANCH7
,
117 BFD_RELOC_ARM_T32_OFFSET_U8
,
118 BFD_RELOC_ARM_THUMB_MOVW
,
119 BFD_RELOC_ARM_THUMB_MOVT
,
120 BFD_RELOC_ARM_PCREL_CALL
,
121 BFD_RELOC_ARM_PCREL_JUMP
,
126 BFD_RELOC_ARM_TARGET1
,
127 BFD_RELOC_ARM_ROSEGREL32
,
128 BFD_RELOC_ARM_SBREL32
,
130 BFD_RELOC_THUMB_PCREL_BRANCH20
,
131 BFD_RELOC_THUMB_PCREL_BRANCH23
= ARM_THUMB_RELOC_BR22
,
132 BFD_RELOC_ARM_PCREL_BRANCH
= ARM_RELOC_BR24
/* HACKS for the change in gas/expr.h to change from X_seg to X_op (expr type).
   NOTE(review): aliasing X_op_symbol to X_add_symbol means both names reach
   the same field — verify this matches how expressionS is declared here.  */
#define X_op_symbol X_add_symbol
139 /* An illegal expression. */
140 O_illegal
= SEG_NONE
,
/* HACKS for as_tsktsk() warning routine: downgrade to as_warn here.  */
#define as_tsktsk as_warn

/* STUFF FROM gas/asintl.h — gettext stubs; translation is a no-op.  */
# define _(String) (String)
# define N_(String) (String)
150 /* STUFF FROM gas/as.h */
152 COMMON subsegT now_subseg
;
/* STUFF FROM gas/config/tc-arm.h */
/* Per-symbol flag bits, stored via symbol_get_tc().  */
#define ARM_FLAG_THUMB     (1 << 0)  /* The symbol is a Thumb symbol rather than an Arm symbol.  */
#define ARM_FLAG_INTERWORK (1 << 1)  /* The symbol is attached to code that supports interworking.  */
#define THUMB_FLAG_FUNC    (1 << 2)  /* The symbol is attached to the start of a Thumb function.  */

#define ARM_GET_FLAG(s)   (*symbol_get_tc (s))
#define ARM_SET_FLAG(s,v) (*symbol_get_tc (s) |= (v))
#define ARM_RESET_FLAG(s,v) (*symbol_get_tc (s) &= ~(v))

#define ARM_IS_THUMB(s)     (ARM_GET_FLAG (s) & ARM_FLAG_THUMB)
#define ARM_IS_INTERWORK(s) (ARM_GET_FLAG (s) & ARM_FLAG_INTERWORK)
#define THUMB_IS_FUNC(s)    (ARM_GET_FLAG (s) & THUMB_FLAG_FUNC)

/* Set or clear the given flag according to the boolean T.  */
#define ARM_SET_THUMB(s,t)     ((t) ? ARM_SET_FLAG (s, ARM_FLAG_THUMB)     : ARM_RESET_FLAG (s, ARM_FLAG_THUMB))
#define ARM_SET_INTERWORK(s,t) ((t) ? ARM_SET_FLAG (s, ARM_FLAG_INTERWORK) : ARM_RESET_FLAG (s, ARM_FLAG_INTERWORK))
#define THUMB_SET_FUNC(s,t)    ((t) ? ARM_SET_FLAG (s, THUMB_FLAG_FUNC)    : ARM_RESET_FLAG (s, THUMB_FLAG_FUNC))
/* STUFF FROM gas/config/tc-arm.c */
/* line 30 is #include "safe-ctype.h" to avoid this these HACKS are used.
   The unsigned-char casts keep negative plain-char values from invoking
   undefined behavior in the <ctype.h> functions.  */
#define ISALPHA(c) (isalpha((unsigned char)(c)))
#define ISDIGIT(c) (isdigit((unsigned char)(c)))
#define TOUPPER(c) (toupper((unsigned char)(c)))
#define TOLOWER(c) (tolower((unsigned char)(c)))

#define streq(a, b) (strcmp (a, b) == 0)
181 /* On darwin, default to arm 920 for now. */
182 static arm_feature_set cpu_variant
/* HACK */ = ARM_FEATURE (ARM_AEXT_V4T
, FPU_FPA
);
183 static arm_feature_set arm_arch_used
;
184 static arm_feature_set thumb_arch_used
;
186 /* Constants for known architecture features. */
187 static const arm_feature_set fpu_arch_vfp_v1
= FPU_ARCH_VFP_V1
;
188 static const arm_feature_set fpu_arch_vfp_v2
= FPU_ARCH_VFP_V2
;
189 static const arm_feature_set fpu_arch_vfp_v3
= FPU_ARCH_VFP_V3
;
190 static const arm_feature_set fpu_arch_neon_v1
= FPU_ARCH_NEON_V1
;
191 static const arm_feature_set fpu_arch_fpa
= FPU_ARCH_FPA
;
192 static const arm_feature_set fpu_any_hard
= FPU_ANY_HARD
;
193 static const arm_feature_set fpu_arch_maverick
= FPU_ARCH_MAVERICK
;
194 static const arm_feature_set fpu_endian_pure
= FPU_ARCH_ENDIAN_PURE
;
196 static const arm_feature_set arm_ext_v1
= ARM_FEATURE (ARM_EXT_V1
, 0);
197 static const arm_feature_set arm_ext_v2
= ARM_FEATURE (ARM_EXT_V1
, 0);
198 static const arm_feature_set arm_ext_v2s
= ARM_FEATURE (ARM_EXT_V2S
, 0);
199 static const arm_feature_set arm_ext_v3
= ARM_FEATURE (ARM_EXT_V3
, 0);
200 static const arm_feature_set arm_ext_v3m
= ARM_FEATURE (ARM_EXT_V3M
, 0);
201 static const arm_feature_set arm_ext_v4
= ARM_FEATURE (ARM_EXT_V4
, 0);
202 static const arm_feature_set arm_ext_v4t
= ARM_FEATURE (ARM_EXT_V4T
, 0);
203 static const arm_feature_set arm_ext_v5
= ARM_FEATURE (ARM_EXT_V5
, 0);
204 static const arm_feature_set arm_ext_v4t_5
=
205 ARM_FEATURE (ARM_EXT_V4T
| ARM_EXT_V5
, 0);
206 static const arm_feature_set arm_ext_v5t
= ARM_FEATURE (ARM_EXT_V5T
, 0);
207 static const arm_feature_set arm_ext_v5e
= ARM_FEATURE (ARM_EXT_V5E
, 0);
208 static const arm_feature_set arm_ext_v5exp
= ARM_FEATURE (ARM_EXT_V5ExP
, 0);
209 static const arm_feature_set arm_ext_v5j
= ARM_FEATURE (ARM_EXT_V5J
, 0);
210 static const arm_feature_set arm_ext_v6
= ARM_FEATURE (ARM_EXT_V6
, 0);
211 static const arm_feature_set arm_ext_v6k
= ARM_FEATURE (ARM_EXT_V6K
, 0);
212 static const arm_feature_set arm_ext_v6z
= ARM_FEATURE (ARM_EXT_V6Z
, 0);
213 static const arm_feature_set arm_ext_v6t2
= ARM_FEATURE (ARM_EXT_V6T2
, 0);
214 static const arm_feature_set arm_ext_v6_notm
= ARM_FEATURE (ARM_EXT_V6_NOTM
, 0);
215 static const arm_feature_set arm_ext_div
= ARM_FEATURE (ARM_EXT_DIV
, 0);
216 static const arm_feature_set arm_ext_v7
= ARM_FEATURE (ARM_EXT_V7
, 0);
217 static const arm_feature_set arm_ext_v7a
= ARM_FEATURE (ARM_EXT_V7A
, 0);
218 static const arm_feature_set arm_ext_v7r
= ARM_FEATURE (ARM_EXT_V7R
, 0);
219 static const arm_feature_set arm_ext_v7m
= ARM_FEATURE (ARM_EXT_V7M
, 0);
221 static const arm_feature_set arm_arch_any
= ARM_ANY
;
222 static const arm_feature_set arm_arch_full
= ARM_FEATURE (-1, -1);
223 static const arm_feature_set arm_arch_t2
= ARM_ARCH_THUMB2
;
224 static const arm_feature_set arm_arch_none
= ARM_ARCH_NONE
;
226 static const arm_feature_set arm_cext_iwmmxt2
=
227 ARM_FEATURE (0, ARM_CEXT_IWMMXT2
);
228 static const arm_feature_set arm_cext_iwmmxt
=
229 ARM_FEATURE (0, ARM_CEXT_IWMMXT
);
230 static const arm_feature_set arm_cext_xscale
=
231 ARM_FEATURE (0, ARM_CEXT_XSCALE
);
232 static const arm_feature_set arm_cext_maverick
=
233 ARM_FEATURE (0, ARM_CEXT_MAVERICK
);
234 static const arm_feature_set fpu_fpa_ext_v1
= ARM_FEATURE (0, FPU_FPA_EXT_V1
);
235 static const arm_feature_set fpu_fpa_ext_v2
= ARM_FEATURE (0, FPU_FPA_EXT_V2
);
236 static const arm_feature_set fpu_vfp_ext_v1xd
=
237 ARM_FEATURE (0, FPU_VFP_EXT_V1xD
);
238 static const arm_feature_set fpu_vfp_ext_v1
= ARM_FEATURE (0, FPU_VFP_EXT_V1
);
239 static const arm_feature_set fpu_vfp_ext_v2
= ARM_FEATURE (0, FPU_VFP_EXT_V2
);
240 static const arm_feature_set fpu_vfp_ext_v3
= ARM_FEATURE (0, FPU_VFP_EXT_V3
);
241 static const arm_feature_set fpu_neon_ext_v1
= ARM_FEATURE (0, FPU_NEON_EXT_V1
);
242 static const arm_feature_set fpu_vfp_v3_or_neon_ext
=
243 ARM_FEATURE (0, FPU_NEON_EXT_V1
| FPU_VFP_EXT_V3
);
/* Prefix characters that indicate the start of an immediate
   operand.  (NOTE(review): second comment line was lost in extraction;
   reconstructed.)  */
#define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
249 /* 0: assemble for ARM,
250 1: assemble for Thumb,
251 2: assemble for Thumb even though target CPU does not support thumb
255 /* If unified_syntax is true, we are processing the new unified
256 ARM/Thumb syntax. Important differences from the old ARM mode:
258 - Immediate operands do not require a # prefix.
259 - Conditional affixes always appear at the end of the
260 instruction. (For backward compatibility, those instructions
261 that formerly had them in the middle, continue to accept them
263 - The IT instruction may appear, and if it does is validated
264 against subsequent conditional affixes. It does not generate
267 Important differences from the old Thumb mode:
269 - Immediate operands do not require a # prefix.
270 - Most of the V6T2 instructions are only available in unified mode.
271 - The .N and .W suffixes are recognized and honored (it is an error
272 if they cannot be honored).
273 - All instructions set the flags if and only if they have an 's' affix.
274 - Conditional affixes may be used. They are validated against
275 preceding IT instructions. Unlike ARM mode, you cannot use a
276 conditional affix except in the scope of an IT instruction. */
278 static bfd_boolean unified_syntax
= FALSE
;
293 enum neon_el_type type
;
297 #define NEON_MAX_TYPE_ELS 4
301 struct neon_type_el el
[NEON_MAX_TYPE_ELS
];
308 uint32_t instruction
;
312 /* "uncond_value" is set to the value in place of the conditional field in
313 unconditional versions of the instruction, or -1 if nothing is
316 struct neon_type vectype
;
317 /* Set to the opcode if the instruction needs relaxation.
318 Zero if the instruction is not relaxed. */
322 bfd_reloc_code_real_type type
;
325 /* HACK_GUESS, force relocation entry to support scatteed loading */
333 struct neon_type_el vectype
;
334 unsigned present
: 1; /* Operand present. */
335 unsigned isreg
: 1; /* Operand was a register. */
336 unsigned immisreg
: 1; /* .imm field is a second register. */
337 unsigned isscalar
: 1; /* Operand is a (Neon) scalar. */
338 unsigned immisalign
: 1; /* Immediate is an alignment specifier. */
339 unsigned immisfloat
: 1; /* Immediate was parsed as a float. */
340 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
341 instructions. This allows us to disambiguate ARM <-> vector insns. */
342 unsigned regisimm
: 1; /* 64-bit immediate, reg forms high 32 bits. */
343 unsigned isvec
: 1; /* Is a single, double or quad VFP/Neon reg. */
344 unsigned isquad
: 1; /* Operand is Neon quad-precision register. */
345 unsigned issingle
: 1; /* Operand is VFP single-precision register. */
346 unsigned hasreloc
: 1; /* Operand has relocation suffix. */
347 unsigned writeback
: 1; /* Operand has trailing ! */
348 unsigned preind
: 1; /* Preindexed address. */
349 unsigned postind
: 1; /* Postindexed address. */
350 unsigned negative
: 1; /* Index register was negated. */
351 unsigned shifted
: 1; /* Shift applied to operation. */
352 unsigned shift_kind
: 3; /* Shift operation (enum shift_kind). */
356 static struct arm_it inst
;
#define NUM_FLOAT_VALS 8

/* Spellings of the FP constants encodable directly in FPA instructions;
   null-terminated.  (Braces reconstructed — original lines lost in
   extraction.)  */
const char * fp_const[] =
{
  "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
};
365 /* Number of littlenums required to hold an extended precision number. */
366 #define MAX_LITTLENUMS 6
368 LITTLENUM_TYPE fp_values
[NUM_FLOAT_VALS
][MAX_LITTLENUMS
];
/* Coprocessor-transfer and common instruction bits.  */
#define CP_T_X   0x00008000
#define CP_T_Y   0x00400000

#define CONDS_BIT 0x00100000
#define LOAD_BIT  0x00100000

#define DOUBLE_LOAD_FLAG 0x00000001
388 const char * template;
/* The "always" condition code (AL), the 4-bit ARM condition field value.  */
#define COND_ALWAYS 0xE
396 const char *template;
400 struct asm_barrier_opt
402 const char *template;
/* The bit that distinguishes CPSR and SPSR.  */
#define SPSR_BIT (1 << 22)

/* The individual PSR flag bits.  */
#define PSR_c (1 << 16)
#define PSR_x (1 << 17)
#define PSR_s (1 << 18)
#define PSR_f (1 << 19)
418 bfd_reloc_code_real_type reloc
;
423 VFP_REG_Sd
, VFP_REG_Sm
, VFP_REG_Sn
,
424 VFP_REG_Dd
, VFP_REG_Dm
, VFP_REG_Dn
429 VFP_LDSTMIA
, VFP_LDSTMDB
, VFP_LDSTMIAX
, VFP_LDSTMDBX
/* Bits for DEFINED field in neon_typed_alias.  */
#define NTA_HASTYPE  1
#define NTA_HASINDEX 2
436 struct neon_typed_alias
438 unsigned char defined
;
440 struct neon_type_el eltype
;
443 /* ARM register categories. This includes coprocessor numbers and various
444 architecture extensions' registers. */
470 /* Structure for a hash table entry for a register.
471 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
472 information which states whether a vector type or index is specified (for a
473 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
477 unsigned char number
;
479 unsigned char builtin
;
480 struct neon_typed_alias
*neon
;
483 /* Diagnostics used when we don't get a register of the expected type. */
484 const char *const reg_expected_msgs
[] =
486 N_("ARM register expected"),
487 N_("bad or missing co-processor number"),
488 N_("co-processor register expected"),
489 N_("FPA register expected"),
490 N_("VFP single precision register expected"),
491 N_("VFP/Neon double precision register expected"),
492 N_("Neon quad precision register expected"),
493 N_("VFP single or double precision register expected"),
494 N_("Neon double or quad precision register expected"),
495 N_("VFP single, double or Neon quad precision register expected"),
496 N_("VFP system register expected"),
497 N_("Maverick MVF register expected"),
498 N_("Maverick MVD register expected"),
499 N_("Maverick MVFX register expected"),
500 N_("Maverick MVDX register expected"),
501 N_("Maverick MVAX register expected"),
502 N_("Maverick DSPSC register expected"),
503 N_("iWMMXt data register expected"),
504 N_("iWMMXt control register expected"),
505 N_("iWMMXt scalar register expected"),
506 N_("XScale accumulator register expected"),
509 /* Some well known registers that we refer to directly elsewhere. */
514 /* ARM instructions take 4bytes in the object file, Thumb instructions
520 /* Basic string to match. */
521 const char *template;
523 /* Parameters to instruction. */
524 unsigned char operands
[8];
526 /* Conditional tag - see opcode_lookup. */
527 unsigned int tag
: 4;
529 /* Basic instruction code. */
530 unsigned int avalue
: 28;
532 /* Thumb-format instruction code. */
535 /* Which architecture variant provides this instruction. */
536 const arm_feature_set
*avariant
;
537 const arm_feature_set
*tvariant
;
539 /* Function to call to encode instruction in ARM format. */
540 void (* aencode
) (void);
542 /* Function to call to encode instruction in Thumb format. */
543 void (* tencode
) (void);
/* Defines for various bits that we will want to toggle.  */
#define INST_IMMEDIATE  0x02000000
#define OFFSET_REG      0x02000000
#define HWOFFSET_IMM    0x00400000
#define SHIFT_BY_REG    0x00000010
#define PRE_INDEX       0x01000000
#define INDEX_UP        0x00800000
#define WRITE_BACK      0x00200000
#define LDM_TYPE_2_OR_3 0x00400000
#define CPSI_MMOD       0x00020000

#define LITERAL_MASK    0xf000f000
#define OPCODE_MASK     0xfe1fffff
#define V4_STR_BIT      0x00000020

#define T2_SUBS_PC_LR   0xf3de8f00

#define DATA_OP_SHIFT   21

#define T2_OPCODE_MASK  0xfe1fffff
#define T2_DATA_OP_SHIFT 21
/* Codes to distinguish the arithmetic instructions.  */
#define OPCODE_CMP 10
#define OPCODE_CMN 11
#define OPCODE_ORR 12
#define OPCODE_MOV 13
#define OPCODE_BIC 14
#define OPCODE_MVN 15

/* Thumb-2 data-processing opcode field values.  */
#define T2_OPCODE_AND 0
#define T2_OPCODE_BIC 1
#define T2_OPCODE_ORR 2
#define T2_OPCODE_ORN 3
#define T2_OPCODE_EOR 4
#define T2_OPCODE_ADD 8
#define T2_OPCODE_ADC 10
#define T2_OPCODE_SBC 11
#define T2_OPCODE_SUB 13
#define T2_OPCODE_RSB 14
/* 16-bit Thumb instruction templates.  */
#define T_OPCODE_MUL 0x4340
#define T_OPCODE_TST 0x4200
#define T_OPCODE_CMN 0x42c0
#define T_OPCODE_NEG 0x4240
#define T_OPCODE_MVN 0x43c0

#define T_OPCODE_ADD_R3 0x1800
#define T_OPCODE_SUB_R3 0x1a00
#define T_OPCODE_ADD_HI 0x4400
#define T_OPCODE_ADD_ST 0xb000
#define T_OPCODE_SUB_ST 0xb080
#define T_OPCODE_ADD_SP 0xa800
#define T_OPCODE_ADD_PC 0xa000
#define T_OPCODE_ADD_I8 0x3000
#define T_OPCODE_SUB_I8 0x3800
#define T_OPCODE_ADD_I3 0x1c00
#define T_OPCODE_SUB_I3 0x1e00

#define T_OPCODE_ASR_R 0x4100
#define T_OPCODE_LSL_R 0x4080
#define T_OPCODE_LSR_R 0x40c0
#define T_OPCODE_ROR_R 0x41c0
#define T_OPCODE_ASR_I 0x1000
#define T_OPCODE_LSL_I 0x0000
#define T_OPCODE_LSR_I 0x0800

#define T_OPCODE_MOV_I8 0x2000
#define T_OPCODE_CMP_I8 0x2800
#define T_OPCODE_CMP_LR 0x4280
#define T_OPCODE_MOV_HR 0x4600
#define T_OPCODE_CMP_HR 0x4500

#define T_OPCODE_LDR_PC 0x4800
#define T_OPCODE_LDR_SP 0x9800
#define T_OPCODE_STR_SP 0x9000
#define T_OPCODE_LDR_IW 0x6800
#define T_OPCODE_STR_IW 0x6000
#define T_OPCODE_LDR_IH 0x8800
#define T_OPCODE_STR_IH 0x8000
#define T_OPCODE_LDR_IB 0x7800
#define T_OPCODE_STR_IB 0x7000
#define T_OPCODE_LDR_RW 0x5800
#define T_OPCODE_STR_RW 0x5000
#define T_OPCODE_LDR_RH 0x5a00
#define T_OPCODE_STR_RH 0x5200
#define T_OPCODE_LDR_RB 0x5c00
#define T_OPCODE_STR_RB 0x5400

#define T_OPCODE_PUSH 0xb400
#define T_OPCODE_POP  0xbc00

#define T_OPCODE_BRANCH 0xe000

#define THUMB_SIZE 2  /* Size of thumb instruction.  */
#define THUMB_PP_PC_LR 0x0100
#define THUMB_LOAD_BIT 0x0800
#define THUMB2_LOAD_BIT 0x00100000
/* Common diagnostic strings.  */
#define BAD_ARGS      _("bad arguments to instruction")
#define BAD_PC        _("r15 not allowed here")
#define BAD_COND      _("instruction cannot be conditional")
#define BAD_OVERLAP   _("registers may not be the same")
#define BAD_HIREG     _("lo register required")
#define BAD_THUMB32   _("instruction not supported in Thumb16 mode")
/* NOTE(review): original had a stray trailing ';' inside this macro, which
   would break any expression use of BAD_ADDR_MODE — removed.  */
#define BAD_ADDR_MODE _("instruction does not accept this addressing mode")
#define BAD_BRANCH    _("branch must be last instruction in IT block")
#define BAD_NOT_IT    _("instruction not allowed in IT block")
#define BAD_FPU       _("selected FPU does not support instruction")
/* Hash tables for opcodes, condition codes, shifts, PSR names, registers,
   relocation suffixes and barrier options; built at startup.  */
static struct hash_control *arm_ops_hsh;
static struct hash_control *arm_cond_hsh;
static struct hash_control *arm_shift_hsh;
static struct hash_control *arm_psr_hsh;
static struct hash_control *arm_v7m_psr_hsh;
static struct hash_control *arm_reg_hsh;
static struct hash_control *arm_reloc_hsh;
static struct hash_control *arm_barrier_opt_hsh;
675 /* Stuff needed to resolve the label ambiguity
685 symbolS
* last_label_seen
;
686 static int label_is_thumb_function_name
= FALSE
;
/* Literal Pool stuff.  */
#define MAX_LITERAL_POOL_SIZE 1024
692 /* Literal pool structure. Held on a per-section
693 and per-sub-section basis. */
695 typedef struct literal_pool
697 expressionS literals
[MAX_LITERAL_POOL_SIZE
];
698 unsigned int next_free_entry
;
703 struct literal_pool
* next
;
706 /* Pointer to a linked list of literal pools. */
707 literal_pool
* list_of_pools
= NULL
;
709 /* State variables for IT block handling. */
710 static bfd_boolean current_it_mask
= 0;
711 static int current_cc
;
/* Separator character handling.  */
/* Advance STR past at most ONE leading space (the `if`, not `while`, is
   deliberate in the original).  */
#define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
718 skip_past_char (char ** str
, char c
)
/* Convenience wrapper around skip_past_char for the common comma case.  */
#define skip_past_comma(str) skip_past_char (str, ',')
730 /* Arithmetic expressions (possibly involving symbols). */
732 /* Return TRUE if anything in the expression is a bignum. */
735 walk_no_bignums (symbolS
* sp
)
738 if (symbol_get_value_expression (sp
)->X_op
== O_big
)
741 if (symbol_get_value_expression (sp
)->X_add_symbol
)
743 return (walk_no_bignums (symbol_get_value_expression (sp
)->X_add_symbol
)
744 || (symbol_get_value_expression (sp
)->X_op_symbol
745 && walk_no_bignums (symbol_get_value_expression (sp
)->X_op_symbol
)));
/* Nonzero while my_get_expression() is active; md_operand() checks it so a
   bad expression is marked O_illegal instead of being reported directly.  */
static int in_my_get_expression = 0;

/* Third argument to my_get_expression.  */
#define GE_NO_PREFIX  0
#define GE_IMM_PREFIX 1
#define GE_OPT_PREFIX 2
/* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
   immediates, as can be used in Neon VMVN and VMOV immediate instructions.  */
#define GE_OPT_PREFIX_BIG 3
763 my_get_expression (expressionS
* ep
, char ** str
, int prefix_mode
)
768 /* In unified syntax, all prefixes are optional. */
770 prefix_mode
= (prefix_mode
== GE_OPT_PREFIX_BIG
) ? prefix_mode
775 case GE_NO_PREFIX
: break;
777 if (!is_immediate_prefix (**str
))
779 inst
.error
= _("immediate expression requires a # prefix");
785 case GE_OPT_PREFIX_BIG
:
786 if (is_immediate_prefix (**str
))
792 memset (ep
, 0, sizeof (expressionS
));
794 save_in
= input_line_pointer
;
795 input_line_pointer
= *str
;
796 in_my_get_expression
= 1;
797 seg
= expression (ep
);
798 in_my_get_expression
= 0;
800 if (ep
->X_op
== O_illegal
)
802 /* We found a bad expression in md_operand(). */
803 *str
= input_line_pointer
;
804 input_line_pointer
= save_in
;
805 if (inst
.error
== NULL
)
806 inst
.error
= _("bad expression");
811 if (seg
!= absolute_section
812 && seg
!= text_section
813 && seg
!= data_section
814 && seg
!= bss_section
815 && seg
!= undefined_section
)
817 inst
.error
= _("bad segment");
818 *str
= input_line_pointer
;
819 input_line_pointer
= save_in
;
824 /* Get rid of any bignums now, so that we don't generate an error for which
825 we can't establish a line number later on. Big numbers are never valid
826 in instructions, which is where this routine is always called. */
827 if (prefix_mode
!= GE_OPT_PREFIX_BIG
828 && (ep
->X_op
== O_big
830 && (walk_no_bignums (ep
->X_add_symbol
)
832 && walk_no_bignums (ep
->X_op_symbol
))))))
834 inst
.error
= _("invalid constant");
835 *str
= input_line_pointer
;
836 input_line_pointer
= save_in
;
840 *str
= input_line_pointer
;
841 input_line_pointer
= save_in
;
845 /* Turn a string in input_line_pointer into a floating point constant
846 of type TYPE, and store the appropriate bytes in *LITP. The number
847 of LITTLENUMS emitted is stored in *SIZEP. An error message is
848 returned, or NULL on OK.
850 Note that fp constants aren't represent in the normal way on the ARM.
851 In big endian mode, things are as expected. However, in little endian
852 mode fp constants are big-endian word-wise, and little-endian byte-wise
853 within the words. For example, (double) 1.1 in big endian mode is
854 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
855 the byte sequence 99 99 f1 3f 9a 99 99 99.
857 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
860 md_atof (int type
, char * litP
, int * sizeP
)
863 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
895 return _("bad call to MD_ATOF()");
898 t
= atof_ieee (input_line_pointer
, type
, words
);
900 input_line_pointer
= t
;
903 if (target_big_endian
)
905 for (i
= 0; i
< prec
; i
++)
907 md_number_to_chars (litP
, (valueT
) words
[i
], 2);
913 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
914 for (i
= prec
- 1; i
>= 0; i
--)
916 md_number_to_chars (litP
, (valueT
) words
[i
], 2);
920 /* For a 4 byte float the order of elements in `words' is 1 0.
921 For an 8 byte float the order is 1 0 3 2. */
922 for (i
= 0; i
< prec
; i
+= 2)
924 md_number_to_chars (litP
, (valueT
) words
[i
+ 1], 2);
925 md_number_to_chars (litP
+ 2, (valueT
) words
[i
], 2);
933 /* We handle all bad expressions here, so that we can report the faulty
934 instruction in the error message. */
936 md_operand (expressionS
* expr
)
938 if (in_my_get_expression
)
939 expr
->X_op
= O_illegal
;
942 /* Register parsing. */
944 /* Generic register parser. CCP points to what should be the
945 beginning of a register name. If it is indeed a valid register
946 name, advance CCP over it and return the reg_entry structure;
947 otherwise return NULL. Does not issue diagnostics. */
949 static struct reg_entry
*
950 arm_reg_parse_multi (char **ccp
)
954 struct reg_entry
*reg
;
956 #ifdef REGISTER_PREFIX
957 if (*start
!= REGISTER_PREFIX
)
961 #ifdef OPTIONAL_REGISTER_PREFIX
962 if (*start
== OPTIONAL_REGISTER_PREFIX
)
967 if (!ISALPHA (*p
) || !is_name_beginner (*p
))
972 while (ISALPHA (*p
) || ISDIGIT (*p
) || *p
== '_');
974 reg
= (struct reg_entry
*) hash_find_n (arm_reg_hsh
, start
, p
- start
);
984 arm_reg_alt_syntax (char **ccp
, char *start
, struct reg_entry
*reg
,
985 enum arm_reg_type type
)
987 /* Alternative syntaxes are accepted for a few register classes. */
994 /* Generic coprocessor register names are allowed for these. */
995 if (reg
&& reg
->type
== REG_TYPE_CN
)
1000 /* For backward compatibility, a bare number is valid here. */
1002 uint32_t processor
= strtoul (start
, ccp
, 10);
1003 if (*ccp
!= start
&& processor
<= 15)
1007 case REG_TYPE_MMXWC
:
1008 /* WC includes WCG. ??? I'm not sure this is true for all
1009 instructions that take WC registers. */
1010 if (reg
&& reg
->type
== REG_TYPE_MMXWCG
)
1021 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1022 return value is the register number or FAIL. */
1025 arm_reg_parse (char **ccp
, enum arm_reg_type type
)
1028 struct reg_entry
*reg
= arm_reg_parse_multi (ccp
);
1031 /* Do not allow a scalar (reg+index) to parse as a register. */
1032 if (reg
&& reg
->neon
&& (reg
->neon
->defined
& NTA_HASINDEX
))
1035 if (reg
&& reg
->type
== type
)
1038 if ((ret
= arm_reg_alt_syntax (ccp
, start
, reg
, type
)) != FAIL
)
1045 /* Parse a Neon type specifier. *STR should point at the leading '.'
1046 character. Does no verification at this stage that the type fits the opcode
1053 Can all be legally parsed by this function.
1055 Fills in neon_type struct pointer with parsed information, and updates STR
1056 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1057 type, FAIL if not. */
1060 parse_neon_type (struct neon_type
*type
, char **str
)
1067 while (type
->elems
< NEON_MAX_TYPE_ELS
)
1069 enum neon_el_type thistype
= NT_untyped
;
1070 unsigned thissize
= -1u;
1077 /* Just a size without an explicit type. */
1081 switch (TOLOWER (*ptr
))
1083 case 'i': thistype
= NT_integer
; break;
1084 case 'f': thistype
= NT_float
; break;
1085 case 'p': thistype
= NT_poly
; break;
1086 case 's': thistype
= NT_signed
; break;
1087 case 'u': thistype
= NT_unsigned
; break;
1089 thistype
= NT_float
;
1094 as_bad (_("unexpected character `%c' in type specifier"), *ptr
);
1100 /* .f is an abbreviation for .f32. */
1101 if (thistype
== NT_float
&& !ISDIGIT (*ptr
))
1106 thissize
= strtoul (ptr
, &ptr
, 10);
1108 if (thissize
!= 8 && thissize
!= 16 && thissize
!= 32
1111 as_bad (_("bad size %d in type specifier"), thissize
);
1119 type
->el
[type
->elems
].type
= thistype
;
1120 type
->el
[type
->elems
].size
= thissize
;
1125 /* Empty/missing type is not a successful parse. */
1126 if (type
->elems
== 0)
1134 /* Errors may be set multiple times during parsing or bit encoding
1135 (particularly in the Neon bits), but usually the earliest error which is set
1136 will be the most meaningful. Avoid overwriting it with later (cascading)
1137 errors by calling this function. */
1140 first_error (const char *err
)
1146 /* Parse a single type, e.g. ".s32", leading period included. */
1148 parse_neon_operand_type (struct neon_type_el
*vectype
, char **ccp
)
1151 struct neon_type optype
;
1155 if (parse_neon_type (&optype
, &str
) == SUCCESS
)
1157 if (optype
.elems
== 1)
1158 *vectype
= optype
.el
[0];
1161 first_error (_("only one type should be specified for operand"));
1167 first_error (_("vector type expected"));
/* Special meanings for indices (which have a range of 0-7), which will fit into
   the 4-bit index field.  (NOTE(review): one comment line was lost in
   extraction here.)  */
#define NEON_ALL_LANES        15
#define NEON_INTERLEAVE_LANES 14
1185 /* Parse either a register or a scalar, with an optional type. Return the
1186 register number, and optionally fill in the actual type of the register
1187 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1188 type/index information in *TYPEINFO. */
1191 parse_typed_reg_or_scalar (char **ccp
, enum arm_reg_type type
,
1192 enum arm_reg_type
*rtype
,
1193 struct neon_typed_alias
*typeinfo
)
1196 struct reg_entry
*reg
= arm_reg_parse_multi (&str
);
1197 struct neon_typed_alias atype
;
1198 struct neon_type_el parsetype
;
1202 atype
.eltype
.type
= NT_invtype
;
1203 atype
.eltype
.size
= -1;
1205 /* Try alternate syntax for some types of register. Note these are mutually
1206 exclusive with the Neon syntax extensions. */
1209 int altreg
= arm_reg_alt_syntax (&str
, *ccp
, reg
, type
);
1217 /* Undo polymorphism when a set of register types may be accepted. */
1218 if ((type
== REG_TYPE_NDQ
1219 && (reg
->type
== REG_TYPE_NQ
|| reg
->type
== REG_TYPE_VFD
))
1220 || (type
== REG_TYPE_VFSD
1221 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
))
1222 || (type
== REG_TYPE_NSDQ
1223 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
1224 || reg
->type
== REG_TYPE_NQ
))
1225 || (type
== REG_TYPE_MMXWC
1226 && (reg
->type
== REG_TYPE_MMXWCG
)))
1229 if (type
!= reg
->type
)
1235 if (parse_neon_operand_type (&parsetype
, &str
) == SUCCESS
)
1237 if ((atype
.defined
& NTA_HASTYPE
) != 0)
1239 first_error (_("can't redefine type for operand"));
1242 atype
.defined
|= NTA_HASTYPE
;
1243 atype
.eltype
= parsetype
;
1246 if (skip_past_char (&str
, '[') == SUCCESS
)
1248 if (type
!= REG_TYPE_VFD
)
1250 first_error (_("only D registers may be indexed"));
1254 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1256 first_error (_("can't change index for operand"));
1260 atype
.defined
|= NTA_HASINDEX
;
1262 if (skip_past_char (&str
, ']') == SUCCESS
)
1263 atype
.index
= NEON_ALL_LANES
;
1268 my_get_expression (&exp
, &str
, GE_NO_PREFIX
);
1270 if (exp
.X_op
!= O_constant
)
1272 first_error (_("constant expression required"));
1276 if (skip_past_char (&str
, ']') == FAIL
)
1279 atype
.index
= exp
.X_add_number
;
1294 /* Like arm_reg_parse, but allow allow the following extra features:
1295 - If RTYPE is non-zero, return the (possibly restricted) type of the
1296 register (e.g. Neon double or quad reg when either has been requested).
1297 - If this is a Neon vector type with additional type information, fill
1298 in the struct pointed to by VECTYPE (if non-NULL).
1299 This function will fault on encountering a scalar.
1303 arm_typed_reg_parse (char **ccp
, enum arm_reg_type type
,
1304 enum arm_reg_type
*rtype
, struct neon_type_el
*vectype
)
1306 struct neon_typed_alias atype
;
1308 int reg
= parse_typed_reg_or_scalar (&str
, type
, rtype
, &atype
);
1313 /* Do not allow a scalar (reg+index) to parse as a register. */
1314 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1316 first_error (_("register operand expected, but got scalar"));
1321 *vectype
= atype
.eltype
;
1328 #define NEON_SCALAR_REG(X) ((X) >> 4)
1329 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1331 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1332 have enough information to be able to do a good job bounds-checking. So, we
1333 just do easy checks here, and do further checks later. */
1336 parse_scalar (char **ccp
, int elsize
, struct neon_type_el
*type
)
1340 struct neon_typed_alias atype
;
1342 reg
= parse_typed_reg_or_scalar (&str
, REG_TYPE_VFD
, NULL
, &atype
);
1344 if (reg
== FAIL
|| (atype
.defined
& NTA_HASINDEX
) == 0)
1347 if (atype
.index
== NEON_ALL_LANES
)
1349 first_error (_("scalar must have an index"));
1352 else if (atype
.index
>= 64 / elsize
)
1354 first_error (_("scalar index out of range"));
1359 *type
= atype
.eltype
;
1363 return reg
* 16 + atype
.index
;
1366 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1368 parse_reg_list (char ** strp
)
1370 char * str
= * strp
;
1374 /* We come back here if we get ranges concatenated by '+' or '|'. */
1389 if ((reg
= arm_reg_parse (&str
, REG_TYPE_RN
)) == FAIL
)
1391 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
1401 first_error (_("bad range in register list"));
1405 for (i
= cur_reg
+ 1; i
< reg
; i
++)
1407 if (range
& (1 << i
))
1409 (_("Warning: duplicated register (r%d) in register list"),
1417 if (range
& (1 << reg
))
1418 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1420 else if (reg
<= cur_reg
)
1421 as_tsktsk (_("Warning: register range not in ascending order"));
1426 while (skip_past_comma (&str
) != FAIL
1427 || (in_range
= 1, *str
++ == '-'));
1432 first_error (_("missing `}'"));
1440 if (my_get_expression (&expr
, &str
, GE_NO_PREFIX
))
1443 if (expr
.X_op
== O_constant
)
1445 if (expr
.X_add_number
1446 != (expr
.X_add_number
& 0x0000ffff))
1448 inst
.error
= _("invalid register mask");
1452 if ((range
& expr
.X_add_number
) != 0)
1454 int regno
= range
& expr
.X_add_number
;
1457 regno
= (1 << regno
) - 1;
1459 (_("Warning: duplicated register (r%d) in register list"),
1463 range
|= expr
.X_add_number
;
1467 if (inst
.reloc
.type
!= 0)
1469 inst
.error
= _("expression too complex");
1473 memcpy (&inst
.reloc
.exp
, &expr
, sizeof (expressionS
));
1474 inst
.reloc
.type
= BFD_RELOC_ARM_MULTI
;
1475 inst
.reloc
.pc_rel
= 0;
1479 if (*str
== '|' || *str
== '+')
1485 while (another_range
);
1491 /* Types of registers in a list. */
1500 /* Parse a VFP register list. If the string is invalid return FAIL.
1501 Otherwise return the number of registers, and set PBASE to the first
1502 register. Parses registers of type ETYPE.
1503 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1504 - Q registers can be used to specify pairs of D registers
1505 - { } can be omitted from around a singleton register list
1506 FIXME: This is not implemented, as it would require backtracking in
1509 This could be done (the meaning isn't really ambiguous), but doesn't
1510 fit in well with the current parsing framework.
1511 - 32 D registers may be used (also true for VFPv3).
1512 FIXME: Types are ignored in these register lists, which is probably a
1516 parse_vfp_reg_list (char **ccp
, unsigned int *pbase
, enum reg_list_els etype
)
1521 enum arm_reg_type regtype
= 0;
1530 inst
.error
= _("expecting {");
1539 regtype
= REG_TYPE_VFS
;
1544 regtype
= REG_TYPE_VFD
;
1547 case REGLIST_NEON_D
:
1548 regtype
= REG_TYPE_NDQ
;
1552 if (etype
!= REGLIST_VFP_S
)
1554 /* VFPv3 allows 32 D registers. */
1555 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3
))
1559 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
1562 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
1569 base_reg
= max_regs
;
1573 int setmask
= 1, addregs
= 1;
1575 new_base
= arm_typed_reg_parse (&str
, regtype
, ®type
, NULL
);
1577 if (new_base
== FAIL
)
1579 first_error (_(reg_expected_msgs
[regtype
]));
1583 if (new_base
>= max_regs
)
1585 first_error (_("register out of range in list"));
1589 /* Note: a value of 2 * n is returned for the register Q<n>. */
1590 if (regtype
== REG_TYPE_NQ
)
1596 if (new_base
< base_reg
)
1597 base_reg
= new_base
;
1599 if (mask
& (setmask
<< new_base
))
1601 first_error (_("invalid register list"));
1605 if ((mask
>> new_base
) != 0 && ! warned
)
1607 as_tsktsk (_("register list not in ascending order"));
1611 mask
|= setmask
<< new_base
;
1614 if (*str
== '-') /* We have the start of a range expression */
1620 if ((high_range
= arm_typed_reg_parse (&str
, regtype
, NULL
, NULL
))
1623 inst
.error
= /* HACK gettext */ (reg_expected_msgs
[regtype
]);
1627 if (high_range
>= max_regs
)
1629 first_error (_("register out of range in list"));
1633 if (regtype
== REG_TYPE_NQ
)
1634 high_range
= high_range
+ 1;
1636 if (high_range
<= new_base
)
1638 inst
.error
= _("register range not in ascending order");
1642 for (new_base
+= addregs
; new_base
<= high_range
; new_base
+= addregs
)
1644 if (mask
& (setmask
<< new_base
))
1646 inst
.error
= _("invalid register list");
1650 mask
|= setmask
<< new_base
;
1655 while (skip_past_comma (&str
) != FAIL
);
1659 /* Sanity check -- should have raised a parse error above. */
1660 if (count
== 0 || count
> max_regs
)
1665 /* Final test -- the registers must be consecutive. */
1667 for (i
= 0; i
< count
; i
++)
1669 if ((mask
& (1u << i
)) == 0)
1671 inst
.error
= _("non-contiguous register range");
1681 /* True if two alias types are the same. */
1684 neon_alias_types_same (struct neon_typed_alias
*a
, struct neon_typed_alias
*b
)
1692 if (a
->defined
!= b
->defined
)
1695 if ((a
->defined
& NTA_HASTYPE
) != 0
1696 && (a
->eltype
.type
!= b
->eltype
.type
1697 || a
->eltype
.size
!= b
->eltype
.size
))
1700 if ((a
->defined
& NTA_HASINDEX
) != 0
1701 && (a
->index
!= b
->index
))
1707 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1708 The base register is put in *PBASE.
1709 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1711 The register stride (minus one) is put in bit 4 of the return value.
1712 Bits [6:5] encode the list length (minus one).
1713 The type of the list elements is put in *ELTYPE, if non-NULL. */
1715 #define NEON_LANE(X) ((X) & 0xf)
1716 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1717 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
1720 parse_neon_el_struct_list (char **str
, unsigned *pbase
,
1721 struct neon_type_el
*eltype
)
1728 int leading_brace
= 0;
1729 enum arm_reg_type rtype
= REG_TYPE_NDQ
;
1731 const char *const incr_error
= "register stride must be 1 or 2";
1732 const char *const type_error
= "mismatched element/structure types in list";
1733 struct neon_typed_alias firsttype
= { 0 };
1735 if (skip_past_char (&ptr
, '{') == SUCCESS
)
1740 struct neon_typed_alias atype
;
1741 int getreg
= parse_typed_reg_or_scalar (&ptr
, rtype
, &rtype
, &atype
);
1745 first_error (_(reg_expected_msgs
[rtype
]));
1752 if (rtype
== REG_TYPE_NQ
)
1759 else if (reg_incr
== -1)
1761 reg_incr
= getreg
- base_reg
;
1762 if (reg_incr
< 1 || reg_incr
> 2)
1764 first_error (_(incr_error
));
1768 else if (getreg
!= base_reg
+ reg_incr
* count
)
1770 first_error (_(incr_error
));
1774 if (!neon_alias_types_same (&atype
, &firsttype
))
1776 first_error (_(type_error
));
1780 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
1784 struct neon_typed_alias htype
;
1785 int hireg
, dregs
= (rtype
== REG_TYPE_NQ
) ? 2 : 1;
1786 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1790 else if (lane
!= atype
.index
)
1792 first_error (_(type_error
));
1796 else if (lane
== -1)
1797 lane
= NEON_INTERLEAVE_LANES
;
1798 else if (lane
!= NEON_INTERLEAVE_LANES
)
1800 first_error (_(type_error
));
1805 else if (reg_incr
!= 1)
1807 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
1811 hireg
= parse_typed_reg_or_scalar (&ptr
, rtype
, NULL
, &htype
);
1814 first_error (_(reg_expected_msgs
[rtype
]));
1817 if (!neon_alias_types_same (&htype
, &firsttype
))
1819 first_error (_(type_error
));
1822 count
+= hireg
+ dregs
- getreg
;
1826 /* If we're using Q registers, we can't use [] or [n] syntax. */
1827 if (rtype
== REG_TYPE_NQ
)
1833 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1837 else if (lane
!= atype
.index
)
1839 first_error (_(type_error
));
1843 else if (lane
== -1)
1844 lane
= NEON_INTERLEAVE_LANES
;
1845 else if (lane
!= NEON_INTERLEAVE_LANES
)
1847 first_error (_(type_error
));
1852 while ((count
!= 1 || leading_brace
) && skip_past_comma (&ptr
) != FAIL
);
1854 /* No lane set by [x]. We must be interleaving structures. */
1856 lane
= NEON_INTERLEAVE_LANES
;
1859 if (lane
== -1 || base_reg
== -1 || count
< 1 || count
> 4
1860 || (count
> 1 && reg_incr
== -1))
1862 first_error (_("error parsing element/structure list"));
1866 if ((count
> 1 || leading_brace
) && skip_past_char (&ptr
, '}') == FAIL
)
1868 first_error (_("expected }"));
1876 *eltype
= firsttype
.eltype
;
1881 return lane
| ((reg_incr
- 1) << 4) | ((count
- 1) << 5);
1884 /* Parse an explicit relocation suffix on an expression. This is
1885 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
1886 arm_reloc_hsh contains no entries, so this function can only
1887 succeed if there is no () after the word. Returns -1 on error,
1888 BFD_RELOC_UNUSED if there wasn't any suffix. */
1890 parse_reloc (char **str
)
1892 struct reloc_entry
*r
;
1896 return BFD_RELOC_UNUSED
;
1901 while (*q
&& *q
!= ')' && *q
!= ',')
1906 if ((r
= hash_find_n (arm_reloc_hsh
, p
, q
- p
)) == NULL
)
1913 /* Directives: register aliases. */
1915 static struct reg_entry
*
1916 insert_reg_alias (char *str
, int number
, int type
)
1918 struct reg_entry
*new;
1921 if ((new = hash_find (arm_reg_hsh
, str
)) != 0)
1924 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str
);
1926 /* Only warn about a redefinition if it's not defined as the
1928 else if (new->number
!= number
|| new->type
!= type
)
1929 as_warn (_("ignoring redefinition of register alias '%s'"), str
);
1935 name
= xstrdup (str
);
1937 name
= xmalloc (strlen(str
) + 1);
1938 strcpy((char *)name
, str
);
1940 new = xmalloc (sizeof (struct reg_entry
));
1943 new->number
= number
;
1945 new->builtin
= FALSE
;
1948 if (hash_insert (arm_reg_hsh
, name
, (PTR
) new))
1955 insert_neon_reg_alias (char *str
, int number
, int type
,
1956 struct neon_typed_alias
*atype
)
1958 struct reg_entry
*reg
= insert_reg_alias (str
, number
, type
);
1962 first_error (_("attempt to redefine typed alias"));
1968 reg
->neon
= xmalloc (sizeof (struct neon_typed_alias
));
1969 *reg
->neon
= *atype
;
1973 /* Look for the .req directive. This is of the form:
1975 new_register_name .req existing_register_name
1977 If we find one, or if it looks sufficiently like one that we want to
1978 handle any error here, return non-zero. Otherwise return zero. */
1981 create_register_alias (char * newname
, char *p
)
1983 struct reg_entry
*old
;
1984 char *oldname
, *nbuf
;
1987 /* The input scrubber ensures that whitespace after the mnemonic is
1988 collapsed to single spaces. */
1990 if (strncmp (oldname
, " .req ", 6) != 0)
1994 if (*oldname
== '\0')
1997 old
= hash_find (arm_reg_hsh
, oldname
);
2000 as_warn (_("unknown register '%s' -- .req ignored"), oldname
);
2004 #define TC_CASE_SENSITIVE /* HACK */
2005 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2006 the desired alias name, and p points to its end. If not, then
2007 the desired alias name is in the global original_case_string. */
2008 #ifdef TC_CASE_SENSITIVE
2011 newname
= original_case_string
;
2012 nlen
= strlen (newname
);
2015 nbuf
= alloca (nlen
+ 1);
2016 memcpy (nbuf
, newname
, nlen
);
2019 /* Create aliases under the new name as stated; an all-lowercase
2020 version of the new name; and an all-uppercase version of the new
2022 insert_reg_alias (nbuf
, old
->number
, old
->type
);
2024 for (p
= nbuf
; *p
; p
++)
2027 if (strncmp (nbuf
, newname
, nlen
))
2028 insert_reg_alias (nbuf
, old
->number
, old
->type
);
2030 for (p
= nbuf
; *p
; p
++)
2033 if (strncmp (nbuf
, newname
, nlen
))
2034 insert_reg_alias (nbuf
, old
->number
, old
->type
);
2039 /* Create a Neon typed/indexed register alias using directives, e.g.:
2044 These typed registers can be used instead of the types specified after the
2045 Neon mnemonic, so long as all operands given have types. Types can also be
2046 specified directly, e.g.:
2047 vadd d0.s32, d1.s32, d2.s32
2051 create_neon_reg_alias (char *newname
, char *p
)
2053 enum arm_reg_type basetype
;
2054 struct reg_entry
*basereg
;
2055 struct reg_entry mybasereg
;
2056 struct neon_type ntype
;
2057 struct neon_typed_alias typeinfo
;
2058 char *namebuf
, *nameend
;
2061 typeinfo
.defined
= 0;
2062 typeinfo
.eltype
.type
= NT_invtype
;
2063 typeinfo
.eltype
.size
= -1;
2064 typeinfo
.index
= -1;
2068 if (strncmp (p
, " .dn ", 5) == 0)
2069 basetype
= REG_TYPE_VFD
;
2070 else if (strncmp (p
, " .qn ", 5) == 0)
2071 basetype
= REG_TYPE_NQ
;
2080 basereg
= arm_reg_parse_multi (&p
);
2082 if (basereg
&& basereg
->type
!= basetype
)
2084 as_bad (_("bad type for register"));
2088 if (basereg
== NULL
)
2091 /* Try parsing as an integer. */
2092 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2093 if (exp
.X_op
!= O_constant
)
2095 as_bad (_("expression must be constant"));
2098 basereg
= &mybasereg
;
2099 basereg
->number
= (basetype
== REG_TYPE_NQ
) ? exp
.X_add_number
* 2
2105 typeinfo
= *basereg
->neon
;
2107 if (parse_neon_type (&ntype
, &p
) == SUCCESS
)
2109 /* We got a type. */
2110 if (typeinfo
.defined
& NTA_HASTYPE
)
2112 as_bad (_("can't redefine the type of a register alias"));
2116 typeinfo
.defined
|= NTA_HASTYPE
;
2117 if (ntype
.elems
!= 1)
2119 as_bad (_("you must specify a single type only"));
2122 typeinfo
.eltype
= ntype
.el
[0];
2125 if (skip_past_char (&p
, '[') == SUCCESS
)
2128 /* We got a scalar index. */
2130 if (typeinfo
.defined
& NTA_HASINDEX
)
2132 as_bad (_("can't redefine the index of a scalar alias"));
2136 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2138 if (exp
.X_op
!= O_constant
)
2140 as_bad (_("scalar index must be constant"));
2144 typeinfo
.defined
|= NTA_HASINDEX
;
2145 typeinfo
.index
= exp
.X_add_number
;
2147 if (skip_past_char (&p
, ']') == FAIL
)
2149 as_bad (_("expecting ]"));
2154 namelen
= nameend
- newname
;
2155 namebuf
= alloca (namelen
+ 1);
2156 strncpy (namebuf
, newname
, namelen
);
2157 namebuf
[namelen
] = '\0';
2159 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2160 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2162 /* Insert name in all uppercase. */
2163 for (p
= namebuf
; *p
; p
++)
2166 if (strncmp (namebuf
, newname
, namelen
))
2167 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2168 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2170 /* Insert name in all lowercase. */
2171 for (p
= namebuf
; *p
; p
++)
2174 if (strncmp (namebuf
, newname
, namelen
))
2175 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2176 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2181 /* Should never be called, as .req goes between the alias and the
2182 register name, not at the beginning of the line. */
2184 s_req (uintptr_t a ATTRIBUTE_UNUSED
)
2186 as_bad (_("invalid syntax for .req directive"));
2190 s_dn (uintptr_t a ATTRIBUTE_UNUSED
)
2192 as_bad (_("invalid syntax for .dn directive"));
2196 s_qn (uintptr_t a ATTRIBUTE_UNUSED
)
2198 as_bad (_("invalid syntax for .qn directive"));
2201 /* The .unreq directive deletes an alias which was previously defined
2202 by .req. For example:
2208 s_unreq (uintptr_t a ATTRIBUTE_UNUSED
)
2213 name
= input_line_pointer
;
2215 while (*input_line_pointer
!= 0
2216 && *input_line_pointer
!= ' '
2217 && *input_line_pointer
!= '\n')
2218 ++input_line_pointer
;
2220 saved_char
= *input_line_pointer
;
2221 *input_line_pointer
= 0;
2224 as_bad (_("invalid syntax for .unreq directive"));
2227 struct reg_entry
*reg
= hash_find (arm_reg_hsh
, name
);
2230 as_bad (_("unknown register alias '%s'"), name
);
2231 else if (reg
->builtin
)
2232 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
2236 hash_delete (arm_reg_hsh
, name
);
2237 free ((char *) reg
->name
);
2244 *input_line_pointer
= saved_char
;
2245 demand_empty_rest_of_line ();
2248 /* Directives: Instruction set selection. */
2251 /* unused OBJ_ELF code removed */
2253 #define mapping_state(x) /* nothing */
2256 /* Find the real, Thumb encoded start of a Thumb function. */
2259 find_real_start (symbolS
* symbolP
)
2262 const char * name
= S_GET_NAME (symbolP
);
2263 symbolS
* new_target
;
2265 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2266 #define STUB_NAME ".real_start_of"
2271 /* The compiler may generate BL instructions to local labels because
2272 it needs to perform a branch to a far away location. These labels
2273 do not have a corresponding ".real_start_of" label. We check
2274 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2275 the ".real_start_of" convention for nonlocal branches. */
2276 if (S_IS_LOCAL (symbolP
) || name
[0] == '.')
2279 real_start
= malloc (strlen (name
) + strlen (STUB_NAME
) + 1);
2280 sprintf (real_start
, "%s%s", STUB_NAME
, name
);
2281 new_target
= symbol_find (real_start
);
2284 if (new_target
== NULL
)
2286 as_warn ("Failed to find real start of function: %s\n", name
);
2287 new_target
= symbolP
;
2297 opcode_select (int width
)
2304 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
2305 as_bad (_("selected processor does not support THUMB opcodes"));
2309 /* No need to force the alignment, since we will have been
2310 coming from ARM mode, which is word-aligned. */
2311 record_alignment (now_seg
, 1);
2314 mapping_state (MAP_THUMB
);
2320 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
2321 as_bad (_("selected processor does not support ARM opcodes"));
2326 frag_align (2, 0, 0);
2328 record_alignment (now_seg
, 1);
2331 mapping_state (MAP_ARM
);
2335 as_bad (_("invalid instruction size selected (%d)"), width
);
2340 s_arm (uintptr_t ignore ATTRIBUTE_UNUSED
)
2343 demand_empty_rest_of_line ();
2347 s_thumb (uintptr_t ignore ATTRIBUTE_UNUSED
)
2350 demand_empty_rest_of_line ();
2354 s_code (uintptr_t unused ATTRIBUTE_UNUSED
)
2358 temp
= get_absolute_expression ();
2363 opcode_select (temp
);
2367 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp
);
2372 s_force_thumb (uintptr_t ignore ATTRIBUTE_UNUSED
)
2374 /* If we are not already in thumb mode go into it, EVEN if
2375 the target processor does not support thumb instructions.
2376 This is used by gcc/config/arm/lib1funcs.asm for example
2377 to compile interworking support functions even if the
2378 target processor should not support interworking. */
2383 record_alignment (now_seg
, 1);
2387 demand_empty_rest_of_line ();
2390 /* We will support '.thumb_func' a la binutils, but we will also support
2391 '.thumb_func /symbol_name/', to avoid the inherent pitfalls of
2392 looking for the next valid label. */
2394 s_thumb_func (uintptr_t ignore ATTRIBUTE_UNUSED
)
2396 if (is_end_of_line(*input_line_pointer
))
2398 /* No symbol specified - we'll use the next one we find. */
2402 /* The following label is the name/address of the start of a Thumb function.
2403 We need to know this for the interworking support. */
2404 label_is_thumb_function_name
= TRUE
;
2408 /* Symbol name specified. */
2413 if (*input_line_pointer
== '"')
2414 name
= input_line_pointer
+ 1;
2416 name
= input_line_pointer
;
2418 c
= get_symbol_end();
2419 symbolP
= symbol_find_or_make (name
);
2420 *input_line_pointer
= c
;
2423 THUMB_SET_FUNC (symbolP
, 1);
2424 symbolP
->sy_desc
|= N_ARM_THUMB_DEF
;
2427 demand_empty_rest_of_line ();
2430 /* Perform a .set directive, but also mark the alias as
2431 being a thumb function. */
2434 s_thumb_set (uintptr_t equiv
)
2436 /* XXX the following is a duplicate of the code for s_set() in read.c
2437 We cannot just call that code as we need to get at the symbol that
2444 /* Especial apologies for the random logic:
2445 This just grew, and could be parsed much more simply!
2447 name
= input_line_pointer
;
2448 delim
= get_symbol_end ();
2449 end_name
= input_line_pointer
;
2452 if (*input_line_pointer
!= ',')
2455 as_bad (_("expected comma after name \"%s\""), name
);
2457 ignore_rest_of_line ();
2461 input_line_pointer
++;
2464 if (name
[0] == '.' && name
[1] == '\0')
2466 /* XXX - this should not happen to .thumb_set. */
2470 if ((symbolP
= symbol_find (name
)) == NULL
2471 && (symbolP
= md_undefined_symbol (name
)) == NULL
)
2473 #define NO_LISTING /* HACK */
2475 /* When doing symbol listings, play games with dummy fragments living
2476 outside the normal fragment chain to record the file and line info
2478 if (listing
& LISTING_SYMBOLS
)
2480 extern struct list_info_struct
* listing_tail
;
2481 fragS
* dummy_frag
= xmalloc (sizeof (fragS
));
2483 memset (dummy_frag
, 0, sizeof (fragS
));
2484 dummy_frag
->fr_type
= rs_fill
;
2485 dummy_frag
->line
= listing_tail
;
2486 symbolP
= symbol_new (name
, undefined_section
, 0, dummy_frag
);
2487 dummy_frag
->fr_symbol
= symbolP
;
2491 symbolP
= symbol_new (name
, undefined_section
, 0, 0, 0, &zero_address_frag
);
2493 /* "set" symbols are local unless otherwise specified. */
2494 SF_SET_LOCAL (symbolP
);
2495 #endif /* OBJ_COFF */
2496 } /* Make a new symbol. */
2498 symbol_table_insert (symbolP
);
2504 && S_IS_DEFINED (symbolP
)
2505 && S_GET_SEGMENT (symbolP
) != reg_section
)
2506 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP
));
2509 pseudo_set (symbolP
);
2511 demand_empty_rest_of_line ();
2513 /* XXX Now we come to the Thumb specific bit of code. */
2515 THUMB_SET_FUNC (symbolP
, 1);
2516 ARM_SET_THUMB (symbolP
, 1);
2517 symbolP
->sy_desc
|= N_ARM_THUMB_DEF
;
2518 #if defined OBJ_ELF || defined OBJ_COFF
2519 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2523 /* Directives: Mode selection. */
2525 /* .syntax [unified|divided] - choose the new unified syntax
2526 (same for Arm and Thumb encoding, modulo slight differences in what
2527 can be represented) or the old divergent syntax for each mode. */
2529 s_syntax (uintptr_t unused ATTRIBUTE_UNUSED
)
2533 name
= input_line_pointer
;
2534 delim
= get_symbol_end ();
2536 if (!strcasecmp (name
, "unified"))
2537 unified_syntax
= TRUE
;
2538 else if (!strcasecmp (name
, "divided"))
2539 unified_syntax
= FALSE
;
2542 as_bad (_("unrecognized syntax mode \"%s\""), name
);
2545 *input_line_pointer
= delim
;
2546 demand_empty_rest_of_line ();
2549 /* Directives: sectioning and alignment. */
2552 /* Same as s_align_ptwo but align 0 => align 2. */
2555 s_align (uintptr_t unused ATTRIBUTE_UNUSED
)
2560 int32_t max_alignment
= 15;
2562 temp
= get_absolute_expression ();
2563 if (temp
> max_alignment
)
2564 as_bad (_("alignment too large: %d assumed"), temp
= max_alignment
);
2567 as_bad (_("alignment negative. 0 assumed."));
2571 if (*input_line_pointer
== ',')
2573 input_line_pointer
++;
2574 temp_fill
= get_absolute_expression ();
2586 /* Only make a frag if we HAVE to. */
2594 if (!fill_p
&& subseg_text_p (now_seg
))
2595 frag_align_code (temp
, 0);
2600 md_number_to_chars(fill
, temp_fill
, 4);
2601 frag_align (temp
, fill
, 0, 0);
2604 demand_empty_rest_of_line ();
2607 record_alignment (now_seg
, temp
);
2613 s_bss (uintptr_t ignore ATTRIBUTE_UNUSED
)
2616 /* We don't support putting frags in the BSS segment, we fake it by
2617 marking in_bss, then looking at s_skip for clues. */
2618 subseg_set (bss_section
, 0);
2619 demand_empty_rest_of_line ();
2620 mapping_state (MAP_DATA
);
2622 as_fatal(".bss directive not supported, use .zerofill for Mach-O files");
2627 s_even (uintptr_t ignore ATTRIBUTE_UNUSED
)
2629 /* Never make frag if expect extra pass. */
2633 frag_align (1, 0, 0, 0);
2636 record_alignment (now_seg
, 1);
2639 demand_empty_rest_of_line ();
2642 /* Directives: Literal pools. */
2644 static literal_pool
*
2645 find_literal_pool (void)
2647 literal_pool
* pool
;
2649 for (pool
= list_of_pools
; pool
!= NULL
; pool
= pool
->next
)
2651 if (pool
->section
== now_seg
2652 && pool
->sub_section
== now_subseg
)
2659 static literal_pool
*
2660 find_or_make_literal_pool (void)
2662 /* Next literal pool ID number. */
2663 static unsigned int latest_pool_num
= 1;
2664 literal_pool
* pool
;
2666 pool
= find_literal_pool ();
2670 /* Create a new pool. */
2671 pool
= xmalloc (sizeof (* pool
));
2675 pool
->next_free_entry
= 0;
2676 pool
->section
= now_seg
;
2677 pool
->sub_section
= now_subseg
;
2678 pool
->next
= list_of_pools
;
2679 pool
->symbol
= NULL
;
2681 /* Add it to the list. */
2682 list_of_pools
= pool
;
2685 /* New pools, and emptied pools, will have a NULL symbol. */
2686 if (pool
->symbol
== NULL
)
2688 pool
->symbol
= symbol_create (FAKE_LABEL_NAME
, undefined_section
,
2689 (valueT
) 0, &zero_address_frag
);
2690 pool
->id
= latest_pool_num
++;
2697 /* Add the literal in the global 'inst'
2698 structure to the relevant literal pool. */
2701 add_to_lit_pool (void)
2703 literal_pool
* pool
;
2706 pool
= find_or_make_literal_pool ();
2708 /* Check if this literal value is already in the pool. */
2709 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
2711 if ((pool
->literals
[entry
].X_op
== inst
.reloc
.exp
.X_op
)
2712 && (inst
.reloc
.exp
.X_op
== O_constant
)
2713 && (pool
->literals
[entry
].X_add_number
2714 == inst
.reloc
.exp
.X_add_number
)
2716 && (pool
->literals
[entry
].X_unsigned
2717 == inst
.reloc
.exp
.X_unsigned
))
2723 if ((pool
->literals
[entry
].X_op
== inst
.reloc
.exp
.X_op
)
2724 && (inst
.reloc
.exp
.X_op
== O_symbol
)
2725 && (pool
->literals
[entry
].X_add_number
2726 == inst
.reloc
.exp
.X_add_number
)
2727 && (pool
->literals
[entry
].X_add_symbol
2728 == inst
.reloc
.exp
.X_add_symbol
)
2729 && (pool
->literals
[entry
].X_op_symbol
2730 == inst
.reloc
.exp
.X_op_symbol
))
2734 /* Do we need to create a new entry? */
2735 if (entry
== pool
->next_free_entry
)
2737 if (entry
>= MAX_LITERAL_POOL_SIZE
)
2739 inst
.error
= _("literal pool overflow");
2743 pool
->literals
[entry
] = inst
.reloc
.exp
;
2744 pool
->next_free_entry
+= 1;
2747 inst
.reloc
.exp
.X_op
= O_symbol
;
2748 inst
.reloc
.exp
.X_add_number
= ((int) entry
) * 4 - 8;
2749 inst
.reloc
.exp
.X_add_symbol
= pool
->symbol
;
2755 /* Can't use symbol_new here, so have to create a symbol and then at
2756 a later date assign it a value. That's what these functions do. */
2759 symbol_locate (symbolS
* symbolP
,
2760 const char * name
, /* It is copied, the caller can modify. */
2761 segT segment
, /* Segment identifier (SEG_<something>). */
2762 valueT valu
, /* Symbol value. */
2763 fragS
* frag
) /* Associated fragment. */
2765 unsigned int name_length
;
2766 char * preserved_copy_of_name
;
2768 name_length
= strlen (name
) + 1; /* +1 for \0. */
2769 obstack_grow (¬es
, name
, name_length
);
2770 preserved_copy_of_name
= obstack_finish (¬es
);
2772 #ifdef tc_canonicalize_symbol_name
2773 preserved_copy_of_name
=
2774 tc_canonicalize_symbol_name (preserved_copy_of_name
);
2777 S_SET_NAME (symbolP
, preserved_copy_of_name
);
2779 S_SET_SEGMENT (symbolP
, segment
);
2780 S_SET_VALUE (symbolP
, valu
);
2781 symbol_clear_list_pointers (symbolP
);
2783 symbol_set_frag (symbolP
, frag
);
2785 /* Link to end of symbol chain. */
2787 extern int symbol_table_frozen
;
2789 if (symbol_table_frozen
)
2793 symbol_append (symbolP
, symbol_lastP
, & symbol_rootP
, & symbol_lastP
);
2795 obj_symbol_new_hook (symbolP
);
2797 #ifdef tc_symbol_new_hook
2798 tc_symbol_new_hook (symbolP
);
2802 verify_symbol_chain (symbol_rootP
, symbol_lastP
);
2803 #endif /* DEBUG_SYMS */
2807 s_ltorg (uintptr_t ignored ATTRIBUTE_UNUSED
)
2810 literal_pool
* pool
;
2813 pool
= find_literal_pool ();
2815 || pool
->symbol
== NULL
2816 || pool
->next_free_entry
== 0)
2819 mapping_state (MAP_DATA
);
2821 /* Align pool as you have word accesses.
2822 Only make a frag if we have to. */
2826 frag_align (2, 0, 0, 0);
2829 record_alignment (now_seg
, 2);
2832 sprintf (sym_name
, "$$lit_\002%x", pool
->id
);
2834 symbol_locate (pool
->symbol
, sym_name
, now_seg
,
2835 (valueT
) frag_now_fix (), frag_now
);
2836 symbol_table_insert (pool
->symbol
);
2838 ARM_SET_THUMB (pool
->symbol
, thumb_mode
);
2840 #if defined OBJ_COFF || defined OBJ_ELF
2841 ARM_SET_INTERWORK (pool
->symbol
, support_interwork
);
2844 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
2845 /* First output the expression in the instruction to the pool. */
2846 emit_expr (&(pool
->literals
[entry
]), 4); /* .word */
2848 /* Mark the pool as empty. */
2849 pool
->next_free_entry
= 0;
2850 pool
->symbol
= NULL
;
2854 /* This table describes all the machine specific pseudo-ops the assembler
2855 has to support. The fields are:
2856 pseudo-op name without dot
2857 function to call to execute this pseudo-op
2858 Integer arg to pass to the function. */
2860 const pseudo_typeS md_pseudo_table
[] =
2862 /* Never called because '.req' does not start a line. */
2863 { "req", s_req
, 0 },
2864 /* Following two are likewise never called. */
2867 { "unreq", s_unreq
, 0 },
2868 { "bss", s_bss
, 0 },
2870 { "align", s_align
, 0 },
2872 { "arm", s_arm
, 0 },
2873 { "thumb", s_thumb
, 0 },
2874 { "code", s_code
, 0 },
2875 { "force_thumb", s_force_thumb
, 0 },
2876 { "thumb_func", s_thumb_func
, 0 },
2877 { "thumb_set", s_thumb_set
, 0 },
2878 { "even", s_even
, 0 },
2880 { "ltorg", s_ltorg
, 0 },
2881 { "pool", s_ltorg
, 0 },
2883 { "syntax", s_syntax
, 0 },
2885 { "cpu", s_arm_cpu
, 0 },
2886 { "arch", s_arm_arch
, 0 },
2887 { "object_arch", s_arm_object_arch
, 0 },
2888 { "fpu", s_arm_fpu
, 0 },
2891 /* unused OBJ_ELF directives removed */
2895 /* These are used for dwarf. */
2899 /* These are used for dwarf2. */
2900 { "file", (void (*) (uintptr_t)) dwarf2_directive_file
, 0 },
2901 { "loc", dwarf2_directive_loc
, 0 },
2902 { "loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0 },
2904 { "extend", float_cons
, 'x' },
2905 { "ldouble", float_cons
, 'x' },
2906 { "packed", float_cons
, 'p' },
2910 /* Parser functions used exclusively in instruction operands. */
2912 /* Generic immediate-value read function for use in insn parsing.
2913 STR points to the beginning of the immediate (the leading #);
2914 VAL receives the value; if the value is outside [MIN, MAX]
2915 issue an error. PREFIX_OPT is true if the immediate prefix is
2919 parse_immediate (char **str
, int *val
, int min
, int max
,
2920 bfd_boolean prefix_opt
)
2923 my_get_expression (&exp
, str
, prefix_opt
? GE_OPT_PREFIX
: GE_IMM_PREFIX
);
2924 if (exp
.X_op
!= O_constant
)
2926 inst
.error
= _("constant expression required");
2930 if (exp
.X_add_number
< min
|| exp
.X_add_number
> max
)
2932 inst
.error
= _("immediate value out of range");
2936 *val
= exp
.X_add_number
;
2940 /* Less-generic immediate-value read function with the possibility of loading a
2941 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
2942 instructions. Puts the result directly in inst.operands[i]. */
2945 parse_big_immediate (char **str
, int i
)
2950 my_get_expression (&exp
, &ptr
, GE_OPT_PREFIX_BIG
);
2952 if (exp
.X_op
== O_constant
)
2954 inst
.operands
[i
].imm
= exp
.X_add_number
& 0xffffffff;
2955 /* If we're on a 64-bit host, then a 64-bit number can be returned using
2956 O_constant. We have to be careful not to break compilation for
2957 32-bit X_add_number, though. */
2958 if ((exp
.X_add_number
& ~0xffffffffl
) != 0)
2960 /* X >> 32 is illegal if sizeof (exp.X_add_number) == 4. */
2961 inst
.operands
[i
].reg
= ((exp
.X_add_number
>> 16) >> 16) & 0xffffffff;
2962 inst
.operands
[i
].regisimm
= 1;
2965 else if (exp
.X_op
== O_big
2966 && LITTLENUM_NUMBER_OF_BITS
* exp
.X_add_number
> 32
2967 && LITTLENUM_NUMBER_OF_BITS
* exp
.X_add_number
<= 64)
2969 unsigned parts
= 32 / LITTLENUM_NUMBER_OF_BITS
, j
, idx
= 0;
2970 /* Bignums have their least significant bits in
2971 generic_bignum[0]. Make sure we put 32 bits in imm and
2972 32 bits in reg, in a (hopefully) portable way. */
2973 assert (parts
!= 0);
2974 inst
.operands
[i
].imm
= 0;
2975 for (j
= 0; j
< parts
; j
++, idx
++)
2976 inst
.operands
[i
].imm
|= generic_bignum
[idx
]
2977 << (LITTLENUM_NUMBER_OF_BITS
* j
);
2978 inst
.operands
[i
].reg
= 0;
2979 for (j
= 0; j
< parts
; j
++, idx
++)
2980 inst
.operands
[i
].reg
|= generic_bignum
[idx
]
2981 << (LITTLENUM_NUMBER_OF_BITS
* j
);
2982 inst
.operands
[i
].regisimm
= 1;
2992 /* Returns the pseudo-register number of an FPA immediate constant,
2993 or FAIL if there isn't a valid constant here. */
2996 parse_fpa_immediate (char ** str
)
2998 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
3004 /* First try and match exact strings, this is to guarantee
3005 that some formats will work even for cross assembly. */
3007 for (i
= 0; fp_const
[i
]; i
++)
3009 if (strncmp (*str
, fp_const
[i
], strlen (fp_const
[i
])) == 0)
3013 *str
+= strlen (fp_const
[i
]);
3014 if (is_end_of_line((unsigned char) **str
))
3020 /* Just because we didn't get a match doesn't mean that the constant
3021 isn't valid, just that it is in a format that we don't
3022 automatically recognize. Try parsing it with the standard
3023 expression routines. */
3025 memset (words
, 0, MAX_LITTLENUMS
* sizeof (LITTLENUM_TYPE
));
3027 /* Look for a raw floating point number. */
3028 if ((save_in
= atof_ieee (*str
, 'x', words
)) != NULL
3029 && is_end_of_line((unsigned char) *save_in
))
3031 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
3033 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
3035 if (words
[j
] != fp_values
[i
][j
])
3039 if (j
== MAX_LITTLENUMS
)
3047 /* Try and parse a more complex expression, this will probably fail
3048 unless the code uses a floating point prefix (eg "0f"). */
3049 save_in
= input_line_pointer
;
3050 input_line_pointer
= *str
;
3051 if (expression (&exp
) == absolute_section
3052 && exp
.X_op
== O_big
3053 && exp
.X_add_number
< 0)
3055 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
3057 if (gen_to_words (words
, 5, (int32_t) 15) == 0)
3059 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
3061 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
3063 if (words
[j
] != fp_values
[i
][j
])
3067 if (j
== MAX_LITTLENUMS
)
3069 *str
= input_line_pointer
;
3070 input_line_pointer
= save_in
;
3077 *str
= input_line_pointer
;
3078 input_line_pointer
= save_in
;
3079 inst
.error
= _("invalid FPA immediate expression");
/* Returns 1 if a number has "quarter-precision" float format
   0baBbbbbbc defgh000 00000000 00000000, i.e. bits <18:0> are zero
   and bits <30:25> form the pattern B11111 or 100000 depending on
   bit 29.  */

static int
is_quarter_float (unsigned imm)
{
  /* Expected value of bits <30:25> given the exponent bit 29.  */
  int expected = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;

  if ((imm & 0x7ffff) != 0)
    return 0;
  return ((imm & 0x7e000000) ^ expected) == 0;
}
3093 /* Parse an 8-bit "quarter-precision" floating point number of the form:
3094 0baBbbbbbc defgh000 00000000 00000000.
3095 The zero and minus-zero cases need special handling, since they can't be
3096 encoded in the "quarter-precision" float format, but can nonetheless be
3097 loaded as integer constants. */
3100 parse_qfloat_immediate (char **ccp
, int *immed
)
3104 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
3105 int found_fpchar
= 0;
3107 skip_past_char (&str
, '#');
3109 /* We must not accidentally parse an integer as a floating-point number. Make
3110 sure that the value we parse is not an integer by checking for special
3111 characters '.' or 'e'.
3112 FIXME: This is a horrible hack, but doing better is tricky because type
3113 information isn't in a very usable state at parse time. */
3115 skip_whitespace (fpnum
);
3117 if (strncmp (fpnum
, "0x", 2) == 0)
3121 for (; *fpnum
!= '\0' && *fpnum
!= ' ' && *fpnum
!= '\n'; fpnum
++)
3122 if (*fpnum
== '.' || *fpnum
== 'e' || *fpnum
== 'E')
3132 if ((str
= atof_ieee (str
, 's', words
)) != NULL
)
3134 unsigned fpword
= 0;
3137 /* Our FP word must be 32 bits (single-precision FP). */
3138 for (i
= 0; i
< 32 / LITTLENUM_NUMBER_OF_BITS
; i
++)
3140 fpword
<<= LITTLENUM_NUMBER_OF_BITS
;
3144 if (is_quarter_float (fpword
) || (fpword
& 0x7fffffff) == 0)
3157 /* Shift operands. */
3160 SHIFT_LSL
, SHIFT_LSR
, SHIFT_ASR
, SHIFT_ROR
, SHIFT_RRX
3163 struct asm_shift_name
3166 enum shift_kind kind
;
3169 /* Third argument to parse_shift. */
3170 enum parse_shift_mode
3172 NO_SHIFT_RESTRICT
, /* Any kind of shift is accepted. */
3173 SHIFT_IMMEDIATE
, /* Shift operand must be an immediate. */
3174 SHIFT_LSL_OR_ASR_IMMEDIATE
, /* Shift must be LSL or ASR immediate. */
3175 SHIFT_ASR_IMMEDIATE
, /* Shift must be ASR immediate. */
3176 SHIFT_LSL_IMMEDIATE
, /* Shift must be LSL immediate. */
3179 /* Parse a <shift> specifier on an ARM data processing instruction.
3180 This has three forms:
3182 (LSL|LSR|ASL|ASR|ROR) Rs
3183 (LSL|LSR|ASL|ASR|ROR) #imm
3186 Note that ASL is assimilated to LSL in the instruction encoding, and
3187 RRX to ROR #0 (which cannot be written as such). */
3190 parse_shift (char **str
, int i
, enum parse_shift_mode mode
)
3192 const struct asm_shift_name
*shift_name
;
3193 enum shift_kind shift
;
3198 for (p
= *str
; ISALPHA (*p
); p
++)
3203 inst
.error
= _("shift expression expected");
3207 shift_name
= hash_find_n (arm_shift_hsh
, *str
, p
- *str
);
3209 if (shift_name
== NULL
)
3211 inst
.error
= _("shift expression expected");
3215 shift
= shift_name
->kind
;
3219 case NO_SHIFT_RESTRICT
:
3220 case SHIFT_IMMEDIATE
: break;
3222 case SHIFT_LSL_OR_ASR_IMMEDIATE
:
3223 if (shift
!= SHIFT_LSL
&& shift
!= SHIFT_ASR
)
3225 inst
.error
= _("'LSL' or 'ASR' required");
3230 case SHIFT_LSL_IMMEDIATE
:
3231 if (shift
!= SHIFT_LSL
)
3233 inst
.error
= _("'LSL' required");
3238 case SHIFT_ASR_IMMEDIATE
:
3239 if (shift
!= SHIFT_ASR
)
3241 inst
.error
= _("'ASR' required");
3249 if (shift
!= SHIFT_RRX
)
3251 /* Whitespace can appear here if the next thing is a bare digit. */
3252 skip_whitespace (p
);
3254 if (mode
== NO_SHIFT_RESTRICT
3255 && (reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
3257 inst
.operands
[i
].imm
= reg
;
3258 inst
.operands
[i
].immisreg
= 1;
3260 else if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
3263 inst
.operands
[i
].shift_kind
= shift
;
3264 inst
.operands
[i
].shifted
= 1;
3269 /* Parse a <shifter_operand> for an ARM data processing instruction:
3272 #<immediate>, <rotate>
3276 where <shift> is defined by parse_shift above, and <rotate> is a
3277 multiple of 2 between 0 and 30. Validation of immediate operands
3278 is deferred to md_apply_fix. */
3281 parse_shifter_operand (char **str
, int i
)
3286 if ((value
= arm_reg_parse (str
, REG_TYPE_RN
)) != FAIL
)
3288 inst
.operands
[i
].reg
= value
;
3289 inst
.operands
[i
].isreg
= 1;
3291 /* parse_shift will override this if appropriate */
3292 inst
.reloc
.exp
.X_op
= O_constant
;
3293 inst
.reloc
.exp
.X_add_number
= 0;
3295 if (skip_past_comma (str
) == FAIL
)
3298 /* Shift operation on register. */
3299 return parse_shift (str
, i
, NO_SHIFT_RESTRICT
);
3302 if (my_get_expression (&inst
.reloc
.exp
, str
, GE_IMM_PREFIX
))
3305 if (skip_past_comma (str
) == SUCCESS
)
3307 /* #x, y -- ie explicit rotation by Y. */
3308 if (my_get_expression (&expr
, str
, GE_NO_PREFIX
))
3311 if (expr
.X_op
!= O_constant
|| inst
.reloc
.exp
.X_op
!= O_constant
)
3313 inst
.error
= _("constant expression expected");
3317 value
= expr
.X_add_number
;
3318 if (value
< 0 || value
> 30 || value
% 2 != 0)
3320 inst
.error
= _("invalid rotation");
3323 if (inst
.reloc
.exp
.X_add_number
< 0 || inst
.reloc
.exp
.X_add_number
> 255)
3325 inst
.error
= _("invalid constant");
3329 /* Convert to decoded value. md_apply_fix will put it back. */
3330 inst
.reloc
.exp
.X_add_number
3331 = (((inst
.reloc
.exp
.X_add_number
<< (32 - value
))
3332 | (inst
.reloc
.exp
.X_add_number
>> value
)) & 0xffffffff);
3335 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
3336 inst
.reloc
.pc_rel
= 0;
3340 /* Group relocation information. Each entry in the table contains the
3341 textual name of the relocation as may appear in assembler source
3342 and must end with a colon.
3343 Along with this textual name are the relocation codes to be used if
3344 the corresponding instruction is an ALU instruction (ADD or SUB only),
3345 an LDR, an LDRS, or an LDC. */
3347 struct group_reloc_table_entry
3358 /* Varieties of non-ALU group relocation. */
3365 static struct group_reloc_table_entry group_reloc_table
[] =
3366 { /* Program counter relative: */
3368 BFD_RELOC_ARM_ALU_PC_G0_NC
, /* ALU */
3373 BFD_RELOC_ARM_ALU_PC_G0
, /* ALU */
3374 BFD_RELOC_ARM_LDR_PC_G0
, /* LDR */
3375 BFD_RELOC_ARM_LDRS_PC_G0
, /* LDRS */
3376 BFD_RELOC_ARM_LDC_PC_G0
}, /* LDC */
3378 BFD_RELOC_ARM_ALU_PC_G1_NC
, /* ALU */
3383 BFD_RELOC_ARM_ALU_PC_G1
, /* ALU */
3384 BFD_RELOC_ARM_LDR_PC_G1
, /* LDR */
3385 BFD_RELOC_ARM_LDRS_PC_G1
, /* LDRS */
3386 BFD_RELOC_ARM_LDC_PC_G1
}, /* LDC */
3388 BFD_RELOC_ARM_ALU_PC_G2
, /* ALU */
3389 BFD_RELOC_ARM_LDR_PC_G2
, /* LDR */
3390 BFD_RELOC_ARM_LDRS_PC_G2
, /* LDRS */
3391 BFD_RELOC_ARM_LDC_PC_G2
}, /* LDC */
3392 /* Section base relative */
3394 BFD_RELOC_ARM_ALU_SB_G0_NC
, /* ALU */
3399 BFD_RELOC_ARM_ALU_SB_G0
, /* ALU */
3400 BFD_RELOC_ARM_LDR_SB_G0
, /* LDR */
3401 BFD_RELOC_ARM_LDRS_SB_G0
, /* LDRS */
3402 BFD_RELOC_ARM_LDC_SB_G0
}, /* LDC */
3404 BFD_RELOC_ARM_ALU_SB_G1_NC
, /* ALU */
3409 BFD_RELOC_ARM_ALU_SB_G1
, /* ALU */
3410 BFD_RELOC_ARM_LDR_SB_G1
, /* LDR */
3411 BFD_RELOC_ARM_LDRS_SB_G1
, /* LDRS */
3412 BFD_RELOC_ARM_LDC_SB_G1
}, /* LDC */
3414 BFD_RELOC_ARM_ALU_SB_G2
, /* ALU */
3415 BFD_RELOC_ARM_LDR_SB_G2
, /* LDR */
3416 BFD_RELOC_ARM_LDRS_SB_G2
, /* LDRS */
3417 BFD_RELOC_ARM_LDC_SB_G2
} }; /* LDC */
3419 /* Given the address of a pointer pointing to the textual name of a group
3420 relocation as may appear in assembler source, attempt to find its details
3421 in group_reloc_table. The pointer will be updated to the character after
3422 the trailing colon. On failure, FAIL will be returned; SUCCESS
3423 otherwise. On success, *entry will be updated to point at the relevant
3424 group_reloc_table entry. */
3427 find_group_reloc_table_entry (char **str
, struct group_reloc_table_entry
**out
)
3430 for (i
= 0; i
< ARRAY_SIZE (group_reloc_table
); i
++)
3432 int length
= strlen (group_reloc_table
[i
].name
);
3434 if (strncasecmp (group_reloc_table
[i
].name
, *str
, length
) == 0 &&
3435 (*str
)[length
] == ':')
3437 *out
= &group_reloc_table
[i
];
3438 *str
+= (length
+ 1);
3446 /* Results from operand parsing worker functions. */
3450 PARSE_OPERAND_SUCCESS
,
3452 PARSE_OPERAND_FAIL_NO_BACKTRACK
3453 } parse_operand_result
;
3455 /* Parse a <shifter_operand> for an ARM data processing instruction
3456 (as for parse_shifter_operand) where group relocations are allowed:
3459 #<immediate>, <rotate>
3460 #:<group_reloc>:<expression>
3464 where <group_reloc> is one of the strings defined in group_reloc_table.
3465 The hashes are optional.
3467 Everything else is as for parse_shifter_operand. */
3469 static parse_operand_result
3470 parse_shifter_operand_group_reloc (char **str
, int i
)
3472 /* Determine if we have the sequence of characters #: or just :
3473 coming next. If we do, then we check for a group relocation.
3474 If we don't, punt the whole lot to parse_shifter_operand. */
3476 if (((*str
)[0] == '#' && (*str
)[1] == ':')
3477 || (*str
)[0] == ':')
3479 struct group_reloc_table_entry
*entry
;
3481 if ((*str
)[0] == '#')
3486 /* Try to parse a group relocation. Anything else is an error. */
3487 if (find_group_reloc_table_entry (str
, &entry
) == FAIL
)
3489 inst
.error
= _("unknown group relocation");
3490 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
3493 /* We now have the group relocation table entry corresponding to
3494 the name in the assembler source. Next, we parse the expression. */
3495 if (my_get_expression (&inst
.reloc
.exp
, str
, GE_NO_PREFIX
))
3496 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
3498 /* Record the relocation type (always the ALU variant here). */
3499 inst
.reloc
.type
= entry
->alu_code
;
3500 assert (inst
.reloc
.type
!= 0);
3502 return PARSE_OPERAND_SUCCESS
;
3505 return parse_shifter_operand (str
, i
) == SUCCESS
3506 ? PARSE_OPERAND_SUCCESS
: PARSE_OPERAND_FAIL
;
3508 /* Never reached. */
3511 /* Parse all forms of an ARM address expression. Information is written
3512 to inst.operands[i] and/or inst.reloc.
3514 Preindexed addressing (.preind=1):
3516 [Rn, #offset] .reg=Rn .reloc.exp=offset
3517 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
3518 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
3519 .shift_kind=shift .reloc.exp=shift_imm
3521 These three may have a trailing ! which causes .writeback to be set also.
3523 Postindexed addressing (.postind=1, .writeback=1):
3525 [Rn], #offset .reg=Rn .reloc.exp=offset
3526 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
3527 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
3528 .shift_kind=shift .reloc.exp=shift_imm
3530 Unindexed addressing (.preind=0, .postind=0):
3532 [Rn], {option} .reg=Rn .imm=option .immisreg=0
3536 [Rn]{!} shorthand for [Rn,#0]{!}
3537 =immediate .isreg=0 .reloc.exp=immediate
3538 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
3540 It is the caller's responsibility to check for addressing modes not
3541 supported by the instruction, and to set inst.reloc.type. */
3543 static parse_operand_result
3544 parse_address_main (char **str
, int i
, int group_relocations
,
3545 group_reloc_type group_type
)
3550 if (skip_past_char (&p
, '[') == FAIL
)
3552 if (skip_past_char (&p
, '=') == FAIL
)
3554 /* bare address - translate to PC-relative offset */
3555 inst
.reloc
.pc_rel
= 1;
3556 inst
.operands
[i
].reg
= REG_PC
;
3557 inst
.operands
[i
].isreg
= 1;
3558 inst
.operands
[i
].preind
= 1;
3560 /* else a load-constant pseudo op, no special treatment needed here */
3562 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
3563 return PARSE_OPERAND_FAIL
;
3566 return PARSE_OPERAND_SUCCESS
;
3569 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
3571 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
3572 return PARSE_OPERAND_FAIL
;
3574 inst
.operands
[i
].reg
= reg
;
3575 inst
.operands
[i
].isreg
= 1;
3577 if (skip_past_comma (&p
) == SUCCESS
)
3579 inst
.operands
[i
].preind
= 1;
3582 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
3584 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
3586 inst
.operands
[i
].imm
= reg
;
3587 inst
.operands
[i
].immisreg
= 1;
3589 if (skip_past_comma (&p
) == SUCCESS
)
3590 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
3591 return PARSE_OPERAND_FAIL
;
3593 else if (skip_past_char (&p
, ':') == SUCCESS
)
3595 /* FIXME: '@' should be used here, but it's filtered out by generic
3596 code before we get to see it here. This may be subject to
3599 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
3600 if (exp
.X_op
!= O_constant
)
3602 inst
.error
= _("alignment must be constant");
3603 return PARSE_OPERAND_FAIL
;
3605 inst
.operands
[i
].imm
= exp
.X_add_number
<< 8;
3606 inst
.operands
[i
].immisalign
= 1;
3607 /* Alignments are not pre-indexes. */
3608 inst
.operands
[i
].preind
= 0;
3612 if (inst
.operands
[i
].negative
)
3614 inst
.operands
[i
].negative
= 0;
3618 if (group_relocations
&&
3619 ((*p
== '#' && *(p
+ 1) == ':') || *p
== ':'))
3622 struct group_reloc_table_entry
*entry
;
3624 /* Skip over the #: or : sequence. */
3630 /* Try to parse a group relocation. Anything else is an
3632 if (find_group_reloc_table_entry (&p
, &entry
) == FAIL
)
3634 inst
.error
= _("unknown group relocation");
3635 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
3638 /* We now have the group relocation table entry corresponding to
3639 the name in the assembler source. Next, we parse the
3641 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
3642 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
3644 /* Record the relocation type. */
3648 inst
.reloc
.type
= entry
->ldr_code
;
3652 inst
.reloc
.type
= entry
->ldrs_code
;
3656 inst
.reloc
.type
= entry
->ldc_code
;
3663 if (inst
.reloc
.type
== 0)
3665 inst
.error
= _("this group relocation is not allowed on this instruction");
3666 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
3670 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
3671 return PARSE_OPERAND_FAIL
;
3675 if (skip_past_char (&p
, ']') == FAIL
)
3677 inst
.error
= _("']' expected");
3678 return PARSE_OPERAND_FAIL
;
3681 if (skip_past_char (&p
, '!') == SUCCESS
)
3682 inst
.operands
[i
].writeback
= 1;
3684 else if (skip_past_comma (&p
) == SUCCESS
)
3686 if (skip_past_char (&p
, '{') == SUCCESS
)
3688 /* [Rn], {expr} - unindexed, with option */
3689 if (parse_immediate (&p
, &inst
.operands
[i
].imm
,
3690 0, 255, TRUE
) == FAIL
)
3691 return PARSE_OPERAND_FAIL
;
3693 if (skip_past_char (&p
, '}') == FAIL
)
3695 inst
.error
= _("'}' expected at end of 'option' field");
3696 return PARSE_OPERAND_FAIL
;
3698 if (inst
.operands
[i
].preind
)
3700 inst
.error
= _("cannot combine index with option");
3701 return PARSE_OPERAND_FAIL
;
3704 return PARSE_OPERAND_SUCCESS
;
3708 inst
.operands
[i
].postind
= 1;
3709 inst
.operands
[i
].writeback
= 1;
3711 if (inst
.operands
[i
].preind
)
3713 inst
.error
= _("cannot combine pre- and post-indexing");
3714 return PARSE_OPERAND_FAIL
;
3718 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
3720 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
3722 /* We might be using the immediate for alignment already. If we
3723 are, OR the register number into the low-order bits. */
3724 if (inst
.operands
[i
].immisalign
)
3725 inst
.operands
[i
].imm
|= reg
;
3727 inst
.operands
[i
].imm
= reg
;
3728 inst
.operands
[i
].immisreg
= 1;
3730 if (skip_past_comma (&p
) == SUCCESS
)
3731 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
3732 return PARSE_OPERAND_FAIL
;
3736 if (inst
.operands
[i
].negative
)
3738 inst
.operands
[i
].negative
= 0;
3741 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
3742 return PARSE_OPERAND_FAIL
;
3747 /* If at this point neither .preind nor .postind is set, we have a
3748 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
3749 if (inst
.operands
[i
].preind
== 0 && inst
.operands
[i
].postind
== 0)
3751 inst
.operands
[i
].preind
= 1;
3752 inst
.reloc
.exp
.X_op
= O_constant
;
3753 inst
.reloc
.exp
.X_add_number
= 0;
3756 return PARSE_OPERAND_SUCCESS
;
3760 parse_address (char **str
, int i
)
3762 return parse_address_main (str
, i
, 0, 0) == PARSE_OPERAND_SUCCESS
3766 static parse_operand_result
3767 parse_address_group_reloc (char **str
, int i
, group_reloc_type type
)
3769 return parse_address_main (str
, i
, 1, type
);
3772 /* Parse an operand for a MOVW or MOVT instruction. */
3774 parse_half (char **str
)
3779 skip_past_char (&p
, '#');
3780 if (strncasecmp (p
, ":lower16:", 9) == 0)
3781 inst
.reloc
.type
= BFD_RELOC_ARM_MOVW
;
3782 else if (strncasecmp (p
, ":upper16:", 9) == 0)
3783 inst
.reloc
.type
= BFD_RELOC_ARM_MOVT
;
3785 if (inst
.reloc
.type
!= BFD_RELOC_UNUSED
)
3791 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
3794 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
3796 if (inst
.reloc
.exp
.X_op
!= O_constant
)
3798 inst
.error
= _("constant expression expected");
3801 if (inst
.reloc
.exp
.X_add_number
< 0
3802 || inst
.reloc
.exp
.X_add_number
> 0xffff)
3804 inst
.error
= _("immediate value out of range");
3812 /* Miscellaneous. */
3814 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
3815 or a bitmask suitable to be or-ed into the ARM msr instruction. */
3817 parse_psr (char **str
)
3821 const struct asm_psr
*psr
;
3824 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
3825 feature for ease of use and backwards compatibility. */
3827 if (strncasecmp (p
, "SPSR", 4) == 0)
3828 psr_field
= SPSR_BIT
;
3829 else if (strncasecmp (p
, "CPSR", 4) == 0)
3836 while (ISALNUM (*p
) || *p
== '_');
3838 psr
= hash_find_n (arm_v7m_psr_hsh
, start
, p
- start
);
3849 /* A suffix follows. */
3855 while (ISALNUM (*p
) || *p
== '_');
3857 psr
= hash_find_n (arm_psr_hsh
, start
, p
- start
);
3861 psr_field
|= psr
->field
;
3866 goto error
; /* Garbage after "[CS]PSR". */
3868 psr_field
|= (PSR_c
| PSR_f
);
3874 inst
.error
= _("flag for {c}psr instruction expected");
3878 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
3879 value suitable for splatting into the AIF field of the instruction. */
3882 parse_cps_flags (char **str
)
3891 case '\0': case ',':
3894 case 'a': case 'A': saw_a_flag
= 1; val
|= 0x4; break;
3895 case 'i': case 'I': saw_a_flag
= 1; val
|= 0x2; break;
3896 case 'f': case 'F': saw_a_flag
= 1; val
|= 0x1; break;
3899 inst
.error
= _("unrecognized CPS flag");
3904 if (saw_a_flag
== 0)
3906 inst
.error
= _("missing CPS flags");
3914 /* Parse an endian specifier ("BE" or "LE", case insensitive);
3915 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
3918 parse_endian_specifier (char **str
)
3923 if (strncasecmp (s
, "BE", 2))
3925 else if (strncasecmp (s
, "LE", 2))
3929 inst
.error
= _("valid endian specifiers are be or le");
3933 if (ISALNUM (s
[2]) || s
[2] == '_')
3935 inst
.error
= _("valid endian specifiers are be or le");
3940 return little_endian
;
3943 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
3944 value suitable for poking into the rotate field of an sxt or sxta
3945 instruction, or FAIL on error. */
3948 parse_ror (char **str
)
3953 if (strncasecmp (s
, "ROR", 3) == 0)
3957 inst
.error
= _("missing rotation field after comma");
3961 if (parse_immediate (&s
, &rot
, 0, 24, FALSE
) == FAIL
)
3966 case 0: *str
= s
; return 0x0;
3967 case 8: *str
= s
; return 0x1;
3968 case 16: *str
= s
; return 0x2;
3969 case 24: *str
= s
; return 0x3;
3972 inst
.error
= _("rotation can only be 0, 8, 16, or 24");
3977 /* Parse a conditional code (from conds[] below). The value returned is in the
3978 range 0 .. 14, or FAIL. */
3980 parse_cond (char **str
)
3983 const struct asm_cond
*c
;
3986 while (ISALPHA (*q
))
3989 c
= hash_find_n (arm_cond_hsh
, p
, q
- p
);
3992 inst
.error
= _("condition required");
4000 /* Parse an option for a barrier instruction. Returns the encoding for the
4003 parse_barrier (char **str
)
4006 const struct asm_barrier_opt
*o
;
4009 while (ISALPHA (*q
))
4012 o
= hash_find_n (arm_barrier_opt_hsh
, p
, q
- p
);
4020 /* Parse the operands of a table branch instruction. Similar to a memory
4023 parse_tb (char **str
)
4028 if (skip_past_char (&p
, '[') == FAIL
)
4030 inst
.error
= _("'[' expected");
4034 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
4036 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
4039 inst
.operands
[0].reg
= reg
;
4041 if (skip_past_comma (&p
) == FAIL
)
4043 inst
.error
= _("',' expected");
4047 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
4049 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
4052 inst
.operands
[0].imm
= reg
;
4054 if (skip_past_comma (&p
) == SUCCESS
)
4056 if (parse_shift (&p
, 0, SHIFT_LSL_IMMEDIATE
) == FAIL
)
4058 if (inst
.reloc
.exp
.X_add_number
!= 1)
4060 inst
.error
= _("invalid shift");
4063 inst
.operands
[0].shifted
= 1;
4066 if (skip_past_char (&p
, ']') == FAIL
)
4068 inst
.error
= _("']' expected");
4075 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
4076 information on the types the operands can take and how they are encoded.
4077 Up to four operands may be read; this function handles setting the
4078 ".present" field for each read operand itself.
4079 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
4080 else returns FAIL. */
4083 parse_neon_mov (char **str
, int *which_operand
)
4085 int i
= *which_operand
, val
;
4086 enum arm_reg_type rtype
;
4088 struct neon_type_el optype
;
4090 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
4092 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
4093 inst
.operands
[i
].reg
= val
;
4094 inst
.operands
[i
].isscalar
= 1;
4095 inst
.operands
[i
].vectype
= optype
;
4096 inst
.operands
[i
++].present
= 1;
4098 if (skip_past_comma (&ptr
) == FAIL
)
4101 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
4104 inst
.operands
[i
].reg
= val
;
4105 inst
.operands
[i
].isreg
= 1;
4106 inst
.operands
[i
].present
= 1;
4108 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
, &optype
))
4111 /* Cases 0, 1, 2, 3, 5 (D only). */
4112 if (skip_past_comma (&ptr
) == FAIL
)
4115 inst
.operands
[i
].reg
= val
;
4116 inst
.operands
[i
].isreg
= 1;
4117 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
4118 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
4119 inst
.operands
[i
].isvec
= 1;
4120 inst
.operands
[i
].vectype
= optype
;
4121 inst
.operands
[i
++].present
= 1;
4123 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
4125 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
4126 Case 13: VMOV <Sd>, <Rm> */
4127 inst
.operands
[i
].reg
= val
;
4128 inst
.operands
[i
].isreg
= 1;
4129 inst
.operands
[i
].present
= 1;
4131 if (rtype
== REG_TYPE_NQ
)
4133 first_error (_("can't use Neon quad register here"));
4136 else if (rtype
!= REG_TYPE_VFS
)
4139 if (skip_past_comma (&ptr
) == FAIL
)
4141 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
4143 inst
.operands
[i
].reg
= val
;
4144 inst
.operands
[i
].isreg
= 1;
4145 inst
.operands
[i
].present
= 1;
4148 else if (parse_qfloat_immediate (&ptr
, &inst
.operands
[i
].imm
) == SUCCESS
)
4149 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
4150 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
4151 Case 10: VMOV.F32 <Sd>, #<imm>
4152 Case 11: VMOV.F64 <Dd>, #<imm> */
4153 inst
.operands
[i
].immisfloat
= 1;
4154 else if (parse_big_immediate (&ptr
, i
) == SUCCESS
)
4155 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
4156 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
4158 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
,
4161 /* Case 0: VMOV<c><q> <Qd>, <Qm>
4162 Case 1: VMOV<c><q> <Dd>, <Dm>
4163 Case 8: VMOV.F32 <Sd>, <Sm>
4164 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */
4166 inst
.operands
[i
].reg
= val
;
4167 inst
.operands
[i
].isreg
= 1;
4168 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
4169 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
4170 inst
.operands
[i
].isvec
= 1;
4171 inst
.operands
[i
].vectype
= optype
;
4172 inst
.operands
[i
].present
= 1;
4174 if (skip_past_comma (&ptr
) == SUCCESS
)
4179 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
4182 inst
.operands
[i
].reg
= val
;
4183 inst
.operands
[i
].isreg
= 1;
4184 inst
.operands
[i
++].present
= 1;
4186 if (skip_past_comma (&ptr
) == FAIL
)
4189 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
4192 inst
.operands
[i
].reg
= val
;
4193 inst
.operands
[i
].isreg
= 1;
4194 inst
.operands
[i
++].present
= 1;
4199 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
4203 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
4206 inst
.operands
[i
].reg
= val
;
4207 inst
.operands
[i
].isreg
= 1;
4208 inst
.operands
[i
++].present
= 1;
4210 if (skip_past_comma (&ptr
) == FAIL
)
4213 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
4215 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
4216 inst
.operands
[i
].reg
= val
;
4217 inst
.operands
[i
].isscalar
= 1;
4218 inst
.operands
[i
].present
= 1;
4219 inst
.operands
[i
].vectype
= optype
;
4221 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
4223 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
4224 inst
.operands
[i
].reg
= val
;
4225 inst
.operands
[i
].isreg
= 1;
4226 inst
.operands
[i
++].present
= 1;
4228 if (skip_past_comma (&ptr
) == FAIL
)
4231 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFSD
, &rtype
, &optype
))
4234 first_error (_(reg_expected_msgs
[REG_TYPE_VFSD
]));
4238 inst
.operands
[i
].reg
= val
;
4239 inst
.operands
[i
].isreg
= 1;
4240 inst
.operands
[i
].isvec
= 1;
4241 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
4242 inst
.operands
[i
].vectype
= optype
;
4243 inst
.operands
[i
].present
= 1;
4245 if (rtype
== REG_TYPE_VFS
)
4249 if (skip_past_comma (&ptr
) == FAIL
)
4251 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
,
4254 first_error (_(reg_expected_msgs
[REG_TYPE_VFS
]));
4257 inst
.operands
[i
].reg
= val
;
4258 inst
.operands
[i
].isreg
= 1;
4259 inst
.operands
[i
].isvec
= 1;
4260 inst
.operands
[i
].issingle
= 1;
4261 inst
.operands
[i
].vectype
= optype
;
4262 inst
.operands
[i
].present
= 1;
4265 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
, &optype
))
4269 inst
.operands
[i
].reg
= val
;
4270 inst
.operands
[i
].isreg
= 1;
4271 inst
.operands
[i
].isvec
= 1;
4272 inst
.operands
[i
].issingle
= 1;
4273 inst
.operands
[i
].vectype
= optype
;
4274 inst
.operands
[i
++].present
= 1;
4279 first_error (_("parse error"));
4283 /* Successfully parsed the operands. Update args. */
4289 first_error (_("expected comma"));
4293 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
/* Matcher codes for parse_operands.  The order of enumerators is
   significant: OP_FIRST_OPTIONAL marks the start of the optional
   operand codes.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,	/* Neon double precision register (0..31) */
  OP_RNQ,	/* Neon quad precision register */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RNDQ,	/* Neon double or quad precision register */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,	/* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_REGLST,	/* ARM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,	/* VFP single or double-precision register list (& quad) */
  OP_NRDLST,	/* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,	/* Neon element/structure list */

  OP_NILO,	/* Neon immediate/logic operands 2 or 2+3. (VBIC, VORR...) */
  OP_RNDQ_I0,	/* Neon D or Q reg, or immediate zero. */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero. */
  OP_RR_RNSC,	/* ARM reg or Neon scalar. */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar. */
  OP_RNDQ_RNSC,	/* Neon D or Q reg, or Neon scalar. */
  OP_RND_RNSC,	/* Neon D reg, or Neon scalar. */
  OP_VMOV,	/* Neon VMOV operands. */
  OP_RNDQ_IMVNb, /* Neon D or Q reg, or immediate good for VMVN. */
  OP_RNDQ_I63b,	/* Neon D or Q reg, or immediate for shift. */
  OP_RIWR_I32z,	/* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2. */

  OP_I0,	/* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,	/*		   0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I255,	/*		   0 .. 255 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS,	/* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,	/* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc. */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_PSR,	/* CPSR/SPSR mask for msr */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch. */

  OP_RVC_PSR,	/* CPSR/SPSR mask for msr, or VFP control register. */
  OP_APSR_RR,	/* ARM register or "APSR_nzcv". */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc suff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC,	/* iWMMXt R or C reg */
  OP_RIWC_RIWG,	/* iWMMXt wC or wCG reg */

  /* Optional operands.  */
  OP_oI7b,	/* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	/*			       0 .. 31 */
  OP_oI32b,	/*			       1 .. 32 */
  OP_oIffffb,	/*			       0 .. 65535 */
  OP_oI255c,	/* curly-brace enclosed, 0 .. 255 */

  OP_oRR,	/* ARM register */
  OP_oRRnpc,	/* ARM register, not the PC */
  OP_oRRw,	/* ARM register, not r15, optional trailing ! */
  OP_oRND,	/* Optional Neon double precision register */
  OP_oRNQ,	/* Optional Neon quad precision register */
  OP_oRNDQ,	/* Optional Neon double or quad precision register */
  OP_oRNSDQ,	/* Optional single, double or quad precision vector register */
  OP_oSHll,	/* LSL immediate */
  OP_oSHar,	/* ASR immediate */
  OP_oSHllar,	/* LSL or ASR immediate */
  OP_oROR,	/* ROR 0/8/16/24 */
  OP_oBARRIER,	/* Option argument for a barrier instruction.  */

  OP_FIRST_OPTIONAL = OP_oI7b
};
4418 /* Generic instruction operand parser. This does no encoding and no
4419 semantic validation; it merely squirrels values away in the inst
4420 structure. Returns SUCCESS or FAIL depending on whether the
4421 specified grammar matched. */
4423 parse_operands (char *str
, const unsigned char *pattern
)
4425 unsigned const char *upat
= pattern
;
4426 char *backtrack_pos
= 0;
4427 const char *backtrack_error
= 0;
4428 int i
, val
, backtrack_index
= 0;
4429 enum arm_reg_type rtype
;
4430 parse_operand_result result
;
4432 #define po_char_or_fail(chr) do { \
4433 if (skip_past_char (&str, chr) == FAIL) \
4437 #define po_reg_or_fail(regtype) do { \
4438 val = arm_typed_reg_parse (&str, regtype, &rtype, \
4439 &inst.operands[i].vectype); \
4442 first_error (_(reg_expected_msgs[regtype])); \
4445 inst.operands[i].reg = val; \
4446 inst.operands[i].isreg = 1; \
4447 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
4448 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
4449 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
4450 || rtype == REG_TYPE_VFD \
4451 || rtype == REG_TYPE_NQ); \
4454 #define po_reg_or_goto(regtype, label) do { \
4455 val = arm_typed_reg_parse (&str, regtype, &rtype, \
4456 &inst.operands[i].vectype); \
4460 inst.operands[i].reg = val; \
4461 inst.operands[i].isreg = 1; \
4462 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
4463 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
4464 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
4465 || rtype == REG_TYPE_VFD \
4466 || rtype == REG_TYPE_NQ); \
4469 #define po_imm_or_fail(min, max, popt) do { \
4470 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
4472 inst.operands[i].imm = val; \
4475 #define po_scalar_or_goto(elsz, label) do { \
4476 val = parse_scalar (&str, elsz, &inst.operands[i].vectype); \
4479 inst.operands[i].reg = val; \
4480 inst.operands[i].isscalar = 1; \
4483 #define po_misc_or_fail(expr) do { \
4488 #define po_misc_or_fail_no_backtrack(expr) do { \
4490 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)\
4491 backtrack_pos = 0; \
4492 if (result != PARSE_OPERAND_SUCCESS) \
4496 skip_whitespace (str
);
4498 for (i
= 0; upat
[i
] != OP_stop
; i
++)
4500 if (upat
[i
] >= OP_FIRST_OPTIONAL
)
4502 /* Remember where we are in case we need to backtrack. */
4503 assert (!backtrack_pos
);
4504 backtrack_pos
= str
;
4505 backtrack_error
= inst
.error
;
4506 backtrack_index
= i
;
4509 if (i
> 0 && (i
> 1 || inst
.operands
[0].present
))
4510 po_char_or_fail (',');
4518 case OP_RR
: po_reg_or_fail (REG_TYPE_RN
); break;
4519 case OP_RCP
: po_reg_or_fail (REG_TYPE_CP
); break;
4520 case OP_RCN
: po_reg_or_fail (REG_TYPE_CN
); break;
4521 case OP_RF
: po_reg_or_fail (REG_TYPE_FN
); break;
4522 case OP_RVS
: po_reg_or_fail (REG_TYPE_VFS
); break;
4523 case OP_RVD
: po_reg_or_fail (REG_TYPE_VFD
); break;
4525 case OP_RND
: po_reg_or_fail (REG_TYPE_VFD
); break;
4526 case OP_RVC
: po_reg_or_fail (REG_TYPE_VFC
); break;
4527 case OP_RMF
: po_reg_or_fail (REG_TYPE_MVF
); break;
4528 case OP_RMD
: po_reg_or_fail (REG_TYPE_MVD
); break;
4529 case OP_RMFX
: po_reg_or_fail (REG_TYPE_MVFX
); break;
4530 case OP_RMDX
: po_reg_or_fail (REG_TYPE_MVDX
); break;
4531 case OP_RMAX
: po_reg_or_fail (REG_TYPE_MVAX
); break;
4532 case OP_RMDS
: po_reg_or_fail (REG_TYPE_DSPSC
); break;
4533 case OP_RIWR
: po_reg_or_fail (REG_TYPE_MMXWR
); break;
4534 case OP_RIWC
: po_reg_or_fail (REG_TYPE_MMXWC
); break;
4535 case OP_RIWG
: po_reg_or_fail (REG_TYPE_MMXWCG
); break;
4536 case OP_RXA
: po_reg_or_fail (REG_TYPE_XSCALE
); break;
4538 case OP_RNQ
: po_reg_or_fail (REG_TYPE_NQ
); break;
4540 case OP_RNDQ
: po_reg_or_fail (REG_TYPE_NDQ
); break;
4541 case OP_RVSD
: po_reg_or_fail (REG_TYPE_VFSD
); break;
4543 case OP_RNSDQ
: po_reg_or_fail (REG_TYPE_NSDQ
); break;
4545 /* Neon scalar. Using an element size of 8 means that some invalid
4546 scalars are accepted here, so deal with those in later code. */
4547 case OP_RNSC
: po_scalar_or_goto (8, failure
); break;
4549 /* WARNING: We can expand to two operands here. This has the potential
4550 to totally confuse the backtracking mechanism! It will be OK at
4551 least as long as we don't try to use optional args as well,
4555 po_reg_or_goto (REG_TYPE_NDQ
, try_imm
);
4556 inst
.operands
[i
].present
= 1;
4558 skip_past_comma (&str
);
4559 po_reg_or_goto (REG_TYPE_NDQ
, one_reg_only
);
4562 /* Optional register operand was omitted. Unfortunately, it's in
4563 operands[i-1] and we need it to be in inst.operands[i]. Fix that
4564 here (this is a bit grotty). */
4565 inst
.operands
[i
] = inst
.operands
[i
-1];
4566 inst
.operands
[i
-1].present
= 0;
4569 /* There's a possibility of getting a 64-bit immediate here, so
4570 we need special handling. */
4571 if (parse_big_immediate (&str
, i
) == FAIL
)
4573 inst
.error
= _("immediate value is out of range");
4581 po_reg_or_goto (REG_TYPE_NDQ
, try_imm0
);
4584 po_imm_or_fail (0, 0, TRUE
);
4589 po_reg_or_goto (REG_TYPE_VFSD
, try_imm0
);
4594 po_scalar_or_goto (8, try_rr
);
4597 po_reg_or_fail (REG_TYPE_RN
);
4603 po_scalar_or_goto (8, try_nsdq
);
4606 po_reg_or_fail (REG_TYPE_NSDQ
);
4612 po_scalar_or_goto (8, try_ndq
);
4615 po_reg_or_fail (REG_TYPE_NDQ
);
4621 po_scalar_or_goto (8, try_vfd
);
4624 po_reg_or_fail (REG_TYPE_VFD
);
4629 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
4630 not careful then bad things might happen. */
4631 po_misc_or_fail (parse_neon_mov (&str
, &i
) == FAIL
);
4636 po_reg_or_goto (REG_TYPE_NDQ
, try_mvnimm
);
4639 /* There's a possibility of getting a 64-bit immediate here, so
4640 we need special handling. */
4641 if (parse_big_immediate (&str
, i
) == FAIL
)
4643 inst
.error
= _("immediate value is out of range");
4651 po_reg_or_goto (REG_TYPE_NDQ
, try_shimm
);
4654 po_imm_or_fail (0, 63, TRUE
);
4659 po_char_or_fail ('[');
4660 po_reg_or_fail (REG_TYPE_RN
);
4661 po_char_or_fail (']');
4666 po_reg_or_fail (REG_TYPE_RN
);
4667 if (skip_past_char (&str
, '!') == SUCCESS
)
4668 inst
.operands
[i
].writeback
= 1;
4672 case OP_I7
: po_imm_or_fail ( 0, 7, FALSE
); break;
4673 case OP_I15
: po_imm_or_fail ( 0, 15, FALSE
); break;
4674 case OP_I16
: po_imm_or_fail ( 1, 16, FALSE
); break;
4675 case OP_I16z
: po_imm_or_fail ( 0, 16, FALSE
); break;
4676 case OP_I31
: po_imm_or_fail ( 0, 31, FALSE
); break;
4677 case OP_I32
: po_imm_or_fail ( 1, 32, FALSE
); break;
4678 case OP_I32z
: po_imm_or_fail ( 0, 32, FALSE
); break;
4679 case OP_I63s
: po_imm_or_fail (-64, 63, FALSE
); break;
4680 case OP_I63
: po_imm_or_fail ( 0, 63, FALSE
); break;
4681 case OP_I64
: po_imm_or_fail ( 1, 64, FALSE
); break;
4682 case OP_I64z
: po_imm_or_fail ( 0, 64, FALSE
); break;
4683 case OP_I255
: po_imm_or_fail ( 0, 255, FALSE
); break;
4685 case OP_I4b
: po_imm_or_fail ( 1, 4, TRUE
); break;
4687 case OP_I7b
: po_imm_or_fail ( 0, 7, TRUE
); break;
4688 case OP_I15b
: po_imm_or_fail ( 0, 15, TRUE
); break;
4690 case OP_I31b
: po_imm_or_fail ( 0, 31, TRUE
); break;
4691 case OP_oI32b
: po_imm_or_fail ( 1, 32, TRUE
); break;
4692 case OP_oIffffb
: po_imm_or_fail ( 0, 0xffff, TRUE
); break;
4694 /* Immediate variants */
4696 po_char_or_fail ('{');
4697 po_imm_or_fail (0, 255, TRUE
);
4698 po_char_or_fail ('}');
4702 /* The expression parser chokes on a trailing !, so we have
4703 to find it first and zap it. */
4706 while (*s
&& *s
!= ',')
4711 inst
.operands
[i
].writeback
= 1;
4713 po_imm_or_fail (0, 31, TRUE
);
4721 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
4726 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
4731 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
4733 if (inst
.reloc
.exp
.X_op
== O_symbol
)
4735 val
= parse_reloc (&str
);
4738 inst
.error
= _("unrecognized relocation suffix");
4741 else if (val
!= BFD_RELOC_UNUSED
)
4743 inst
.operands
[i
].imm
= val
;
4744 inst
.operands
[i
].hasreloc
= 1;
4749 /* Operand for MOVW or MOVT. */
4751 po_misc_or_fail (parse_half (&str
));
4754 /* Register or expression */
4755 case OP_RR_EXr
: po_reg_or_goto (REG_TYPE_RN
, EXPr
); break;
4756 case OP_RR_EXi
: po_reg_or_goto (REG_TYPE_RN
, EXPi
); break;
4758 /* Register or immediate */
4759 case OP_RRnpc_I0
: po_reg_or_goto (REG_TYPE_RN
, I0
); break;
4760 I0
: po_imm_or_fail (0, 0, FALSE
); break;
4762 case OP_RF_IF
: po_reg_or_goto (REG_TYPE_FN
, IF
); break;
4764 if (!is_immediate_prefix (*str
))
4767 val
= parse_fpa_immediate (&str
);
4770 /* FPA immediates are encoded as registers 8-15.
4771 parse_fpa_immediate has already applied the offset. */
4772 inst
.operands
[i
].reg
= val
;
4773 inst
.operands
[i
].isreg
= 1;
4776 case OP_RIWR_I32z
: po_reg_or_goto (REG_TYPE_MMXWR
, I32z
); break;
4777 I32z
: po_imm_or_fail (0, 32, FALSE
); break;
4779 /* Two kinds of register */
4782 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
4784 || (rege
->type
!= REG_TYPE_MMXWR
4785 && rege
->type
!= REG_TYPE_MMXWC
4786 && rege
->type
!= REG_TYPE_MMXWCG
))
4788 inst
.error
= _("iWMMXt data or control register expected");
4791 inst
.operands
[i
].reg
= rege
->number
;
4792 inst
.operands
[i
].isreg
= (rege
->type
== REG_TYPE_MMXWR
);
4798 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
4800 || (rege
->type
!= REG_TYPE_MMXWC
4801 && rege
->type
!= REG_TYPE_MMXWCG
))
4803 inst
.error
= _("iWMMXt control register expected");
4806 inst
.operands
[i
].reg
= rege
->number
;
4807 inst
.operands
[i
].isreg
= 1;
4812 case OP_CPSF
: val
= parse_cps_flags (&str
); break;
4813 case OP_ENDI
: val
= parse_endian_specifier (&str
); break;
4814 case OP_oROR
: val
= parse_ror (&str
); break;
4815 case OP_PSR
: val
= parse_psr (&str
); break;
4816 case OP_COND
: val
= parse_cond (&str
); break;
4817 case OP_oBARRIER
:val
= parse_barrier (&str
); break;
4820 po_reg_or_goto (REG_TYPE_VFC
, try_psr
);
4821 inst
.operands
[i
].isvec
= 1; /* Mark VFP control reg as vector. */
4824 val
= parse_psr (&str
);
4828 po_reg_or_goto (REG_TYPE_RN
, try_apsr
);
4831 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
4833 if (strncasecmp (str
, "APSR_", 5) == 0)
4840 case 'c': found
= (found
& 1) ? 16 : found
| 1; break;
4841 case 'n': found
= (found
& 2) ? 16 : found
| 2; break;
4842 case 'z': found
= (found
& 4) ? 16 : found
| 4; break;
4843 case 'v': found
= (found
& 8) ? 16 : found
| 8; break;
4844 default: found
= 16;
4848 inst
.operands
[i
].isvec
= 1;
4855 po_misc_or_fail (parse_tb (&str
));
4858 /* Register lists */
4860 val
= parse_reg_list (&str
);
4863 inst
.operands
[1].writeback
= 1;
4869 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_S
);
4873 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_D
);
4877 /* Allow Q registers too. */
4878 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
4883 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
4885 inst
.operands
[i
].issingle
= 1;
4890 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
4895 val
= parse_neon_el_struct_list (&str
, &inst
.operands
[i
].reg
,
4896 &inst
.operands
[i
].vectype
);
4899 /* Addressing modes */
4901 po_misc_or_fail (parse_address (&str
, i
));
4905 po_misc_or_fail_no_backtrack (
4906 parse_address_group_reloc (&str
, i
, GROUP_LDR
));
4910 po_misc_or_fail_no_backtrack (
4911 parse_address_group_reloc (&str
, i
, GROUP_LDRS
));
4915 po_misc_or_fail_no_backtrack (
4916 parse_address_group_reloc (&str
, i
, GROUP_LDC
));
4920 po_misc_or_fail (parse_shifter_operand (&str
, i
));
4924 po_misc_or_fail_no_backtrack (
4925 parse_shifter_operand_group_reloc (&str
, i
));
4929 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_IMMEDIATE
));
4933 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_ASR_IMMEDIATE
));
4937 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_OR_ASR_IMMEDIATE
));
4941 as_fatal ("unhandled operand code %d", upat
[i
]);
4944 /* Various value-based sanity checks and shared operations. We
4945 do not signal immediate failures for the register constraints;
4946 this allows a syntax error to take precedence. */
4955 if (inst
.operands
[i
].isreg
&& inst
.operands
[i
].reg
== REG_PC
)
4956 inst
.error
= BAD_PC
;
4974 inst
.operands
[i
].imm
= val
;
4981 /* If we get here, this operand was successfully parsed. */
4982 inst
.operands
[i
].present
= 1;
4986 inst
.error
= BAD_ARGS
;
4991 /* The parse routine should already have set inst.error, but set a
4992 defaut here just in case. */
4994 inst
.error
= _("syntax error");
4998 /* Do not backtrack over a trailing optional argument that
4999 absorbed some text. We will only fail again, with the
5000 'garbage following instruction' error message, which is
5001 probably less helpful than the current one. */
5002 if (backtrack_index
== i
&& backtrack_pos
!= str
5003 && upat
[i
+1] == OP_stop
)
5006 inst
.error
= _("syntax error");
5010 /* Try again, skipping the optional argument at backtrack_pos. */
5011 str
= backtrack_pos
;
5012 inst
.error
= backtrack_error
;
5013 inst
.operands
[backtrack_index
].present
= 0;
5014 i
= backtrack_index
;
5018 /* Check that we have parsed all the arguments. */
5019 if (*str
!= '\0' && !inst
.error
)
5020 inst
.error
= _("garbage following instruction");
5022 return inst
.error
? FAIL
: SUCCESS
;
5025 #undef po_char_or_fail
5026 #undef po_reg_or_fail
5027 #undef po_reg_or_goto
5028 #undef po_imm_or_fail
5029 #undef po_scalar_or_fail
5031 /* Shorthand macro for instruction encoding functions issuing errors. */
5032 #define constraint(expr, err) do { \
5040 /* Functions for operand encoding. ARM, then Thumb. */
5042 #define rotate_left(v, n) (v << n | v >> (32 - n))
5044 /* If VAL can be encoded in the immediate field of an ARM instruction,
5045 return the encoded form. Otherwise, return FAIL. */
5048 encode_arm_immediate (unsigned int val
)
5052 for (i
= 0; i
< 32; i
+= 2)
5053 if ((a
= rotate_left (val
, i
)) <= 0xff)
5054 return a
| (i
<< 7); /* 12-bit pack: [shift-cnt,const]. */
5059 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
5060 return the encoded form. Otherwise, return FAIL. */
5062 encode_thumb32_immediate (unsigned int val
)
5069 for (i
= 1; i
<= 24; i
++)
5072 if ((val
& ~(0xff << i
)) == 0)
5073 return ((val
>> i
) & 0x7f) | ((32 - i
) << 7);
5077 if (val
== ((a
<< 16) | a
))
5079 if (val
== ((a
<< 24) | (a
<< 16) | (a
<< 8) | a
))
5083 if (val
== ((a
<< 16) | a
))
5084 return 0x200 | (a
>> 8);
5088 /* Encode a VFP SP or DP register number into inst.instruction. */
/* NOTE(review): this block is a damaged extraction -- the "static
   void" header line, the braces, and the switch/case labels
   (presumably the vfp_reg_pos values VFP_REG_Sd/Sn/Sm/Dd/Dn/Dm) were
   lost.  The surviving tokens are kept byte-for-byte below; comments
   only mark what each fragment appears to belong to.  */
5091 encode_arm_vfp_reg (int reg
, enum vfp_reg_pos pos
)
/* D-register positions: presumably followed by a lost "reg > 15"
   check -- registers d16-d31 require VFPv3.  TODO confirm against the
   original source.  */
5093 if ((pos
== VFP_REG_Dd
|| pos
== VFP_REG_Dn
|| pos
== VFP_REG_Dm
)
/* If VFPv3 is available, record its use in the per-mode feature
   sets; otherwise report the range error.  */
5096 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3
))
5099 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
5102 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
5107 first_error (_("D register out of range for selected VFP version"));
/* Single-precision encodings: 5-bit register split as 4+1.
   Apparently Sd (bits 12-15 + bit 22).  */
5115 inst
.instruction
|= ((reg
>> 1) << 12) | ((reg
& 1) << 22);
/* Apparently Sn (bits 16-19 + bit 7).  */
5119 inst
.instruction
|= ((reg
>> 1) << 16) | ((reg
& 1) << 7);
/* Apparently Sm (bits 0-3 + bit 5).  */
5123 inst
.instruction
|= ((reg
>> 1) << 0) | ((reg
& 1) << 5);
/* Double-precision encodings: 5-bit register split as 4 low bits plus
   a high bit.  Apparently Dd (bits 12-15 + bit 22).  */
5127 inst
.instruction
|= ((reg
& 15) << 12) | ((reg
>> 4) << 22);
/* Apparently Dn (bits 16-19 + bit 7).  */
5131 inst
.instruction
|= ((reg
& 15) << 16) | ((reg
>> 4) << 7);
/* Apparently Dm (bits 0-3 + bit 5).  */
5135 inst
.instruction
|= (reg
& 15) | ((reg
>> 4) << 5);
5143 /* Encode a <shift> in an ARM-format instruction. The immediate,
5144 if any, is handled by md_apply_fix. */
5146 encode_arm_shift (int i
)
5148 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
5149 inst
.instruction
|= SHIFT_ROR
<< 5;
5152 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
5153 if (inst
.operands
[i
].immisreg
)
5155 inst
.instruction
|= SHIFT_BY_REG
;
5156 inst
.instruction
|= inst
.operands
[i
].imm
<< 8;
5159 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
5164 encode_arm_shifter_operand (int i
)
5166 if (inst
.operands
[i
].isreg
)
5168 inst
.instruction
|= inst
.operands
[i
].reg
;
5169 encode_arm_shift (i
);
5172 inst
.instruction
|= INST_IMMEDIATE
;
5175 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
5177 encode_arm_addr_mode_common (int i
, bfd_boolean is_t
)
5179 assert (inst
.operands
[i
].isreg
);
5180 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
5182 if (inst
.operands
[i
].preind
)
5186 inst
.error
= _("instruction does not accept preindexed addressing");
5189 inst
.instruction
|= PRE_INDEX
;
5190 if (inst
.operands
[i
].writeback
)
5191 inst
.instruction
|= WRITE_BACK
;
5194 else if (inst
.operands
[i
].postind
)
5196 assert (inst
.operands
[i
].writeback
);
5198 inst
.instruction
|= WRITE_BACK
;
5200 else /* unindexed - only for coprocessor */
5202 inst
.error
= _("instruction does not accept unindexed addressing");
5206 if (((inst
.instruction
& WRITE_BACK
) || !(inst
.instruction
& PRE_INDEX
))
5207 && (((inst
.instruction
& 0x000f0000) >> 16)
5208 == ((inst
.instruction
& 0x0000f000) >> 12)))
5209 as_warn ((inst
.instruction
& LOAD_BIT
)
5210 ? _("destination register same as write-back base")
5211 : _("source register same as write-back base"));
5214 /* inst.operands[i] was set up by parse_address. Encode it into an
5215 ARM-format mode 2 load or store instruction. If is_t is true,
5216 reject forms that cannot be used with a T instruction (i.e. not
5219 encode_arm_addr_mode_2 (int i
, bfd_boolean is_t
)
5221 encode_arm_addr_mode_common (i
, is_t
);
5223 if (inst
.operands
[i
].immisreg
)
5225 inst
.instruction
|= INST_IMMEDIATE
; /* yes, this is backwards */
5226 inst
.instruction
|= inst
.operands
[i
].imm
;
5227 if (!inst
.operands
[i
].negative
)
5228 inst
.instruction
|= INDEX_UP
;
5229 if (inst
.operands
[i
].shifted
)
5231 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
5232 inst
.instruction
|= SHIFT_ROR
<< 5;
5235 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
5236 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
5240 else /* immediate offset in inst.reloc */
5242 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
5243 inst
.reloc
.type
= BFD_RELOC_ARM_OFFSET_IMM
;
5247 /* inst.operands[i] was set up by parse_address. Encode it into an
5248 ARM-format mode 3 load or store instruction. Reject forms that
5249 cannot be used with such instructions. If is_t is true, reject
5250 forms that cannot be used with a T instruction (i.e. not
5253 encode_arm_addr_mode_3 (int i
, bfd_boolean is_t
)
5255 if (inst
.operands
[i
].immisreg
&& inst
.operands
[i
].shifted
)
5257 inst
.error
= _("instruction does not accept scaled register index");
5261 encode_arm_addr_mode_common (i
, is_t
);
5263 if (inst
.operands
[i
].immisreg
)
5265 inst
.instruction
|= inst
.operands
[i
].imm
;
5266 if (!inst
.operands
[i
].negative
)
5267 inst
.instruction
|= INDEX_UP
;
5269 else /* immediate offset in inst.reloc */
5271 inst
.instruction
|= HWOFFSET_IMM
;
5272 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
5273 inst
.reloc
.type
= BFD_RELOC_ARM_OFFSET_IMM8
;
5277 /* inst.operands[i] was set up by parse_address. Encode it into an
5278 ARM-format instruction. Reject all forms which cannot be encoded
5279 into a coprocessor load/store instruction. If wb_ok is false,
5280 reject use of writeback; if unind_ok is false, reject use of
5281 unindexed addressing. If reloc_override is not 0, use it instead
5282 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
5283 (in which case it is preserved). */
5286 encode_arm_cp_address (int i
, int wb_ok
, int unind_ok
, int reloc_override
)
5288 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
5290 assert (!(inst
.operands
[i
].preind
&& inst
.operands
[i
].postind
));
5292 if (!inst
.operands
[i
].preind
&& !inst
.operands
[i
].postind
) /* unindexed */
5294 assert (!inst
.operands
[i
].writeback
);
5297 inst
.error
= _("instruction does not support unindexed addressing");
5300 inst
.instruction
|= inst
.operands
[i
].imm
;
5301 inst
.instruction
|= INDEX_UP
;
5305 if (inst
.operands
[i
].preind
)
5306 inst
.instruction
|= PRE_INDEX
;
5308 if (inst
.operands
[i
].writeback
)
5310 if (inst
.operands
[i
].reg
== REG_PC
)
5312 inst
.error
= _("pc may not be used with write-back");
5317 inst
.error
= _("instruction does not support writeback");
5320 inst
.instruction
|= WRITE_BACK
;
5324 inst
.reloc
.type
= reloc_override
;
5325 else if ((inst
.reloc
.type
< BFD_RELOC_ARM_ALU_PC_G0_NC
5326 || inst
.reloc
.type
> BFD_RELOC_ARM_LDC_SB_G2
)
5327 && inst
.reloc
.type
!= BFD_RELOC_ARM_LDR_PC_G0
)
5330 inst
.reloc
.type
= BFD_RELOC_ARM_T32_CP_OFF_IMM
;
5332 inst
.reloc
.type
= BFD_RELOC_ARM_CP_OFF_IMM
;
5338 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
5339 Determine whether it can be performed with a move instruction; if
5340 it can, convert inst.instruction to that move instruction and
5341 return 1; if it can't, convert inst.instruction to a literal-pool
5342 load and return 0. If this is not a valid thing to do in the
5343 current context, set inst.error and return 1.
5345 inst.operands[i] describes the destination register. */
5348 move_or_literal_pool (int i
, bfd_boolean thumb_p
, bfd_boolean mode_3
)
5353 tbit
= (inst
.instruction
> 0xffff) ? THUMB2_LOAD_BIT
: THUMB_LOAD_BIT
;
5357 if ((inst
.instruction
& tbit
) == 0)
5359 inst
.error
= _("invalid pseudo operation");
5362 if (inst
.reloc
.exp
.X_op
!= O_constant
&& inst
.reloc
.exp
.X_op
!= O_symbol
)
5364 inst
.error
= _("constant expression expected");
5367 if (inst
.reloc
.exp
.X_op
== O_constant
)
5371 if (!unified_syntax
&& (inst
.reloc
.exp
.X_add_number
& ~0xFF) == 0)
5373 /* This can be done with a mov(1) instruction. */
5374 inst
.instruction
= T_OPCODE_MOV_I8
| (inst
.operands
[i
].reg
<< 8);
5375 inst
.instruction
|= inst
.reloc
.exp
.X_add_number
;
5381 int value
= encode_arm_immediate (inst
.reloc
.exp
.X_add_number
);
5384 /* This can be done with a mov instruction. */
5385 inst
.instruction
&= LITERAL_MASK
;
5386 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MOV
<< DATA_OP_SHIFT
);
5387 inst
.instruction
|= value
& 0xfff;
5391 value
= encode_arm_immediate (~inst
.reloc
.exp
.X_add_number
);
5394 /* This can be done with a mvn instruction. */
5395 inst
.instruction
&= LITERAL_MASK
;
5396 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MVN
<< DATA_OP_SHIFT
);
5397 inst
.instruction
|= value
& 0xfff;
5403 if (add_to_lit_pool () == FAIL
)
5405 inst
.error
= _("literal pool insertion failed");
5408 inst
.operands
[1].reg
= REG_PC
;
5409 inst
.operands
[1].isreg
= 1;
5410 inst
.operands
[1].preind
= 1;
5411 inst
.reloc
.pc_rel
= 1;
5412 inst
.reloc
.type
= (thumb_p
5413 ? BFD_RELOC_ARM_THUMB_OFFSET
5415 ? BFD_RELOC_ARM_HWLITERAL
5416 : BFD_RELOC_ARM_LITERAL
));
5420 /* Functions for instruction encoding, sorted by subarchitecture.
5421 First some generics; their names are taken from the conventional
5422 bit positions for register arguments in ARM format instructions. */
5432 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5438 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5439 inst
.instruction
|= inst
.operands
[1].reg
;
5445 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5446 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
5452 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
5453 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
5459 unsigned Rn
= inst
.operands
[2].reg
;
5460 /* Enforce restrictions on SWP instruction. */
5461 if ((inst
.instruction
& 0x0fbfffff) == 0x01000090)
5462 constraint (Rn
== inst
.operands
[0].reg
|| Rn
== inst
.operands
[1].reg
,
5463 _("Rn must not overlap other operands"));
5464 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5465 inst
.instruction
|= inst
.operands
[1].reg
;
5466 inst
.instruction
|= Rn
<< 16;
5472 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5473 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
5474 inst
.instruction
|= inst
.operands
[2].reg
;
5480 inst
.instruction
|= inst
.operands
[0].reg
;
5481 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
5482 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
5488 inst
.instruction
|= inst
.operands
[0].imm
;
5494 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5495 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
5498 /* ARM instructions, in alphabetical order by function name (except
5499 that wrapper functions appear immediately after the function they
5502 /* This is a pseudo-op of the form "adr rd, label" to be converted
5503 into a relative address of the form "add rd, pc, #label-.-8". */
5508 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
5510 /* Frag hacking will turn this into a sub instruction if the offset turns
5511 out to be negative. */
5512 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
5513 inst
.reloc
.pc_rel
= 1;
5514 inst
.reloc
.exp
.X_add_number
-= 8;
5517 /* This is a pseudo-op of the form "adrl rd, label" to be converted
5518 into a relative address of the form:
5519 add rd, pc, #low(label-.-8)"
5520 add rd, rd, #high(label-.-8)" */
5525 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
5527 /* Frag hacking will turn this into a sub instruction if the offset turns
5528 out to be negative. */
5529 inst
.reloc
.type
= BFD_RELOC_ARM_ADRL_IMMEDIATE
;
5530 inst
.reloc
.pc_rel
= 1;
5531 inst
.size
= INSN_SIZE
* 2;
5532 inst
.reloc
.exp
.X_add_number
-= 8;
5538 if (!inst
.operands
[1].present
)
5539 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
5540 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5541 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
5542 encode_arm_shifter_operand (2);
5548 if (inst
.operands
[0].present
)
5550 constraint ((inst
.instruction
& 0xf0) == 0x60
5551 && inst
.operands
[0].imm
!= 0xf,
5552 "bad barrier type");
5553 inst
.instruction
|= inst
.operands
[0].imm
;
5556 inst
.instruction
|= 0xf;
5562 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
5563 constraint (msb
> 32, _("bit-field extends past end of register"));
5564 /* The instruction encoding stores the LSB and MSB,
5565 not the LSB and width. */
5566 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5567 inst
.instruction
|= inst
.operands
[1].imm
<< 7;
5568 inst
.instruction
|= (msb
- 1) << 16;
5576 /* #0 in second position is alternative syntax for bfc, which is
5577 the same instruction but with REG_PC in the Rm field. */
5578 if (!inst
.operands
[1].isreg
)
5579 inst
.operands
[1].reg
= REG_PC
;
5581 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
5582 constraint (msb
> 32, _("bit-field extends past end of register"));
5583 /* The instruction encoding stores the LSB and MSB,
5584 not the LSB and width. */
5585 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5586 inst
.instruction
|= inst
.operands
[1].reg
;
5587 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
5588 inst
.instruction
|= (msb
- 1) << 16;
5594 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
5595 _("bit-field extends past end of register"));
5596 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5597 inst
.instruction
|= inst
.operands
[1].reg
;
5598 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
5599 inst
.instruction
|= (inst
.operands
[3].imm
- 1) << 16;
5602 /* ARM V5 breakpoint instruction (argument parse)
5603 BKPT <16 bit unsigned immediate>
5604 Instruction is not conditional.
5605 The bit pattern given in insns[] has the COND_ALWAYS condition,
5606 and it is an error if the caller tried to override that. */
5611 /* Top 12 of 16 bits to bits 19:8. */
5612 inst
.instruction
|= (inst
.operands
[0].imm
& 0xfff0) << 4;
5614 /* Bottom 4 of 16 bits to bits 3:0. */
5615 inst
.instruction
|= inst
.operands
[0].imm
& 0xf;
5619 encode_branch (int default_reloc
)
5621 if (inst
.operands
[0].hasreloc
)
5623 constraint (inst
.operands
[0].imm
!= BFD_RELOC_ARM_PLT32
,
5624 _("the only suffix valid here is '(plt)'"));
5625 inst
.reloc
.type
= BFD_RELOC_ARM_PLT32
;
5629 inst
.reloc
.type
= default_reloc
;
5631 inst
.reloc
.pc_rel
= 1;
5638 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
5639 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
5642 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
5649 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
5651 if (inst
.cond
== COND_ALWAYS
)
5652 encode_branch (BFD_RELOC_ARM_PCREL_CALL
);
5654 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
5658 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
5661 /* ARM V5 branch-link-exchange instruction (argument parse)
5662 BLX <target_addr> ie BLX(1)
5663 BLX{<condition>} <Rm> ie BLX(2)
5664 Unfortunately, there are two different opcodes for this mnemonic.
5665 So, the insns[].value is not used, and the code here zaps values
5666 into inst.instruction.
5667 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
5672 if (inst
.operands
[0].isreg
)
5674 /* Arg is a register; the opcode provided by insns[] is correct.
5675 It is not illegal to do "blx pc", just useless. */
5676 if (inst
.operands
[0].reg
== REG_PC
)
5677 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
5679 inst
.instruction
|= inst
.operands
[0].reg
;
5683 /* Arg is an address; this instruction cannot be executed
5684 conditionally, and the opcode must be adjusted. */
5685 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
5686 inst
.instruction
= 0xfa000000;
5688 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
5689 encode_branch (BFD_RELOC_ARM_PCREL_CALL
);
5692 encode_branch (BFD_RELOC_ARM_PCREL_BLX
);
5699 if (inst
.operands
[0].reg
== REG_PC
)
5700 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
5702 inst
.instruction
|= inst
.operands
[0].reg
;
5706 /* ARM v5TEJ. Jump to Jazelle code. */
5711 if (inst
.operands
[0].reg
== REG_PC
)
5712 as_tsktsk (_("use of r15 in bxj is not really useful"));
5714 inst
.instruction
|= inst
.operands
[0].reg
;
5717 /* Co-processor data operation:
5718 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
5719 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
5723 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
5724 inst
.instruction
|= inst
.operands
[1].imm
<< 20;
5725 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
5726 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
5727 inst
.instruction
|= inst
.operands
[4].reg
;
5728 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
5734 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
5735 encode_arm_shifter_operand (1);
5738 /* Transfer between coprocessor and ARM registers.
5739 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
5744 No special properties. */
5749 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
5750 inst
.instruction
|= inst
.operands
[1].imm
<< 21;
5751 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
5752 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
5753 inst
.instruction
|= inst
.operands
[4].reg
;
5754 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
5757 /* Transfer between coprocessor register and pair of ARM registers.
5758 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
5763 Two XScale instructions are special cases of these:
5765 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
5766 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
5768 Result unpredicatable if Rd or Rn is R15. */
5773 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
5774 inst
.instruction
|= inst
.operands
[1].imm
<< 4;
5775 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
5776 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
5777 inst
.instruction
|= inst
.operands
[4].reg
;
5783 inst
.instruction
|= inst
.operands
[0].imm
<< 6;
5784 if (inst
.operands
[1].present
)
5786 inst
.instruction
|= CPSI_MMOD
;
5787 inst
.instruction
|= inst
.operands
[1].imm
;
/* NOTE(review): the fragment below ORs operands[0].imm into the low bits of
   the opcode; presumably the body of a one-operand immediate encoder (the
   surrounding header lines were lost in extraction, so the owning function
   cannot be confirmed from here -- TODO confirm against the full file).  */
5794 inst
.instruction
|= inst
.operands
[0].imm
;
/* NOTE(review): the comment below belongs to the ARM-mode IT handler, whose
   body intentionally emits nothing.  */
5800 /* There is no IT instruction in ARM mode. We
5801 process it but do not generate code for it. */
5808 int base_reg
= inst
.operands
[0].reg
;
5809 int range
= inst
.operands
[1].imm
;
5811 inst
.instruction
|= base_reg
<< 16;
5812 inst
.instruction
|= range
;
5814 if (inst
.operands
[1].writeback
)
5815 inst
.instruction
|= LDM_TYPE_2_OR_3
;
5817 if (inst
.operands
[0].writeback
)
5819 inst
.instruction
|= WRITE_BACK
;
5820 /* Check for unpredictable uses of writeback. */
5821 if (inst
.instruction
& LOAD_BIT
)
5823 /* Not allowed in LDM type 2. */
5824 if ((inst
.instruction
& LDM_TYPE_2_OR_3
)
5825 && ((range
& (1 << REG_PC
)) == 0))
5826 as_warn (_("writeback of base register is UNPREDICTABLE"));
5827 /* Only allowed if base reg not in list for other types. */
5828 else if (range
& (1 << base_reg
))
5829 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
5833 /* Not allowed for type 2. */
5834 if (inst
.instruction
& LDM_TYPE_2_OR_3
)
5835 as_warn (_("writeback of base register is UNPREDICTABLE"));
5836 /* Only allowed if base reg not in list, or first in list. */
5837 else if ((range
& (1 << base_reg
))
5838 && (range
& ((1 << base_reg
) - 1)))
5839 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
5844 /* ARMv5TE load-consecutive (argument parse)
5853 constraint (inst
.operands
[0].reg
% 2 != 0,
5854 _("first destination register must be even"));
5855 constraint (inst
.operands
[1].present
5856 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
5857 _("can only load two consecutive registers"));
5858 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
5859 constraint (!inst
.operands
[2].isreg
, _("'[' expected"));
5861 if (!inst
.operands
[1].present
)
5862 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
5864 if (inst
.instruction
& LOAD_BIT
)
5866 /* encode_arm_addr_mode_3 will diagnose overlap between the base
5867 register and the first register written; we have to diagnose
5868 overlap between the base and the second register written here. */
5870 if (inst
.operands
[2].reg
== inst
.operands
[1].reg
5871 && (inst
.operands
[2].writeback
|| inst
.operands
[2].postind
))
5872 as_warn (_("base register written back, and overlaps "
5873 "second destination register"));
5875 /* For an index-register load, the index register must not overlap the
5876 destination (even if not write-back). */
5877 else if (inst
.operands
[2].immisreg
5878 && ((unsigned) inst
.operands
[2].imm
== inst
.operands
[0].reg
5879 || (unsigned) inst
.operands
[2].imm
== inst
.operands
[1].reg
))
5880 as_warn (_("index register overlaps destination register"));
5883 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5884 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE
);
5890 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
5891 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
5892 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
5893 || inst
.operands
[1].negative
5894 /* This can arise if the programmer has written
5896 or if they have mistakenly used a register name as the last
5899 It is very difficult to distinguish between these two cases
5900 because "rX" might actually be a label. ie the register
5901 name has been occluded by a symbol of the same name. So we
5902 just generate a general 'bad addressing mode' type error
5903 message and leave it up to the programmer to discover the
5904 true cause and fix their mistake. */
5905 || (inst
.operands
[1].reg
== REG_PC
),
5908 constraint (inst
.reloc
.exp
.X_op
!= O_constant
5909 || inst
.reloc
.exp
.X_add_number
!= 0,
5910 _("offset must be zero in ARM encoding"));
5912 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5913 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
5914 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
5920 constraint (inst
.operands
[0].reg
% 2 != 0,
5921 _("even register required"));
5922 constraint (inst
.operands
[1].present
5923 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
5924 _("can only load two consecutive registers"));
5925 /* If op 1 were present and equal to PC, this function wouldn't
5926 have been called in the first place. */
5927 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
5929 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5930 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
5936 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5937 if (!inst
.operands
[1].isreg
)
5938 if (move_or_literal_pool (0, /*thumb_p=*/FALSE
, /*mode_3=*/FALSE
))
5940 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE
);
5946 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
5948 if (inst
.operands
[1].preind
)
5950 constraint (inst
.reloc
.exp
.X_op
!= O_constant
||
5951 inst
.reloc
.exp
.X_add_number
!= 0,
5952 _("this instruction requires a post-indexed address"));
5954 inst
.operands
[1].preind
= 0;
5955 inst
.operands
[1].postind
= 1;
5956 inst
.operands
[1].writeback
= 1;
5958 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5959 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE
);
5962 /* Halfword and signed-byte load/store operations. */
5967 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5968 if (!inst
.operands
[1].isreg
)
5969 if (move_or_literal_pool (0, /*thumb_p=*/FALSE
, /*mode_3=*/TRUE
))
5971 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE
);
5977 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
5979 if (inst
.operands
[1].preind
)
5981 constraint (inst
.reloc
.exp
.X_op
!= O_constant
||
5982 inst
.reloc
.exp
.X_add_number
!= 0,
5983 _("this instruction requires a post-indexed address"));
5985 inst
.operands
[1].preind
= 0;
5986 inst
.operands
[1].postind
= 1;
5987 inst
.operands
[1].writeback
= 1;
5989 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5990 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE
);
5993 /* Co-processor register load/store.
5994 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
5998 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
5999 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
6000 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
6006 /* This restriction does not apply to mls (nor to mla in v6 or later). */
6007 /* Only restrict on pre-V4 architectures - radar 4474226 */
6008 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
6009 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4
)
6010 && !force_cpusubtype_ALL
)
6011 as_tsktsk (_("rd and rm should be different in mla"));
6013 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6014 inst
.instruction
|= inst
.operands
[1].reg
;
6015 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
6016 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
6022 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6023 encode_arm_shifter_operand (1);
6026 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
6033 top
= (inst
.instruction
& 0x00400000) != 0;
6034 constraint (top
&& inst
.reloc
.type
== BFD_RELOC_ARM_MOVW
,
6035 _(":lower16: not allowed this instruction"));
6036 constraint (!top
&& inst
.reloc
.type
== BFD_RELOC_ARM_MOVT
,
6037 _(":upper16: not allowed instruction"));
6038 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6039 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
6041 imm
= inst
.reloc
.exp
.X_add_number
;
6042 /* The value is in two pieces: 0:11, 16:19. */
6043 inst
.instruction
|= (imm
& 0x00000fff);
6044 inst
.instruction
|= (imm
& 0x0000f000) << 4;
6048 static void do_vfp_nsyn_opcode (const char *);
6051 do_vfp_nsyn_mrs (void)
6053 if (inst
.operands
[0].isvec
)
6055 if (inst
.operands
[1].reg
!= 1)
6056 first_error (_("operand 1 must be FPSCR"));
6057 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
6058 memset (&inst
.operands
[1], '\0', sizeof (inst
.operands
[1]));
6059 do_vfp_nsyn_opcode ("fmstat");
6061 else if (inst
.operands
[1].isvec
)
6062 do_vfp_nsyn_opcode ("fmrx");
6070 do_vfp_nsyn_msr (void)
6072 if (inst
.operands
[0].isvec
)
6073 do_vfp_nsyn_opcode ("fmxr");
6083 if (do_vfp_nsyn_mrs () == SUCCESS
)
6086 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
6087 constraint ((inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
))
6089 _("'CPSR' or 'SPSR' expected"));
6090 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6091 inst
.instruction
|= (inst
.operands
[1].imm
& SPSR_BIT
);
/* NOTE(review): this fragment repeats the FPSCR -> "fmstat" rewrite seen in
   do_vfp_nsyn_mrs above but carries different original line numbers; the
   header of its owning function was lost in extraction, so which encoder it
   belongs to cannot be confirmed from here -- TODO verify against the full
   file before editing.  */
6097 if (inst
.operands
[0].isvec
)
6099 if (inst
.operands
[1].reg
!= 1)
6100 first_error (_("operand 1 must be FPSCR"));
6101 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
6102 memset (&inst
.operands
[1], '\0', sizeof (inst
.operands
[1]));
6103 do_vfp_nsyn_opcode ("fmstat");
6109 /* Two possible forms:
6110 "{C|S}PSR_<field>, Rm",
6111 "{C|S}PSR_f, #expression". */
6116 if (do_vfp_nsyn_msr () == SUCCESS
)
6119 inst
.instruction
|= inst
.operands
[0].imm
;
6120 if (inst
.operands
[1].isreg
)
6121 inst
.instruction
|= inst
.operands
[1].reg
;
6124 inst
.instruction
|= INST_IMMEDIATE
;
6125 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
6126 inst
.reloc
.pc_rel
= 0;
6133 if (!inst
.operands
[2].present
)
6134 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
6135 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6136 inst
.instruction
|= inst
.operands
[1].reg
;
6137 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
6139 /* Only restrict on pre-V4 architectures - radar 4474226 */
6140 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
6141 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4
)
6142 && !force_cpusubtype_ALL
)
6143 as_tsktsk (_("Rd and Rm should be different in mul"));
6146 /* Long Multiply Parser
6147 UMULL RdLo, RdHi, Rm, Rs
6148 SMULL RdLo, RdHi, Rm, Rs
6149 UMLAL RdLo, RdHi, Rm, Rs
6150 SMLAL RdLo, RdHi, Rm, Rs. */
6155 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6156 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6157 inst
.instruction
|= inst
.operands
[2].reg
;
6158 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
6160 /* Only restrict rm on pre-V4 architectures - radar 4474226 */
6161 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4
)
6162 || force_cpusubtype_ALL
)
6164 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
6165 as_tsktsk (_("rdhi and rdlo must be different"));
6169 /* rdhi, rdlo and rm must all be different. */
6170 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
6171 || inst
.operands
[0].reg
== inst
.operands
[2].reg
6172 || inst
.operands
[1].reg
== inst
.operands
[2].reg
)
6173 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
6180 if (inst
.operands
[0].present
)
6182 /* Architectural NOP hints are CPSR sets with no bits selected. */
6183 inst
.instruction
&= 0xf0000000;
6184 inst
.instruction
|= 0x0320f000 + inst
.operands
[0].imm
;
6188 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
6189 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
6190 Condition defaults to COND_ALWAYS.
6191 Error if Rd, Rn or Rm are R15. */
6196 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6197 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6198 inst
.instruction
|= inst
.operands
[2].reg
;
6199 if (inst
.operands
[3].present
)
6200 encode_arm_shift (3);
6203 /* ARM V6 PKHTB (Argument Parse). */
6208 if (!inst
.operands
[3].present
)
6210 /* If the shift specifier is omitted, turn the instruction
6211 into pkhbt rd, rm, rn. */
6212 inst
.instruction
&= 0xfff00010;
6213 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6214 inst
.instruction
|= inst
.operands
[1].reg
;
6215 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
6219 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6220 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6221 inst
.instruction
|= inst
.operands
[2].reg
;
6222 encode_arm_shift (3);
6226 /* ARMv5TE: Preload-Cache
6230 Syntactically, like LDR with B=1, W=0, L=1. */
6235 constraint (!inst
.operands
[0].isreg
,
6236 _("'[' expected after PLD mnemonic"));
6237 constraint (inst
.operands
[0].postind
,
6238 _("post-indexed expression used in preload instruction"));
6239 constraint (inst
.operands
[0].writeback
,
6240 _("writeback used in preload instruction"));
6241 constraint (!inst
.operands
[0].preind
,
6242 _("unindexed addressing used in preload instruction"));
6243 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
6246 /* ARMv7: PLI <addr_mode> */
6250 constraint (!inst
.operands
[0].isreg
,
6251 _("'[' expected after PLI mnemonic"));
6252 constraint (inst
.operands
[0].postind
,
6253 _("post-indexed expression used in preload instruction"));
6254 constraint (inst
.operands
[0].writeback
,
6255 _("writeback used in preload instruction"));
6256 constraint (!inst
.operands
[0].preind
,
6257 _("unindexed addressing used in preload instruction"));
6258 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
6259 inst
.instruction
&= ~PRE_INDEX
;
6265 inst
.operands
[1] = inst
.operands
[0];
6266 memset (&inst
.operands
[0], 0, sizeof inst
.operands
[0]);
6267 inst
.operands
[0].isreg
= 1;
6268 inst
.operands
[0].writeback
= 1;
6269 inst
.operands
[0].reg
= REG_SP
;
6273 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
6274 word at the specified address and the following word
6276 Unconditionally executed.
6277 Error if Rn is R15. */
6282 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6283 if (inst
.operands
[0].writeback
)
6284 inst
.instruction
|= WRITE_BACK
;
6287 /* ARM V6 ssat (argument parse). */
6292 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6293 inst
.instruction
|= (inst
.operands
[1].imm
- 1) << 16;
6294 inst
.instruction
|= inst
.operands
[2].reg
;
6296 if (inst
.operands
[3].present
)
6297 encode_arm_shift (3);
6300 /* ARM V6 usat (argument parse). */
6305 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6306 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
6307 inst
.instruction
|= inst
.operands
[2].reg
;
6309 if (inst
.operands
[3].present
)
6310 encode_arm_shift (3);
6313 /* ARM V6 ssat16 (argument parse). */
6318 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6319 inst
.instruction
|= ((inst
.operands
[1].imm
- 1) << 16);
6320 inst
.instruction
|= inst
.operands
[2].reg
;
6326 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6327 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
6328 inst
.instruction
|= inst
.operands
[2].reg
;
6331 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
6332 preserving the other bits.
6334 setend <endian_specifier>, where <endian_specifier> is either
6340 if (inst
.operands
[0].imm
)
6341 inst
.instruction
|= 0x200;
6347 unsigned int Rm
= (inst
.operands
[1].present
6348 ? inst
.operands
[1].reg
6349 : inst
.operands
[0].reg
);
6351 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6352 inst
.instruction
|= Rm
;
6353 if (inst
.operands
[2].isreg
) /* Rd, {Rm,} Rs */
6355 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
6356 inst
.instruction
|= SHIFT_BY_REG
;
6359 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
6365 inst
.reloc
.type
= BFD_RELOC_ARM_SMC
;
6366 inst
.reloc
.pc_rel
= 0;
6372 inst
.reloc
.type
= BFD_RELOC_ARM_SWI
;
6373 inst
.reloc
.pc_rel
= 0;
6376 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
6377 SMLAxy{cond} Rd,Rm,Rs,Rn
6378 SMLAWy{cond} Rd,Rm,Rs,Rn
6379 Error if any register is R15. */
6384 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6385 inst
.instruction
|= inst
.operands
[1].reg
;
6386 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
6387 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
6390 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
6391 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
6392 Error if any register is R15.
6393 Warning if Rdlo == Rdhi. */
6398 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6399 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6400 inst
.instruction
|= inst
.operands
[2].reg
;
6401 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
6403 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
6404 as_tsktsk (_("rdhi and rdlo must be different"));
6407 /* ARM V5E (El Segundo) signed-multiply (argument parse)
6408 SMULxy{cond} Rd,Rm,Rs
6409 Error if any register is R15. */
6414 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6415 inst
.instruction
|= inst
.operands
[1].reg
;
6416 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
6419 /* ARM V6 srs (argument parse). The variable fields in the encoding are
6420 the same for both ARM and Thumb-2. */
6427 if (inst
.operands
[0].present
)
6429 reg
= inst
.operands
[0].reg
;
6430 constraint (reg
!= 13, _("SRS base register must be r13"));
6435 inst
.instruction
|= reg
<< 16;
6436 inst
.instruction
|= inst
.operands
[1].imm
;
6437 if (inst
.operands
[0].writeback
|| inst
.operands
[1].writeback
)
6438 inst
.instruction
|= WRITE_BACK
;
6441 /* ARM V6 strex (argument parse). */
6446 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
6447 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
6448 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
6449 || inst
.operands
[2].negative
6450 /* See comment in do_ldrex(). */
6451 || (inst
.operands
[2].reg
== REG_PC
),
6454 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
6455 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
6457 constraint (inst
.reloc
.exp
.X_op
!= O_constant
6458 || inst
.reloc
.exp
.X_add_number
!= 0,
6459 _("offset must be zero in ARM encoding"));
6461 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6462 inst
.instruction
|= inst
.operands
[1].reg
;
6463 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
6464 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
6470 constraint (inst
.operands
[1].reg
% 2 != 0,
6471 _("even register required"));
6472 constraint (inst
.operands
[2].present
6473 && inst
.operands
[2].reg
!= inst
.operands
[1].reg
+ 1,
6474 _("can only store two consecutive registers"));
6475 /* If op 2 were present and equal to PC, this function wouldn't
6476 have been called in the first place. */
6477 constraint (inst
.operands
[1].reg
== REG_LR
, _("r14 not allowed here"));
6479 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
6480 || inst
.operands
[0].reg
== inst
.operands
[1].reg
+ 1
6481 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
6484 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6485 inst
.instruction
|= inst
.operands
[1].reg
;
6486 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
6489 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
6490 extends it to 32-bits, and adds the result to a value in another
6491 register. You can specify a rotation by 0, 8, 16, or 24 bits
6492 before extracting the 16-bit value.
6493 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
6494 Condition defaults to COND_ALWAYS.
6495 Error if any register uses R15. */
6500 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6501 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6502 inst
.instruction
|= inst
.operands
[2].reg
;
6503 inst
.instruction
|= inst
.operands
[3].imm
<< 10;
6508 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
6509 Condition defaults to COND_ALWAYS.
6510 Error if any register uses R15. */
6515 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6516 inst
.instruction
|= inst
.operands
[1].reg
;
6517 inst
.instruction
|= inst
.operands
[2].imm
<< 10;
6520 /* VFP instructions. In a logical order: SP variant first, monad
6521 before dyad, arithmetic then move then load/store. */
6524 do_vfp_sp_monadic (void)
6526 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
6527 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
6531 do_vfp_sp_dyadic (void)
6533 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
6534 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
6535 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
6539 do_vfp_sp_compare_z (void)
6541 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
6545 do_vfp_dp_sp_cvt (void)
6547 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
6548 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
6552 do_vfp_sp_dp_cvt (void)
6554 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
6555 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
/* NOTE(review): half-precision <-> single-precision conversion pair.  Each
   selects between a 0xff000000 and a 0xf3000000 top-byte encoding and, on
   the second path, turns a conditional into a delayed BAD_COND diagnostic.
   The conditional line choosing between the two encodings was lost in
   extraction (presumably a thumb/ARM-mode test -- TODO confirm), so the
   control structure below is incomplete as shown.  */
6559 do_vfp_sp_hp_cvt (void)
6562 inst
.instruction
|= 0xff000000;
6565 inst
.instruction
|= 0xf3000000;
6566 if(inst
.cond
!= COND_ALWAYS
)
6568 /* delayed diagnostic */
6569 inst
.error
= BAD_COND
;
6570 inst
.cond
= COND_ALWAYS
;
6573 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
6574 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
/* NOTE(review): mirror conversion in the opposite direction; same missing
   selector conditional as above.  */
6578 do_vfp_hp_sp_cvt (void)
6581 inst
.instruction
|= 0xff000000;
6584 inst
.instruction
|= 0xf3000000;
6585 if(inst
.cond
!= COND_ALWAYS
)
6587 /* delayed diagnostic */
6588 inst
.error
= BAD_COND
;
6589 inst
.cond
= COND_ALWAYS
;
6592 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
6593 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
6597 do_vfp_t_sp_hp_cvt (void)
6599 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
6600 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
6604 do_vfp_b_sp_hp_cvt (void)
6606 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
6607 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
6611 do_vfp_t_hp_sp_cvt (void)
6613 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
6614 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
6618 do_vfp_b_hp_sp_cvt (void)
6620 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
6621 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
6625 do_vfp_reg_from_sp (void)
6627 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6628 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
6632 do_vfp_reg2_from_sp2 (void)
6634 constraint (inst
.operands
[2].imm
!= 2,
6635 _("only two consecutive VFP SP registers allowed here"));
6636 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6637 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6638 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
6642 do_vfp_sp_from_reg (void)
6644 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sn
);
6645 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
6649 do_vfp_sp2_from_reg2 (void)
6651 constraint (inst
.operands
[0].imm
!= 2,
6652 _("only two consecutive VFP SP registers allowed here"));
6653 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sm
);
6654 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
6655 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
6659 do_vfp_sp_ldst (void)
6661 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
6662 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
6666 do_vfp_dp_ldst (void)
6668 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
6669 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
6674 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type
)
6676 if (inst
.operands
[0].writeback
)
6677 inst
.instruction
|= WRITE_BACK
;
6679 constraint (ldstm_type
!= VFP_LDSTMIA
,
6680 _("this addressing mode requires base-register writeback"));
6681 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6682 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sd
);
6683 inst
.instruction
|= inst
.operands
[1].imm
;
6687 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type
)
6691 if (inst
.operands
[0].writeback
)
6692 inst
.instruction
|= WRITE_BACK
;
6694 constraint (ldstm_type
!= VFP_LDSTMIA
&& ldstm_type
!= VFP_LDSTMIAX
,
6695 _("this addressing mode requires base-register writeback"));
6697 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6698 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
6700 count
= inst
.operands
[1].imm
<< 1;
6701 if (ldstm_type
== VFP_LDSTMIAX
|| ldstm_type
== VFP_LDSTMDBX
)
6704 inst
.instruction
|= count
;
6708 do_vfp_sp_ldstmia (void)
6710 vfp_sp_ldstm (VFP_LDSTMIA
);
6714 do_vfp_sp_ldstmdb (void)
6716 vfp_sp_ldstm (VFP_LDSTMDB
);
6720 do_vfp_dp_ldstmia (void)
6722 vfp_dp_ldstm (VFP_LDSTMIA
);
6726 do_vfp_dp_ldstmdb (void)
6728 vfp_dp_ldstm (VFP_LDSTMDB
);
6732 do_vfp_xp_ldstmia (void)
6734 vfp_dp_ldstm (VFP_LDSTMIAX
);
6738 do_vfp_xp_ldstmdb (void)
6740 vfp_dp_ldstm (VFP_LDSTMDBX
);
6744 do_vfp_dp_rd_rm (void)
6746 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
6747 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
6751 do_vfp_dp_rn_rd (void)
6753 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dn
);
6754 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
6758 do_vfp_dp_rd_rn (void)
6760 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
6761 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
6765 do_vfp_dp_rd_rn_rm (void)
6767 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
6768 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
6769 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dm
);
6775 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
6779 do_vfp_dp_rm_rd_rn (void)
6781 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dm
);
6782 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
6783 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dn
);
6786 /* VFPv3 instructions. */
6788 do_vfp_sp_const (void)
6790 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
6791 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
6792 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
6796 do_vfp_dp_const (void)
6798 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
6799 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
6800 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
6804 vfp_conv (int srcsize
)
6806 unsigned immbits
= srcsize
- inst
.operands
[1].imm
;
6807 inst
.instruction
|= (immbits
& 1) << 5;
6808 inst
.instruction
|= (immbits
>> 1);
6812 do_vfp_sp_conv_16 (void)
6814 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
6819 do_vfp_dp_conv_16 (void)
6821 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
6826 do_vfp_sp_conv_32 (void)
6828 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
6833 do_vfp_dp_conv_32 (void)
6835 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
6840 /* FPA instructions. Also in a logical order. */
6845 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6846 inst
.instruction
|= inst
.operands
[1].reg
;
6850 do_fpa_ldmstm (void)
6852 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6853 switch (inst
.operands
[1].imm
)
6855 case 1: inst
.instruction
|= CP_T_X
; break;
6856 case 2: inst
.instruction
|= CP_T_Y
; break;
6857 case 3: inst
.instruction
|= CP_T_Y
| CP_T_X
; break;
6862 if (inst
.instruction
& (PRE_INDEX
| INDEX_UP
))
6864 /* The instruction specified "ea" or "fd", so we can only accept
6865 [Rn]{!}. The instruction does not really support stacking or
6866 unstacking, so we have to emulate these by setting appropriate
6867 bits and offsets. */
6868 constraint (inst
.reloc
.exp
.X_op
!= O_constant
6869 || inst
.reloc
.exp
.X_add_number
!= 0,
6870 _("this instruction does not support indexing"));
6872 if ((inst
.instruction
& PRE_INDEX
) || inst
.operands
[2].writeback
)
6873 inst
.reloc
.exp
.X_add_number
= 12 * inst
.operands
[1].imm
;
6875 if (!(inst
.instruction
& INDEX_UP
))
6876 inst
.reloc
.exp
.X_add_number
= -inst
.reloc
.exp
.X_add_number
;
6878 if (!(inst
.instruction
& PRE_INDEX
) && inst
.operands
[2].writeback
)
6880 inst
.operands
[2].preind
= 0;
6881 inst
.operands
[2].postind
= 1;
6885 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
6889 /* iWMMXt instructions: strictly in alphabetical order. */
6892 do_iwmmxt_tandorc (void)
6894 constraint (inst
.operands
[0].reg
!= REG_PC
, _("only r15 allowed here"));
6898 do_iwmmxt_textrc (void)
6900 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6901 inst
.instruction
|= inst
.operands
[1].imm
;
6905 do_iwmmxt_textrm (void)
6907 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6908 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6909 inst
.instruction
|= inst
.operands
[2].imm
;
6913 do_iwmmxt_tinsr (void)
6915 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6916 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
6917 inst
.instruction
|= inst
.operands
[2].imm
;
6921 do_iwmmxt_tmia (void)
6923 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
6924 inst
.instruction
|= inst
.operands
[1].reg
;
6925 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
6929 do_iwmmxt_waligni (void)
6931 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6932 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6933 inst
.instruction
|= inst
.operands
[2].reg
;
6934 inst
.instruction
|= inst
.operands
[3].imm
<< 20;
6938 do_iwmmxt_wmerge (void)
6940 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6941 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6942 inst
.instruction
|= inst
.operands
[2].reg
;
6943 inst
.instruction
|= inst
.operands
[3].imm
<< 21;
6947 do_iwmmxt_wmov (void)
6949 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
6950 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6951 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6952 inst
.instruction
|= inst
.operands
[1].reg
;
6956 do_iwmmxt_wldstbh (void)
6959 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6961 reloc
= BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
;
6963 reloc
= BFD_RELOC_ARM_CP_OFF_IMM_S2
;
6964 encode_arm_cp_address (1, TRUE
, FALSE
, reloc
);
6968 do_iwmmxt_wldstw (void)
6970 /* RIWR_RIWC clears .isreg for a control register. */
6971 if (!inst
.operands
[0].isreg
)
6973 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
6974 inst
.instruction
|= 0xf0000000;
6977 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6978 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
6982 do_iwmmxt_wldstd (void)
6984 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6985 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
)
6986 && inst
.operands
[1].immisreg
)
6988 inst
.instruction
&= ~0x1a000ff;
6989 inst
.instruction
|= (0xf << 28);
6990 if (inst
.operands
[1].preind
)
6991 inst
.instruction
|= PRE_INDEX
;
6992 if (!inst
.operands
[1].negative
)
6993 inst
.instruction
|= INDEX_UP
;
6994 if (inst
.operands
[1].writeback
)
6995 inst
.instruction
|= WRITE_BACK
;
6996 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6997 inst
.instruction
|= inst
.reloc
.exp
.X_add_number
<< 4;
6998 inst
.instruction
|= inst
.operands
[1].imm
;
7001 encode_arm_cp_address (1, TRUE
, FALSE
, 0);
7005 do_iwmmxt_wshufh (void)
7007 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7008 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7009 inst
.instruction
|= ((inst
.operands
[2].imm
& 0xf0) << 16);
7010 inst
.instruction
|= (inst
.operands
[2].imm
& 0x0f);
7014 do_iwmmxt_wzero (void)
7016 /* WZERO reg is an alias for WANDN reg, reg, reg. */
7017 inst
.instruction
|= inst
.operands
[0].reg
;
7018 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7019 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7023 do_iwmmxt_wrwrwr_or_imm5 (void)
7025 if (inst
.operands
[2].isreg
)
7028 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
),
7029 _("immediate operand requires iWMMXt2"));
7031 if (inst
.operands
[2].imm
== 0)
7033 switch ((inst
.instruction
>> 20) & 0xf)
7039 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
7040 inst
.operands
[2].imm
= 16;
7041 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0x7 << 20);
7047 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
7048 inst
.operands
[2].imm
= 32;
7049 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0xb << 20);
7056 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
7058 wrn
= (inst
.instruction
>> 16) & 0xf;
7059 inst
.instruction
&= 0xff0fff0f;
7060 inst
.instruction
|= wrn
;
7061 /* Bail out here; the instruction is now assembled. */
7066 /* Map 32 -> 0, etc. */
7067 inst
.operands
[2].imm
&= 0x1f;
7068 inst
.instruction
|= (0xf << 28) | ((inst
.operands
[2].imm
& 0x10) << 4) | (inst
.operands
[2].imm
& 0xf);
7073 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
7074 operations first, then control, shift, and load/store. */
7076 /* Insns like "foo X,Y,Z". */
7079 do_mav_triple (void)
7081 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7082 inst
.instruction
|= inst
.operands
[1].reg
;
7083 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
7086 /* Insns like "foo W,X,Y,Z".
7087 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
7092 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
7093 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
7094 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
7095 inst
.instruction
|= inst
.operands
[3].reg
;
7098 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
7102 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
7105 /* Maverick shift immediate instructions.
7106 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
7107 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
7112 int imm
= inst
.operands
[2].imm
;
7114 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7115 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7117 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
7118 Bits 5-7 of the insn should have bits 4-6 of the immediate.
7119 Bit 4 should be 0. */
7120 imm
= (imm
& 0xf) | ((imm
& 0x70) << 1);
7122 inst
.instruction
|= imm
;
7126 /* XScale instructions. Also sorted arithmetic before move. */
7128 /* Xscale multiply-accumulate (argument parse)
7131 MIAxycc acc0,Rm,Rs. */
7136 inst
.instruction
|= inst
.operands
[1].reg
;
7137 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
7140 /* Xscale move-accumulator-register (argument parse)
7142 MARcc acc0,RdLo,RdHi. */
7147 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
7148 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
7151 /* Xscale move-register-accumulator (argument parse)
7153 MRAcc RdLo,RdHi,acc0. */
7158 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
, BAD_OVERLAP
);
7159 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7160 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7163 /* Encoding functions relevant only to Thumb. */
7165 /* inst.operands[i] is a shifted-register operand; encode
7166 it into inst.instruction in the format used by Thumb32. */
7169 encode_thumb32_shifted_operand (int i
)
7171 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
7172 unsigned int shift
= inst
.operands
[i
].shift_kind
;
7174 constraint (inst
.operands
[i
].immisreg
,
7175 _("shift by register not allowed in thumb mode"));
7176 inst
.instruction
|= inst
.operands
[i
].reg
;
7177 if (shift
== SHIFT_RRX
)
7178 inst
.instruction
|= SHIFT_ROR
<< 4;
7181 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
7182 _("expression too complex"));
7184 constraint (value
> 32
7185 || (value
== 32 && (shift
== SHIFT_LSL
7186 || shift
== SHIFT_ROR
)),
7187 _("shift expression is too large"));
7191 else if (value
== 32)
7194 inst
.instruction
|= shift
<< 4;
7195 inst
.instruction
|= (value
& 0x1c) << 10;
7196 inst
.instruction
|= (value
& 0x03) << 6;
7201 /* inst.operands[i] was set up by parse_address. Encode it into a
7202 Thumb32 format load or store instruction. Reject forms that cannot
7203 be used with such instructions. If is_t is true, reject forms that
7204 cannot be used with a T instruction; if is_d is true, reject forms
7205 that cannot be used with a D instruction. */
7208 encode_thumb32_addr_mode (int i
, bfd_boolean is_t
, bfd_boolean is_d
)
7210 bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
7212 constraint (!inst
.operands
[i
].isreg
,
7213 _("Instruction does not support =N addresses"));
7215 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
7216 if (inst
.operands
[i
].immisreg
)
7218 constraint (is_pc
, _("cannot use register index with PC-relative addressing"));
7219 constraint (is_t
|| is_d
, _("cannot use register index with this instruction"));
7220 constraint (inst
.operands
[i
].negative
,
7221 _("Thumb does not support negative register indexing"));
7222 constraint (inst
.operands
[i
].postind
,
7223 _("Thumb does not support register post-indexing"));
7224 constraint (inst
.operands
[i
].writeback
,
7225 _("Thumb does not support register indexing with writeback"));
7226 constraint (inst
.operands
[i
].shifted
&& inst
.operands
[i
].shift_kind
!= SHIFT_LSL
,
7227 _("Thumb supports only LSL in shifted register indexing"));
7229 inst
.instruction
|= inst
.operands
[i
].imm
;
7230 if (inst
.operands
[i
].shifted
)
7232 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
7233 _("expression too complex"));
7234 constraint (inst
.reloc
.exp
.X_add_number
< 0
7235 || inst
.reloc
.exp
.X_add_number
> 3,
7236 _("shift out of range"));
7237 inst
.instruction
|= inst
.reloc
.exp
.X_add_number
<< 4;
7239 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
7241 else if (inst
.operands
[i
].preind
)
7243 constraint (is_pc
&& inst
.operands
[i
].writeback
,
7244 _("cannot use writeback with PC-relative addressing"));
7245 constraint (is_t
&& inst
.operands
[i
].writeback
,
7246 _("cannot use writeback with this instruction"));
7250 inst
.instruction
|= 0x01000000;
7251 if (inst
.operands
[i
].writeback
)
7252 inst
.instruction
|= 0x00200000;
7256 inst
.instruction
|= 0x00000c00;
7257 if (inst
.operands
[i
].writeback
)
7258 inst
.instruction
|= 0x00000100;
7260 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
7262 else if (inst
.operands
[i
].postind
)
7264 assert (inst
.operands
[i
].writeback
);
7265 constraint (is_pc
, _("cannot use post-indexing with PC-relative addressing"));
7266 constraint (is_t
, _("cannot use post-indexing with this instruction"));
7269 inst
.instruction
|= 0x00200000;
7271 inst
.instruction
|= 0x00000900;
7272 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
7274 else /* unindexed - only for coprocessor */
7275 inst
.error
= _("instruction does not accept unindexed addressing");
/* Table of Thumb instructions which exist in both 16- and 32-bit
   encodings (the latter only in post-V6T2 cores).  The index is the
   value used in the insns table below.  When there is more than one
   possible 16-bit encoding for the instruction, this table always
   holds variant (1).
   Also contains several pseudo-instructions used during relaxation.  */
#define T16_32_TAB				\
  X(adc,   4140, eb400000),			\
  X(adcs,  4140, eb500000),			\
  X(add,   1c00, eb000000),			\
  X(adds,  1c00, eb100000),			\
  X(addi,  0000, f1000000),			\
  X(addis, 0000, f1100000),			\
  X(add_pc,000f, f20f0000),			\
  X(add_sp,000d, f10d0000),			\
  X(adr,   000f, f20f0000),			\
  X(and,   4000, ea000000),			\
  X(ands,  4000, ea100000),			\
  X(asr,   1000, fa40f000),			\
  X(asrs,  1000, fa50f000),			\
  X(b,     e000, f000b000),			\
  X(bcond, d000, f0008000),			\
  X(bic,   4380, ea200000),			\
  X(bics,  4380, ea300000),			\
  X(cmn,   42c0, eb100f00),			\
  X(cmp,   2800, ebb00f00),			\
  X(cpsie, b660, f3af8400),			\
  X(cpsid, b670, f3af8600),			\
  X(cpy,   4600, ea4f0000),			\
  X(dec_sp,80dd, f1ad0d00),			\
  X(eor,   4040, ea800000),			\
  X(eors,  4040, ea900000),			\
  X(inc_sp,00dd, f10d0d00),			\
  X(ldmia, c800, e8900000),			\
  X(ldr,   6800, f8500000),			\
  X(ldrb,  7800, f8100000),			\
  X(ldrh,  8800, f8300000),			\
  X(ldrsb, 5600, f9100000),			\
  X(ldrsh, 5e00, f9300000),			\
  X(ldr_pc,4800, f85f0000),			\
  X(ldr_pc2,4800, f85f0000),			\
  X(ldr_sp,9800, f85d0000),			\
  X(lsl,   0000, fa00f000),			\
  X(lsls,  0000, fa10f000),			\
  X(lsr,   0800, fa20f000),			\
  X(lsrs,  0800, fa30f000),			\
  X(mov,   2000, ea4f0000),			\
  X(movs,  2000, ea5f0000),			\
  X(mul,   4340, fb00f000),			\
  X(muls,  4340, ffffffff), /* no 32b muls */	\
  X(mvn,   43c0, ea6f0000),			\
  X(mvns,  43c0, ea7f0000),			\
  X(neg,   4240, f1c00000), /* rsb #0 */	\
  X(negs,  4240, f1d00000), /* rsbs #0 */	\
  X(orr,   4300, ea400000),			\
  X(orrs,  4300, ea500000),			\
  X(pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
  X(push,  b400, e92d0000), /* stmdb sp!,... */	\
  X(rev,   ba00, fa90f080),			\
  X(rev16, ba40, fa90f090),			\
  X(revsh, bac0, fa90f0b0),			\
  X(ror,   41c0, fa60f000),			\
  X(rors,  41c0, fa70f000),			\
  X(sbc,   4180, eb600000),			\
  X(sbcs,  4180, eb700000),			\
  X(stmia, c000, e8800000),			\
  X(str,   6000, f8400000),			\
  X(strb,  7000, f8000000),			\
  X(strh,  8000, f8200000),			\
  X(str_sp,9000, f84d0000),			\
  X(sub,   1e00, eba00000),			\
  X(subs,  1e00, ebb00000),			\
  X(subi,  8000, f1a00000),			\
  X(subis, 8000, f1b00000),			\
  X(sxtb,  b240, fa4ff080),			\
  X(sxth,  b200, fa0ff080),			\
  X(tst,   4200, ea100f00),			\
  X(uxtb,  b2c0, fa5ff080),			\
  X(uxth,  b280, fa1ff080),			\
  X(nop,   bf00, f3af8000),			\
  X(yield, bf10, f3af8001),			\
  X(wfe,   bf20, f3af8002),			\
  X(wfi,   bf30, f3af8003),			\
  X(sev,   bf40, f3af8004), /* was f3af9004 — wide SEV is f3af8004 */

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM_##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
#undef X
7382 /* Thumb instruction encoders, in alphabetical order. */
7386 do_t_add_sub_w (void)
7390 Rd
= inst
.operands
[0].reg
;
7391 Rn
= inst
.operands
[1].reg
;
7393 constraint (Rd
== 15, _("PC not allowed as destination"));
7394 inst
.instruction
|= (Rn
<< 16) | (Rd
<< 8);
7395 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMM12
;
7398 /* Parse an add or subtract instruction. We get here with inst.instruction
7399 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
7406 Rd
= inst
.operands
[0].reg
;
7407 Rs
= (inst
.operands
[1].present
7408 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
7409 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
7417 flags
= (inst
.instruction
== T_MNEM_adds
7418 || inst
.instruction
== T_MNEM_subs
);
7420 narrow
= (current_it_mask
== 0);
7422 narrow
= (current_it_mask
!= 0);
7423 if (!inst
.operands
[2].isreg
)
7427 add
= (inst
.instruction
== T_MNEM_add
7428 || inst
.instruction
== T_MNEM_adds
);
7430 if (inst
.size_req
!= 4)
7432 /* Attempt to use a narrow opcode, with relaxation if
7434 if (Rd
== REG_SP
&& Rs
== REG_SP
&& !flags
)
7435 opcode
= add
? T_MNEM_inc_sp
: T_MNEM_dec_sp
;
7436 else if (Rd
<= 7 && Rs
== REG_SP
&& add
&& !flags
)
7437 opcode
= T_MNEM_add_sp
;
7438 else if (Rd
<= 7 && Rs
== REG_PC
&& add
&& !flags
)
7439 opcode
= T_MNEM_add_pc
;
7440 else if (Rd
<= 7 && Rs
<= 7 && narrow
)
7443 opcode
= add
? T_MNEM_addis
: T_MNEM_subis
;
7445 opcode
= add
? T_MNEM_addi
: T_MNEM_subi
;
7449 inst
.instruction
= THUMB_OP16(opcode
);
7450 inst
.instruction
|= (Rd
<< 4) | Rs
;
7451 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
7452 if (inst
.size_req
!= 2)
7453 inst
.relax
= opcode
;
7456 constraint (inst
.size_req
== 2, BAD_HIREG
);
7458 if (inst
.size_req
== 4
7459 || (inst
.size_req
!= 2 && !opcode
))
7463 constraint (Rs
!= REG_LR
|| inst
.instruction
!= T_MNEM_subs
,
7464 _("only SUBS PC, LR, #const allowed"));
7465 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
7466 _("expression too complex"));
7467 constraint (inst
.reloc
.exp
.X_add_number
< 0
7468 || inst
.reloc
.exp
.X_add_number
> 0xff,
7469 _("immediate value out of range"));
7470 inst
.instruction
= T2_SUBS_PC_LR
7471 | inst
.reloc
.exp
.X_add_number
;
7472 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
7475 else if (Rs
== REG_PC
)
7477 /* Always use addw/subw. */
7478 inst
.instruction
= add
? 0xf20f0000 : 0xf2af0000;
7479 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMM12
;
7483 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
7484 inst
.instruction
= (inst
.instruction
& 0xe1ffffff)
7487 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
7489 inst
.reloc
.type
= BFD_RELOC_ARM_T32_ADD_IMM
;
7491 inst
.instruction
|= Rd
<< 8;
7492 inst
.instruction
|= Rs
<< 16;
7497 Rn
= inst
.operands
[2].reg
;
7498 /* See if we can do this with a 16-bit instruction. */
7499 if (!inst
.operands
[2].shifted
&& inst
.size_req
!= 4)
7501 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
7506 inst
.instruction
= ((inst
.instruction
== T_MNEM_adds
7507 || inst
.instruction
== T_MNEM_add
)
7510 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
7514 if (inst
.instruction
== T_MNEM_add
)
7518 inst
.instruction
= T_OPCODE_ADD_HI
;
7519 inst
.instruction
|= (Rd
& 8) << 4;
7520 inst
.instruction
|= (Rd
& 7);
7521 inst
.instruction
|= Rn
<< 3;
7524 /* ... because addition is commutative! */
7527 inst
.instruction
= T_OPCODE_ADD_HI
;
7528 inst
.instruction
|= (Rd
& 8) << 4;
7529 inst
.instruction
|= (Rd
& 7);
7530 inst
.instruction
|= Rs
<< 3;
7535 /* If we get here, it can't be done in 16 bits. */
7536 constraint (inst
.operands
[2].shifted
&& inst
.operands
[2].immisreg
,
7537 _("shift must be constant"));
7538 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
7539 inst
.instruction
|= Rd
<< 8;
7540 inst
.instruction
|= Rs
<< 16;
7541 encode_thumb32_shifted_operand (2);
7546 constraint (inst
.instruction
== T_MNEM_adds
7547 || inst
.instruction
== T_MNEM_subs
,
7550 if (!inst
.operands
[2].isreg
) /* Rd, Rs, #imm */
7552 constraint ((Rd
> 7 && (Rd
!= REG_SP
|| Rs
!= REG_SP
))
7553 || (Rs
> 7 && Rs
!= REG_SP
&& Rs
!= REG_PC
),
7556 inst
.instruction
= (inst
.instruction
== T_MNEM_add
7558 inst
.instruction
|= (Rd
<< 4) | Rs
;
7559 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
7563 Rn
= inst
.operands
[2].reg
;
7564 constraint (inst
.operands
[2].shifted
, _("unshifted register required"));
7566 /* We now have Rd, Rs, and Rn set to registers. */
7567 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
7569 /* Can't do this for SUB. */
7570 constraint (inst
.instruction
== T_MNEM_sub
, BAD_HIREG
);
7571 inst
.instruction
= T_OPCODE_ADD_HI
;
7572 inst
.instruction
|= (Rd
& 8) << 4;
7573 inst
.instruction
|= (Rd
& 7);
7575 inst
.instruction
|= Rn
<< 3;
7577 inst
.instruction
|= Rs
<< 3;
7579 constraint (1, _("dest must overlap one source register"));
7583 inst
.instruction
= (inst
.instruction
== T_MNEM_add
7584 ? T_OPCODE_ADD_R3
: T_OPCODE_SUB_R3
);
7585 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
7593 if (unified_syntax
&& inst
.size_req
== 0 && inst
.operands
[0].reg
<= 7)
7595 /* Defer to section relaxation. */
7596 inst
.relax
= inst
.instruction
;
7597 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
7598 inst
.instruction
|= inst
.operands
[0].reg
<< 4;
7600 else if (unified_syntax
&& inst
.size_req
!= 2)
7602 /* Generate a 32-bit opcode. */
7603 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
7604 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
7605 inst
.reloc
.type
= BFD_RELOC_ARM_T32_ADD_PC12
;
7606 inst
.reloc
.pc_rel
= 1;
7610 /* Generate a 16-bit opcode. */
7611 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
7612 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
7613 inst
.reloc
.exp
.X_add_number
-= 4; /* PC relative adjust. */
7614 inst
.reloc
.pc_rel
= 1;
7616 inst
.instruction
|= inst
.operands
[0].reg
<< 4;
7620 /* Arithmetic instructions for which there is just one 16-bit
7621 instruction encoding, and it allows only two low registers.
7622 For maximal compatibility with ARM syntax, we allow three register
7623 operands even when Thumb-32 instructions are not available, as long
7624 as the first two are identical. For instance, both "sbc r0,r1" and
7625 "sbc r0,r0,r1" are allowed. */
7631 Rd
= inst
.operands
[0].reg
;
7632 Rs
= (inst
.operands
[1].present
7633 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
7634 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
7635 Rn
= inst
.operands
[2].reg
;
7639 if (!inst
.operands
[2].isreg
)
7641 /* For an immediate, we always generate a 32-bit opcode;
7642 section relaxation will shrink it later if possible. */
7643 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
7644 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
7645 inst
.instruction
|= Rd
<< 8;
7646 inst
.instruction
|= Rs
<< 16;
7647 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
7653 /* See if we can do this with a 16-bit instruction. */
7654 if (THUMB_SETS_FLAGS (inst
.instruction
))
7655 narrow
= current_it_mask
== 0;
7657 narrow
= current_it_mask
!= 0;
7659 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
7661 if (inst
.operands
[2].shifted
)
7663 if (inst
.size_req
== 4)
7669 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
7670 inst
.instruction
|= Rd
;
7671 inst
.instruction
|= Rn
<< 3;
7675 /* If we get here, it can't be done in 16 bits. */
7676 constraint (inst
.operands
[2].shifted
7677 && inst
.operands
[2].immisreg
,
7678 _("shift must be constant"));
7679 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
7680 inst
.instruction
|= Rd
<< 8;
7681 inst
.instruction
|= Rs
<< 16;
7682 encode_thumb32_shifted_operand (2);
7687 /* On its face this is a lie - the instruction does set the
7688 flags. However, the only supported mnemonic in this mode
7690 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
7692 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
7693 _("unshifted register required"));
7694 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
7695 constraint (Rd
!= Rs
,
7696 _("dest and source1 must be the same register"));
7698 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
7699 inst
.instruction
|= Rd
;
7700 inst
.instruction
|= Rn
<< 3;
7704 /* Similarly, but for instructions where the arithmetic operation is
7705 commutative, so we can allow either of them to be different from
7706 the destination operand in a 16-bit instruction. For instance, all
7707 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
7714 Rd
= inst
.operands
[0].reg
;
7715 Rs
= (inst
.operands
[1].present
7716 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
7717 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
7718 Rn
= inst
.operands
[2].reg
;
7722 if (!inst
.operands
[2].isreg
)
7724 /* For an immediate, we always generate a 32-bit opcode;
7725 section relaxation will shrink it later if possible. */
7726 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
7727 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
7728 inst
.instruction
|= Rd
<< 8;
7729 inst
.instruction
|= Rs
<< 16;
7730 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
7736 /* See if we can do this with a 16-bit instruction. */
7737 if (THUMB_SETS_FLAGS (inst
.instruction
))
7738 narrow
= current_it_mask
== 0;
7740 narrow
= current_it_mask
!= 0;
7742 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
7744 if (inst
.operands
[2].shifted
)
7746 if (inst
.size_req
== 4)
7753 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
7754 inst
.instruction
|= Rd
;
7755 inst
.instruction
|= Rn
<< 3;
7760 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
7761 inst
.instruction
|= Rd
;
7762 inst
.instruction
|= Rs
<< 3;
7767 /* If we get here, it can't be done in 16 bits. */
7768 constraint (inst
.operands
[2].shifted
7769 && inst
.operands
[2].immisreg
,
7770 _("shift must be constant"));
7771 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
7772 inst
.instruction
|= Rd
<< 8;
7773 inst
.instruction
|= Rs
<< 16;
7774 encode_thumb32_shifted_operand (2);
7779 /* On its face this is a lie - the instruction does set the
7780 flags. However, the only supported mnemonic in this mode
7782 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
7784 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
7785 _("unshifted register required"));
7786 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
7788 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
7789 inst
.instruction
|= Rd
;
7792 inst
.instruction
|= Rn
<< 3;
7794 inst
.instruction
|= Rs
<< 3;
7796 constraint (1, _("dest must overlap one source register"));
7803 if (inst
.operands
[0].present
)
7805 constraint ((inst
.instruction
& 0xf0) == 0x60
7806 && inst
.operands
[0].imm
!= 0xf,
7807 "bad barrier type");
7808 inst
.instruction
|= inst
.operands
[0].imm
;
7811 inst
.instruction
|= 0xf;
7817 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
7818 constraint (msb
> 32, _("bit-field extends past end of register"));
7819 /* The instruction encoding stores the LSB and MSB,
7820 not the LSB and width. */
7821 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
7822 inst
.instruction
|= (inst
.operands
[1].imm
& 0x1c) << 10;
7823 inst
.instruction
|= (inst
.operands
[1].imm
& 0x03) << 6;
7824 inst
.instruction
|= msb
- 1;
7832 /* #0 in second position is alternative syntax for bfc, which is
7833 the same instruction but with REG_PC in the Rm field. */
7834 if (!inst
.operands
[1].isreg
)
7835 inst
.operands
[1].reg
= REG_PC
;
7837 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
7838 constraint (msb
> 32, _("bit-field extends past end of register"));
7839 /* The instruction encoding stores the LSB and MSB,
7840 not the LSB and width. */
7841 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
7842 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7843 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
7844 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
7845 inst
.instruction
|= msb
- 1;
7851 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
7852 _("bit-field extends past end of register"));
7853 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
7854 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7855 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
7856 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
7857 inst
.instruction
|= inst
.operands
[3].imm
- 1;
7860 /* ARM V5 Thumb BLX (argument parse)
7861 BLX <target_addr> which is BLX(1)
7862 BLX <Rm> which is BLX(2)
7863 Unfortunately, there are two different opcodes for this mnemonic.
7864 So, the insns[].value is not used, and the code here zaps values
7865 into inst.instruction.
7867 ??? How to take advantage of the additional two bits of displacement
7868 available in Thumb32 mode? Need new relocation? */
7873 constraint (current_it_mask
&& current_it_mask
!= 0x10, BAD_BRANCH
);
7874 if (inst
.operands
[0].isreg
)
7875 /* We have a register, so this is BLX(2). */
7876 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
7879 /* No register. This must be BLX(1). */
7880 inst
.instruction
= 0xf000e800;
7882 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
7883 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
7886 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BLX
;
7887 inst
.reloc
.pc_rel
= 1;
7897 if (current_it_mask
)
7899 /* Conditional branches inside IT blocks are encoded as unconditional
7902 /* A branch must be the last instruction in an IT block. */
7903 constraint (current_it_mask
!= 0x10, BAD_BRANCH
);
7908 if (cond
!= COND_ALWAYS
)
7909 opcode
= T_MNEM_bcond
;
7911 opcode
= inst
.instruction
;
7913 if (unified_syntax
&& inst
.size_req
== 4)
7915 inst
.instruction
= THUMB_OP32(opcode
);
7916 if (cond
== COND_ALWAYS
)
7917 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
7920 assert (cond
!= 0xF);
7921 inst
.instruction
|= cond
<< 22;
7922 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
7927 inst
.instruction
= THUMB_OP16(opcode
);
7928 if (cond
== COND_ALWAYS
)
7929 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
7932 inst
.instruction
|= cond
<< 8;
7933 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
7935 /* Allow section relaxation. */
7936 if (unified_syntax
&& inst
.size_req
!= 2)
7937 inst
.relax
= opcode
;
7940 inst
.reloc
.pc_rel
= 1;
7946 constraint (inst
.cond
!= COND_ALWAYS
,
7947 _("instruction is always unconditional"));
7948 if (inst
.operands
[0].present
)
7950 constraint (inst
.operands
[0].imm
> 255,
7951 _("immediate value out of range"));
7952 inst
.instruction
|= inst
.operands
[0].imm
;
7957 do_t_branch23 (void)
7959 constraint (current_it_mask
&& current_it_mask
!= 0x10, BAD_BRANCH
);
7960 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
7961 inst
.reloc
.pc_rel
= 1;
7963 /* If the destination of the branch is a defined symbol which does not have
7964 the THUMB_FUNC attribute, then we must be calling a function which has
7965 the (interfacearm) attribute. We look for the Thumb entry point to that
7966 function and change the branch to refer to that function instead. */
7967 if ( inst
.reloc
.exp
.X_op
== O_symbol
7968 && inst
.reloc
.exp
.X_add_symbol
!= NULL
7969 && S_IS_DEFINED (inst
.reloc
.exp
.X_add_symbol
)
7970 && ! THUMB_IS_FUNC (inst
.reloc
.exp
.X_add_symbol
))
7971 inst
.reloc
.exp
.X_add_symbol
=
7972 find_real_start (inst
.reloc
.exp
.X_add_symbol
);
7978 constraint (current_it_mask
&& current_it_mask
!= 0x10, BAD_BRANCH
);
7979 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
7980 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
7981 should cause the alignment to be checked once it is known. This is
7982 because BX PC only works if the instruction is word aligned. */
7988 constraint (current_it_mask
&& current_it_mask
!= 0x10, BAD_BRANCH
);
7989 if (inst
.operands
[0].reg
== REG_PC
)
7990 as_tsktsk (_("use of r15 in bxj is not really useful"));
7992 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7998 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
7999 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8000 inst
.instruction
|= inst
.operands
[1].reg
;
8006 constraint (current_it_mask
, BAD_NOT_IT
);
8007 inst
.instruction
|= inst
.operands
[0].imm
;
8013 constraint (current_it_mask
, BAD_NOT_IT
);
8015 && (inst
.operands
[1].present
|| inst
.size_req
== 4)
8016 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6_notm
))
8018 unsigned int imod
= (inst
.instruction
& 0x0030) >> 4;
8019 inst
.instruction
= 0xf3af8000;
8020 inst
.instruction
|= imod
<< 9;
8021 inst
.instruction
|= inst
.operands
[0].imm
<< 5;
8022 if (inst
.operands
[1].present
)
8023 inst
.instruction
|= 0x100 | inst
.operands
[1].imm
;
8027 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
)
8028 && (inst
.operands
[0].imm
& 4),
8029 _("selected processor does not support 'A' form "
8030 "of this instruction"));
8031 constraint (inst
.operands
[1].present
|| inst
.size_req
== 4,
8032 _("Thumb does not support the 2-argument "
8033 "form of this instruction"));
8034 inst
.instruction
|= inst
.operands
[0].imm
;
8038 /* THUMB CPY instruction (argument parse). */
8043 if (inst
.size_req
== 4)
8045 inst
.instruction
= THUMB_OP32 (T_MNEM_mov
);
8046 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8047 inst
.instruction
|= inst
.operands
[1].reg
;
8051 inst
.instruction
|= (inst
.operands
[0].reg
& 0x8) << 4;
8052 inst
.instruction
|= (inst
.operands
[0].reg
& 0x7);
8053 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8060 constraint (current_it_mask
, BAD_NOT_IT
);
8061 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
8062 inst
.instruction
|= inst
.operands
[0].reg
;
8063 inst
.reloc
.pc_rel
= 1;
8064 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH7
;
8070 inst
.instruction
|= inst
.operands
[0].imm
;
8076 if (!inst
.operands
[1].present
)
8077 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
8078 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8079 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8080 inst
.instruction
|= inst
.operands
[2].reg
;
8086 if (unified_syntax
&& inst
.size_req
== 4)
8087 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8089 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8095 unsigned int cond
= inst
.operands
[0].imm
;
8097 constraint (current_it_mask
, BAD_NOT_IT
);
8098 current_it_mask
= (inst
.instruction
& 0xf) | 0x10;
8101 /* If the condition is a negative condition, invert the mask. */
8102 if ((cond
& 0x1) == 0x0)
8104 unsigned int mask
= inst
.instruction
& 0x000f;
8106 if ((mask
& 0x7) == 0)
8107 /* no conversion needed */;
8108 else if ((mask
& 0x3) == 0)
8110 else if ((mask
& 0x1) == 0)
8115 inst
.instruction
&= 0xfff0;
8116 inst
.instruction
|= mask
;
8119 inst
.instruction
|= cond
<< 4;
8122 /* Helper function used for both push/pop and ldm/stm. */
8124 encode_thumb2_ldmstm (int base
, unsigned mask
, bfd_boolean writeback
)
8128 load
= (inst
.instruction
& (1 << 20)) != 0;
8130 if (mask
& (1 << 13))
8131 inst
.error
= _("SP not allowed in register list");
8134 if (mask
& (1 << 14)
8135 && mask
& (1 << 15))
8136 inst
.error
= _("LR and PC should not both be in register list");
8138 if ((mask
& (1 << base
)) != 0
8140 as_warn (_("base register should not be in register list "
8141 "when written back"));
8145 if (mask
& (1 << 15))
8146 inst
.error
= _("PC not allowed in register list");
8148 if ((mask
& (1 << base
)) &&
8149 (mask
& (0xffffffff >> (32 - (base
-1)))) != 0)
8150 as_warn (_("value stored for r%d is UNPREDICTABLE"), base
);
8153 if ((mask
& (mask
- 1)) == 0)
8155 /* Single register transfers implemented as str/ldr. */
8158 if (inst
.instruction
& (1 << 23))
8159 inst
.instruction
= 0x00000b04; /* ia! -> [base], #4 */
8161 inst
.instruction
= 0x00000d04; /* db! -> [base, #-4]! */
8165 if (inst
.instruction
& (1 << 23))
8166 inst
.instruction
= 0x00800000; /* ia -> [base] */
8168 inst
.instruction
= 0x00000c04; /* db -> [base, #-4] */
8171 inst
.instruction
|= 0xf8400000;
8173 inst
.instruction
|= 0x00100000;
8175 mask
= ffs(mask
) - 1;
8179 inst
.instruction
|= WRITE_BACK
;
8181 inst
.instruction
|= mask
;
8182 inst
.instruction
|= base
<< 16;
8188 /* This really doesn't seem worth it. */
8189 constraint (inst
.reloc
.type
!= BFD_RELOC_UNUSED
,
8190 _("expression too complex"));
8191 constraint (inst
.operands
[1].writeback
,
8192 _("Thumb load/store multiple does not support {reglist}^"));
8200 /* See if we can use a 16-bit instruction. */
8201 if (inst
.instruction
< 0xffff /* not ldmdb/stmdb */
8202 && inst
.size_req
!= 4
8203 && !(inst
.operands
[1].imm
& ~0xff))
8205 mask
= 1 << inst
.operands
[0].reg
;
8207 if (inst
.operands
[0].reg
<= 7
8208 && (inst
.instruction
== T_MNEM_stmia
8209 ? inst
.operands
[0].writeback
8210 : (inst
.operands
[0].writeback
8211 == !(inst
.operands
[1].imm
& mask
))))
8213 if (inst
.instruction
== T_MNEM_stmia
8214 && (inst
.operands
[1].imm
& mask
)
8215 && (inst
.operands
[1].imm
& (mask
- 1)))
8216 as_warn (_("value stored for r%d is UNPREDICTABLE"),
8217 inst
.operands
[0].reg
);
8219 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8220 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8221 inst
.instruction
|= inst
.operands
[1].imm
;
8224 else if (inst
.operands
[0] .reg
== REG_SP
8225 && inst
.operands
[0].writeback
)
8227 inst
.instruction
= THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
8228 ? T_MNEM_push
: T_MNEM_pop
);
8229 inst
.instruction
|= inst
.operands
[1].imm
;
8236 if (inst
.instruction
< 0xffff)
8237 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8239 encode_thumb2_ldmstm(inst
.operands
[0].reg
, inst
.operands
[1].imm
,
8240 inst
.operands
[0].writeback
);
8245 constraint (inst
.operands
[0].reg
> 7
8246 || (inst
.operands
[1].imm
& ~0xff), BAD_HIREG
);
8247 constraint (inst
.instruction
!= T_MNEM_ldmia
8248 && inst
.instruction
!= T_MNEM_stmia
,
8249 _("Thumb-2 instruction only valid in unified syntax"));
8250 if (inst
.instruction
== T_MNEM_stmia
)
8252 if (!inst
.operands
[0].writeback
)
8253 as_warn (_("this instruction will write back the base register"));
8254 if ((inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
))
8255 && (inst
.operands
[1].imm
& ((1 << inst
.operands
[0].reg
) - 1)))
8256 as_warn (_("value stored for r%d is UNPREDICTABLE"),
8257 inst
.operands
[0].reg
);
8261 if (!inst
.operands
[0].writeback
8262 && !(inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
8263 as_warn (_("this instruction will write back the base register"));
8264 else if (inst
.operands
[0].writeback
8265 && (inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
8266 as_warn (_("this instruction will not write back the base register"));
8269 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8270 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8271 inst
.instruction
|= inst
.operands
[1].imm
;
8278 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
8279 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
8280 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
8281 || inst
.operands
[1].negative
,
8284 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8285 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8286 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
8292 if (!inst
.operands
[1].present
)
8294 constraint (inst
.operands
[0].reg
== REG_LR
,
8295 _("r14 not allowed as first register "
8296 "when second register is omitted"));
8297 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
8299 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
,
8302 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8303 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
8304 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8313 opcode
= inst
.instruction
;
8316 if (!inst
.operands
[1].isreg
)
8318 if (opcode
<= 0xffff)
8319 inst
.instruction
= THUMB_OP32 (opcode
);
8320 if (move_or_literal_pool (0, /*thumb_p=*/TRUE
, /*mode_3=*/FALSE
))
8323 if (inst
.operands
[1].isreg
8324 && !inst
.operands
[1].writeback
8325 && !inst
.operands
[1].shifted
&& !inst
.operands
[1].postind
8326 && !inst
.operands
[1].negative
&& inst
.operands
[0].reg
<= 7
8328 && inst
.size_req
!= 4)
8330 /* Insn may have a 16-bit form. */
8331 Rn
= inst
.operands
[1].reg
;
8332 if (inst
.operands
[1].immisreg
)
8334 inst
.instruction
= THUMB_OP16 (opcode
);
8336 if (Rn
<= 7 && inst
.operands
[1].imm
<= 7)
8339 else if ((Rn
<= 7 && opcode
!= T_MNEM_ldrsh
8340 && opcode
!= T_MNEM_ldrsb
)
8341 || ((Rn
== REG_PC
|| Rn
== REG_SP
) && opcode
== T_MNEM_ldr
)
8342 || (Rn
== REG_SP
&& opcode
== T_MNEM_str
))
8349 if (inst
.reloc
.pc_rel
)
8350 opcode
= T_MNEM_ldr_pc2
;
8352 opcode
= T_MNEM_ldr_pc
;
8356 if (opcode
== T_MNEM_ldr
)
8357 opcode
= T_MNEM_ldr_sp
;
8359 opcode
= T_MNEM_str_sp
;
8361 inst
.instruction
= inst
.operands
[0].reg
<< 8;
8365 inst
.instruction
= inst
.operands
[0].reg
;
8366 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8368 inst
.instruction
|= THUMB_OP16 (opcode
);
8369 if (inst
.size_req
== 2)
8370 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
8372 inst
.relax
= opcode
;
8376 /* Definitely a 32-bit variant. */
8377 inst
.instruction
= THUMB_OP32 (opcode
);
8378 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8379 encode_thumb32_addr_mode (1, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
8383 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
8385 if (inst
.instruction
== T_MNEM_ldrsh
|| inst
.instruction
== T_MNEM_ldrsb
)
8387 /* Only [Rn,Rm] is acceptable. */
8388 constraint (inst
.operands
[1].reg
> 7 || inst
.operands
[1].imm
> 7, BAD_HIREG
);
8389 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].immisreg
8390 || inst
.operands
[1].postind
|| inst
.operands
[1].shifted
8391 || inst
.operands
[1].negative
,
8392 _("Thumb does not support this addressing mode"));
8393 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8397 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8398 if (!inst
.operands
[1].isreg
)
8399 if (move_or_literal_pool (0, /*thumb_p=*/TRUE
, /*mode_3=*/FALSE
))
8402 constraint (!inst
.operands
[1].preind
8403 || inst
.operands
[1].shifted
8404 || inst
.operands
[1].writeback
,
8405 _("Thumb does not support this addressing mode"));
8406 if (inst
.operands
[1].reg
== REG_PC
|| inst
.operands
[1].reg
== REG_SP
)
8408 constraint (inst
.instruction
& 0x0600,
8409 _("byte or halfword not valid for base register"));
8410 constraint (inst
.operands
[1].reg
== REG_PC
8411 && !(inst
.instruction
& THUMB_LOAD_BIT
),
8412 _("r15 based store not allowed"));
8413 constraint (inst
.operands
[1].immisreg
,
8414 _("invalid base register for register offset"));
8416 if (inst
.operands
[1].reg
== REG_PC
)
8417 inst
.instruction
= T_OPCODE_LDR_PC
;
8418 else if (inst
.instruction
& THUMB_LOAD_BIT
)
8419 inst
.instruction
= T_OPCODE_LDR_SP
;
8421 inst
.instruction
= T_OPCODE_STR_SP
;
8423 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8424 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
8428 constraint (inst
.operands
[1].reg
> 7, BAD_HIREG
);
8429 if (!inst
.operands
[1].immisreg
)
8431 /* Immediate offset. */
8432 inst
.instruction
|= inst
.operands
[0].reg
;
8433 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8434 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
8438 /* Register offset. */
8439 constraint (inst
.operands
[1].imm
> 7, BAD_HIREG
);
8440 constraint (inst
.operands
[1].negative
,
8441 _("Thumb does not support this addressing mode"));
8444 switch (inst
.instruction
)
8446 case T_OPCODE_STR_IW
: inst
.instruction
= T_OPCODE_STR_RW
; break;
8447 case T_OPCODE_STR_IH
: inst
.instruction
= T_OPCODE_STR_RH
; break;
8448 case T_OPCODE_STR_IB
: inst
.instruction
= T_OPCODE_STR_RB
; break;
8449 case T_OPCODE_LDR_IW
: inst
.instruction
= T_OPCODE_LDR_RW
; break;
8450 case T_OPCODE_LDR_IH
: inst
.instruction
= T_OPCODE_LDR_RH
; break;
8451 case T_OPCODE_LDR_IB
: inst
.instruction
= T_OPCODE_LDR_RB
; break;
8452 case 0x5600 /* ldrsb */:
8453 case 0x5e00 /* ldrsh */: break;
8457 inst
.instruction
|= inst
.operands
[0].reg
;
8458 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8459 inst
.instruction
|= inst
.operands
[1].imm
<< 6;
8465 if (!inst
.operands
[1].present
)
8467 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
8468 constraint (inst
.operands
[0].reg
== REG_LR
,
8469 _("r14 not allowed here"));
8471 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8472 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
8473 encode_thumb32_addr_mode (2, /*is_t=*/FALSE
, /*is_d=*/TRUE
);
8480 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8481 encode_thumb32_addr_mode (1, /*is_t=*/TRUE
, /*is_d=*/FALSE
);
8487 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8488 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8489 inst
.instruction
|= inst
.operands
[2].reg
;
8490 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
8496 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8497 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
8498 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8499 inst
.instruction
|= inst
.operands
[3].reg
;
8507 int r0off
= (inst
.instruction
== T_MNEM_mov
8508 || inst
.instruction
== T_MNEM_movs
) ? 8 : 16;
8511 bfd_boolean low_regs
;
8513 low_regs
= (inst
.operands
[0].reg
<= 7 && inst
.operands
[1].reg
<= 7);
8514 opcode
= inst
.instruction
;
8515 if (current_it_mask
)
8516 narrow
= opcode
!= T_MNEM_movs
;
8518 narrow
= opcode
!= T_MNEM_movs
|| low_regs
;
8519 if (inst
.size_req
== 4
8520 || inst
.operands
[1].shifted
)
8523 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
8524 if (opcode
== T_MNEM_movs
&& inst
.operands
[1].isreg
8525 && !inst
.operands
[1].shifted
8526 && inst
.operands
[0].reg
== REG_PC
8527 && inst
.operands
[1].reg
== REG_LR
)
8529 inst
.instruction
= T2_SUBS_PC_LR
;
8533 if (!inst
.operands
[1].isreg
)
8535 /* Immediate operand. */
8536 if (current_it_mask
== 0 && opcode
== T_MNEM_mov
)
8538 if (low_regs
&& narrow
)
8540 inst
.instruction
= THUMB_OP16 (opcode
);
8541 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8542 if (inst
.size_req
== 2)
8543 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_IMM
;
8545 inst
.relax
= opcode
;
8549 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8550 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
8551 inst
.instruction
|= inst
.operands
[0].reg
<< r0off
;
8552 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
8555 else if (inst
.operands
[1].shifted
&& inst
.operands
[1].immisreg
8556 && (inst
.instruction
== T_MNEM_mov
8557 || inst
.instruction
== T_MNEM_movs
))
8559 /* Register shifts are encoded as separate shift instructions. */
8560 bfd_boolean flags
= (inst
.instruction
== T_MNEM_movs
);
8562 if (current_it_mask
)
8567 if (inst
.size_req
== 4)
8570 if (!low_regs
|| inst
.operands
[1].imm
> 7)
8573 if (inst
.operands
[0].reg
!= inst
.operands
[1].reg
)
8576 switch (inst
.operands
[1].shift_kind
)
8579 opcode
= narrow
? T_OPCODE_LSL_R
: THUMB_OP32 (T_MNEM_lsl
);
8582 opcode
= narrow
? T_OPCODE_ASR_R
: THUMB_OP32 (T_MNEM_asr
);
8585 opcode
= narrow
? T_OPCODE_LSR_R
: THUMB_OP32 (T_MNEM_lsr
);
8588 opcode
= narrow
? T_OPCODE_ROR_R
: THUMB_OP32 (T_MNEM_ror
);
8594 inst
.instruction
= opcode
;
8597 inst
.instruction
|= inst
.operands
[0].reg
;
8598 inst
.instruction
|= inst
.operands
[1].imm
<< 3;
8603 inst
.instruction
|= CONDS_BIT
;
8605 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8606 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8607 inst
.instruction
|= inst
.operands
[1].imm
;
8612 /* Some mov with immediate shift have narrow variants.
8613 Register shifts are handled above. */
8614 if (low_regs
&& inst
.operands
[1].shifted
8615 && (inst
.instruction
== T_MNEM_mov
8616 || inst
.instruction
== T_MNEM_movs
))
8618 if (current_it_mask
)
8619 narrow
= (inst
.instruction
== T_MNEM_mov
);
8621 narrow
= (inst
.instruction
== T_MNEM_movs
);
8626 switch (inst
.operands
[1].shift_kind
)
8628 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
8629 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
8630 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
8631 default: narrow
= FALSE
; break;
8637 inst
.instruction
|= inst
.operands
[0].reg
;
8638 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8639 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
8643 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8644 inst
.instruction
|= inst
.operands
[0].reg
<< r0off
;
8645 encode_thumb32_shifted_operand (1);
8649 switch (inst
.instruction
)
8652 inst
.instruction
= T_OPCODE_MOV_HR
;
8653 inst
.instruction
|= (inst
.operands
[0].reg
& 0x8) << 4;
8654 inst
.instruction
|= (inst
.operands
[0].reg
& 0x7);
8655 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8659 /* We know we have low registers at this point.
8660 Generate ADD Rd, Rs, #0. */
8661 inst
.instruction
= T_OPCODE_ADD_I3
;
8662 inst
.instruction
|= inst
.operands
[0].reg
;
8663 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8669 inst
.instruction
= T_OPCODE_CMP_LR
;
8670 inst
.instruction
|= inst
.operands
[0].reg
;
8671 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8675 inst
.instruction
= T_OPCODE_CMP_HR
;
8676 inst
.instruction
|= (inst
.operands
[0].reg
& 0x8) << 4;
8677 inst
.instruction
|= (inst
.operands
[0].reg
& 0x7);
8678 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8685 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8686 if (inst
.operands
[1].isreg
)
8688 if (inst
.operands
[0].reg
< 8 && inst
.operands
[1].reg
< 8)
8690 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
8691 since a MOV instruction produces unpredictable results. */
8692 if (inst
.instruction
== T_OPCODE_MOV_I8
)
8693 inst
.instruction
= T_OPCODE_ADD_I3
;
8695 inst
.instruction
= T_OPCODE_CMP_LR
;
8697 inst
.instruction
|= inst
.operands
[0].reg
;
8698 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8702 if (inst
.instruction
== T_OPCODE_MOV_I8
)
8703 inst
.instruction
= T_OPCODE_MOV_HR
;
8705 inst
.instruction
= T_OPCODE_CMP_HR
;
8711 constraint (inst
.operands
[0].reg
> 7,
8712 _("only lo regs allowed with immediate"));
8713 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8714 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_IMM
;
8724 top
= (inst
.instruction
& 0x00800000) != 0;
8725 if (inst
.reloc
.type
== BFD_RELOC_ARM_MOVW
)
8727 constraint (top
, _(":lower16: not allowed this instruction"));
8728 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_MOVW
;
8730 else if (inst
.reloc
.type
== BFD_RELOC_ARM_MOVT
)
8732 constraint (!top
, _(":upper16: not allowed this instruction"));
8733 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_MOVT
;
8736 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8737 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
8739 imm
= inst
.reloc
.exp
.X_add_number
;
8740 inst
.instruction
|= (imm
& 0xf000) << 4;
8741 inst
.instruction
|= (imm
& 0x0800) << 15;
8742 inst
.instruction
|= (imm
& 0x0700) << 4;
8743 inst
.instruction
|= (imm
& 0x00ff);
8752 int r0off
= (inst
.instruction
== T_MNEM_mvn
8753 || inst
.instruction
== T_MNEM_mvns
) ? 8 : 16;
8756 if (inst
.size_req
== 4
8757 || inst
.instruction
> 0xffff
8758 || inst
.operands
[1].shifted
8759 || inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
8761 else if (inst
.instruction
== T_MNEM_cmn
)
8763 else if (THUMB_SETS_FLAGS (inst
.instruction
))
8764 narrow
= (current_it_mask
== 0);
8766 narrow
= (current_it_mask
!= 0);
8768 if (!inst
.operands
[1].isreg
)
8770 /* For an immediate, we always generate a 32-bit opcode;
8771 section relaxation will shrink it later if possible. */
8772 if (inst
.instruction
< 0xffff)
8773 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8774 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
8775 inst
.instruction
|= inst
.operands
[0].reg
<< r0off
;
8776 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
8780 /* See if we can do this with a 16-bit instruction. */
8783 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8784 inst
.instruction
|= inst
.operands
[0].reg
;
8785 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8789 constraint (inst
.operands
[1].shifted
8790 && inst
.operands
[1].immisreg
,
8791 _("shift must be constant"));
8792 if (inst
.instruction
< 0xffff)
8793 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8794 inst
.instruction
|= inst
.operands
[0].reg
<< r0off
;
8795 encode_thumb32_shifted_operand (1);
8801 constraint (inst
.instruction
> 0xffff
8802 || inst
.instruction
== T_MNEM_mvns
, BAD_THUMB32
);
8803 constraint (!inst
.operands
[1].isreg
|| inst
.operands
[1].shifted
,
8804 _("unshifted register required"));
8805 constraint (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7,
8808 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8809 inst
.instruction
|= inst
.operands
[0].reg
;
8810 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8819 if (do_vfp_nsyn_mrs () == SUCCESS
)
8822 flags
= inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
8825 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7m
),
8826 _("selected processor does not support "
8827 "requested special purpose register"));
8831 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
),
8832 _("selected processor does not support "
8833 "requested special purpose register %x"));
8834 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
8835 constraint ((flags
& ~SPSR_BIT
) != (PSR_c
|PSR_f
),
8836 _("'CPSR' or 'SPSR' expected"));
8839 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8840 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
8841 inst
.instruction
|= inst
.operands
[1].imm
& 0xff;
8849 if (do_vfp_nsyn_msr () == SUCCESS
)
8852 constraint (!inst
.operands
[1].isreg
,
8853 _("Thumb encoding does not support an immediate here"));
8854 flags
= inst
.operands
[0].imm
;
8857 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
),
8858 _("selected processor does not support "
8859 "requested special purpose register"));
8863 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7m
),
8864 _("selected processor does not support "
8865 "requested special purpose register"));
8868 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
8869 inst
.instruction
|= (flags
& ~SPSR_BIT
) >> 8;
8870 inst
.instruction
|= (flags
& 0xff);
8871 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8877 if (!inst
.operands
[2].present
)
8878 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
8880 /* There is no 32-bit MULS and no 16-bit MUL. */
8881 if (unified_syntax
&& inst
.instruction
== T_MNEM_mul
)
8883 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8884 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8885 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8886 inst
.instruction
|= inst
.operands
[2].reg
<< 0;
8890 constraint (!unified_syntax
8891 && inst
.instruction
== T_MNEM_muls
, BAD_THUMB32
);
8892 constraint (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7,
8895 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8896 inst
.instruction
|= inst
.operands
[0].reg
;
8898 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
8899 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
8900 else if (inst
.operands
[0].reg
== inst
.operands
[2].reg
)
8901 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8903 constraint (1, _("dest must overlap one source register"));
8910 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8911 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
8912 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8913 inst
.instruction
|= inst
.operands
[3].reg
;
8915 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
8916 as_tsktsk (_("rdhi and rdlo must be different"));
8924 if (inst
.size_req
== 4 || inst
.operands
[0].imm
> 15)
8926 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8927 inst
.instruction
|= inst
.operands
[0].imm
;
8931 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8932 inst
.instruction
|= inst
.operands
[0].imm
<< 4;
8937 constraint (inst
.operands
[0].present
,
8938 _("Thumb does not support NOP with hints"));
8939 inst
.instruction
= 0x46c0;
8950 if (THUMB_SETS_FLAGS (inst
.instruction
))
8951 narrow
= (current_it_mask
== 0);
8953 narrow
= (current_it_mask
!= 0);
8954 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
8956 if (inst
.size_req
== 4)
8961 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8962 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8963 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8967 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8968 inst
.instruction
|= inst
.operands
[0].reg
;
8969 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8974 constraint (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7,
8976 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
8978 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8979 inst
.instruction
|= inst
.operands
[0].reg
;
8980 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8989 Rs
= (inst
.operands
[1].present
8990 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
8991 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
8993 if (!inst
.operands
[2].isreg
)
8995 inst
.instruction
= 0xf0600000 | (0x00100000 & inst
.instruction
);
8996 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8997 inst
.instruction
|= Rs
<< 16;
8998 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
9002 constraint (inst
.operands
[2].shifted
9003 && inst
.operands
[2].immisreg
,
9004 _("shift must be constant"));
9005 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9006 inst
.instruction
|= Rs
<< 16;
9007 encode_thumb32_shifted_operand (2);
9014 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9015 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9016 inst
.instruction
|= inst
.operands
[2].reg
;
9017 if (inst
.operands
[3].present
)
9019 unsigned int val
= inst
.reloc
.exp
.X_add_number
;
9020 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
9021 _("expression too complex"));
9022 inst
.instruction
|= (val
& 0x1c) << 10;
9023 inst
.instruction
|= (val
& 0x03) << 6;
9030 if (!inst
.operands
[3].present
)
9031 inst
.instruction
&= ~0x00000020;
9038 encode_thumb32_addr_mode (0, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
9042 do_t_push_pop (void)
9046 constraint (inst
.operands
[0].writeback
,
9047 _("push/pop do not support {reglist}^"));
9048 constraint (inst
.reloc
.type
!= BFD_RELOC_UNUSED
,
9049 _("expression too complex"));
9051 mask
= inst
.operands
[0].imm
;
9052 if ((mask
& ~0xff) == 0)
9053 inst
.instruction
= THUMB_OP16 (inst
.instruction
) | mask
;
9054 else if ((inst
.instruction
== T_MNEM_push
9055 && (mask
& ~0xff) == 1 << REG_LR
)
9056 || (inst
.instruction
== T_MNEM_pop
9057 && (mask
& ~0xff) == 1 << REG_PC
))
9059 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9060 inst
.instruction
|= THUMB_PP_PC_LR
;
9061 inst
.instruction
|= mask
& 0xff;
9063 else if (unified_syntax
)
9065 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9066 encode_thumb2_ldmstm(13, mask
, TRUE
);
9070 inst
.error
= _("invalid register list to push/pop instruction");
9078 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9079 /* Rm is bits 3-0 is in of *both* 16-bit halves of the opcode */
9080 inst
.instruction
|= inst
.operands
[1].reg
;
9081 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9087 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9088 inst
.instruction
|= inst
.operands
[1].reg
;
9092 do_t_rd_rm_rn (void)
9094 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9095 inst
.instruction
|= inst
.operands
[1].reg
;
9096 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9102 if (inst
.operands
[0].reg
<= 7 && inst
.operands
[1].reg
<= 7
9103 && inst
.size_req
!= 4)
9105 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9106 inst
.instruction
|= inst
.operands
[0].reg
;
9107 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9109 else if (unified_syntax
)
9111 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9112 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9113 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9114 inst
.instruction
|= inst
.operands
[1].reg
;
9117 inst
.error
= BAD_HIREG
;
9125 Rd
= inst
.operands
[0].reg
;
9126 Rs
= (inst
.operands
[1].present
9127 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
9128 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
9130 inst
.instruction
|= Rd
<< 8;
9131 inst
.instruction
|= Rs
<< 16;
9132 if (!inst
.operands
[2].isreg
)
9136 if ((inst
.instruction
& 0x00100000) != 0)
9137 narrow
= (current_it_mask
== 0);
9139 narrow
= (current_it_mask
!= 0);
9141 if (Rd
> 7 || Rs
> 7)
9144 if (inst
.size_req
== 4 || !unified_syntax
)
9147 if (inst
.reloc
.exp
.X_op
!= O_constant
9148 || inst
.reloc
.exp
.X_add_number
!= 0)
9151 /* Turn rsb #0 into 16-bit neg. We should probably do this via
9152 relaxation, but it doesn't seem worth the hassle. */
9155 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
9156 inst
.instruction
= THUMB_OP16 (T_MNEM_negs
);
9157 inst
.instruction
|= Rs
<< 3;
9158 inst
.instruction
|= Rd
;
9162 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
9163 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
9167 encode_thumb32_shifted_operand (2);
9173 constraint (current_it_mask
, BAD_NOT_IT
);
9174 if (inst
.operands
[0].imm
)
9175 inst
.instruction
|= 0x8;
9181 if (!inst
.operands
[1].present
)
9182 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
9189 switch (inst
.instruction
)
9192 case T_MNEM_asrs
: shift_kind
= SHIFT_ASR
; break;
9194 case T_MNEM_lsls
: shift_kind
= SHIFT_LSL
; break;
9196 case T_MNEM_lsrs
: shift_kind
= SHIFT_LSR
; break;
9198 case T_MNEM_rors
: shift_kind
= SHIFT_ROR
; break;
9202 if (THUMB_SETS_FLAGS (inst
.instruction
))
9203 narrow
= (current_it_mask
== 0);
9205 narrow
= (current_it_mask
!= 0);
9206 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
9208 if (!inst
.operands
[2].isreg
&& shift_kind
== SHIFT_ROR
)
9210 if (inst
.operands
[2].isreg
9211 && (inst
.operands
[1].reg
!= inst
.operands
[0].reg
9212 || inst
.operands
[2].reg
> 7))
9214 if (inst
.size_req
== 4)
9219 if (inst
.operands
[2].isreg
)
9221 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9222 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9223 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9224 inst
.instruction
|= inst
.operands
[2].reg
;
9228 inst
.operands
[1].shifted
= 1;
9229 inst
.operands
[1].shift_kind
= shift_kind
;
9230 inst
.instruction
= THUMB_OP32 (THUMB_SETS_FLAGS (inst
.instruction
)
9231 ? T_MNEM_movs
: T_MNEM_mov
);
9232 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9233 encode_thumb32_shifted_operand (1);
9234 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
9235 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
9240 if (inst
.operands
[2].isreg
)
9244 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_R
; break;
9245 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_R
; break;
9246 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_R
; break;
9247 case SHIFT_ROR
: inst
.instruction
= T_OPCODE_ROR_R
; break;
9251 inst
.instruction
|= inst
.operands
[0].reg
;
9252 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
9258 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
9259 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
9260 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
9263 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
9264 inst
.instruction
|= inst
.operands
[0].reg
;
9265 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9271 constraint (inst
.operands
[0].reg
> 7
9272 || inst
.operands
[1].reg
> 7, BAD_HIREG
);
9273 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
9275 if (inst
.operands
[2].isreg
) /* Rd, {Rs,} Rn */
9277 constraint (inst
.operands
[2].reg
> 7, BAD_HIREG
);
9278 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
9279 _("source1 and dest must be same register"));
9281 switch (inst
.instruction
)
9283 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_R
; break;
9284 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_R
; break;
9285 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_R
; break;
9286 case T_MNEM_ror
: inst
.instruction
= T_OPCODE_ROR_R
; break;
9290 inst
.instruction
|= inst
.operands
[0].reg
;
9291 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
9295 switch (inst
.instruction
)
9297 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_I
; break;
9298 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_I
; break;
9299 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_I
; break;
9300 case T_MNEM_ror
: inst
.error
= _("ror #imm not supported"); return;
9303 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
9304 inst
.instruction
|= inst
.operands
[0].reg
;
9305 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9313 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9314 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9315 inst
.instruction
|= inst
.operands
[2].reg
;
9321 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
9322 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
9323 _("expression too complex"));
9324 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
9325 inst
.instruction
|= (value
& 0xf000) >> 12;
9326 inst
.instruction
|= (value
& 0x0ff0);
9327 inst
.instruction
|= (value
& 0x000f) << 16;
9333 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9334 inst
.instruction
|= inst
.operands
[1].imm
- 1;
9335 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9337 if (inst
.operands
[3].present
)
9339 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
9340 _("expression too complex"));
9342 if (inst
.reloc
.exp
.X_add_number
!= 0)
9344 if (inst
.operands
[3].shift_kind
== SHIFT_ASR
)
9345 inst
.instruction
|= 0x00200000; /* sh bit */
9346 inst
.instruction
|= (inst
.reloc
.exp
.X_add_number
& 0x1c) << 10;
9347 inst
.instruction
|= (inst
.reloc
.exp
.X_add_number
& 0x03) << 6;
9349 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
9356 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9357 inst
.instruction
|= inst
.operands
[1].imm
- 1;
9358 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9364 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
9365 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
9366 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
9367 || inst
.operands
[2].negative
,
9370 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9371 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9372 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9373 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
9379 if (!inst
.operands
[2].present
)
9380 inst
.operands
[2].reg
= inst
.operands
[1].reg
+ 1;
9382 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9383 || inst
.operands
[0].reg
== inst
.operands
[2].reg
9384 || inst
.operands
[0].reg
== inst
.operands
[3].reg
9385 || inst
.operands
[1].reg
== inst
.operands
[2].reg
,
9388 inst
.instruction
|= inst
.operands
[0].reg
;
9389 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9390 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9391 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
9397 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9398 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9399 inst
.instruction
|= inst
.operands
[2].reg
;
9400 inst
.instruction
|= inst
.operands
[3].imm
<< 4;
9406 if (inst
.instruction
<= 0xffff && inst
.size_req
!= 4
9407 && inst
.operands
[0].reg
<= 7 && inst
.operands
[1].reg
<= 7
9408 && (!inst
.operands
[2].present
|| inst
.operands
[2].imm
== 0))
9410 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9411 inst
.instruction
|= inst
.operands
[0].reg
;
9412 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9414 else if (unified_syntax
)
9416 if (inst
.instruction
<= 0xffff)
9417 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9418 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9419 inst
.instruction
|= inst
.operands
[1].reg
;
9420 inst
.instruction
|= inst
.operands
[2].imm
<< 4;
9424 constraint (inst
.operands
[2].present
&& inst
.operands
[2].imm
!= 0,
9425 _("Thumb encoding does not support rotation"));
9426 constraint (1, BAD_HIREG
);
9433 inst
.reloc
.type
= BFD_RELOC_ARM_SWI
;
9441 half
= (inst
.instruction
& 0x10) != 0;
9442 constraint (current_it_mask
&& current_it_mask
!= 0x10, BAD_BRANCH
);
9443 constraint (inst
.operands
[0].immisreg
,
9444 _("instruction requires register index"));
9445 constraint (inst
.operands
[0].imm
== 15,
9446 _("PC is not a valid index register"));
9447 constraint (!half
&& inst
.operands
[0].shifted
,
9448 _("instruction does not allow shifted index"));
9449 inst
.instruction
|= (inst
.operands
[0].reg
<< 16) | inst
.operands
[0].imm
;
9455 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9456 inst
.instruction
|= inst
.operands
[1].imm
;
9457 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9459 if (inst
.operands
[3].present
)
9461 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
9462 _("expression too complex"));
9463 if (inst
.reloc
.exp
.X_add_number
!= 0)
9465 if (inst
.operands
[3].shift_kind
== SHIFT_ASR
)
9466 inst
.instruction
|= 0x00200000; /* sh bit */
9468 inst
.instruction
|= (inst
.reloc
.exp
.X_add_number
& 0x1c) << 10;
9469 inst
.instruction
|= (inst
.reloc
.exp
.X_add_number
& 0x03) << 6;
9471 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
9478 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9479 inst
.instruction
|= inst
.operands
[1].imm
;
9480 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9483 /* Neon instruction encoder helpers. */
9485 /* Encodings for the different types for various Neon opcodes. */
9487 /* An "invalid" code for the following tables. */
9490 struct neon_tab_entry
9493 unsigned float_or_poly
;
9494 unsigned scalar_or_imm
;
9497 /* Map overloaded Neon opcodes to their respective encodings. */
9498 #define NEON_ENC_TAB \
9499 X(vabd, 0x0000700, 0x1200d00, N_INV), \
9500 X(vmax, 0x0000600, 0x0000f00, N_INV), \
9501 X(vmin, 0x0000610, 0x0200f00, N_INV), \
9502 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
9503 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
9504 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
9505 X(vadd, 0x0000800, 0x0000d00, N_INV), \
9506 X(vsub, 0x1000800, 0x0200d00, N_INV), \
9507 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
9508 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
9509 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
9510 /* Register variants of the following two instructions are encoded as
9511 vcge / vcgt with the operands reversed. */ \
9512 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
9513 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
9514 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
9515 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
9516 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
9517 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
9518 X(vmlal, 0x0800800, N_INV, 0x0800240), \
9519 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
9520 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
9521 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
9522 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
9523 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
9524 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
9525 X(vshl, 0x0000400, N_INV, 0x0800510), \
9526 X(vqshl, 0x0000410, N_INV, 0x0800710), \
9527 X(vand, 0x0000110, N_INV, 0x0800030), \
9528 X(vbic, 0x0100110, N_INV, 0x0800030), \
9529 X(veor, 0x1000110, N_INV, N_INV), \
9530 X(vorn, 0x0300110, N_INV, 0x0800010), \
9531 X(vorr, 0x0200110, N_INV, 0x0800010), \
9532 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
9533 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
9534 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
9535 X(vcvtt, 0x1b30600, N_INV, 0x0800e10), /* single, half-precision.*/ \
9536 X(vcvtb, 0x1b30600, N_INV, 0x0800e10), /* single, half-precision.*/ \
9537 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
9538 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
9539 X(vst1, 0x0000000, 0x0800000, N_INV), \
9540 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
9541 X(vst2, 0x0000100, 0x0800100, N_INV), \
9542 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
9543 X(vst3, 0x0000200, 0x0800200, N_INV), \
9544 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
9545 X(vst4, 0x0000300, 0x0800300, N_INV), \
9546 X(vmovn, 0x1b20200, N_INV, N_INV), \
9547 X(vtrn, 0x1b20080, N_INV, N_INV), \
9548 X(vqmovn, 0x1b20200, N_INV, N_INV), \
9549 X(vqmovun, 0x1b20240, N_INV, N_INV), \
9550 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
9551 X(vnmla, 0xe100a40, 0xe100b40, N_INV), \
9552 X(vnmls, 0xe100a00, 0xe100b00, N_INV), \
9553 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
9554 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
9555 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
9556 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV)
9560 #define X(OPC,I,F,S) N_MNEM_##OPC
9565 static const struct neon_tab_entry neon_enc_tab
[] =
9567 #define X(OPC,I,F,S) { (I), (F), (S) }
9572 #define NEON_ENC_INTEGER(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
9573 #define NEON_ENC_ARMREG(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
9574 #define NEON_ENC_POLY(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
9575 #define NEON_ENC_FLOAT(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
9576 #define NEON_ENC_SCALAR(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
9577 #define NEON_ENC_IMMED(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
9578 #define NEON_ENC_INTERLV(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
9579 #define NEON_ENC_LANE(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
9580 #define NEON_ENC_DUP(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
9581 #define NEON_ENC_SINGLE(X) \
9582 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
9583 #define NEON_ENC_DOUBLE(X) \
9584 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
9586 /* Define shapes for instruction operands. The following mnemonic characters
9587 are used in this table:
9589 F - VFP S<n> register
9590 D - Neon D<n> register
9591 Q - Neon Q<n> register
9595 L - D<n> register list
9597 This table is used to generate various data:
9598 - enumerations of the form NS_DDR to be used as arguments to
9600 - a table classifying shapes into single, double, quad, mixed.
9601 - a table used to drive neon_select_shape.
9604 #define NEON_SHAPE_DEF \
9605 X(3, (D, D, D), DOUBLE), \
9606 X(3, (Q, Q, Q), QUAD), \
9607 X(3, (D, D, I), DOUBLE), \
9608 X(3, (Q, Q, I), QUAD), \
9609 X(3, (D, D, S), DOUBLE), \
9610 X(3, (Q, Q, S), QUAD), \
9611 X(2, (D, D), DOUBLE), \
9612 X(2, (Q, Q), QUAD), \
9613 X(2, (D, S), DOUBLE), \
9614 X(2, (Q, S), QUAD), \
9615 X(2, (D, R), DOUBLE), \
9616 X(2, (Q, R), QUAD), \
9617 X(2, (D, I), DOUBLE), \
9618 X(2, (Q, I), QUAD), \
9619 X(3, (D, L, D), DOUBLE), \
9620 X(2, (D, Q), MIXED), \
9621 X(2, (Q, D), MIXED), \
9622 X(3, (D, Q, I), MIXED), \
9623 X(3, (Q, D, I), MIXED), \
9624 X(3, (Q, D, D), MIXED), \
9625 X(3, (D, Q, Q), MIXED), \
9626 X(3, (Q, Q, D), MIXED), \
9627 X(3, (Q, D, S), MIXED), \
9628 X(3, (D, Q, S), MIXED), \
9629 X(4, (D, D, D, I), DOUBLE), \
9630 X(4, (Q, Q, Q, I), QUAD), \
9631 X(2, (F, F), SINGLE), \
9632 X(3, (F, F, F), SINGLE), \
9633 X(2, (F, I), SINGLE), \
9634 X(2, (F, D), MIXED), \
9635 X(2, (D, F), MIXED), \
9636 X(3, (F, F, I), MIXED), \
9637 X(4, (R, R, F, F), SINGLE), \
9638 X(4, (F, F, R, R), SINGLE), \
9639 X(3, (D, R, R), DOUBLE), \
9640 X(3, (R, R, D), DOUBLE), \
9641 X(2, (S, R), SINGLE), \
9642 X(2, (R, S), SINGLE), \
9643 X(2, (F, R), SINGLE), \
9644 X(2, (R, F), SINGLE)
9646 #define S2(A,B) NS_##A##B
9647 #define S3(A,B,C) NS_##A##B##C
9648 #define S4(A,B,C,D) NS_##A##B##C##D
9650 #define X(N, L, C) S##N L
9663 enum neon_shape_class
9671 #define X(N, L, C) SC_##C
9673 static enum neon_shape_class neon_shape_class
[] =
9691 /* Register widths of above. */
9692 static unsigned neon_shape_el_size
[] =
9703 struct neon_shape_info
9706 enum neon_shape_el el
[NEON_MAX_TYPE_ELS
];
9709 #define S2(A,B) { SE_##A, SE_##B }
9710 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
9711 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
9713 #define X(N, L, C) { N, S##N L }
9715 static struct neon_shape_info neon_shape_tab
[] =
9725 /* Bit masks used in type checking given instructions.
9726 'N_EQK' means the type must be the same as (or based on in some way) the key
9727 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
9728 set, various other bits can be set as well in order to modify the meaning of
9729 the type constraint. */
9754 N_KEY
= 0x200000, /* key element (main type specifier). */
9755 N_EQK
= 0x400000, /* given operand has the same type & size as the key. */
9756 N_VFP
= 0x800000, /* VFP mode: operand size must match register width. */
9757 N_DBL
= 0x000001, /* if N_EQK, this operand is twice the size. */
9758 N_HLF
= 0x000002, /* if N_EQK, this operand is half the size. */
9759 N_SGN
= 0x000004, /* if N_EQK, this operand is forced to be signed. */
9760 N_UNS
= 0x000008, /* if N_EQK, this operand is forced to be unsigned. */
9761 N_INT
= 0x000010, /* if N_EQK, this operand is forced to be integer. */
9762 N_FLT
= 0x000020, /* if N_EQK, this operand is forced to be float. */
9763 N_SIZ
= 0x000040, /* if N_EQK, this operand is forced to be size-only. */
9765 N_MAX_NONSPECIAL
= N_F64
9768 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
9770 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
9771 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
9772 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
9773 #define N_SUF_32 (N_SU_32 | N_F32)
9774 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
9775 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F32)
9777 /* Pass this as the first type argument to neon_check_type to ignore types
9779 #define N_IGNORE_TYPE (N_KEY | N_EQK)
9781 /* Select a "shape" for the current instruction (describing register types or
9782 sizes) from a list of alternatives. Return NS_NULL if the current instruction
9783 doesn't fit. For non-polymorphic shapes, checking is usually done as a
9784 function of operand parsing, so this function doesn't need to be called.
9785 Shapes should be listed in order of decreasing length. */
9787 static enum neon_shape
9788 neon_select_shape (enum neon_shape shape
, ...)
9791 enum neon_shape first_shape
= shape
;
9793 /* Fix missing optional operands. FIXME: we don't know at this point how
9794 many arguments we should have, so this makes the assumption that we have
9795 > 1. This is true of all current Neon opcodes, I think, but may not be
9796 true in the future. */
9797 if (!inst
.operands
[1].present
)
9798 inst
.operands
[1] = inst
.operands
[0];
9800 va_start (ap
, shape
);
9802 for (; shape
!= NS_NULL
; shape
= va_arg (ap
, int))
9807 for (j
= 0; j
< neon_shape_tab
[shape
].els
; j
++)
9809 if (!inst
.operands
[j
].present
)
9815 switch (neon_shape_tab
[shape
].el
[j
])
9818 if (!(inst
.operands
[j
].isreg
9819 && inst
.operands
[j
].isvec
9820 && inst
.operands
[j
].issingle
9821 && !inst
.operands
[j
].isquad
))
9826 if (!(inst
.operands
[j
].isreg
9827 && inst
.operands
[j
].isvec
9828 && !inst
.operands
[j
].isquad
9829 && !inst
.operands
[j
].issingle
))
9834 if (!(inst
.operands
[j
].isreg
9835 && !inst
.operands
[j
].isvec
))
9840 if (!(inst
.operands
[j
].isreg
9841 && inst
.operands
[j
].isvec
9842 && inst
.operands
[j
].isquad
9843 && !inst
.operands
[j
].issingle
))
9848 if (!(!inst
.operands
[j
].isreg
9849 && !inst
.operands
[j
].isscalar
))
9854 if (!(!inst
.operands
[j
].isreg
9855 && inst
.operands
[j
].isscalar
))
9869 if (shape
== NS_NULL
&& first_shape
!= NS_NULL
)
9870 first_error (_("invalid instruction shape"));
9875 /* True if SHAPE is predominantly a quadword operation (most of the time, this
9876 means the Q bit should be set). */
9879 neon_quad (enum neon_shape shape
)
9881 return neon_shape_class
[shape
] == SC_QUAD
;
9885 neon_modify_type_size (unsigned typebits
, enum neon_el_type
*g_type
,
9888 /* Allow modification to be made to types which are constrained to be
9889 based on the key element, based on bits set alongside N_EQK. */
9890 if ((typebits
& N_EQK
) != 0)
9892 if ((typebits
& N_HLF
) != 0)
9894 else if ((typebits
& N_DBL
) != 0)
9896 if ((typebits
& N_SGN
) != 0)
9897 *g_type
= NT_signed
;
9898 else if ((typebits
& N_UNS
) != 0)
9899 *g_type
= NT_unsigned
;
9900 else if ((typebits
& N_INT
) != 0)
9901 *g_type
= NT_integer
;
9902 else if ((typebits
& N_FLT
) != 0)
9904 else if ((typebits
& N_SIZ
) != 0)
9905 *g_type
= NT_untyped
;
9909 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
9910 operand type, i.e. the single type specified in a Neon instruction when it
9911 is the only one given. */
9913 static struct neon_type_el
9914 neon_type_promote (struct neon_type_el
*key
, unsigned thisarg
)
9916 struct neon_type_el dest
= *key
;
9918 assert ((thisarg
& N_EQK
) != 0);
9920 neon_modify_type_size (thisarg
, &dest
.type
, &dest
.size
);
9925 /* Convert Neon type and size into compact bitmask representation. */
9927 static enum neon_type_mask
9928 type_chk_of_el_type (enum neon_el_type type
, unsigned size
)
9936 case 16: return N_16
;
9937 case 32: return N_32
;
9938 case 64: return N_64
;
9946 case 8: return N_I8
;
9947 case 16: return N_I16
;
9948 case 32: return N_I32
;
9949 case 64: return N_I64
;
9957 case 16: return N_F16
;
9958 case 32: return N_F32
;
9959 case 64: return N_F64
;
9967 case 8: return N_P8
;
9968 case 16: return N_P16
;
9976 case 8: return N_S8
;
9977 case 16: return N_S16
;
9978 case 32: return N_S32
;
9979 case 64: return N_S64
;
9987 case 8: return N_U8
;
9988 case 16: return N_U16
;
9989 case 32: return N_U32
;
9990 case 64: return N_U64
;
10001 /* Convert compact Neon bitmask type representation to a type and size. Only
10002 handles the case where a single bit is set in the mask. */
10005 el_type_of_type_chk (enum neon_el_type
*type
, unsigned *size
,
10006 enum neon_type_mask mask
)
10008 if ((mask
& N_EQK
) != 0)
10011 if ((mask
& (N_S8
| N_U8
| N_I8
| N_8
| N_P8
)) != 0)
10013 else if ((mask
& (N_S16
| N_U16
| N_I16
| N_16
| N_P16
)) != 0)
10015 else if ((mask
& (N_S32
| N_U32
| N_I32
| N_32
| N_F32
)) != 0)
10017 else if ((mask
& (N_S64
| N_U64
| N_I64
| N_64
| N_F64
)) != 0)
10022 if ((mask
& (N_S8
| N_S16
| N_S32
| N_S64
)) != 0)
10024 else if ((mask
& (N_U8
| N_U16
| N_U32
| N_U64
)) != 0)
10025 *type
= NT_unsigned
;
10026 else if ((mask
& (N_I8
| N_I16
| N_I32
| N_I64
)) != 0)
10027 *type
= NT_integer
;
10028 else if ((mask
& (N_8
| N_16
| N_32
| N_64
)) != 0)
10029 *type
= NT_untyped
;
10030 else if ((mask
& (N_P8
| N_P16
)) != 0)
10032 else if ((mask
& (N_F32
| N_F64
)) != 0)
10040 /* Modify a bitmask of allowed types. This is only needed for type
10044 modify_types_allowed (unsigned allowed
, unsigned mods
)
10047 enum neon_el_type type
;
10053 for (i
= 1; i
<= N_MAX_NONSPECIAL
; i
<<= 1)
10055 if (el_type_of_type_chk (&type
, &size
, allowed
& i
) == SUCCESS
)
10057 neon_modify_type_size (mods
, &type
, &size
);
10058 destmask
|= type_chk_of_el_type (type
, size
);
10065 /* Check type and return type classification.
10066 The manual states (paraphrase): If one datatype is given, it indicates the
10068 - the second operand, if there is one
10069 - the operand, if there is no second operand
10070 - the result, if there are no operands.
10071 This isn't quite good enough though, so we use a concept of a "key" datatype
10072 which is set on a per-instruction basis, which is the one which matters when
10073 only one data type is written.
10074 Note: this function has side-effects (e.g. filling in missing operands). All
10075 Neon instructions should call it before performing bit encoding. */
10077 static struct neon_type_el
10078 neon_check_type (unsigned els
, enum neon_shape ns
, ...)
10081 unsigned i
, pass
, key_el
= 0;
10082 unsigned types
[NEON_MAX_TYPE_ELS
];
10083 enum neon_el_type k_type
= NT_invtype
;
10084 unsigned k_size
= -1u;
10085 struct neon_type_el badtype
= {NT_invtype
, -1};
10086 unsigned key_allowed
= 0;
10088 /* Optional registers in Neon instructions are always (not) in operand 1.
10089 Fill in the missing operand here, if it was omitted. */
10090 if (els
> 1 && !inst
.operands
[1].present
)
10091 inst
.operands
[1] = inst
.operands
[0];
10093 /* Suck up all the varargs. */
10095 for (i
= 0; i
< els
; i
++)
10097 unsigned thisarg
= va_arg (ap
, unsigned);
10098 if (thisarg
== N_IGNORE_TYPE
)
10103 types
[i
] = thisarg
;
10104 if ((thisarg
& N_KEY
) != 0)
10109 if (inst
.vectype
.elems
> 0)
10110 for (i
= 0; i
< els
; i
++)
10111 if (inst
.operands
[i
].vectype
.type
!= NT_invtype
)
10113 first_error (_("types specified in both the mnemonic and operands"));
10117 /* Duplicate inst.vectype elements here as necessary.
10118 FIXME: No idea if this is exactly the same as the ARM assembler,
10119 particularly when an insn takes one register and one non-register
10121 if (inst
.vectype
.elems
== 1 && els
> 1)
10124 inst
.vectype
.elems
= els
;
10125 inst
.vectype
.el
[key_el
] = inst
.vectype
.el
[0];
10126 for (j
= 0; j
< els
; j
++)
10128 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
10131 else if (inst
.vectype
.elems
== 0 && els
> 0)
10134 /* No types were given after the mnemonic, so look for types specified
10135 after each operand. We allow some flexibility here; as long as the
10136 "key" operand has a type, we can infer the others. */
10137 for (j
= 0; j
< els
; j
++)
10138 if (inst
.operands
[j
].vectype
.type
!= NT_invtype
)
10139 inst
.vectype
.el
[j
] = inst
.operands
[j
].vectype
;
10141 if (inst
.operands
[key_el
].vectype
.type
!= NT_invtype
)
10143 for (j
= 0; j
< els
; j
++)
10144 if (inst
.operands
[j
].vectype
.type
== NT_invtype
)
10145 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
10150 first_error (_("operand types can't be inferred"));
10154 else if (inst
.vectype
.elems
!= els
)
10156 first_error (_("type specifier has the wrong number of parts"));
10160 for (pass
= 0; pass
< 2; pass
++)
10162 for (i
= 0; i
< els
; i
++)
10164 unsigned thisarg
= types
[i
];
10165 unsigned types_allowed
= ((thisarg
& N_EQK
) != 0 && pass
!= 0)
10166 ? modify_types_allowed (key_allowed
, thisarg
) : thisarg
;
10167 enum neon_el_type g_type
= inst
.vectype
.el
[i
].type
;
10168 unsigned g_size
= inst
.vectype
.el
[i
].size
;
10170 /* Decay more-specific signed & unsigned types to sign-insensitive
10171 integer types if sign-specific variants are unavailable. */
10172 if ((g_type
== NT_signed
|| g_type
== NT_unsigned
)
10173 && (types_allowed
& N_SU_ALL
) == 0)
10174 g_type
= NT_integer
;
10176 /* If only untyped args are allowed, decay any more specific types to
10177 them. Some instructions only care about signs for some element
10178 sizes, so handle that properly. */
10179 if ((g_size
== 8 && (types_allowed
& N_8
) != 0)
10180 || (g_size
== 16 && (types_allowed
& N_16
) != 0)
10181 || (g_size
== 32 && (types_allowed
& N_32
) != 0)
10182 || (g_size
== 64 && (types_allowed
& N_64
) != 0))
10183 g_type
= NT_untyped
;
10187 if ((thisarg
& N_KEY
) != 0)
10191 key_allowed
= thisarg
& ~N_KEY
;
10196 if ((thisarg
& N_VFP
) != 0)
10198 enum neon_shape_el regshape
= neon_shape_tab
[ns
].el
[i
];
10199 unsigned regwidth
= neon_shape_el_size
[regshape
], match
;
10201 /* In VFP mode, operands must match register widths. If we
10202 have a key operand, use its width, else use the width of
10203 the current operand. */
10209 if (regwidth
!= match
)
10211 first_error (_("operand size must match register width"));
10216 if ((thisarg
& N_EQK
) == 0)
10218 unsigned given_type
= type_chk_of_el_type (g_type
, g_size
);
10220 if ((given_type
& types_allowed
) == 0)
10222 first_error (_("bad type in Neon instruction"));
10228 enum neon_el_type mod_k_type
= k_type
;
10229 unsigned mod_k_size
= k_size
;
10230 neon_modify_type_size (thisarg
, &mod_k_type
, &mod_k_size
);
10231 if (g_type
!= mod_k_type
|| g_size
!= mod_k_size
)
10233 first_error (_("inconsistent types in Neon instruction"));
10241 return inst
.vectype
.el
[key_el
];
10244 /* Neon-style VFP instruction forwarding. */
10246 /* Thumb VFP instructions have 0xE in the condition field. */
10249 do_vfp_cond_or_thumb (void)
10252 inst
.instruction
|= 0xe0000000;
10254 inst
.instruction
|= inst
.cond
<< 28;
10257 /* Look up and encode a simple mnemonic, for use as a helper function for the
10258 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
10259 etc. It is assumed that operand parsing has already been done, and that the
10260 operands are in the form expected by the given opcode (this isn't necessarily
10261 the same as the form in which they were parsed, hence some massaging must
10262 take place before this function is called).
10263 Checks current arch version against that in the looked-up opcode. */
10266 do_vfp_nsyn_opcode (const char *opname
)
10268 const struct asm_opcode
*opcode
;
10270 opcode
= hash_find (arm_ops_hsh
, opname
);
10275 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
,
10276 thumb_mode
? *opcode
->tvariant
: *opcode
->avariant
),
10281 inst
.instruction
= opcode
->tvalue
;
10282 opcode
->tencode ();
10286 inst
.instruction
= (inst
.cond
<< 28) | opcode
->avalue
;
10287 opcode
->aencode ();
10292 do_vfp_nsyn_add_sub (enum neon_shape rs
)
10294 int is_add
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vadd
;
10299 do_vfp_nsyn_opcode ("fadds");
10301 do_vfp_nsyn_opcode ("fsubs");
10306 do_vfp_nsyn_opcode ("faddd");
10308 do_vfp_nsyn_opcode ("fsubd");
10312 /* Check operand types to see if this is a VFP instruction, and if so call
10316 try_vfp_nsyn (int args
, void (*pfn
) (enum neon_shape
))
10318 enum neon_shape rs
;
10319 struct neon_type_el et
;
10324 rs
= neon_select_shape (NS_FF
, NS_DD
, NS_NULL
);
10325 et
= neon_check_type (2, rs
,
10326 N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
10330 rs
= neon_select_shape (NS_FFF
, NS_DDD
, NS_NULL
);
10331 et
= neon_check_type (3, rs
,
10332 N_EQK
| N_VFP
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
10339 if (et
.type
!= NT_invtype
)
10351 do_vfp_nsyn_mla_mls (enum neon_shape rs
)
10353 int is_mla
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vmla
;
10358 do_vfp_nsyn_opcode ("fmacs");
10360 do_vfp_nsyn_opcode ("fnmacs");
10365 do_vfp_nsyn_opcode ("fmacd");
10367 do_vfp_nsyn_opcode ("fnmacd");
10372 do_vfp_nsyn_mul (enum neon_shape rs
)
10375 do_vfp_nsyn_opcode ("fmuls");
10377 do_vfp_nsyn_opcode ("fmuld");
10381 do_vfp_nsyn_abs_neg (enum neon_shape rs
)
10383 int is_neg
= (inst
.instruction
& 0x80) != 0;
10384 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_VFP
| N_KEY
);
10389 do_vfp_nsyn_opcode ("fnegs");
10391 do_vfp_nsyn_opcode ("fabss");
10396 do_vfp_nsyn_opcode ("fnegd");
10398 do_vfp_nsyn_opcode ("fabsd");
10402 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
10403 insns belong to Neon, and are handled elsewhere. */
10406 do_vfp_nsyn_ldm_stm (int is_dbmode
)
10408 int is_ldm
= (inst
.instruction
& (1 << 20)) != 0;
10412 do_vfp_nsyn_opcode ("fldmdbs");
10414 do_vfp_nsyn_opcode ("fldmias");
10419 do_vfp_nsyn_opcode ("fstmdbs");
10421 do_vfp_nsyn_opcode ("fstmias");
10426 do_vfp_nsyn_sqrt (void)
10428 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_DD
, NS_NULL
);
10429 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
10432 do_vfp_nsyn_opcode ("fsqrts");
10434 do_vfp_nsyn_opcode ("fsqrtd");
10438 do_vfp_nsyn_div (void)
10440 enum neon_shape rs
= neon_select_shape (NS_FFF
, NS_DDD
, NS_NULL
);
10441 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
10442 N_F32
| N_F64
| N_KEY
| N_VFP
);
10445 do_vfp_nsyn_opcode ("fdivs");
10447 do_vfp_nsyn_opcode ("fdivd");
10451 do_vfp_nsyn_nmul (void)
10453 enum neon_shape rs
= neon_select_shape (NS_FFF
, NS_DDD
, NS_NULL
);
10454 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
10455 N_F32
| N_F64
| N_KEY
| N_VFP
);
10459 inst
.instruction
= NEON_ENC_SINGLE (inst
.instruction
);
10460 do_vfp_sp_dyadic ();
10464 inst
.instruction
= NEON_ENC_DOUBLE (inst
.instruction
);
10465 do_vfp_dp_rd_rn_rm ();
10467 do_vfp_cond_or_thumb ();
10471 do_vfp_nsyn_cmp (void)
10473 if (inst
.operands
[1].isreg
)
10475 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_DD
, NS_NULL
);
10476 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
10480 inst
.instruction
= NEON_ENC_SINGLE (inst
.instruction
);
10481 do_vfp_sp_monadic ();
10485 inst
.instruction
= NEON_ENC_DOUBLE (inst
.instruction
);
10486 do_vfp_dp_rd_rm ();
10491 enum neon_shape rs
= neon_select_shape (NS_FI
, NS_DI
, NS_NULL
);
10492 neon_check_type (2, rs
, N_F32
| N_F64
| N_KEY
| N_VFP
, N_EQK
);
10494 switch (inst
.instruction
& 0x0fffffff)
10497 inst
.instruction
+= N_MNEM_vcmpz
- N_MNEM_vcmp
;
10500 inst
.instruction
+= N_MNEM_vcmpez
- N_MNEM_vcmpe
;
10508 inst
.instruction
= NEON_ENC_SINGLE (inst
.instruction
);
10509 do_vfp_sp_compare_z ();
10513 inst
.instruction
= NEON_ENC_DOUBLE (inst
.instruction
);
10517 do_vfp_cond_or_thumb ();
10521 nsyn_insert_sp (void)
10523 inst
.operands
[1] = inst
.operands
[0];
10524 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
10525 inst
.operands
[0].reg
= 13;
10526 inst
.operands
[0].isreg
= 1;
10527 inst
.operands
[0].writeback
= 1;
10528 inst
.operands
[0].present
= 1;
10532 do_vfp_nsyn_push (void)
10535 if (inst
.operands
[1].issingle
)
10536 do_vfp_nsyn_opcode ("fstmdbs");
10538 do_vfp_nsyn_opcode ("fstmdbd");
10542 do_vfp_nsyn_pop (void)
10545 if (inst
.operands
[1].issingle
)
10546 do_vfp_nsyn_opcode ("fldmias");
10548 do_vfp_nsyn_opcode ("fldmiad");
10551 /* Fix up Neon data-processing instructions, ORing in the correct bits for
10552 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
10555 neon_dp_fixup (unsigned i
)
10559 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
10573 /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
/* Turn a size in bits (8, 16, 32, 64) into the respective bit number
   minus 3 (0, 1, 2, 3): one-based position of the lowest set bit, less
   four.  Mirrors ffs(x) - 4 without relying on <strings.h>.  */
static unsigned
neon_logbits (unsigned x)
{
  unsigned pos;

  if (x == 0)
    return 0u - 4;	/* Match ffs(0) == 0.  */
  for (pos = 1; (x & 1) == 0; x >>= 1)
    pos++;
  return pos - 4;
}
10582 #define LOW4(R) ((R) & 0xf)
10583 #define HI1(R) (((R) >> 4) & 1)
10585 /* Encode insns with bit pattern:
10587 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
10588 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
10590 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
10591 different meaning for some instruction. */
10594 neon_three_same (int isquad
, int ubit
, int size
)
10596 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
10597 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
10598 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
10599 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
10600 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
10601 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
10602 inst
.instruction
|= (isquad
!= 0) << 6;
10603 inst
.instruction
|= (ubit
!= 0) << 24;
10605 inst
.instruction
|= neon_logbits (size
) << 20;
10607 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
10610 /* Encode instructions of the form:
10612 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
10613 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
10615 Don't write size if SIZE == -1. */
10618 neon_two_same (int qbit
, int ubit
, int size
)
10620 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
10621 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
10622 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
10623 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
10624 inst
.instruction
|= (qbit
!= 0) << 6;
10625 inst
.instruction
|= (ubit
!= 0) << 24;
10628 inst
.instruction
|= neon_logbits (size
) << 18;
10630 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
10633 /* Neon instruction encoders, in approximate order of appearance. */
10636 do_neon_dyadic_i_su (void)
10638 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
10639 struct neon_type_el et
= neon_check_type (3, rs
,
10640 N_EQK
, N_EQK
, N_SU_32
| N_KEY
);
10641 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
10645 do_neon_dyadic_i64_su (void)
10647 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
10648 struct neon_type_el et
= neon_check_type (3, rs
,
10649 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
10650 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
10654 neon_imm_shift (int write_ubit
, int uval
, int isquad
, struct neon_type_el et
,
10657 unsigned size
= et
.size
>> 3;
10658 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
10659 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
10660 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
10661 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
10662 inst
.instruction
|= (isquad
!= 0) << 6;
10663 inst
.instruction
|= immbits
<< 16;
10664 inst
.instruction
|= (size
>> 3) << 7;
10665 inst
.instruction
|= (size
& 0x7) << 19;
10667 inst
.instruction
|= (uval
!= 0) << 24;
10669 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
10673 do_neon_shl_imm (void)
10675 if (!inst
.operands
[2].isreg
)
10677 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
10678 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_KEY
| N_I_ALL
);
10679 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
10680 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, inst
.operands
[2].imm
);
10684 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
10685 struct neon_type_el et
= neon_check_type (3, rs
,
10686 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
10689 /* VSHL/VQSHL 3-register variants have syntax such as:
10691 whereas other 3-register operations encoded by neon_three_same have
10694 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
10696 tmp
= inst
.operands
[2].reg
;
10697 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
10698 inst
.operands
[1].reg
= tmp
;
10699 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
10700 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
10705 do_neon_qshl_imm (void)
10707 if (!inst
.operands
[2].isreg
)
10709 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
10710 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
10712 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
10713 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
,
10714 inst
.operands
[2].imm
);
10718 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
10719 struct neon_type_el et
= neon_check_type (3, rs
,
10720 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
10723 /* See note in do_neon_shl_imm. */
10724 tmp
= inst
.operands
[2].reg
;
10725 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
10726 inst
.operands
[1].reg
= tmp
;
10727 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
10728 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
10733 do_neon_rshl (void)
10735 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
10736 struct neon_type_el et
= neon_check_type (3, rs
,
10737 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
10740 tmp
= inst
.operands
[2].reg
;
10741 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
10742 inst
.operands
[1].reg
= tmp
;
10743 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
10747 neon_cmode_for_logic_imm (unsigned immediate
, unsigned *immbits
, int size
)
10749 /* Handle .I8 pseudo-instructions. */
10752 /* Unfortunately, this will make everything apart from zero out-of-range.
10753 FIXME is this the intended semantics? There doesn't seem much point in
10754 accepting .I8 if so. */
10755 immediate
|= immediate
<< 8;
10761 if (immediate
== (immediate
& 0x000000ff))
10763 *immbits
= immediate
;
10766 else if (immediate
== (immediate
& 0x0000ff00))
10768 *immbits
= immediate
>> 8;
10771 else if (immediate
== (immediate
& 0x00ff0000))
10773 *immbits
= immediate
>> 16;
10776 else if (immediate
== (immediate
& 0xff000000))
10778 *immbits
= immediate
>> 24;
10781 if ((immediate
& 0xffff) != (immediate
>> 16))
10782 goto bad_immediate
;
10783 immediate
&= 0xffff;
10786 if (immediate
== (immediate
& 0x000000ff))
10788 *immbits
= immediate
;
10791 else if (immediate
== (immediate
& 0x0000ff00))
10793 *immbits
= immediate
>> 8;
10798 first_error (_("immediate value out of range"));
10802 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
/* True if each byte of IMM is either 0x00 or 0xff, i.e. IMM has the form
   0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD with every byte all-zeros or
   all-ones.  */
static int
neon_bits_same_in_bytes (unsigned imm)
{
  int i;

  for (i = 0; i < 4; i++)
    {
      unsigned byte = (imm >> (i * 8)) & 0xff;
      if (byte != 0 && byte != 0xff)
	return 0;
    }
  return 1;
}
10814 /* For immediate of above form, return 0bABCD. */
/* For an immediate whose bytes are each all-zeros or all-ones (see
   neon_bits_same_in_bytes), squash the low bit of each byte into the
   four-bit value 0bABCD.  */
static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned squashed = 0;
  int i;

  for (i = 0; i < 4; i++)
    squashed |= ((imm >> (i * 8)) & 1) << i;
  return squashed;
}
10823 /* Compress quarter-float representation to 0b...000 abcdefgh. */
/* Compress an IEEE single-precision quarter-float pattern to the 8-bit
   0b(a)bcdefgh Neon immediate form: sign bit plus the top 7 mantissa/
   exponent bits.  */
static unsigned
neon_qfloat_bits (unsigned imm)
{
  unsigned low7 = (imm >> 19) & 0x7f;
  unsigned sign = (imm >> 24) & 0x80;

  return low7 | sign;
}
10831 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
10832 the instruction. *OP is passed as the initial value of the op field, and
10833 may be set to a different value depending on the constant (i.e.
10834 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
10835 MVN). If the immediate looks like a repeated pattern then also
10836 try smaller element sizes. */
10839 neon_cmode_for_move_imm (unsigned immlo
, unsigned immhi
, int float_p
,
10840 unsigned *immbits
, int *op
, int size
,
10841 enum neon_el_type type
)
10843 /* Only permit float immediates (including 0.0/-0.0) if the operand type is
10845 if (type
== NT_float
&& !float_p
)
10848 if (type
== NT_float
&& is_quarter_float (immlo
) && immhi
== 0)
10850 if (size
!= 32 || *op
== 1)
10852 *immbits
= neon_qfloat_bits (immlo
);
10858 if (neon_bits_same_in_bytes (immhi
)
10859 && neon_bits_same_in_bytes (immlo
))
10863 *immbits
= (neon_squash_bits (immhi
) << 4)
10864 | neon_squash_bits (immlo
);
10869 if (immhi
!= immlo
)
10875 if (immlo
== (immlo
& 0x000000ff))
10880 else if (immlo
== (immlo
& 0x0000ff00))
10882 *immbits
= immlo
>> 8;
10885 else if (immlo
== (immlo
& 0x00ff0000))
10887 *immbits
= immlo
>> 16;
10890 else if (immlo
== (immlo
& 0xff000000))
10892 *immbits
= immlo
>> 24;
10895 else if (immlo
== ((immlo
& 0x0000ff00) | 0x000000ff))
10897 *immbits
= (immlo
>> 8) & 0xff;
10900 else if (immlo
== ((immlo
& 0x00ff0000) | 0x0000ffff))
10902 *immbits
= (immlo
>> 16) & 0xff;
10906 if ((immlo
& 0xffff) != (immlo
>> 16))
10913 if (immlo
== (immlo
& 0x000000ff))
10918 else if (immlo
== (immlo
& 0x0000ff00))
10920 *immbits
= immlo
>> 8;
10924 if ((immlo
& 0xff) != (immlo
>> 8))
10929 if (immlo
== (immlo
& 0x000000ff))
10931 /* Don't allow MVN with 8-bit immediate. */
10941 /* Write immediate bits [7:0] to the following locations:
10943 |28/24|23 19|18 16|15 4|3 0|
10944 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
10946 This function is used by VMOV/VMVN/VORR/VBIC. */
10949 neon_write_immbits (unsigned immbits
)
10951 inst
.instruction
|= immbits
& 0xf;
10952 inst
.instruction
|= ((immbits
>> 4) & 0x7) << 16;
10953 inst
.instruction
|= ((immbits
>> 7) & 0x1) << 24;
/* Invert low-order SIZE bits of XHI:XLO.  */

static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  /* Either pointer may be NULL (callers pass 0 for the high word when only
     the low word matters); treat a missing word as zero and skip its
     write-back.  */
  unsigned immlo = xlo ? *xlo : 0;
  unsigned immhi = xhi ? *xhi : 0;

  switch (size)
    {
    case 8:
      immlo = (~immlo) & 0xff;
      break;

    case 16:
      immlo = (~immlo) & 0xffff;
      break;

    case 64:
      immhi = (~immhi) & 0xffffffff;
      /* fall through.  */

    case 32:
      immlo = (~immlo) & 0xffffffff;
      break;

    default:
      abort ();
    }

  if (xlo)
    *xlo = immlo;

  if (xhi)
    *xhi = immhi;
}
10994 do_neon_logic (void)
10996 if (inst
.operands
[2].present
&& inst
.operands
[2].isreg
)
10998 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
10999 neon_check_type (3, rs
, N_IGNORE_TYPE
);
11000 /* U bit and size field were set as part of the bitmask. */
11001 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
11002 neon_three_same (neon_quad (rs
), 0, -1);
11006 enum neon_shape rs
= neon_select_shape (NS_DI
, NS_QI
, NS_NULL
);
11007 struct neon_type_el et
= neon_check_type (2, rs
,
11008 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
11009 enum neon_opc opcode
= inst
.instruction
& 0x0fffffff;
11013 if (et
.type
== NT_invtype
)
11016 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
11018 immbits
= inst
.operands
[1].imm
;
11021 /* .i64 is a pseudo-op, so the immediate must be a repeating
11023 if (immbits
!= (inst
.operands
[1].regisimm
?
11024 inst
.operands
[1].reg
: 0))
11026 /* Set immbits to an invalid constant. */
11027 immbits
= 0xdeadbeef;
11034 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
11038 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
11042 /* Pseudo-instruction for VBIC. */
11043 neon_invert_size (&immbits
, 0, et
.size
);
11044 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
11048 /* Pseudo-instruction for VORR. */
11049 neon_invert_size (&immbits
, 0, et
.size
);
11050 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
11060 inst
.instruction
|= neon_quad (rs
) << 6;
11061 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11062 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
11063 inst
.instruction
|= cmode
<< 8;
11064 neon_write_immbits (immbits
);
11066 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
11071 do_neon_bitfield (void)
11073 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
11074 neon_check_type (3, rs
, N_IGNORE_TYPE
);
11075 neon_three_same (neon_quad (rs
), 0, -1);
11079 neon_dyadic_misc (enum neon_el_type ubit_meaning
, unsigned types
,
11082 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
11083 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
| destbits
, N_EQK
,
11085 if (et
.type
== NT_float
)
11087 inst
.instruction
= NEON_ENC_FLOAT (inst
.instruction
);
11088 neon_three_same (neon_quad (rs
), 0, -1);
11092 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
11093 neon_three_same (neon_quad (rs
), et
.type
== ubit_meaning
, et
.size
);
11098 do_neon_dyadic_if_su (void)
11100 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
11104 do_neon_dyadic_if_su_d (void)
11106 /* This version only allow D registers, but that constraint is enforced during
11107 operand parsing so we don't need to do anything extra here. */
11108 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
11112 do_neon_dyadic_if_i_d (void)
11114 /* The "untyped" case can't happen. Do this to stop the "U" bit being
11115 affected if we specify unsigned args. */
11116 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
11119 enum vfp_or_neon_is_neon_bits
11122 NEON_CHECK_ARCH
= 2
11125 /* Call this function if an instruction which may have belonged to the VFP or
11126 Neon instruction sets, but turned out to be a Neon instruction (due to the
11127 operand types involved, etc.). We have to check and/or fix-up a couple of
11130 - Make sure the user hasn't attempted to make a Neon instruction
11132 - Alter the value in the condition code field if necessary.
11133 - Make sure that the arch supports Neon instructions.
11135 Which of these operations take place depends on bits from enum
11136 vfp_or_neon_is_neon_bits.
11138 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
11139 current instruction's condition is COND_ALWAYS, the condition field is
11140 changed to inst.uncond_value. This is necessary because instructions shared
11141 between VFP and Neon may be conditional for the VFP variants only, and the
11142 unconditional Neon version must have, e.g., 0xF in the condition field. */
11145 vfp_or_neon_is_neon (unsigned check
)
11147 /* Conditions are always legal in Thumb mode (IT blocks). */
11148 if (!thumb_mode
&& (check
& NEON_CHECK_CC
))
11150 if (inst
.cond
!= COND_ALWAYS
)
11152 first_error (_(BAD_COND
));
11155 if (inst
.uncond_value
!= -1)
11156 inst
.instruction
|= inst
.uncond_value
<< 28;
11159 if ((check
& NEON_CHECK_ARCH
)
11160 && !ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
))
11162 first_error (_(BAD_FPU
));
11170 do_neon_addsub_if_i (void)
11172 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub
) == SUCCESS
)
11175 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
11178 /* The "untyped" case can't happen. Do this to stop the "U" bit being
11179 affected if we specify unsigned args. */
11180 neon_dyadic_misc (NT_untyped
, N_IF_32
| N_I64
, 0);
11183 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
11185 V<op> A,B (A is operand 0, B is operand 2)
11190 so handle that case specially. */
11193 neon_exchange_operands (void)
11195 void *scratch
= alloca (sizeof (inst
.operands
[0]));
11196 if (inst
.operands
[1].present
)
11198 /* Swap operands[1] and operands[2]. */
11199 memcpy (scratch
, &inst
.operands
[1], sizeof (inst
.operands
[0]));
11200 inst
.operands
[1] = inst
.operands
[2];
11201 memcpy (&inst
.operands
[2], scratch
, sizeof (inst
.operands
[0]));
11205 inst
.operands
[1] = inst
.operands
[2];
11206 inst
.operands
[2] = inst
.operands
[0];
11211 neon_compare (unsigned regtypes
, unsigned immtypes
, int invert
)
11213 if (inst
.operands
[2].isreg
)
11216 neon_exchange_operands ();
11217 neon_dyadic_misc (NT_unsigned
, regtypes
, N_SIZ
);
11221 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
11222 struct neon_type_el et
= neon_check_type (2, rs
,
11223 N_EQK
| N_SIZ
, immtypes
| N_KEY
);
11225 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
11226 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11227 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
11228 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
11229 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
11230 inst
.instruction
|= neon_quad (rs
) << 6;
11231 inst
.instruction
|= (et
.type
== NT_float
) << 10;
11232 inst
.instruction
|= neon_logbits (et
.size
) << 18;
11234 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
11241 neon_compare (N_SUF_32
, N_S8
| N_S16
| N_S32
| N_F32
, FALSE
);
11245 do_neon_cmp_inv (void)
11247 neon_compare (N_SUF_32
, N_S8
| N_S16
| N_S32
| N_F32
, TRUE
);
11253 neon_compare (N_IF_32
, N_IF_32
, FALSE
);
11256 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
11257 scalars, which are encoded in 5 bits, M : Rm.
11258 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
11259 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
11263 neon_scalar_for_mul (unsigned scalar
, unsigned elsize
)
11265 unsigned regno
= NEON_SCALAR_REG (scalar
);
11266 unsigned elno
= NEON_SCALAR_INDEX (scalar
);
11271 if (regno
> 7 || elno
> 3)
11273 return regno
| (elno
<< 3);
11276 if (regno
> 15 || elno
> 1)
11278 return regno
| (elno
<< 4);
11282 first_error (_("scalar out of range for multiply instruction"));
11288 /* Encode multiply / multiply-accumulate scalar instructions. */
11291 neon_mul_mac (struct neon_type_el et
, int ubit
)
11295 /* Give a more helpful error message if we have an invalid type. */
11296 if (et
.type
== NT_invtype
)
11299 scalar
= neon_scalar_for_mul (inst
.operands
[2].reg
, et
.size
);
11300 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11301 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
11302 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
11303 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
11304 inst
.instruction
|= LOW4 (scalar
);
11305 inst
.instruction
|= HI1 (scalar
) << 5;
11306 inst
.instruction
|= (et
.type
== NT_float
) << 8;
11307 inst
.instruction
|= neon_logbits (et
.size
) << 20;
11308 inst
.instruction
|= (ubit
!= 0) << 24;
11310 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
11314 do_neon_mac_maybe_scalar (void)
11316 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls
) == SUCCESS
)
11319 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
11322 if (inst
.operands
[2].isscalar
)
11324 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
11325 struct neon_type_el et
= neon_check_type (3, rs
,
11326 N_EQK
, N_EQK
, N_I16
| N_I32
| N_F32
| N_KEY
);
11327 inst
.instruction
= NEON_ENC_SCALAR (inst
.instruction
);
11328 neon_mul_mac (et
, neon_quad (rs
));
11332 /* The "untyped" case can't happen. Do this to stop the "U" bit being
11333 affected if we specify unsigned args. */
11334 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
11341 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
11342 struct neon_type_el et
= neon_check_type (3, rs
,
11343 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
11344 neon_three_same (neon_quad (rs
), 0, et
.size
);
11347 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
11348 same types as the MAC equivalents. The polynomial type for this instruction
11349 is encoded the same as the integer type. */
11354 if (try_vfp_nsyn (3, do_vfp_nsyn_mul
) == SUCCESS
)
11357 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
11360 if (inst
.operands
[2].isscalar
)
11361 do_neon_mac_maybe_scalar ();
11363 neon_dyadic_misc (NT_poly
, N_I8
| N_I16
| N_I32
| N_F32
| N_P8
, 0);
11367 do_neon_qdmulh (void)
11369 if (inst
.operands
[2].isscalar
)
11371 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
11372 struct neon_type_el et
= neon_check_type (3, rs
,
11373 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
11374 inst
.instruction
= NEON_ENC_SCALAR (inst
.instruction
);
11375 neon_mul_mac (et
, neon_quad (rs
));
11379 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
11380 struct neon_type_el et
= neon_check_type (3, rs
,
11381 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
11382 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
11383 /* The U bit (rounding) comes from bit mask. */
11384 neon_three_same (neon_quad (rs
), 0, et
.size
);
11389 do_neon_fcmp_absolute (void)
11391 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
11392 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_F32
| N_KEY
);
11393 /* Size field comes from bit mask. */
11394 neon_three_same (neon_quad (rs
), 1, -1);
11398 do_neon_fcmp_absolute_inv (void)
11400 neon_exchange_operands ();
11401 do_neon_fcmp_absolute ();
11405 do_neon_step (void)
11407 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
11408 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_F32
| N_KEY
);
11409 neon_three_same (neon_quad (rs
), 0, -1);
11413 do_neon_abs_neg (void)
11415 enum neon_shape rs
;
11416 struct neon_type_el et
;
11418 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg
) == SUCCESS
)
11421 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
11424 rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
11425 et
= neon_check_type (2, rs
, N_EQK
, N_S8
| N_S16
| N_S32
| N_F32
| N_KEY
);
11427 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11428 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
11429 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
11430 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
11431 inst
.instruction
|= neon_quad (rs
) << 6;
11432 inst
.instruction
|= (et
.type
== NT_float
) << 10;
11433 inst
.instruction
|= neon_logbits (et
.size
) << 18;
11435 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
11441 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
11442 struct neon_type_el et
= neon_check_type (2, rs
,
11443 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
11444 int imm
= inst
.operands
[2].imm
;
11445 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
11446 _("immediate out of range for insert"));
11447 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
11453 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
11454 struct neon_type_el et
= neon_check_type (2, rs
,
11455 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
11456 int imm
= inst
.operands
[2].imm
;
11457 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
11458 _("immediate out of range for insert"));
11459 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, et
.size
- imm
);
11463 do_neon_qshlu_imm (void)
11465 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
11466 struct neon_type_el et
= neon_check_type (2, rs
,
11467 N_EQK
| N_UNS
, N_S8
| N_S16
| N_S32
| N_S64
| N_KEY
);
11468 int imm
= inst
.operands
[2].imm
;
11469 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
11470 _("immediate out of range for shift"));
11471 /* Only encodes the 'U present' variant of the instruction.
11472 In this case, signed types have OP (bit 8) set to 0.
11473 Unsigned types have OP set to 1. */
11474 inst
.instruction
|= (et
.type
== NT_unsigned
) << 8;
11475 /* The rest of the bits are the same as other immediate shifts. */
11476 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
11480 do_neon_qmovn (void)
11482 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
11483 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
11484 /* Saturating move where operands can be signed or unsigned, and the
11485 destination has the same signedness. */
11486 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
11487 if (et
.type
== NT_unsigned
)
11488 inst
.instruction
|= 0xc0;
11490 inst
.instruction
|= 0x80;
11491 neon_two_same (0, 1, et
.size
/ 2);
11495 do_neon_qmovun (void)
11497 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
11498 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
11499 /* Saturating move with unsigned results. Operands must be signed. */
11500 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
11501 neon_two_same (0, 1, et
.size
/ 2);
11505 do_neon_rshift_sat_narrow (void)
11507 /* FIXME: Types for narrowing. If operands are signed, results can be signed
11508 or unsigned. If operands are unsigned, results must also be unsigned. */
11509 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
11510 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
11511 int imm
= inst
.operands
[2].imm
;
11512 /* This gets the bounds check, size encoding and immediate bits calculation
11516 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
11517 VQMOVN.I<size> <Dd>, <Qm>. */
11520 inst
.operands
[2].present
= 0;
11521 inst
.instruction
= N_MNEM_vqmovn
;
11526 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
11527 _("immediate out of range"));
11528 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, et
.size
- imm
);
11532 do_neon_rshift_sat_narrow_u (void)
11534 /* FIXME: Types for narrowing. If operands are signed, results can be signed
11535 or unsigned. If operands are unsigned, results must also be unsigned. */
11536 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
11537 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
11538 int imm
= inst
.operands
[2].imm
;
11539 /* This gets the bounds check, size encoding and immediate bits calculation
11543 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
11544 VQMOVUN.I<size> <Dd>, <Qm>. */
11547 inst
.operands
[2].present
= 0;
11548 inst
.instruction
= N_MNEM_vqmovun
;
11553 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
11554 _("immediate out of range"));
11555 /* FIXME: The manual is kind of unclear about what value U should have in
11556 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
11558 neon_imm_shift (TRUE
, 1, 0, et
, et
.size
- imm
);
11562 do_neon_movn (void)
11564 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
11565 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
11566 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
11567 neon_two_same (0, 1, et
.size
/ 2);
11571 do_neon_rshift_narrow (void)
11573 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
11574 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
11575 int imm
= inst
.operands
[2].imm
;
11576 /* This gets the bounds check, size encoding and immediate bits calculation
11580 /* If immediate is zero then we are a pseudo-instruction for
11581 VMOVN.I<size> <Dd>, <Qm> */
11584 inst
.operands
[2].present
= 0;
11585 inst
.instruction
= N_MNEM_vmovn
;
11590 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
11591 _("immediate out of range for narrowing operation"));
11592 neon_imm_shift (FALSE
, 0, 0, et
, et
.size
- imm
);
11596 do_neon_shll (void)
11598 /* FIXME: Type checking when lengthening. */
11599 struct neon_type_el et
= neon_check_type (2, NS_QDI
,
11600 N_EQK
| N_DBL
, N_I8
| N_I16
| N_I32
| N_KEY
);
11601 unsigned imm
= inst
.operands
[2].imm
;
11603 if (imm
== et
.size
)
11605 /* Maximum shift variant. */
11606 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
11607 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11608 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
11609 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
11610 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
11611 inst
.instruction
|= neon_logbits (et
.size
) << 18;
11613 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
11617 /* A more-specific type check for non-max versions. */
11618 et
= neon_check_type (2, NS_QDI
,
11619 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
11620 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
11621 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, imm
);
11625 /* Check the various types for the VCVT instruction, and return which version
11626 the current instruction is. */
11629 neon_cvt_flavour (enum neon_shape rs
)
11631 #define CVT_VAR(C,X,Y) \
11632 et = neon_check_type (2, rs, whole_reg | (X), whole_reg | (Y)); \
11633 if (et.type != NT_invtype) \
11635 inst.error = NULL; \
11638 struct neon_type_el et
;
11639 unsigned whole_reg
= (rs
== NS_FFI
|| rs
== NS_FD
|| rs
== NS_DF
11640 || rs
== NS_FF
) ? N_VFP
: 0;
11641 /* The instruction versions which take an immediate take one register
11642 argument, which is extended to the width of the full register. Thus the
11643 "source" and "destination" registers must have the same width. Hack that
11644 here by making the size equal to the key (wider, in this case) operand. */
11645 unsigned key
= (rs
== NS_QQI
|| rs
== NS_DDI
|| rs
== NS_FFI
) ? N_KEY
: 0;
11647 CVT_VAR (0, N_S32
, N_F32
);
11648 CVT_VAR (1, N_U32
, N_F32
);
11649 CVT_VAR (2, N_F32
, N_S32
);
11650 CVT_VAR (3, N_F32
, N_U32
);
11654 /* VFP instructions. */
11655 CVT_VAR (4, N_F32
, N_F64
);
11656 CVT_VAR (5, N_F64
, N_F32
);
11657 CVT_VAR (6, N_S32
, N_F64
| key
);
11658 CVT_VAR (7, N_U32
, N_F64
| key
);
11659 CVT_VAR (8, N_F64
| key
, N_S32
);
11660 CVT_VAR (9, N_F64
| key
, N_U32
);
11661 /* VFP instructions with bitshift. */
11662 CVT_VAR (10, N_F32
| key
, N_S16
);
11663 CVT_VAR (11, N_F32
| key
, N_U16
);
11664 CVT_VAR (12, N_F64
| key
, N_S16
);
11665 CVT_VAR (13, N_F64
| key
, N_U16
);
11666 CVT_VAR (14, N_S16
, N_F32
| key
);
11667 CVT_VAR (15, N_U16
, N_F32
| key
);
11668 CVT_VAR (16, N_S16
, N_F64
| key
);
11669 CVT_VAR (17, N_U16
, N_F64
| key
);
11672 CVT_VAR (18, N_F32
, N_F16
);
11673 CVT_VAR (19, N_F16
, N_F32
);
11679 /* Neon-syntax VFP conversions. */
11682 do_vfp_nsyn_cvt (enum neon_shape rs
, int flavour
)
11684 const char *opname
= 0;
11686 if (rs
== NS_DDI
|| rs
== NS_QQI
|| rs
== NS_FFI
)
11688 /* Conversions with immediate bitshift. */
11689 const char *enc
[] =
11711 if (flavour
>= 0 && flavour
< (int) ARRAY_SIZE (enc
))
11713 opname
= enc
[flavour
];
11714 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
11715 _("operands 0 and 1 must be the same register"));
11716 inst
.operands
[1] = inst
.operands
[2];
11717 memset (&inst
.operands
[2], '\0', sizeof (inst
.operands
[2]));
11720 else if (rs
== NS_QD
|| rs
== NS_DQ
)
11722 /* Conversions between half-precision and single-precision. */
11725 opname
= "fcvtshp";
11727 else if (flavour
== 19)
11729 opname
= "fcvthps";
11732 else if (rs
== NS_FF
&& (flavour
== 18 || flavour
== 19))
11735 * Conversions between half-precision (in top or bottom half of register)
11736 * and single-precision. The routines do_neon_cvtt() and do_neon_cvtb()
11737 * set or cleared the T bit (0x80) in the inst.instruction to pass that
11738 * info to say this is for the top half of the register (T bit set) or the
11739 * bottom half of the register (T bit cleared) information here to know
11740 * which opname to use. This is done this way because the call to
11741 * do_vfp_nsyn_opcode() will set inst.instruction and lose this info.
11745 if((inst
.instruction
& 0x80) == 0x80)
11746 opname
= "fcvtthps";
11748 opname
= "fcvtbhps";
11750 else if(flavour
== 19)
11752 if((inst
.instruction
& 0x80) == 0x80)
11753 opname
= "fcvttshp";
11755 opname
= "fcvtbshp";
11760 /* Conversions without bitshift. */
11761 const char *enc
[] =
11775 if (flavour
>= 0 && flavour
< (int) ARRAY_SIZE (enc
))
11776 opname
= enc
[flavour
];
11780 do_vfp_nsyn_opcode (opname
);
11784 do_vfp_nsyn_cvtr (void)
11786 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_FD
, NS_NULL
);
11787 int flavour
= neon_cvt_flavour (rs
);
11788 const char *enc
[] =
11800 if (flavour
>= 0 && flavour
< (int) ARRAY_SIZE (enc
) && enc
[flavour
])
11801 do_vfp_nsyn_opcode (enc
[flavour
]);
11807 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_FFI
, NS_DD
, NS_QQ
,
11808 NS_FD
, NS_DF
, NS_FF
, NS_QD
, NS_DQ
, NS_NULL
);
11809 int flavour
= neon_cvt_flavour (rs
);
11811 /* VFP rather than Neon conversions. */
11814 do_vfp_nsyn_cvt (rs
, flavour
);
11823 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
11826 /* Fixed-point conversion with #0 immediate is encoded as an
11827 integer conversion. */
11828 if (inst
.operands
[2].present
&& inst
.operands
[2].imm
== 0)
11830 unsigned immbits
= 32 - inst
.operands
[2].imm
;
11831 unsigned enctab
[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
11832 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
11834 inst
.instruction
|= enctab
[flavour
];
11835 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11836 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
11837 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
11838 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
11839 inst
.instruction
|= neon_quad (rs
) << 6;
11840 inst
.instruction
|= 1 << 21;
11841 inst
.instruction
|= immbits
<< 16;
11843 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
11851 unsigned enctab
[] = { 0x100, 0x180, 0x0, 0x080 };
11853 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
11855 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
11859 inst
.instruction
|= enctab
[flavour
];
11861 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11862 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
11863 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
11864 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
11865 inst
.instruction
|= neon_quad (rs
) << 6;
11866 inst
.instruction
|= 2 << 18;
11868 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
11873 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
11874 do_vfp_nsyn_cvt (rs
, flavour
);
11879 do_neon_cvtt (void)
11881 /* set the T bit to say this is for the top half of the register for the
11882 routine do_vfp_nsyn_cvt() to use to pick the opname */
11883 inst
.instruction
|= 0x80;
11888 do_neon_cvtb (void)
11890 /* clear the T bit to say this is for the bottom half of the register for the
11891 routine do_vfp_nsyn_cvt() to use to pick the opname */
11892 inst
.instruction
= inst
.instruction
& ~(0x80);
11897 neon_move_immediate (void)
11899 enum neon_shape rs
= neon_select_shape (NS_DI
, NS_QI
, NS_NULL
);
11900 struct neon_type_el et
= neon_check_type (2, rs
,
11901 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
11902 unsigned immlo
, immhi
= 0, immbits
;
11903 int op
, cmode
, float_p
;
11905 constraint (et
.type
== NT_invtype
,
11906 _("operand size must be specified for immediate VMOV"));
11908 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
11909 op
= (inst
.instruction
& (1 << 5)) != 0;
11911 immlo
= inst
.operands
[1].imm
;
11912 if (inst
.operands
[1].regisimm
)
11913 immhi
= inst
.operands
[1].reg
;
11915 constraint (et
.size
< 32 && (immlo
& ~((1 << et
.size
) - 1)) != 0,
11916 _("immediate has bits set outside the operand size"));
11918 float_p
= inst
.operands
[1].immisfloat
;
11920 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
, &op
,
11921 et
.size
, et
.type
)) == FAIL
)
11923 /* Invert relevant bits only. */
11924 neon_invert_size (&immlo
, &immhi
, et
.size
);
11925 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
11926 with one or the other; those cases are caught by
11927 neon_cmode_for_move_imm. */
11929 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
,
11930 &op
, et
.size
, et
.type
)) == FAIL
)
11932 first_error (_("immediate out of range"));
11937 inst
.instruction
&= ~(1 << 5);
11938 inst
.instruction
|= op
<< 5;
11940 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11941 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
11942 inst
.instruction
|= neon_quad (rs
) << 6;
11943 inst
.instruction
|= cmode
<< 8;
11945 neon_write_immbits (immbits
);
11951 if (inst
.operands
[1].isreg
)
11953 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
11955 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
11956 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11957 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
11958 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
11959 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
11960 inst
.instruction
|= neon_quad (rs
) << 6;
11964 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
11965 neon_move_immediate ();
11968 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
11971 /* Encode instructions of form:
11973 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
11974 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm |
11979 neon_mixed_length (struct neon_type_el et
, unsigned size
)
11981 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11982 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
11983 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
11984 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
11985 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
11986 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
11987 inst
.instruction
|= (et
.type
== NT_unsigned
) << 24;
11988 inst
.instruction
|= neon_logbits (size
) << 20;
11990 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
11994 do_neon_dyadic_long (void)
11996 /* FIXME: Type checking for lengthening op. */
11997 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
11998 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
11999 neon_mixed_length (et
, et
.size
);
12003 do_neon_abal (void)
12005 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
12006 N_EQK
| N_INT
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
12007 neon_mixed_length (et
, et
.size
);
12011 neon_mac_reg_scalar_long (unsigned regtypes
, unsigned scalartypes
)
12013 if (inst
.operands
[2].isscalar
)
12015 struct neon_type_el et
= neon_check_type (3, NS_QDS
,
12016 N_EQK
| N_DBL
, N_EQK
, regtypes
| N_KEY
);
12017 inst
.instruction
= NEON_ENC_SCALAR (inst
.instruction
);
12018 neon_mul_mac (et
, et
.type
== NT_unsigned
);
12022 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
12023 N_EQK
| N_DBL
, N_EQK
, scalartypes
| N_KEY
);
12024 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
12025 neon_mixed_length (et
, et
.size
);
12030 do_neon_mac_maybe_scalar_long (void)
12032 neon_mac_reg_scalar_long (N_S16
| N_S32
| N_U16
| N_U32
, N_SU_32
);
12036 do_neon_dyadic_wide (void)
12038 struct neon_type_el et
= neon_check_type (3, NS_QQD
,
12039 N_EQK
| N_DBL
, N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
12040 neon_mixed_length (et
, et
.size
);
12044 do_neon_dyadic_narrow (void)
12046 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
12047 N_EQK
| N_DBL
, N_EQK
, N_I16
| N_I32
| N_I64
| N_KEY
);
12048 /* Operand sign is unimportant, and the U bit is part of the opcode,
12049 so force the operand type to integer. */
12050 et
.type
= NT_integer
;
12051 neon_mixed_length (et
, et
.size
/ 2);
12055 do_neon_mul_sat_scalar_long (void)
12057 neon_mac_reg_scalar_long (N_S16
| N_S32
, N_S16
| N_S32
);
12061 do_neon_vmull (void)
12063 if (inst
.operands
[2].isscalar
)
12064 do_neon_mac_maybe_scalar_long ();
12067 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
12068 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_P8
| N_KEY
);
12069 if (et
.type
== NT_poly
)
12070 inst
.instruction
= NEON_ENC_POLY (inst
.instruction
);
12072 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
12073 /* For polynomial encoding, size field must be 0b00 and the U bit must be
12074 zero. Should be OK as-is. */
12075 neon_mixed_length (et
, et
.size
);
12082 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
12083 struct neon_type_el et
= neon_check_type (3, rs
,
12084 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
12085 unsigned imm
= (inst
.operands
[3].imm
* et
.size
) / 8;
12086 constraint (imm
>= (neon_quad (rs
) ? 16 : 8), _("shift out of range"));
12087 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12088 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12089 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
12090 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
12091 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
12092 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
12093 inst
.instruction
|= neon_quad (rs
) << 6;
12094 inst
.instruction
|= imm
<< 8;
12096 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
12102 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
12103 struct neon_type_el et
= neon_check_type (2, rs
,
12104 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
12105 unsigned op
= (inst
.instruction
>> 7) & 3;
12106 /* N (width of reversed regions) is encoded as part of the bitmask. We
12107 extract it here to check the elements to be reversed are smaller.
12108 Otherwise we'd get a reserved instruction. */
12109 unsigned elsize
= (op
== 2) ? 16 : (op
== 1) ? 32 : (op
== 0) ? 64 : 0;
12110 assert (elsize
!= 0);
12111 constraint (et
.size
>= elsize
,
12112 _("elements must be smaller than reversal region"));
12113 neon_two_same (neon_quad (rs
), 1, et
.size
);
12119 if (inst
.operands
[1].isscalar
)
12121 enum neon_shape rs
= neon_select_shape (NS_DS
, NS_QS
, NS_NULL
);
12122 struct neon_type_el et
= neon_check_type (2, rs
,
12123 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
12124 unsigned sizebits
= et
.size
>> 3;
12125 unsigned dm
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
12126 int logsize
= neon_logbits (et
.size
);
12127 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
) << logsize
;
12129 if (vfp_or_neon_is_neon (NEON_CHECK_CC
) == FAIL
)
12132 inst
.instruction
= NEON_ENC_SCALAR (inst
.instruction
);
12133 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12134 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12135 inst
.instruction
|= LOW4 (dm
);
12136 inst
.instruction
|= HI1 (dm
) << 5;
12137 inst
.instruction
|= neon_quad (rs
) << 6;
12138 inst
.instruction
|= x
<< 17;
12139 inst
.instruction
|= sizebits
<< 16;
12141 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
12145 enum neon_shape rs
= neon_select_shape (NS_DR
, NS_QR
, NS_NULL
);
12146 struct neon_type_el et
= neon_check_type (2, rs
,
12147 N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
12148 /* Duplicate ARM register to lanes of vector. */
12149 inst
.instruction
= NEON_ENC_ARMREG (inst
.instruction
);
12152 case 8: inst
.instruction
|= 0x400000; break;
12153 case 16: inst
.instruction
|= 0x000020; break;
12154 case 32: inst
.instruction
|= 0x000000; break;
12157 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
12158 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 16;
12159 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 7;
12160 inst
.instruction
|= neon_quad (rs
) << 21;
12161 /* The encoding for this instruction is identical for the ARM and Thumb
12162 variants, except for the condition field. */
12163 do_vfp_cond_or_thumb ();
12167 /* VMOV has particularly many variations. It can be one of:
12168 0. VMOV<c><q> <Qd>, <Qm>
12169 1. VMOV<c><q> <Dd>, <Dm>
12170 (Register operations, which are VORR with Rm = Rn.)
12171 2. VMOV<c><q>.<dt> <Qd>, #<imm>
12172 3. VMOV<c><q>.<dt> <Dd>, #<imm>
12174 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
12175 (ARM register to scalar.)
12176 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
12177 (Two ARM registers to vector.)
12178 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
12179 (Scalar to ARM register.)
12180 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
12181 (Vector to two ARM registers.)
12182 8. VMOV.F32 <Sd>, <Sm>
12183 9. VMOV.F64 <Dd>, <Dm>
12184 (VFP register moves.)
12185 10. VMOV.F32 <Sd>, #imm
12186 11. VMOV.F64 <Dd>, #imm
12187 (VFP float immediate load.)
12188 12. VMOV <Rd>, <Sm>
12189 (VFP single to ARM reg.)
12190 13. VMOV <Sd>, <Rm>
12191 (ARM reg to VFP single.)
12192 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
12193 (Two ARM regs to two VFP singles.)
12194 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
12195 (Two VFP singles to two ARM regs.)
12197 These cases can be disambiguated using neon_select_shape, except cases 1/9
12198 and 3/11 which depend on the operand type too.
12200 All the encoded bits are hardcoded by this function.
12202 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
12203 Cases 5, 7 may be used with VFPv2 and above.
12205 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
12206 can specify a type where it doesn't make sense to, and is ignored).
12212 enum neon_shape rs
= neon_select_shape (NS_RRFF
, NS_FFRR
, NS_DRR
, NS_RRD
,
12213 NS_QQ
, NS_DD
, NS_QI
, NS_DI
, NS_SR
, NS_RS
, NS_FF
, NS_FI
, NS_RF
, NS_FR
,
12215 struct neon_type_el et
;
12216 const char *ldconst
= 0;
12220 case NS_DD
: /* case 1/9. */
12221 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
12222 /* It is not an error here if no type is given. */
12224 if (et
.type
== NT_float
&& et
.size
== 64)
12226 do_vfp_nsyn_opcode ("fcpyd");
12229 /* fall through. */
12231 case NS_QQ
: /* case 0/1. */
12233 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
12235 /* The architecture manual I have doesn't explicitly state which
12236 value the U bit should have for register->register moves, but
12237 the equivalent VORR instruction has U = 0, so do that. */
12238 inst
.instruction
= 0x0200110;
12239 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12240 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12241 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
12242 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
12243 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
12244 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
12245 inst
.instruction
|= neon_quad (rs
) << 6;
12247 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
12251 case NS_DI
: /* case 3/11. */
12252 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
12254 if (et
.type
== NT_float
&& et
.size
== 64)
12256 /* case 11 (fconstd). */
12257 ldconst
= "fconstd";
12258 goto encode_fconstd
;
12260 /* fall through. */
12262 case NS_QI
: /* case 2/3. */
12263 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
12265 inst
.instruction
= 0x0800010;
12266 neon_move_immediate ();
12267 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
12270 case NS_SR
: /* case 4. */
12272 unsigned bcdebits
= 0;
12273 struct neon_type_el et
= neon_check_type (2, NS_NULL
,
12274 N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
12275 int logsize
= neon_logbits (et
.size
);
12276 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[0].reg
);
12277 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[0].reg
);
12279 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
12281 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
12282 && et
.size
!= 32, _(BAD_FPU
));
12283 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
12284 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
12288 case 8: bcdebits
= 0x8; break;
12289 case 16: bcdebits
= 0x1; break;
12290 case 32: bcdebits
= 0x0; break;
12294 bcdebits
|= x
<< logsize
;
12296 inst
.instruction
= 0xe000b10;
12297 do_vfp_cond_or_thumb ();
12298 inst
.instruction
|= LOW4 (dn
) << 16;
12299 inst
.instruction
|= HI1 (dn
) << 7;
12300 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
12301 inst
.instruction
|= (bcdebits
& 3) << 5;
12302 inst
.instruction
|= (bcdebits
>> 2) << 21;
12306 case NS_DRR
: /* case 5 (fmdrr). */
12307 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
12310 inst
.instruction
= 0xc400b10;
12311 do_vfp_cond_or_thumb ();
12312 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
);
12313 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 5;
12314 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
12315 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
12318 case NS_RS
: /* case 6. */
12320 struct neon_type_el et
= neon_check_type (2, NS_NULL
,
12321 N_EQK
, N_S8
| N_S16
| N_U8
| N_U16
| N_32
| N_KEY
);
12322 unsigned logsize
= neon_logbits (et
.size
);
12323 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
12324 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
);
12325 unsigned abcdebits
= 0;
12327 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
12329 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
12330 && et
.size
!= 32, _(BAD_FPU
));
12331 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
12332 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
12336 case 8: abcdebits
= (et
.type
== NT_signed
) ? 0x08 : 0x18; break;
12337 case 16: abcdebits
= (et
.type
== NT_signed
) ? 0x01 : 0x11; break;
12338 case 32: abcdebits
= 0x00; break;
12342 abcdebits
|= x
<< logsize
;
12343 inst
.instruction
= 0xe100b10;
12344 do_vfp_cond_or_thumb ();
12345 inst
.instruction
|= LOW4 (dn
) << 16;
12346 inst
.instruction
|= HI1 (dn
) << 7;
12347 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
12348 inst
.instruction
|= (abcdebits
& 3) << 5;
12349 inst
.instruction
|= (abcdebits
>> 2) << 21;
12353 case NS_RRD
: /* case 7 (fmrrd). */
12354 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
12357 inst
.instruction
= 0xc500b10;
12358 do_vfp_cond_or_thumb ();
12359 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
12360 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
12361 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
12362 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
12365 case NS_FF
: /* case 8 (fcpys). */
12366 do_vfp_nsyn_opcode ("fcpys");
12369 case NS_FI
: /* case 10 (fconsts). */
12370 ldconst
= "fconsts";
12372 if (is_quarter_float (inst
.operands
[1].imm
))
12374 inst
.operands
[1].imm
= neon_qfloat_bits (inst
.operands
[1].imm
);
12375 do_vfp_nsyn_opcode (ldconst
);
12378 first_error (_("immediate out of range"));
12381 case NS_RF
: /* case 12 (fmrs). */
12382 do_vfp_nsyn_opcode ("fmrs");
12385 case NS_FR
: /* case 13 (fmsr). */
12386 do_vfp_nsyn_opcode ("fmsr");
12389 /* The encoders for the fmrrs and fmsrr instructions expect three operands
12390 (one of which is a list), but we have parsed four. Do some fiddling to
12391 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
12393 case NS_RRFF
: /* case 14 (fmrrs). */
12394 constraint (inst
.operands
[3].reg
!= inst
.operands
[2].reg
+ 1,
12395 _("VFP registers must be adjacent"));
12396 inst
.operands
[2].imm
= 2;
12397 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
12398 do_vfp_nsyn_opcode ("fmrrs");
12401 case NS_FFRR
: /* case 15 (fmsrr). */
12402 constraint (inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
12403 _("VFP registers must be adjacent"));
12404 inst
.operands
[1] = inst
.operands
[2];
12405 inst
.operands
[2] = inst
.operands
[3];
12406 inst
.operands
[0].imm
= 2;
12407 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
12408 do_vfp_nsyn_opcode ("fmsrr");
12417 do_neon_rshift_round_imm (void)
12419 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
12420 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
12421 int imm
= inst
.operands
[2].imm
;
12423 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
12426 inst
.operands
[2].present
= 0;
12431 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
12432 _("immediate out of range for shift"));
12433 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
,
12438 do_neon_movl (void)
12440 struct neon_type_el et
= neon_check_type (2, NS_QD
,
12441 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
12442 unsigned sizebits
= et
.size
>> 3;
12443 inst
.instruction
|= sizebits
<< 19;
12444 neon_two_same (0, et
.type
== NT_unsigned
, -1);
12450 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
12451 struct neon_type_el et
= neon_check_type (2, rs
,
12452 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
12453 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
12454 neon_two_same (neon_quad (rs
), 1, et
.size
);
12458 do_neon_zip_uzp (void)
12460 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
12461 struct neon_type_el et
= neon_check_type (2, rs
,
12462 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
12463 if (rs
== NS_DD
&& et
.size
== 32)
12465 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
12466 inst
.instruction
= N_MNEM_vtrn
;
12470 neon_two_same (neon_quad (rs
), 1, et
.size
);
12474 do_neon_sat_abs_neg (void)
12476 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
12477 struct neon_type_el et
= neon_check_type (2, rs
,
12478 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
12479 neon_two_same (neon_quad (rs
), 1, et
.size
);
12483 do_neon_pair_long (void)
12485 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
12486 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_32
| N_KEY
);
12487 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
12488 inst
.instruction
|= (et
.type
== NT_unsigned
) << 7;
12489 neon_two_same (neon_quad (rs
), 1, et
.size
);
12493 do_neon_recip_est (void)
12495 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
12496 struct neon_type_el et
= neon_check_type (2, rs
,
12497 N_EQK
| N_FLT
, N_F32
| N_U32
| N_KEY
);
12498 inst
.instruction
|= (et
.type
== NT_float
) << 8;
12499 neon_two_same (neon_quad (rs
), 1, et
.size
);
12505 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
12506 struct neon_type_el et
= neon_check_type (2, rs
,
12507 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
12508 neon_two_same (neon_quad (rs
), 1, et
.size
);
12514 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
12515 struct neon_type_el et
= neon_check_type (2, rs
,
12516 N_EQK
, N_I8
| N_I16
| N_I32
| N_KEY
);
12517 neon_two_same (neon_quad (rs
), 1, et
.size
);
12523 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
12524 struct neon_type_el et
= neon_check_type (2, rs
,
12525 N_EQK
| N_INT
, N_8
| N_KEY
);
12526 neon_two_same (neon_quad (rs
), 1, et
.size
);
12532 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
12533 neon_two_same (neon_quad (rs
), 1, -1);
12537 do_neon_tbl_tbx (void)
12539 unsigned listlenbits
;
12540 neon_check_type (3, NS_DLD
, N_EQK
, N_EQK
, N_8
| N_KEY
);
12542 if (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 4)
12544 first_error (_("bad list length for table lookup"));
12548 listlenbits
= inst
.operands
[1].imm
- 1;
12549 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12550 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12551 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
12552 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
12553 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
12554 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
12555 inst
.instruction
|= listlenbits
<< 8;
12557 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
12561 do_neon_ldm_stm (void)
12563 /* P, U and L bits are part of bitmask. */
12564 int is_dbmode
= (inst
.instruction
& (1 << 24)) != 0;
12565 unsigned offsetbits
= inst
.operands
[1].imm
* 2;
12567 if (inst
.operands
[1].issingle
)
12569 do_vfp_nsyn_ldm_stm (is_dbmode
);
12573 constraint (is_dbmode
&& !inst
.operands
[0].writeback
,
12574 _("writeback (!) must be used for VLDMDB and VSTMDB"));
12576 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
12577 _("register list must contain at least 1 and at most 16 "
12580 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
12581 inst
.instruction
|= inst
.operands
[0].writeback
<< 21;
12582 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
12583 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 22;
12585 inst
.instruction
|= offsetbits
;
12587 do_vfp_cond_or_thumb ();
12591 do_neon_ldr_str (void)
12593 int is_ldr
= (inst
.instruction
& (1 << 20)) != 0;
12595 if (inst
.operands
[0].issingle
)
12598 do_vfp_nsyn_opcode ("flds");
12600 do_vfp_nsyn_opcode ("fsts");
12605 do_vfp_nsyn_opcode ("fldd");
12607 do_vfp_nsyn_opcode ("fstd");
12611 /* "interleave" version also handles non-interleaving register VLD1/VST1
12615 do_neon_ld_st_interleave (void)
12617 struct neon_type_el et
= neon_check_type (1, NS_NULL
,
12618 N_8
| N_16
| N_32
| N_64
);
12619 unsigned alignbits
= 0;
12621 /* The bits in this table go:
12622 0: register stride of one (0) or two (1)
12623 1,2: register list length, minus one (1, 2, 3, 4).
12624 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
12625 We use -1 for invalid entries. */
12626 const int typetable
[] =
12628 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
12629 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
12630 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
12631 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
12635 if (et
.type
== NT_invtype
)
12638 if (inst
.operands
[1].immisalign
)
12639 switch (inst
.operands
[1].imm
>> 8)
12641 case 64: alignbits
= 1; break;
12643 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) == 3)
12644 goto bad_alignment
;
12648 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) == 3)
12649 goto bad_alignment
;
12654 first_error (_("bad alignment"));
12658 inst
.instruction
|= alignbits
<< 4;
12659 inst
.instruction
|= neon_logbits (et
.size
) << 6;
12661 /* Bits [4:6] of the immediate in a list specifier encode register stride
12662 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
12663 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
12664 up the right value for "type" in a table based on this value and the given
12665 list style, then stick it back. */
12666 idx
= ((inst
.operands
[0].imm
>> 4) & 7)
12667 | (((inst
.instruction
>> 8) & 3) << 3);
12669 typebits
= typetable
[idx
];
12671 constraint (typebits
== -1, _("bad list type for instruction"));
12673 inst
.instruction
&= ~0xf00;
12674 inst
.instruction
|= typebits
<< 8;
12677 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
12678 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
12679 otherwise. The variable arguments are a list of pairs of legal (size, align)
12680 values, terminated with -1. */
12683 neon_alignment_bit (int size
, int align
, int *do_align
, ...)
12686 int result
= FAIL
, thissize
, thisalign
;
12688 if (!inst
.operands
[1].immisalign
)
12694 va_start (ap
, do_align
);
12698 thissize
= va_arg (ap
, int);
12699 if (thissize
== -1)
12701 thisalign
= va_arg (ap
, int);
12703 if (size
== thissize
&& align
== thisalign
)
12706 while (result
!= SUCCESS
);
12710 if (result
== SUCCESS
)
12713 first_error (_("unsupported alignment for instruction"));
12719 do_neon_ld_st_lane (void)
12721 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
12722 int align_good
, do_align
= 0;
12723 int logsize
= neon_logbits (et
.size
);
12724 int align
= inst
.operands
[1].imm
>> 8;
12725 int n
= (inst
.instruction
>> 8) & 3;
12726 int max_el
= 64 / et
.size
;
12728 if (et
.type
== NT_invtype
)
12731 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != n
+ 1,
12732 _("bad list length"));
12733 constraint (NEON_LANE (inst
.operands
[0].imm
) >= max_el
,
12734 _("scalar index out of range"));
12735 constraint (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2
12737 _("stride of 2 unavailable when element size is 8"));
12741 case 0: /* VLD1 / VST1. */
12742 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 16, 16,
12744 if (align_good
== FAIL
)
12748 unsigned alignbits
= 0;
12751 case 16: alignbits
= 0x1; break;
12752 case 32: alignbits
= 0x3; break;
12755 inst
.instruction
|= alignbits
<< 4;
12759 case 1: /* VLD2 / VST2. */
12760 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 8, 16, 16, 32,
12762 if (align_good
== FAIL
)
12765 inst
.instruction
|= 1 << 4;
12768 case 2: /* VLD3 / VST3. */
12769 constraint (inst
.operands
[1].immisalign
,
12770 _("can't use alignment with this instruction"));
12773 case 3: /* VLD4 / VST4. */
12774 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 8, 32,
12775 16, 64, 32, 64, 32, 128, -1);
12776 if (align_good
== FAIL
)
12780 unsigned alignbits
= 0;
12783 case 8: alignbits
= 0x1; break;
12784 case 16: alignbits
= 0x1; break;
12785 case 32: alignbits
= (align
== 64) ? 0x1 : 0x2; break;
12788 inst
.instruction
|= alignbits
<< 4;
12795 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
12796 if (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
12797 inst
.instruction
|= 1 << (4 + logsize
);
12799 inst
.instruction
|= NEON_LANE (inst
.operands
[0].imm
) << (logsize
+ 5);
12800 inst
.instruction
|= logsize
<< 10;
12803 /* Encode single n-element structure to all lanes VLD<n> instructions. */
12806 do_neon_ld_dup (void)
12808 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
12809 int align_good
, do_align
= 0;
12811 if (et
.type
== NT_invtype
)
12814 switch ((inst
.instruction
>> 8) & 3)
12816 case 0: /* VLD1. */
12817 assert (NEON_REG_STRIDE (inst
.operands
[0].imm
) != 2);
12818 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
12819 &do_align
, 16, 16, 32, 32, -1);
12820 if (align_good
== FAIL
)
12822 switch (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
))
12825 case 2: inst
.instruction
|= 1 << 5; break;
12826 default: first_error (_("bad list length")); return;
12828 inst
.instruction
|= neon_logbits (et
.size
) << 6;
12831 case 1: /* VLD2. */
12832 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
12833 &do_align
, 8, 16, 16, 32, 32, 64, -1);
12834 if (align_good
== FAIL
)
12836 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2,
12837 _("bad list length"));
12838 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
12839 inst
.instruction
|= 1 << 5;
12840 inst
.instruction
|= neon_logbits (et
.size
) << 6;
12843 case 2: /* VLD3. */
12844 constraint (inst
.operands
[1].immisalign
,
12845 _("can't use alignment with this instruction"));
12846 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 3,
12847 _("bad list length"));
12848 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
12849 inst
.instruction
|= 1 << 5;
12850 inst
.instruction
|= neon_logbits (et
.size
) << 6;
12853 case 3: /* VLD4. */
12855 int align
= inst
.operands
[1].imm
>> 8;
12856 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 8, 32,
12857 16, 64, 32, 64, 32, 128, -1);
12858 if (align_good
== FAIL
)
12860 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4,
12861 _("bad list length"));
12862 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
12863 inst
.instruction
|= 1 << 5;
12864 if (et
.size
== 32 && align
== 128)
12865 inst
.instruction
|= 0x3 << 6;
12867 inst
.instruction
|= neon_logbits (et
.size
) << 6;
12874 inst
.instruction
|= do_align
<< 4;
12877 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
12878 apart from bits [11:4]. */
12881 do_neon_ldx_stx (void)
12883 switch (NEON_LANE (inst
.operands
[0].imm
))
12885 case NEON_INTERLEAVE_LANES
:
12886 inst
.instruction
= NEON_ENC_INTERLV (inst
.instruction
);
12887 do_neon_ld_st_interleave ();
12890 case NEON_ALL_LANES
:
12891 inst
.instruction
= NEON_ENC_DUP (inst
.instruction
);
12896 inst
.instruction
= NEON_ENC_LANE (inst
.instruction
);
12897 do_neon_ld_st_lane ();
12900 /* L bit comes from bit mask. */
12901 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12902 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12903 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
12905 if (inst
.operands
[1].postind
)
12907 int postreg
= inst
.operands
[1].imm
& 0xf;
12908 constraint (!inst
.operands
[1].immisreg
,
12909 _("post-index must be a register"));
12910 constraint (postreg
== 0xd || postreg
== 0xf,
12911 _("bad register for post-index"));
12912 inst
.instruction
|= postreg
;
12914 else if (inst
.operands
[1].writeback
)
12916 inst
.instruction
|= 0xd;
12919 inst
.instruction
|= 0xf;
12922 inst
.instruction
|= 0xf9000000;
12924 inst
.instruction
|= 0xf4000000;
12928 /* Overall per-instruction processing. */
12930 /* We need to be able to fix up arbitrary expressions in some statements.
12931 This is so that we can handle symbols that are an arbitrary distance from
12932 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
12933 which returns part of an address in a form which will be valid for
12934 a data instruction. We do this by pushing the expression into a symbol
12935 in the expr_section, and creating a fix for that. */
12938 fix_new_arm (fragS
* frag
,
12943 int pcrel_reloc
, /* HACK_GUESS */
12958 new_fix
= fix_new_exp (frag
, where
, size
, exp
, pc_rel
, pcrel_reloc
, reloc
);
12961 new_fix
= fix_new (frag
,
12965 exp
->X_subtract_symbol
,
12971 new_fix
= fix_new (frag
, where
, size
, make_expr_symbol (exp
), 0,
12973 #endif /* NOTYET */
12977 /* Mark whether the fix is to a THUMB instruction, or an ARM
12979 new_fix
->tc_fix_data
= &thumb_mode
;
12982 /* Create a frg for an instruction requiring relaxation. */
12984 output_relax_insn (void)
12990 /* The size of the instruction is unknown, so tie the debug info to the
12991 start of the instruction. */
12992 dwarf2_emit_insn (0);
12994 switch (inst
.reloc
.exp
.X_op
)
12997 sym
= inst
.reloc
.exp
.X_add_symbol
;
12998 offset
= inst
.reloc
.exp
.X_add_number
;
13002 offset
= inst
.reloc
.exp
.X_add_number
;
13005 /* Avoid make_expr_symbol() if their is no subtract symbol and the
13006 symbol is just an undefined symbol or absolute, if so use that in the
13008 if (inst
.reloc
.exp
.X_subtract_symbol
== NULL
&&
13009 inst
.reloc
.exp
.X_add_symbol
!= NULL
&&
13010 ((inst
.reloc
.exp
.X_add_symbol
->sy_nlist
.n_type
& N_TYPE
) == N_UNDF
||
13011 (inst
.reloc
.exp
.X_add_symbol
->sy_nlist
.n_type
& N_TYPE
) == N_ABS
) )
13013 sym
= inst
.reloc
.exp
.X_add_symbol
;
13014 offset
= inst
.reloc
.exp
.X_add_number
;
13018 sym
= make_expr_symbol (&inst
.reloc
.exp
);
13023 to
= frag_var (rs_machine_dependent
, INSN_SIZE
, THUMB_SIZE
,
13024 inst
.relax
, sym
, offset
, NULL
/*offset, opcode*/);
13025 md_number_to_chars (to
, inst
.instruction
, THUMB_SIZE
);
13028 /* Write a 32-bit thumb instruction to buf. */
13030 put_thumb32_insn (char * buf
, uint32_t insn
)
13032 md_number_to_chars (buf
, insn
>> 16, THUMB_SIZE
);
13033 md_number_to_chars (buf
+ THUMB_SIZE
, insn
, THUMB_SIZE
);
13036 output_inst (const char * str
)
13042 as_bad ("%s -- `%s'", inst
.error
, str
);
13046 output_relax_insn();
13049 if (inst
.size
== 0)
13052 to
= frag_more (inst
.size
);
13054 if (thumb_mode
&& (inst
.size
> THUMB_SIZE
))
13056 assert (inst
.size
== (2 * THUMB_SIZE
));
13057 put_thumb32_insn (to
, inst
.instruction
);
13059 else if (inst
.size
> INSN_SIZE
)
13061 assert (inst
.size
== (2 * INSN_SIZE
));
13062 md_number_to_chars (to
, inst
.instruction
, INSN_SIZE
);
13063 md_number_to_chars (to
+ INSN_SIZE
, inst
.instruction
, INSN_SIZE
);
13066 md_number_to_chars (to
, inst
.instruction
, inst
.size
);
13068 if (inst
.reloc
.type
!= BFD_RELOC_UNUSED
)
13069 fix_new_arm (frag_now
, to
- frag_now
->fr_literal
,
13070 inst
.size
, & inst
.reloc
.exp
, inst
.reloc
.pc_rel
,
13071 /* HACK_GUESS */ inst
.reloc
.pcrel_reloc
,
13074 dwarf2_emit_insn (inst
.size
);
/* Tag values used in struct asm_opcode's tag field.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a conditional
			   suffix, others place 0xF where the condition field
			   would be.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs. */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
13111 /* Subroutine of md_assemble, responsible for looking up the primary
13112 opcode from the mnemonic the user wrote. STR points to the
13113 beginning of the mnemonic.
13115 This is not simply a hash table lookup, because of conditional
13116 variants. Most instructions have conditional variants, which are
13117 expressed with a _conditional affix_ to the mnemonic. If we were
13118 to encode each conditional variant as a literal string in the opcode
13119 table, it would have approximately 20,000 entries.
13121 Most mnemonics take this affix as a suffix, and in unified syntax,
13122 'most' is upgraded to 'all'. However, in the divided syntax, some
13123 instructions take the affix as an infix, notably the s-variants of
13124 the arithmetic instructions. Of those instructions, all but six
13125 have the infix appear after the third character of the mnemonic.
13127 Accordingly, the algorithm for looking up primary opcodes given
13130 1. Look up the identifier in the opcode table.
13131 If we find a match, go to step U.
13133 2. Look up the last two characters of the identifier in the
13134 conditions table. If we find a match, look up the first N-2
13135 characters of the identifier in the opcode table. If we
13136 find a match, go to step CE.
13138 3. Look up the fourth and fifth characters of the identifier in
13139 the conditions table. If we find a match, extract those
13140 characters from the identifier, and look up the remaining
13141 characters in the opcode table. If we find a match, go
13146 U. Examine the tag field of the opcode structure, in case this is
13147 one of the six instructions with its conditional infix in an
13148 unusual place. If it is, the tag tells us where to find the
13149 infix; look it up in the conditions table and set inst.cond
13150 accordingly. Otherwise, this is an unconditional instruction.
13151 Again set inst.cond accordingly. Return the opcode structure.
13153 CE. Examine the tag field to make sure this is an instruction that
13154 should receive a conditional suffix. If it is not, fail.
13155 Otherwise, set inst.cond from the suffix we already looked up,
13156 and return the opcode structure.
13158 CM. Examine the tag field to make sure this is an instruction that
13159 should receive a conditional infix after the third character.
13160 If it is not, fail. Otherwise, undo the edits to the current
13161 line of input and proceed as for case CE. */
13163 static const struct asm_opcode
*
13164 opcode_lookup (char **str
)
13168 const struct asm_opcode
*opcode
;
13169 const struct asm_cond
*cond
;
13171 bfd_boolean neon_supported
;
13173 neon_supported
= ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
);
13175 /* Scan up to the end of the mnemonic, which must end in white space,
13176 '.' (in unified mode, or for Neon instructions), or end of string. */
13177 for (base
= end
= *str
; *end
!= '\0'; end
++)
13178 if (*end
== ' ' || ((unified_syntax
|| neon_supported
) && *end
== '.'))
13184 /* Handle a possible width suffix and/or Neon type suffix. */
13189 /* The .w and .n suffixes are only valid if the unified syntax is in
13191 if (unified_syntax
&& end
[1] == 'w')
13193 else if (unified_syntax
&& end
[1] == 'n')
13198 inst
.vectype
.elems
= 0;
13200 *str
= end
+ offset
;
13202 if (end
[offset
] == '.')
13204 /* See if we have a Neon type suffix (possible in either unified or
13205 non-unified ARM syntax mode). */
13206 if (parse_neon_type (&inst
.vectype
, str
) == FAIL
)
13209 else if (end
[offset
] != '\0' && end
[offset
] != ' ')
13215 /* Look for unaffixed or special-case affixed mnemonic. */
13216 opcode
= hash_find_n (arm_ops_hsh
, base
, end
- base
);
13220 if (opcode
->tag
< OT_odd_infix_0
)
13222 inst
.cond
= COND_ALWAYS
;
13226 if (unified_syntax
)
13227 as_warn (_("conditional infixes are deprecated in unified syntax"));
13228 affix
= base
+ (opcode
->tag
- OT_odd_infix_0
);
13229 cond
= hash_find_n (arm_cond_hsh
, affix
, 2);
13232 inst
.cond
= cond
->value
;
13236 /* Cannot have a conditional suffix on a mnemonic of less than two
13238 if (end
- base
< 3)
13241 /* Look for suffixed mnemonic. */
13243 cond
= hash_find_n (arm_cond_hsh
, affix
, 2);
13244 opcode
= hash_find_n (arm_ops_hsh
, base
, affix
- base
);
13245 if (opcode
&& cond
)
13248 switch (opcode
->tag
)
13250 case OT_cinfix3_legacy
:
13251 /* Ignore conditional suffixes matched on infix only mnemonics. */
13255 case OT_cinfix3_deprecated
:
13256 case OT_odd_infix_unc
:
13257 if (!unified_syntax
)
13259 /* else fall through */
13263 case OT_csuf_or_in3
:
13264 inst
.cond
= cond
->value
;
13267 case OT_unconditional
:
13268 case OT_unconditionalF
:
13271 inst
.cond
= cond
->value
;
13275 /* delayed diagnostic */
13276 inst
.error
= BAD_COND
;
13277 inst
.cond
= COND_ALWAYS
;
13286 /* Cannot have a usual-position infix on a mnemonic of less than
13287 six characters (five would be a suffix). */
13288 if (end
- base
< 6)
13291 /* Look for infixed mnemonic in the usual position. */
13293 cond
= hash_find_n (arm_cond_hsh
, affix
, 2);
13297 memcpy (save
, affix
, 2);
13298 memmove (affix
, affix
+ 2, (end
- affix
) - 2);
13299 opcode
= hash_find_n (arm_ops_hsh
, base
, (end
- base
) - 2);
13300 memmove (affix
+ 2, affix
, (end
- affix
) - 2);
13301 memcpy (affix
, save
, 2);
13304 && (opcode
->tag
== OT_cinfix3
13305 || opcode
->tag
== OT_cinfix3_deprecated
13306 || opcode
->tag
== OT_csuf_or_in3
13307 || opcode
->tag
== OT_cinfix3_legacy
))
13311 && (opcode
->tag
== OT_cinfix3
13312 || opcode
->tag
== OT_cinfix3_deprecated
))
13313 as_warn (_("conditional infixes are deprecated in unified syntax"));
13315 inst
.cond
= cond
->value
;
13323 md_assemble (char *str
)
13326 const struct asm_opcode
* opcode
;
13329 /* Align the previous label if needed. */
13330 if (last_label_seen
!= NULL
)
13332 symbol_set_frag (last_label_seen
, frag_now
);
13333 S_SET_VALUE (last_label_seen
, (valueT
) frag_now_fix ());
13334 S_SET_SEGMENT (last_label_seen
, now_seg
);
13336 #endif /* NOTYET */
13338 memset (&inst
, '\0', sizeof (inst
));
13339 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
13341 opcode
= opcode_lookup (&p
);
13344 /* It wasn't an instruction, but it might be a register alias of
13345 the form alias .req reg, or a Neon .dn/.qn directive. */
13346 if (!create_register_alias (str
, p
)
13347 && !create_neon_reg_alias (str
, p
))
13348 as_bad (_("bad instruction `%s'"), str
);
13353 if (opcode
->tag
== OT_cinfix3_deprecated
)
13354 as_warn (_("s suffix on comparison instruction is deprecated"));
13356 /* The value which unconditional instructions should have in place of the
13357 condition field. */
13358 inst
.uncond_value
= (opcode
->tag
== OT_csuffixF
) ? 0xf : -1;
13362 arm_feature_set variant
;
13364 variant
= cpu_variant
;
13365 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
13366 if (!ARM_CPU_HAS_FEATURE (variant
, arm_arch_t2
))
13367 ARM_CLEAR_FEATURE (variant
, variant
, fpu_any_hard
);
13368 /* Check that this instruction is supported for this CPU. */
13369 if (!opcode
->tvariant
13370 || (thumb_mode
== 1
13371 && !ARM_CPU_HAS_FEATURE (variant
, *opcode
->tvariant
)
13372 && !force_cpusubtype_ALL
))
13374 as_bad (_("selected processor does not support `%s'"), str
);
13377 if (inst
.cond
!= COND_ALWAYS
&& !unified_syntax
13378 && opcode
->tencode
!= do_t_branch
)
13380 as_bad (_("Thumb does not support conditional execution"));
13384 if (!ARM_CPU_HAS_FEATURE (variant
, arm_ext_v6t2
) && !inst
.size_req
)
13386 /* Implicit require narrow instructions on Thumb-1. This avoids
13387 relaxation accidentally introducing Thumb-2 instructions. */
13388 if (opcode
->tencode
!= do_t_blx
&& opcode
->tencode
!= do_t_branch23
)
13392 /* Check conditional suffixes. */
13393 if (current_it_mask
)
13396 cond
= current_cc
^ ((current_it_mask
>> 4) & 1) ^ 1;
13397 current_it_mask
<<= 1;
13398 current_it_mask
&= 0x1f;
13399 /* The BKPT instruction is unconditional even in an IT block. */
13401 && cond
!= inst
.cond
&& opcode
->tencode
!= do_t_bkpt
)
13403 as_bad (_("incorrect condition in IT block"));
13407 else if (inst
.cond
!= COND_ALWAYS
&& opcode
->tencode
!= do_t_branch
)
13409 as_bad (_("thumb conditional instruction not in IT block"));
13413 mapping_state (MAP_THUMB
);
13414 inst
.instruction
= opcode
->tvalue
;
13416 if (!parse_operands (p
, opcode
->operands
))
13417 opcode
->tencode ();
13419 /* Clear current_it_mask at the end of an IT block. */
13420 if (current_it_mask
== 0x10)
13421 current_it_mask
= 0;
13423 if (!(inst
.error
|| inst
.relax
))
13425 assert (inst
.instruction
< 0xe800 || inst
.instruction
> 0xffff);
13426 inst
.size
= (inst
.instruction
> 0xffff ? 4 : 2);
13427 if (inst
.size_req
&& inst
.size_req
!= inst
.size
)
13429 as_bad (_("cannot honor width suffix -- `%s'"), str
);
13434 /* Something has gone badly wrong if we try to relax a fixed size
13436 assert (inst
.size_req
== 0 || !inst
.relax
);
13438 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
13439 *opcode
->tvariant
);
13440 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
13441 set those bits when Thumb-2 32-bit instructions are seen. ie.
13442 anything other than bl/blx.
13443 This is overly pessimistic for relaxable instructions. */
13444 if ((inst
.size
== 4 && (inst
.instruction
& 0xf800e800) != 0xf000e800)
13446 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
13449 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
13451 /* Check that this instruction is supported for this CPU. */
13452 if (!opcode
->avariant
||
13453 (!ARM_CPU_HAS_FEATURE (cpu_variant
, *opcode
->avariant
) &&
13454 !force_cpusubtype_ALL
))
13456 as_bad (_("selected processor does not support `%s'"), str
);
13459 /* Allow width suffixes with unified_syntax */
13460 if (inst
.size_req
&& !unified_syntax
)
13462 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str
);
13466 mapping_state (MAP_ARM
);
13467 inst
.instruction
= opcode
->avalue
;
13468 if (opcode
->tag
== OT_unconditionalF
)
13469 inst
.instruction
|= 0xF << 28;
13471 inst
.instruction
|= inst
.cond
<< 28;
13472 inst
.size
= INSN_SIZE
;
13473 if (!parse_operands (p
, opcode
->operands
))
13474 opcode
->aencode ();
13475 /* Arm mode bx is marked as both v4T and v5 because it's still required
13476 on a hypothetical non-thumb v5 core. */
13477 if (ARM_CPU_HAS_FEATURE (*opcode
->avariant
, arm_ext_v4t
)
13478 || ARM_CPU_HAS_FEATURE (*opcode
->avariant
, arm_ext_v5
))
13479 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, arm_ext_v4t
);
13481 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
13482 *opcode
->avariant
);
13486 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
13492 /* FROM Mac OS X port */
13494 * If the -g flag is present generate a line number stab for the
13497 * See the detailed comments about stabs in read_a_source_file() for a
13498 * description of what is going on here.
13500 if (flagseen
['g'] && frchain_now
->frch_nsect
== text_nsect
)
13506 logical_input_line
/* n_desc, line number */,
13507 obstack_next_free(&frags
) - frag_now
->fr_literal
,
13512 * We are putting a machine instruction in this section so mark it as
13513 * containg some machine instructions.
13515 frchain_now
->frch_section
.flags
|= S_ATTR_SOME_INSTRUCTIONS
;
13518 /* Various frobbings of labels and their addresses. */
13521 arm_start_line_hook (void)
13523 last_label_seen
= NULL
;
13527 arm_frob_label (symbolS
* sym
)
13530 last_label_seen
= sym
;
13532 ARM_SET_THUMB (sym
, thumb_mode
);
13534 #if defined OBJ_COFF || defined OBJ_ELF
13535 ARM_SET_INTERWORK (sym
, support_interwork
);
13537 #endif /* NOTYET */
13539 /* Note - do not allow local symbols (.Lxxx) to be labeled
13540 as Thumb functions. This is because these labels, whilst
13541 they exist inside Thumb code, are not the entry points for
13542 possible ARM->Thumb calls. Also, these labels can be used
13543 as part of a computed goto or switch statement. eg gcc
13544 can generate code that looks like this:
13546 ldr r2, [pc, .Laaa]
13556 The first instruction loads the address of the jump table.
13557 The second instruction converts a table index into a byte offset.
13558 The third instruction gets the jump address out of the table.
13559 The fourth instruction performs the jump.
13561 If the address stored at .Laaa is that of a symbol which has the
13562 Thumb_Func bit set, then the linker will arrange for this address
13563 to have the bottom bit set, which in turn would mean that the
13564 address computation performed by the third instruction would end
13565 up with the bottom bit set. Since the ARM is capable of unaligned
13566 word loads, the instruction would then load the incorrect address
13567 out of the jump table, and chaos would ensue. */
13568 if (label_is_thumb_function_name
13569 && (S_GET_NAME (sym
)[0] != 'L')
13571 && (bfd_get_section_flags (stdoutput
, now_seg
) & SEC_CODE
) != 0)
13573 && (frchain_now
->frch_nsect
== text_nsect
))
13576 /* When the address of a Thumb function is taken the bottom
13577 bit of that address should be set. This will allow
13578 interworking between Arm and Thumb functions to work
13581 THUMB_SET_FUNC (sym
, 1);
13582 sym
->sy_desc
|= N_ARM_THUMB_DEF
;
13584 label_is_thumb_function_name
= FALSE
;
13587 dwarf2_emit_label (sym
);
13591 arm_data_in_code (void)
13593 if (thumb_mode
&& ! strncmp (input_line_pointer
+ 1, "data:", 5))
13595 *input_line_pointer
= '/';
13596 input_line_pointer
+= 5;
13597 *input_line_pointer
= 0;
13605 arm_canonicalize_symbol_name (char * name
)
13609 if (thumb_mode
&& (len
= strlen (name
)) > 5
13610 && streq (name
+ len
- 5, "/data"))
13611 *(name
+ len
- 5) = 0;
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.	Some registers
   also have mixed-case names.	*/

/* REGDEF expands to one reg_entry initializer: name string, number,
   type tag, built-in flag, and a zero "neon" field.  REGNUM glues the
   prefix and number together; REGNUM2 doubles the number (used for the
   Neon Q registers, which alias pairs of D registers).  REGSET/REGSETH
   expand a full bank of 16 registers (0-15 / 16-31).  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
13640 static const struct reg_entry reg_names
[] =
13642 /* ARM integer registers. */
13643 REGSET(r
, RN
), REGSET(R
, RN
),
13645 /* ATPCS synonyms. */
13646 REGDEF(a1
,0,RN
), REGDEF(a2
,1,RN
), REGDEF(a3
, 2,RN
), REGDEF(a4
, 3,RN
),
13647 REGDEF(v1
,4,RN
), REGDEF(v2
,5,RN
), REGDEF(v3
, 6,RN
), REGDEF(v4
, 7,RN
),
13648 REGDEF(v5
,8,RN
), REGDEF(v6
,9,RN
), REGDEF(v7
,10,RN
), REGDEF(v8
,11,RN
),
13650 REGDEF(A1
,0,RN
), REGDEF(A2
,1,RN
), REGDEF(A3
, 2,RN
), REGDEF(A4
, 3,RN
),
13651 REGDEF(V1
,4,RN
), REGDEF(V2
,5,RN
), REGDEF(V3
, 6,RN
), REGDEF(V4
, 7,RN
),
13652 REGDEF(V5
,8,RN
), REGDEF(V6
,9,RN
), REGDEF(V7
,10,RN
), REGDEF(V8
,11,RN
),
13654 /* Well-known aliases. */
13655 REGDEF(wr
, 7,RN
), REGDEF(sb
, 9,RN
), REGDEF(sl
,10,RN
), REGDEF(fp
,11,RN
),
13656 REGDEF(ip
,12,RN
), REGDEF(sp
,13,RN
), REGDEF(lr
,14,RN
), REGDEF(pc
,15,RN
),
13658 REGDEF(WR
, 7,RN
), REGDEF(SB
, 9,RN
), REGDEF(SL
,10,RN
), REGDEF(FP
,11,RN
),
13659 REGDEF(IP
,12,RN
), REGDEF(SP
,13,RN
), REGDEF(LR
,14,RN
), REGDEF(PC
,15,RN
),
13661 /* Coprocessor numbers. */
13662 REGSET(p
, CP
), REGSET(P
, CP
),
13664 /* Coprocessor register numbers. The "cr" variants are for backward
13666 REGSET(c
, CN
), REGSET(C
, CN
),
13667 REGSET(cr
, CN
), REGSET(CR
, CN
),
13669 /* FPA registers. */
13670 REGNUM(f
,0,FN
), REGNUM(f
,1,FN
), REGNUM(f
,2,FN
), REGNUM(f
,3,FN
),
13671 REGNUM(f
,4,FN
), REGNUM(f
,5,FN
), REGNUM(f
,6,FN
), REGNUM(f
,7, FN
),
13673 REGNUM(F
,0,FN
), REGNUM(F
,1,FN
), REGNUM(F
,2,FN
), REGNUM(F
,3,FN
),
13674 REGNUM(F
,4,FN
), REGNUM(F
,5,FN
), REGNUM(F
,6,FN
), REGNUM(F
,7, FN
),
13676 /* VFP SP registers. */
13677 REGSET(s
,VFS
), REGSET(S
,VFS
),
13678 REGSETH(s
,VFS
), REGSETH(S
,VFS
),
13680 /* VFP DP Registers. */
13681 REGSET(d
,VFD
), REGSET(D
,VFD
),
13682 /* Extra Neon DP registers. */
13683 REGSETH(d
,VFD
), REGSETH(D
,VFD
),
13685 /* Neon QP registers. */
13686 REGSET2(q
,NQ
), REGSET2(Q
,NQ
),
13688 /* VFP control registers. */
13689 REGDEF(fpsid
,0,VFC
), REGDEF(fpscr
,1,VFC
), REGDEF(fpexc
,8,VFC
),
13690 REGDEF(FPSID
,0,VFC
), REGDEF(FPSCR
,1,VFC
), REGDEF(FPEXC
,8,VFC
),
13692 /* Maverick DSP coprocessor registers. */
13693 REGSET(mvf
,MVF
), REGSET(mvd
,MVD
), REGSET(mvfx
,MVFX
), REGSET(mvdx
,MVDX
),
13694 REGSET(MVF
,MVF
), REGSET(MVD
,MVD
), REGSET(MVFX
,MVFX
), REGSET(MVDX
,MVDX
),
13696 REGNUM(mvax
,0,MVAX
), REGNUM(mvax
,1,MVAX
),
13697 REGNUM(mvax
,2,MVAX
), REGNUM(mvax
,3,MVAX
),
13698 REGDEF(dspsc
,0,DSPSC
),
13700 REGNUM(MVAX
,0,MVAX
), REGNUM(MVAX
,1,MVAX
),
13701 REGNUM(MVAX
,2,MVAX
), REGNUM(MVAX
,3,MVAX
),
13702 REGDEF(DSPSC
,0,DSPSC
),
13704 /* iWMMXt data registers - p0, c0-15. */
13705 REGSET(wr
,MMXWR
), REGSET(wR
,MMXWR
), REGSET(WR
, MMXWR
),
13707 /* iWMMXt control registers - p1, c0-3. */
13708 REGDEF(wcid
, 0,MMXWC
), REGDEF(wCID
, 0,MMXWC
), REGDEF(WCID
, 0,MMXWC
),
13709 REGDEF(wcon
, 1,MMXWC
), REGDEF(wCon
, 1,MMXWC
), REGDEF(WCON
, 1,MMXWC
),
13710 REGDEF(wcssf
, 2,MMXWC
), REGDEF(wCSSF
, 2,MMXWC
), REGDEF(WCSSF
, 2,MMXWC
),
13711 REGDEF(wcasf
, 3,MMXWC
), REGDEF(wCASF
, 3,MMXWC
), REGDEF(WCASF
, 3,MMXWC
),
13713 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
13714 REGDEF(wcgr0
, 8,MMXWCG
), REGDEF(wCGR0
, 8,MMXWCG
), REGDEF(WCGR0
, 8,MMXWCG
),
13715 REGDEF(wcgr1
, 9,MMXWCG
), REGDEF(wCGR1
, 9,MMXWCG
), REGDEF(WCGR1
, 9,MMXWCG
),
13716 REGDEF(wcgr2
,10,MMXWCG
), REGDEF(wCGR2
,10,MMXWCG
), REGDEF(WCGR2
,10,MMXWCG
),
13717 REGDEF(wcgr3
,11,MMXWCG
), REGDEF(wCGR3
,11,MMXWCG
), REGDEF(WCGR3
,11,MMXWCG
),
13719 /* XScale accumulator registers. */
13720 REGNUM(acc
,0,XSCALE
), REGNUM(ACC
,0,XSCALE
),
13726 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
13727 within psr_required_here. */
13728 static const struct asm_psr psrs
[] =
13730 /* Backward compatibility notation. Note that "all" is no longer
13731 truly all possible PSR bits. */
13732 {"all", PSR_c
| PSR_f
},
13736 /* Individual flags. */
13741 /* Combinations of flags. */
13742 {"fs", PSR_f
| PSR_s
},
13743 {"fx", PSR_f
| PSR_x
},
13744 {"fc", PSR_f
| PSR_c
},
13745 {"sf", PSR_s
| PSR_f
},
13746 {"sx", PSR_s
| PSR_x
},
13747 {"sc", PSR_s
| PSR_c
},
13748 {"xf", PSR_x
| PSR_f
},
13749 {"xs", PSR_x
| PSR_s
},
13750 {"xc", PSR_x
| PSR_c
},
13751 {"cf", PSR_c
| PSR_f
},
13752 {"cs", PSR_c
| PSR_s
},
13753 {"cx", PSR_c
| PSR_x
},
13754 {"fsx", PSR_f
| PSR_s
| PSR_x
},
13755 {"fsc", PSR_f
| PSR_s
| PSR_c
},
13756 {"fxs", PSR_f
| PSR_x
| PSR_s
},
13757 {"fxc", PSR_f
| PSR_x
| PSR_c
},
13758 {"fcs", PSR_f
| PSR_c
| PSR_s
},
13759 {"fcx", PSR_f
| PSR_c
| PSR_x
},
13760 {"sfx", PSR_s
| PSR_f
| PSR_x
},
13761 {"sfc", PSR_s
| PSR_f
| PSR_c
},
13762 {"sxf", PSR_s
| PSR_x
| PSR_f
},
13763 {"sxc", PSR_s
| PSR_x
| PSR_c
},
13764 {"scf", PSR_s
| PSR_c
| PSR_f
},
13765 {"scx", PSR_s
| PSR_c
| PSR_x
},
13766 {"xfs", PSR_x
| PSR_f
| PSR_s
},
13767 {"xfc", PSR_x
| PSR_f
| PSR_c
},
13768 {"xsf", PSR_x
| PSR_s
| PSR_f
},
13769 {"xsc", PSR_x
| PSR_s
| PSR_c
},
13770 {"xcf", PSR_x
| PSR_c
| PSR_f
},
13771 {"xcs", PSR_x
| PSR_c
| PSR_s
},
13772 {"cfs", PSR_c
| PSR_f
| PSR_s
},
13773 {"cfx", PSR_c
| PSR_f
| PSR_x
},
13774 {"csf", PSR_c
| PSR_s
| PSR_f
},
13775 {"csx", PSR_c
| PSR_s
| PSR_x
},
13776 {"cxf", PSR_c
| PSR_x
| PSR_f
},
13777 {"cxs", PSR_c
| PSR_x
| PSR_s
},
13778 {"fsxc", PSR_f
| PSR_s
| PSR_x
| PSR_c
},
13779 {"fscx", PSR_f
| PSR_s
| PSR_c
| PSR_x
},
13780 {"fxsc", PSR_f
| PSR_x
| PSR_s
| PSR_c
},
13781 {"fxcs", PSR_f
| PSR_x
| PSR_c
| PSR_s
},
13782 {"fcsx", PSR_f
| PSR_c
| PSR_s
| PSR_x
},
13783 {"fcxs", PSR_f
| PSR_c
| PSR_x
| PSR_s
},
13784 {"sfxc", PSR_s
| PSR_f
| PSR_x
| PSR_c
},
13785 {"sfcx", PSR_s
| PSR_f
| PSR_c
| PSR_x
},
13786 {"sxfc", PSR_s
| PSR_x
| PSR_f
| PSR_c
},
13787 {"sxcf", PSR_s
| PSR_x
| PSR_c
| PSR_f
},
13788 {"scfx", PSR_s
| PSR_c
| PSR_f
| PSR_x
},
13789 {"scxf", PSR_s
| PSR_c
| PSR_x
| PSR_f
},
13790 {"xfsc", PSR_x
| PSR_f
| PSR_s
| PSR_c
},
13791 {"xfcs", PSR_x
| PSR_f
| PSR_c
| PSR_s
},
13792 {"xsfc", PSR_x
| PSR_s
| PSR_f
| PSR_c
},
13793 {"xscf", PSR_x
| PSR_s
| PSR_c
| PSR_f
},
13794 {"xcfs", PSR_x
| PSR_c
| PSR_f
| PSR_s
},
13795 {"xcsf", PSR_x
| PSR_c
| PSR_s
| PSR_f
},
13796 {"cfsx", PSR_c
| PSR_f
| PSR_s
| PSR_x
},
13797 {"cfxs", PSR_c
| PSR_f
| PSR_x
| PSR_s
},
13798 {"csfx", PSR_c
| PSR_s
| PSR_f
| PSR_x
},
13799 {"csxf", PSR_c
| PSR_s
| PSR_x
| PSR_f
},
13800 {"cxfs", PSR_c
| PSR_x
| PSR_f
| PSR_s
},
13801 {"cxsf", PSR_c
| PSR_x
| PSR_s
| PSR_f
},
13804 /* Table of V7M psr names. */
13805 static const struct asm_psr v7m_psrs
[] =
13807 {"apsr", 0 }, {"APSR", 0 },
13808 {"iapsr", 1 }, {"IAPSR", 1 },
13809 {"eapsr", 2 }, {"EAPSR", 2 },
13810 {"psr", 3 }, {"PSR", 3 },
13811 {"xpsr", 3 }, {"XPSR", 3 }, {"xPSR", 3 },
13812 {"ipsr", 5 }, {"IPSR", 5 },
13813 {"epsr", 6 }, {"EPSR", 6 },
13814 {"iepsr", 7 }, {"IEPSR", 7 },
13815 {"msp", 8 }, {"MSP", 8 },
13816 {"psp", 9 }, {"PSP", 9 },
13817 {"primask", 16}, {"PRIMASK", 16},
13818 {"basepri", 17}, {"BASEPRI", 17},
13819 {"basepri_max", 18}, {"BASEPRI_MAX", 18},
13820 {"faultmask", 19}, {"FAULTMASK", 19},
13821 {"control", 20}, {"CONTROL", 20}
13824 /* Table of all shift-in-operand names. */
13825 static const struct asm_shift_name shift_names
[] =
13827 { "asl", SHIFT_LSL
}, { "ASL", SHIFT_LSL
},
13828 { "lsl", SHIFT_LSL
}, { "LSL", SHIFT_LSL
},
13829 { "lsr", SHIFT_LSR
}, { "LSR", SHIFT_LSR
},
13830 { "asr", SHIFT_ASR
}, { "ASR", SHIFT_ASR
},
13831 { "ror", SHIFT_ROR
}, { "ROR", SHIFT_ROR
},
13832 { "rrx", SHIFT_RRX
}, { "RRX", SHIFT_RRX
}
13835 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
13836 static const struct asm_cond conds
[] =
13840 {"cs", 0x2}, {"hs", 0x2},
13841 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
13855 static struct asm_barrier_opt barrier_opt_names
[] =
/* Table of ARM-format instructions.  */

/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.  */
#define OPS0()		  { OP_stop, }
#define OPS1(a)		  { OP_##a, }
#define OPS2(a,b)	  { OP_##a,OP_##b, }
#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
       TxCE(mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
       TxCE(mnem, aop, T_MNEM_##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { #mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
       TxC3(mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
       TxC3w(mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
       TxC3(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
       TxC3w(mnem, aop, T_MNEM_##top, nops, ops, ae, te)

/* Mnemonic with a conditional infix in an unusual place.  Each and every variant has to
   appear in the condition table.  */
#define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te) \
  { #m1 #m2 #m3, OPS##nops ops, \
    sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
    0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }

#define TxCM(m1, m2, op, top, nops, ops, ae, te) \
  TxCM_(m1,   , m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, eq, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, ne, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, cs, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, hs, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, cc, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, ul, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, lo, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, mi, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, pl, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, vs, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, vc, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, hi, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, ls, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, ge, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, lt, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, gt, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, le, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, al, m2, op, top, nops, ops, ae, te)

#define TCM(m1,m2, aop, top, nops, ops, ae, te) \
       TxCM(m1,m2, aop, 0x##top, nops, ops, ae, te)
#define tCM(m1,m2, aop, top, nops, ops, ae, te) \
       TxCM(m1,m2, aop, T_MNEM_##top, nops, ops, ae, te)

/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te) \
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* ARM-only variants of all the above.  */
#define CE(mnem,  op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define C3(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  */
#define cCE(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

#define xCM_(m1, m2, m3, op, nops, ops, ae) \
  { #m1 #m2 #m3, OPS##nops ops, \
    sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define CM(m1, m2, op, nops, ops, ae) \
  xCM_(m1,   , m2, op, nops, ops, ae), \
  xCM_(m1, eq, m2, op, nops, ops, ae), \
  xCM_(m1, ne, m2, op, nops, ops, ae), \
  xCM_(m1, cs, m2, op, nops, ops, ae), \
  xCM_(m1, hs, m2, op, nops, ops, ae), \
  xCM_(m1, cc, m2, op, nops, ops, ae), \
  xCM_(m1, ul, m2, op, nops, ops, ae), \
  xCM_(m1, lo, m2, op, nops, ops, ae), \
  xCM_(m1, mi, m2, op, nops, ops, ae), \
  xCM_(m1, pl, m2, op, nops, ops, ae), \
  xCM_(m1, vs, m2, op, nops, ops, ae), \
  xCM_(m1, vc, m2, op, nops, ops, ae), \
  xCM_(m1, hi, m2, op, nops, ops, ae), \
  xCM_(m1, ls, m2, op, nops, ops, ae), \
  xCM_(m1, ge, m2, op, nops, ops, ae), \
  xCM_(m1, lt, m2, op, nops, ops, ae), \
  xCM_(m1, gt, m2, op, nops, ops, ae), \
  xCM_(m1, le, m2, op, nops, ops, ae), \
  xCM_(m1, al, m2, op, nops, ops, ae)

#define UE(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

#define UF(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM_##op, N_MNEM_##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   version.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag) \
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
    THUMB_VARIANT, do_##enc, do_##enc }

#define NCE(mnem, op, nops, ops, enc) \
   NCE_tag(mnem, op, nops, ops, enc, OT_csuffix)

#define NCEF(mnem, op, nops, ops, enc) \
   NCE_tag(mnem, op, nops, ops, enc, OT_csuffixF)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag) \
  { #mnem, OPS##nops ops, tag, N_MNEM_##op, N_MNEM_##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define nCE(mnem, op, nops, ops, enc) \
   nCE_tag(mnem, op, nops, ops, enc, OT_csuffix)

#define nCEF(mnem, op, nops, ops, enc) \
   nCE_tag(mnem, op, nops, ops, enc, OT_csuffixF)

/* NOTE(review): restored from the standard gas sources (dropped by
   extraction) -- placeholder encoder for table slots with no handler.  */
#define do_0 0

/* Thumb-only, unconditional.  */
#define UT(mnem, op, nops, ops, te) TUE(mnem, 0, op, nops, ops, 0, te)
14062 static const struct asm_opcode insns
[] =
14064 #define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions. */
14065 #define THUMB_VARIANT &arm_ext_v4t
14067 TUE(trap
, 7ffdefe
, defe
, 0, (), noargs
, noargs
),
14069 tCE(and, 0000000, and, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
14070 tC3(ands
, 0100000, ands
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
14071 tCE(eor
, 0200000, eor
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
14072 tC3(eors
, 0300000, eors
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
14073 tCE(sub
, 0400000, sub
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
14074 tC3(subs
, 0500000, subs
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
14075 tCE(add
, 0800000, add
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
14076 tC3(adds
, 0900000, adds
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
14077 tCE(adc
, 0a00000
, adc
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
14078 tC3(adcs
, 0b00000, adcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
14079 tCE(sbc
, 0c00000
, sbc
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
14080 tC3(sbcs
, 0d00000
, sbcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
14081 tCE(orr
, 1800000, orr
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
14082 tC3(orrs
, 1900000, orrs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
14083 tCE(bic
, 1c00000
, bic
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
14084 tC3(bics
, 1d00000
, bics
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
14086 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
14087 for setting PSR flag bits. They are obsolete in V6 and do not
14088 have Thumb equivalents. */
14089 tCE(tst
, 1100000, tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
14090 tC3w(tsts
, 1100000, tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
14091 CL(tstp
, 110f000
, 2, (RR
, SH
), cmp
),
14092 tCE(cmp
, 1500000, cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
14093 tC3w(cmps
, 1500000, cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
14094 CL(cmpp
, 150f000
, 2, (RR
, SH
), cmp
),
14095 tCE(cmn
, 1700000, cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
14096 tC3w(cmns
, 1700000, cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
14097 CL(cmnp
, 170f000
, 2, (RR
, SH
), cmp
),
14099 tCE(mov
, 1a00000
, mov
, 2, (RR
, SH
), mov
, t_mov_cmp
),
14100 tC3(movs
, 1b00000
, movs
, 2, (RR
, SH
), mov
, t_mov_cmp
),
14101 tCE(mvn
, 1e00000
, mvn
, 2, (RR
, SH
), mov
, t_mvn_tst
),
14102 tC3(mvns
, 1f00000
, mvns
, 2, (RR
, SH
), mov
, t_mvn_tst
),
14104 tCE(ldr
, 4100000, ldr
, 2, (RR
, ADDRGLDR
),ldst
, t_ldst
),
14105 tC3(ldrb
, 4500000, ldrb
, 2, (RR
, ADDRGLDR
),ldst
, t_ldst
),
14106 tCE(str
, 4000000, str
, 2, (RR
, ADDRGLDR
),ldst
, t_ldst
),
14107 tC3(strb
, 4400000, strb
, 2, (RR
, ADDRGLDR
),ldst
, t_ldst
),
14109 tCE(stm
, 8800000, stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
14110 tC3(stmia
, 8800000, stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
14111 tC3(stmea
, 8800000, stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
14112 tCE(ldm
, 8900000, ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
14113 tC3(ldmia
, 8900000, ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
14114 tC3(ldmfd
, 8900000, ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
14116 TCE(swi
, f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
14117 TCE(svc
, f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
14118 tCE(b
, a000000
, b
, 1, (EXPr
), branch
, t_branch
),
14119 TCE(bl
, b000000
, f000f800
, 1, (EXPr
), bl
, t_branch23
),
14122 tCE(adr
, 28f0000
, adr
, 2, (RR
, EXP
), adr
, t_adr
),
14123 C3(adrl
, 28f0000
, 2, (RR
, EXP
), adrl
),
14124 tCE(nop
, 1a00000
, nop
, 1, (oI255c
), nop
, t_nop
),
14126 /* Thumb-compatibility pseudo ops. */
14127 tCE(lsl
, 1a00000
, lsl
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
14128 tC3(lsls
, 1b00000
, lsls
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
14129 tCE(lsr
, 1a00020
, lsr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
14130 tC3(lsrs
, 1b00020
, lsrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
14131 tCE(asr
, 1a00040
, asr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
14132 tC3(asrs
, 1b00040
, asrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
14133 tCE(ror
, 1a00060
, ror
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
14134 tC3(rors
, 1b00060
, rors
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
14135 tCE(neg
, 2600000, neg
, 2, (RR
, RR
), rd_rn
, t_neg
),
14136 tC3(negs
, 2700000, negs
, 2, (RR
, RR
), rd_rn
, t_neg
),
14137 tCE(push
, 92d0000
, push
, 1, (REGLST
), push_pop
, t_push_pop
),
14138 tCE(pop
, 8bd0000
, pop
, 1, (REGLST
), push_pop
, t_push_pop
),
14140 /* These may simplify to neg. */
14141 TCE(rsb
, 0600000, ebc00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
14142 TC3(rsbs
, 0700000, ebd00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
14144 TCE(rrx
, 1a00060
, ea4f0030
, 2, (RR
, RR
), rd_rm
, t_rd_rm
),
14145 TCE(rrxs
, 1b00060
, ea5f0030
, 2, (RR
, RR
), rd_rm
, t_rd_rm
),
14147 #undef THUMB_VARIANT
14148 #define THUMB_VARIANT &arm_ext_v6
14149 TCE(cpy
, 1a00000
, 4600, 2, (RR
, RR
), rd_rm
, t_cpy
),
14151 /* V1 instructions with no Thumb analogue prior to V6T2. */
14152 #undef THUMB_VARIANT
14153 #define THUMB_VARIANT &arm_ext_v6t2
14154 TCE(teq
, 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
14155 TC3w(teqs
, 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
14156 CL(teqp
, 130f000
, 2, (RR
, SH
), cmp
),
14158 TC3(ldrt
, 4300000, f8500e00
, 2, (RR
, ADDR
), ldstt
, t_ldstt
),
14159 TC3(ldrbt
, 4700000, f8100e00
, 2, (RR
, ADDR
), ldstt
, t_ldstt
),
14160 TC3(strt
, 4200000, f8400e00
, 2, (RR
, ADDR
), ldstt
, t_ldstt
),
14161 TC3(strbt
, 4600000, f8000e00
, 2, (RR
, ADDR
), ldstt
, t_ldstt
),
14163 TC3(stmdb
, 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
14164 TC3(stmfd
, 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
14166 TC3(ldmdb
, 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
14167 TC3(ldmea
, 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
14169 /* V1 instructions with no Thumb analogue at all. */
14170 CE(rsc
, 0e00000
, 3, (RR
, oRR
, SH
), arit
),
14171 C3(rscs
, 0f00000
, 3, (RR
, oRR
, SH
), arit
),
14173 C3(stmib
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
14174 C3(stmfa
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
14175 C3(stmda
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
14176 C3(stmed
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
14177 C3(ldmib
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
14178 C3(ldmed
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
14179 C3(ldmda
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
14180 C3(ldmfa
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
14183 #define ARM_VARIANT &arm_ext_v2 /* ARM 2 - multiplies. */
14184 #undef THUMB_VARIANT
14185 #define THUMB_VARIANT &arm_ext_v4t
14186 tCE(mul
, 0000090, mul
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
14187 tC3(muls
, 0100090, muls
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
14189 #undef THUMB_VARIANT
14190 #define THUMB_VARIANT &arm_ext_v6t2
14191 TCE(mla
, 0200090, fb000000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
14192 C3(mlas
, 0300090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
),
14194 /* Generic coprocessor instructions. */
14195 TCE(cdp
, e000000
, ee000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
14196 TCE(ldc
, c100000
, ec100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
14197 TC3(ldcl
, c500000
, ec500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
14198 TCE(stc
, c000000
, ec000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
14199 TC3(stcl
, c400000
, ec400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
14200 TCE(mcr
, e000010
, ee000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
14201 TCE(mrc
, e100010
, ee100010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
14204 #define ARM_VARIANT &arm_ext_v2s /* ARM 3 - swp instructions. */
14205 CE(swp
, 1000090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
14206 C3(swpb
, 1400090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
14209 #define ARM_VARIANT &arm_ext_v3 /* ARM 6 Status register instructions. */
14210 TCE(mrs
, 10f0000
, f3ef8000
, 2, (APSR_RR
, RVC_PSR
), mrs
, t_mrs
),
14211 TCE(msr
, 120f000
, f3808000
, 2, (RVC_PSR
, RR_EXi
), msr
, t_msr
),
14214 #define ARM_VARIANT &arm_ext_v3m /* ARM 7M long multiplies. */
14215 TCE(smull
, 0c00090
, fb800000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
14216 CM(smull
,s
, 0d00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
14217 TCE(umull
, 0800090, fba00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
14218 CM(umull
,s
, 0900090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
14219 TCE(smlal
, 0e00090
, fbc00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
14220 CM(smlal
,s
, 0f00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
14221 TCE(umlal
, 0a00090
, fbe00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
14222 CM(umlal
,s
, 0b00090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
14225 #define ARM_VARIANT &arm_ext_v4 /* ARM Architecture 4. */
14226 #undef THUMB_VARIANT
14227 #define THUMB_VARIANT &arm_ext_v4t
14228 tC3(ldrh
, 01000b0
, ldrh
, 2, (RR
, ADDRGLDRS
), ldstv4
, t_ldst
),
14229 tC3(strh
, 00000b0
, strh
, 2, (RR
, ADDRGLDRS
), ldstv4
, t_ldst
),
14230 tC3(ldrsh
, 01000f0
, ldrsh
, 2, (RR
, ADDRGLDRS
), ldstv4
, t_ldst
),
14231 tC3(ldrsb
, 01000d0
, ldrsb
, 2, (RR
, ADDRGLDRS
), ldstv4
, t_ldst
),
14232 tCM(ld
,sh
, 01000f0
, ldrsh
, 2, (RR
, ADDRGLDRS
), ldstv4
, t_ldst
),
14233 tCM(ld
,sb
, 01000d0
, ldrsb
, 2, (RR
, ADDRGLDRS
), ldstv4
, t_ldst
),
14236 #define ARM_VARIANT &arm_ext_v4t_5
14237 /* ARM Architecture 4T. */
14238 /* Note: bx (and blx) are required on V5, even if the processor does
14239 not support Thumb. */
14240 TCE(bx
, 12fff10
, 4700, 1, (RR
), bx
, t_bx
),
14243 #define ARM_VARIANT &arm_ext_v5 /* ARM Architecture 5T. */
14244 #undef THUMB_VARIANT
14245 #define THUMB_VARIANT &arm_ext_v5t
14246 /* Note: blx has 2 variants; the .value coded here is for
14247 BLX(2). Only this variant has conditional execution. */
14248 TCE(blx
, 12fff30
, 4780, 1, (RR_EXr
), blx
, t_blx
),
14249 TUE(bkpt
, 1200070, be00
, 1, (oIffffb
), bkpt
, t_bkpt
),
14251 #undef THUMB_VARIANT
14252 #define THUMB_VARIANT &arm_ext_v6t2
14253 TCE(clz
, 16f0f10
, fab0f080
, 2, (RRnpc
, RRnpc
), rd_rm
, t_clz
),
14254 TUF(ldc2
, c100000
, fc100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
14255 TUF(ldc2l
, c500000
, fc500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
14256 TUF(stc2
, c000000
, fc000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
14257 TUF(stc2l
, c400000
, fc400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
14258 TUF(cdp2
, e000000
, fe000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
14259 TUF(mcr2
, e000010
, fe000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
14260 TUF(mrc2
, e100010
, fe100010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
14263 #define ARM_VARIANT &arm_ext_v5exp /* ARM Architecture 5TExP. */
14264 TCE(smlabb
, 1000080, fb100000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
14265 TCE(smlatb
, 10000a0
, fb100020
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
14266 TCE(smlabt
, 10000c0
, fb100010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
14267 TCE(smlatt
, 10000e0
, fb100030
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
14269 TCE(smlawb
, 1200080, fb300000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
14270 TCE(smlawt
, 12000c0
, fb300010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
14272 TCE(smlalbb
, 1400080, fbc00080
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
14273 TCE(smlaltb
, 14000a0
, fbc000a0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
14274 TCE(smlalbt
, 14000c0
, fbc00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
14275 TCE(smlaltt
, 14000e0
, fbc000b0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
14277 TCE(smulbb
, 1600080, fb10f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
14278 TCE(smultb
, 16000a0
, fb10f020
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
14279 TCE(smulbt
, 16000c0
, fb10f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
14280 TCE(smultt
, 16000e0
, fb10f030
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
14282 TCE(smulwb
, 12000a0
, fb30f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
14283 TCE(smulwt
, 12000e0
, fb30f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
14285 TCE(qadd
, 1000050, fa80f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_rd_rm_rn
),
14286 TCE(qdadd
, 1400050, fa80f090
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_rd_rm_rn
),
14287 TCE(qsub
, 1200050, fa80f0a0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_rd_rm_rn
),
14288 TCE(qdsub
, 1600050, fa80f0b0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_rd_rm_rn
),
14291 #define ARM_VARIANT &arm_ext_v5e /* ARM Architecture 5TE. */
14292 TUF(pld
, 450f000
, f810f000
, 1, (ADDR
), pld
, t_pld
),
14293 TC3(ldrd
, 00000d0
, e8500000
, 3, (RRnpc
, oRRnpc
, ADDRGLDRS
), ldrd
, t_ldstd
),
14294 TC3(strd
, 00000f0
, e8400000
, 3, (RRnpc
, oRRnpc
, ADDRGLDRS
), ldrd
, t_ldstd
),
14296 TCE(mcrr
, c400000
, ec400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
14297 TCE(mrrc
, c500000
, ec500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
14300 #define ARM_VARIANT &arm_ext_v5j /* ARM Architecture 5TEJ. */
14301 TCE(bxj
, 12fff20
, f3c08f00
, 1, (RR
), bxj
, t_bxj
),
14304 #define ARM_VARIANT &arm_ext_v6 /* ARM V6. */
14305 #undef THUMB_VARIANT
14306 #define THUMB_VARIANT &arm_ext_v6
14307 TUF(cpsie
, 1080000, b660
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
14308 TUF(cpsid
, 10c0000
, b670
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
14309 tCE(rev
, 6bf0f30
, rev
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
14310 tCE(rev16
, 6bf0fb0
, rev16
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
14311 tCE(revsh
, 6ff0fb0
, revsh
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
14312 tCE(sxth
, 6bf0070
, sxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
14313 tCE(uxth
, 6ff0070
, uxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
14314 tCE(sxtb
, 6af0070
, sxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
14315 tCE(uxtb
, 6ef0070
, uxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
14316 TUF(setend
, 1010000, b650
, 1, (ENDI
), setend
, t_setend
),
14318 #undef THUMB_VARIANT
14319 #define THUMB_VARIANT &arm_ext_v6t2
14320 TCE(ldrex
, 1900f9f
, e8500f00
, 2, (RRnpc
, ADDR
), ldrex
, t_ldrex
),
14321 TCE(strex
, 1800f90
, e8400000
, 3, (RRnpc
, RRnpc
, ADDR
), strex
, t_strex
),
14322 TUF(mcrr2
, c400000
, fc400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
14323 TUF(mrrc2
, c500000
, fc500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
14325 TCE(ssat
, 6a00010
, f3000000
, 4, (RRnpc
, I32
, RRnpc
, oSHllar
),ssat
, t_ssat
),
14326 TCE(usat
, 6e00010
, f3800000
, 4, (RRnpc
, I31
, RRnpc
, oSHllar
),usat
, t_usat
),
14328 /* ARM V6 not included in V7M (eg. integer SIMD). */
14329 #undef THUMB_VARIANT
14330 #define THUMB_VARIANT &arm_ext_v6_notm
14331 TUF(cps
, 1020000, f3af8100
, 1, (I31b
), imm0
, t_cps
),
14332 TCE(pkhbt
, 6800010, eac00000
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHll
), pkhbt
, t_pkhbt
),
14333 TCE(pkhtb
, 6800050, eac00020
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHar
), pkhtb
, t_pkhtb
),
14334 TCE(qadd16
, 6200f10
, fa90f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14335 TCE(qadd8
, 6200f90
, fa80f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14336 TCE(qaddsubx
, 6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14337 TCE(qsub16
, 6200f70
, fad0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14338 TCE(qsub8
, 6200ff0
, fac0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14339 TCE(qsubaddx
, 6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14340 TCE(sadd16
, 6100f10
, fa90f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14341 TCE(sadd8
, 6100f90
, fa80f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14342 TCE(saddsubx
, 6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14343 TCE(shadd16
, 6300f10
, fa90f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14344 TCE(shadd8
, 6300f90
, fa80f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14345 TCE(shaddsubx
, 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14346 TCE(shsub16
, 6300f70
, fad0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14347 TCE(shsub8
, 6300ff0
, fac0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14348 TCE(shsubaddx
, 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14349 TCE(ssub16
, 6100f70
, fad0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14350 TCE(ssub8
, 6100ff0
, fac0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14351 TCE(ssubaddx
, 6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14352 TCE(uadd16
, 6500f10
, fa90f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14353 TCE(uadd8
, 6500f90
, fa80f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14354 TCE(uaddsubx
, 6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14355 TCE(uhadd16
, 6700f10
, fa90f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14356 TCE(uhadd8
, 6700f90
, fa80f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14357 TCE(uhaddsubx
, 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14358 TCE(uhsub16
, 6700f70
, fad0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14359 TCE(uhsub8
, 6700ff0
, fac0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14360 TCE(uhsubaddx
, 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14361 TCE(uqadd16
, 6600f10
, fa90f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14362 TCE(uqadd8
, 6600f90
, fa80f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14363 TCE(uqaddsubx
, 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14364 TCE(uqsub16
, 6600f70
, fad0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14365 TCE(uqsub8
, 6600ff0
, fac0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14366 TCE(uqsubaddx
, 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14367 TCE(usub16
, 6500f70
, fad0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14368 TCE(usub8
, 6500ff0
, fac0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14369 TCE(usubaddx
, 6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14370 TUF(rfeia
, 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
14371 UF(rfeib
, 9900a00
, 1, (RRw
), rfe
),
14372 UF(rfeda
, 8100a00
, 1, (RRw
), rfe
),
14373 TUF(rfedb
, 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
14374 TUF(rfefd
, 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
14375 UF(rfefa
, 9900a00
, 1, (RRw
), rfe
),
14376 UF(rfeea
, 8100a00
, 1, (RRw
), rfe
),
14377 TUF(rfeed
, 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
14378 TCE(sxtah
, 6b00070
, fa00f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
14379 TCE(sxtab16
, 6800070, fa20f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
14380 TCE(sxtab
, 6a00070
, fa40f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
14381 TCE(sxtb16
, 68f0070
, fa2ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
14382 TCE(uxtah
, 6f00070
, fa10f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
14383 TCE(uxtab16
, 6c00070
, fa30f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
14384 TCE(uxtab
, 6e00070
, fa50f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
14385 TCE(uxtb16
, 6cf0070
, fa3ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
14386 TCE(sel
, 6800fb0
, faa0f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14387 TCE(smlad
, 7000010, fb200000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
14388 TCE(smladx
, 7000030, fb200010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
14389 TCE(smlald
, 7400010, fbc000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
14390 TCE(smlaldx
, 7400030, fbc000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
14391 TCE(smlsd
, 7000050, fb400000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
14392 TCE(smlsdx
, 7000070, fb400010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
14393 TCE(smlsld
, 7400050, fbd000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
14394 TCE(smlsldx
, 7400070, fbd000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
14395 TCE(smmla
, 7500010, fb500000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
14396 TCE(smmlar
, 7500030, fb500010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
14397 TCE(smmls
, 75000d0
, fb600000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
14398 TCE(smmlsr
, 75000f0
, fb600010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
14399 TCE(smmul
, 750f010
, fb50f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
14400 TCE(smmulr
, 750f030
, fb50f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
14401 TCE(smuad
, 700f010
, fb20f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
14402 TCE(smuadx
, 700f030
, fb20f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
14403 TCE(smusd
, 700f050
, fb40f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
14404 TCE(smusdx
, 700f070
, fb40f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
14405 TUF(srsia
, 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
14406 UF(srsib
, 9c00500
, 2, (oRRw
, I31w
), srs
),
14407 UF(srsda
, 8400500, 2, (oRRw
, I31w
), srs
),
14408 TUF(srsdb
, 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
14409 TCE(ssat16
, 6a00f30
, f3200000
, 3, (RRnpc
, I16
, RRnpc
), ssat16
, t_ssat16
),
14410 TCE(umaal
, 0400090, fbe00060
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
, t_mlal
),
14411 TCE(usad8
, 780f010
, fb70f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
14412 TCE(usada8
, 7800010, fb700000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
14413 TCE(usat16
, 6e00f30
, f3a00000
, 3, (RRnpc
, I15
, RRnpc
), usat16
, t_usat16
),
14416 #define ARM_VARIANT &arm_ext_v6k
14417 #undef THUMB_VARIANT
14418 #define THUMB_VARIANT &arm_ext_v6k
14419 tCE(yield
, 320f001
, yield
, 0, (), noargs
, t_hint
),
14420 tCE(wfe
, 320f002
, wfe
, 0, (), noargs
, t_hint
),
14421 tCE(wfi
, 320f003
, wfi
, 0, (), noargs
, t_hint
),
14422 tCE(sev
, 320f004
, sev
, 0, (), noargs
, t_hint
),
14424 #undef THUMB_VARIANT
14425 #define THUMB_VARIANT &arm_ext_v6_notm
14426 TCE(ldrexd
, 1b00f9f
, e8d0007f
, 3, (RRnpc
, oRRnpc
, RRnpcb
), ldrexd
, t_ldrexd
),
14427 TCE(strexd
, 1a00f90
, e8c00070
, 4, (RRnpc
, RRnpc
, oRRnpc
, RRnpcb
), strexd
, t_strexd
),
14429 #undef THUMB_VARIANT
14430 #define THUMB_VARIANT &arm_ext_v6t2
14431 TCE(ldrexb
, 1d00f9f
, e8d00f4f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
14432 TCE(ldrexh
, 1f00f9f
, e8d00f5f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
14433 TCE(strexb
, 1c00f90
, e8c00f40
, 3, (RRnpc
, RRnpc
, ADDR
), strex
, rm_rd_rn
),
14434 TCE(strexh
, 1e00f90
, e8c00f50
, 3, (RRnpc
, RRnpc
, ADDR
), strex
, rm_rd_rn
),
14435 TUF(clrex
, 57ff01f
, f3bf8f2f
, 0, (), noargs
, noargs
),
14438 #define ARM_VARIANT &arm_ext_v6z
14439 TCE(smc
, 1600070, f7f08000
, 1, (EXPi
), smc
, t_smc
),
14442 #define ARM_VARIANT &arm_ext_v6t2
14443 TCE(bfc
, 7c0001f
, f36f0000
, 3, (RRnpc
, I31
, I32
), bfc
, t_bfc
),
14444 TCE(bfi
, 7c00010
, f3600000
, 4, (RRnpc
, RRnpc_I0
, I31
, I32
), bfi
, t_bfi
),
14445 TCE(sbfx
, 7a00050
, f3400000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
14446 TCE(ubfx
, 7e00050
, f3c00000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
14448 TCE(mls
, 0600090, fb000010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
14449 TCE(movw
, 3000000, f2400000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
14450 TCE(movt
, 3400000, f2c00000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
14451 TCE(rbit
, 6ff0f30
, fa90f0a0
, 2, (RR
, RR
), rd_rm
, t_rbit
),
14453 TC3(ldrht
, 03000b0
, f8300e00
, 2, (RR
, ADDR
), ldsttv4
, t_ldstt
),
14454 TC3(ldrsht
, 03000f0
, f9300e00
, 2, (RR
, ADDR
), ldsttv4
, t_ldstt
),
14455 TC3(ldrsbt
, 03000d0
, f9100e00
, 2, (RR
, ADDR
), ldsttv4
, t_ldstt
),
14456 TC3(strht
, 02000b0
, f8200e00
, 2, (RR
, ADDR
), ldsttv4
, t_ldstt
),
14458 UT(cbnz
, b900
, 2, (RR
, EXP
), t_cbz
),
14459 UT(cbz
, b100
, 2, (RR
, EXP
), t_cbz
),
14460 /* ARM does not really have an IT instruction, so always allow it. */
14462 #define ARM_VARIANT &arm_ext_v1
14463 TUE(it
, 0, bf08
, 1, (COND
), it
, t_it
),
14464 TUE(itt
, 0, bf0c
, 1, (COND
), it
, t_it
),
14465 TUE(ite
, 0, bf04
, 1, (COND
), it
, t_it
),
14466 TUE(ittt
, 0, bf0e
, 1, (COND
), it
, t_it
),
14467 TUE(itet
, 0, bf06
, 1, (COND
), it
, t_it
),
14468 TUE(itte
, 0, bf0a
, 1, (COND
), it
, t_it
),
14469 TUE(itee
, 0, bf02
, 1, (COND
), it
, t_it
),
14470 TUE(itttt
, 0, bf0f
, 1, (COND
), it
, t_it
),
14471 TUE(itett
, 0, bf07
, 1, (COND
), it
, t_it
),
14472 TUE(ittet
, 0, bf0b
, 1, (COND
), it
, t_it
),
14473 TUE(iteet
, 0, bf03
, 1, (COND
), it
, t_it
),
14474 TUE(ittte
, 0, bf0d
, 1, (COND
), it
, t_it
),
14475 TUE(itete
, 0, bf05
, 1, (COND
), it
, t_it
),
14476 TUE(ittee
, 0, bf09
, 1, (COND
), it
, t_it
),
14477 TUE(iteee
, 0, bf01
, 1, (COND
), it
, t_it
),
14479 /* Thumb2 only instructions. */
14481 #define ARM_VARIANT NULL
14483 TCE(addw
, 0, f2000000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
14484 TCE(subw
, 0, f2a00000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
14485 TCE(tbb
, 0, e8d0f000
, 1, (TB
), 0, t_tb
),
14486 TCE(tbh
, 0, e8d0f010
, 1, (TB
), 0, t_tb
),
14487 TCE(orn
, 0, ea600000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
14488 TCE(orns
, 0, ea700000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
14490 /* Thumb-2 hardware division instructions (R and M profiles only). */
14491 #undef THUMB_VARIANT
14492 #define THUMB_VARIANT &arm_ext_div
14493 TCE(sdiv
, 0, fb90f0f0
, 3, (RR
, oRR
, RR
), 0, t_div
),
14494 TCE(udiv
, 0, fbb0f0f0
, 3, (RR
, oRR
, RR
), 0, t_div
),
14496 /* ARM V7 instructions. */
14498 #define ARM_VARIANT &arm_ext_v7
14499 #undef THUMB_VARIANT
14500 #define THUMB_VARIANT &arm_ext_v7
14501 TUF(pldw
, 410f000
, f830f000
, 1, (ADDR
), pld
, t_pld
),
14502 TUF(pli
, 450f000
, f910f000
, 1, (ADDR
), pli
, t_pld
),
14503 TCE(dbg
, 320f0f0
, f3af80f0
, 1, (I15
), dbg
, t_dbg
),
14504 TUF(dmb
, 57ff050
, f3bf8f50
, 1, (oBARRIER
), barrier
, t_barrier
),
14505 TUF(dsb
, 57ff040
, f3bf8f40
, 1, (oBARRIER
), barrier
, t_barrier
),
14506 TUF(isb
, 57ff060
, f3bf8f60
, 1, (oBARRIER
), barrier
, t_barrier
),
14509 #define ARM_VARIANT &fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
14510 cCE(wfs
, e200110
, 1, (RR
), rd
),
14511 cCE(rfs
, e300110
, 1, (RR
), rd
),
14512 cCE(wfc
, e400110
, 1, (RR
), rd
),
14513 cCE(rfc
, e500110
, 1, (RR
), rd
),
14515 cCL(ldfs
, c100100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
14516 cCL(ldfd
, c108100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
14517 cCL(ldfe
, c500100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
14518 cCL(ldfp
, c508100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
14520 cCL(stfs
, c000100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
14521 cCL(stfd
, c008100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
14522 cCL(stfe
, c400100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
14523 cCL(stfp
, c408100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
14525 cCL(mvfs
, e008100
, 2, (RF
, RF_IF
), rd_rm
),
14526 cCL(mvfsp
, e008120
, 2, (RF
, RF_IF
), rd_rm
),
14527 cCL(mvfsm
, e008140
, 2, (RF
, RF_IF
), rd_rm
),
14528 cCL(mvfsz
, e008160
, 2, (RF
, RF_IF
), rd_rm
),
14529 cCL(mvfd
, e008180
, 2, (RF
, RF_IF
), rd_rm
),
14530 cCL(mvfdp
, e0081a0
, 2, (RF
, RF_IF
), rd_rm
),
14531 cCL(mvfdm
, e0081c0
, 2, (RF
, RF_IF
), rd_rm
),
14532 cCL(mvfdz
, e0081e0
, 2, (RF
, RF_IF
), rd_rm
),
14533 cCL(mvfe
, e088100
, 2, (RF
, RF_IF
), rd_rm
),
14534 cCL(mvfep
, e088120
, 2, (RF
, RF_IF
), rd_rm
),
14535 cCL(mvfem
, e088140
, 2, (RF
, RF_IF
), rd_rm
),
14536 cCL(mvfez
, e088160
, 2, (RF
, RF_IF
), rd_rm
),
14538 cCL(mnfs
, e108100
, 2, (RF
, RF_IF
), rd_rm
),
14539 cCL(mnfsp
, e108120
, 2, (RF
, RF_IF
), rd_rm
),
14540 cCL(mnfsm
, e108140
, 2, (RF
, RF_IF
), rd_rm
),
14541 cCL(mnfsz
, e108160
, 2, (RF
, RF_IF
), rd_rm
),
14542 cCL(mnfd
, e108180
, 2, (RF
, RF_IF
), rd_rm
),
14543 cCL(mnfdp
, e1081a0
, 2, (RF
, RF_IF
), rd_rm
),
14544 cCL(mnfdm
, e1081c0
, 2, (RF
, RF_IF
), rd_rm
),
14545 cCL(mnfdz
, e1081e0
, 2, (RF
, RF_IF
), rd_rm
),
14546 cCL(mnfe
, e188100
, 2, (RF
, RF_IF
), rd_rm
),
14547 cCL(mnfep
, e188120
, 2, (RF
, RF_IF
), rd_rm
),
14548 cCL(mnfem
, e188140
, 2, (RF
, RF_IF
), rd_rm
),
14549 cCL(mnfez
, e188160
, 2, (RF
, RF_IF
), rd_rm
),
14551 cCL(abss
, e208100
, 2, (RF
, RF_IF
), rd_rm
),
14552 cCL(abssp
, e208120
, 2, (RF
, RF_IF
), rd_rm
),
14553 cCL(abssm
, e208140
, 2, (RF
, RF_IF
), rd_rm
),
14554 cCL(abssz
, e208160
, 2, (RF
, RF_IF
), rd_rm
),
14555 cCL(absd
, e208180
, 2, (RF
, RF_IF
), rd_rm
),
14556 cCL(absdp
, e2081a0
, 2, (RF
, RF_IF
), rd_rm
),
14557 cCL(absdm
, e2081c0
, 2, (RF
, RF_IF
), rd_rm
),
14558 cCL(absdz
, e2081e0
, 2, (RF
, RF_IF
), rd_rm
),
14559 cCL(abse
, e288100
, 2, (RF
, RF_IF
), rd_rm
),
14560 cCL(absep
, e288120
, 2, (RF
, RF_IF
), rd_rm
),
14561 cCL(absem
, e288140
, 2, (RF
, RF_IF
), rd_rm
),
14562 cCL(absez
, e288160
, 2, (RF
, RF_IF
), rd_rm
),
14564 cCL(rnds
, e308100
, 2, (RF
, RF_IF
), rd_rm
),
14565 cCL(rndsp
, e308120
, 2, (RF
, RF_IF
), rd_rm
),
14566 cCL(rndsm
, e308140
, 2, (RF
, RF_IF
), rd_rm
),
14567 cCL(rndsz
, e308160
, 2, (RF
, RF_IF
), rd_rm
),
14568 cCL(rndd
, e308180
, 2, (RF
, RF_IF
), rd_rm
),
14569 cCL(rnddp
, e3081a0
, 2, (RF
, RF_IF
), rd_rm
),
14570 cCL(rnddm
, e3081c0
, 2, (RF
, RF_IF
), rd_rm
),
14571 cCL(rnddz
, e3081e0
, 2, (RF
, RF_IF
), rd_rm
),
14572 cCL(rnde
, e388100
, 2, (RF
, RF_IF
), rd_rm
),
14573 cCL(rndep
, e388120
, 2, (RF
, RF_IF
), rd_rm
),
14574 cCL(rndem
, e388140
, 2, (RF
, RF_IF
), rd_rm
),
14575 cCL(rndez
, e388160
, 2, (RF
, RF_IF
), rd_rm
),
14577 cCL(sqts
, e408100
, 2, (RF
, RF_IF
), rd_rm
),
14578 cCL(sqtsp
, e408120
, 2, (RF
, RF_IF
), rd_rm
),
14579 cCL(sqtsm
, e408140
, 2, (RF
, RF_IF
), rd_rm
),
14580 cCL(sqtsz
, e408160
, 2, (RF
, RF_IF
), rd_rm
),
14581 cCL(sqtd
, e408180
, 2, (RF
, RF_IF
), rd_rm
),
14582 cCL(sqtdp
, e4081a0
, 2, (RF
, RF_IF
), rd_rm
),
14583 cCL(sqtdm
, e4081c0
, 2, (RF
, RF_IF
), rd_rm
),
14584 cCL(sqtdz
, e4081e0
, 2, (RF
, RF_IF
), rd_rm
),
14585 cCL(sqte
, e488100
, 2, (RF
, RF_IF
), rd_rm
),
14586 cCL(sqtep
, e488120
, 2, (RF
, RF_IF
), rd_rm
),
14587 cCL(sqtem
, e488140
, 2, (RF
, RF_IF
), rd_rm
),
14588 cCL(sqtez
, e488160
, 2, (RF
, RF_IF
), rd_rm
),
14590 cCL(logs
, e508100
, 2, (RF
, RF_IF
), rd_rm
),
14591 cCL(logsp
, e508120
, 2, (RF
, RF_IF
), rd_rm
),
14592 cCL(logsm
, e508140
, 2, (RF
, RF_IF
), rd_rm
),
14593 cCL(logsz
, e508160
, 2, (RF
, RF_IF
), rd_rm
),
14594 cCL(logd
, e508180
, 2, (RF
, RF_IF
), rd_rm
),
14595 cCL(logdp
, e5081a0
, 2, (RF
, RF_IF
), rd_rm
),
14596 cCL(logdm
, e5081c0
, 2, (RF
, RF_IF
), rd_rm
),
14597 cCL(logdz
, e5081e0
, 2, (RF
, RF_IF
), rd_rm
),
14598 cCL(loge
, e588100
, 2, (RF
, RF_IF
), rd_rm
),
14599 cCL(logep
, e588120
, 2, (RF
, RF_IF
), rd_rm
),
14600 cCL(logem
, e588140
, 2, (RF
, RF_IF
), rd_rm
),
14601 cCL(logez
, e588160
, 2, (RF
, RF_IF
), rd_rm
),
14603 cCL(lgns
, e608100
, 2, (RF
, RF_IF
), rd_rm
),
14604 cCL(lgnsp
, e608120
, 2, (RF
, RF_IF
), rd_rm
),
14605 cCL(lgnsm
, e608140
, 2, (RF
, RF_IF
), rd_rm
),
14606 cCL(lgnsz
, e608160
, 2, (RF
, RF_IF
), rd_rm
),
14607 cCL(lgnd
, e608180
, 2, (RF
, RF_IF
), rd_rm
),
14608 cCL(lgndp
, e6081a0
, 2, (RF
, RF_IF
), rd_rm
),
14609 cCL(lgndm
, e6081c0
, 2, (RF
, RF_IF
), rd_rm
),
14610 cCL(lgndz
, e6081e0
, 2, (RF
, RF_IF
), rd_rm
),
14611 cCL(lgne
, e688100
, 2, (RF
, RF_IF
), rd_rm
),
14612 cCL(lgnep
, e688120
, 2, (RF
, RF_IF
), rd_rm
),
14613 cCL(lgnem
, e688140
, 2, (RF
, RF_IF
), rd_rm
),
14614 cCL(lgnez
, e688160
, 2, (RF
, RF_IF
), rd_rm
),
14616 cCL(exps
, e708100
, 2, (RF
, RF_IF
), rd_rm
),
14617 cCL(expsp
, e708120
, 2, (RF
, RF_IF
), rd_rm
),
14618 cCL(expsm
, e708140
, 2, (RF
, RF_IF
), rd_rm
),
14619 cCL(expsz
, e708160
, 2, (RF
, RF_IF
), rd_rm
),
14620 cCL(expd
, e708180
, 2, (RF
, RF_IF
), rd_rm
),
14621 cCL(expdp
, e7081a0
, 2, (RF
, RF_IF
), rd_rm
),
14622 cCL(expdm
, e7081c0
, 2, (RF
, RF_IF
), rd_rm
),
14623 cCL(expdz
, e7081e0
, 2, (RF
, RF_IF
), rd_rm
),
14624 cCL(expe
, e788100
, 2, (RF
, RF_IF
), rd_rm
),
14625 cCL(expep
, e788120
, 2, (RF
, RF_IF
), rd_rm
),
14626 cCL(expem
, e788140
, 2, (RF
, RF_IF
), rd_rm
),
14627 cCL(expdz
, e788160
, 2, (RF
, RF_IF
), rd_rm
),
14629 cCL(sins
, e808100
, 2, (RF
, RF_IF
), rd_rm
),
14630 cCL(sinsp
, e808120
, 2, (RF
, RF_IF
), rd_rm
),
14631 cCL(sinsm
, e808140
, 2, (RF
, RF_IF
), rd_rm
),
14632 cCL(sinsz
, e808160
, 2, (RF
, RF_IF
), rd_rm
),
14633 cCL(sind
, e808180
, 2, (RF
, RF_IF
), rd_rm
),
14634 cCL(sindp
, e8081a0
, 2, (RF
, RF_IF
), rd_rm
),
14635 cCL(sindm
, e8081c0
, 2, (RF
, RF_IF
), rd_rm
),
14636 cCL(sindz
, e8081e0
, 2, (RF
, RF_IF
), rd_rm
),
14637 cCL(sine
, e888100
, 2, (RF
, RF_IF
), rd_rm
),
14638 cCL(sinep
, e888120
, 2, (RF
, RF_IF
), rd_rm
),
14639 cCL(sinem
, e888140
, 2, (RF
, RF_IF
), rd_rm
),
14640 cCL(sinez
, e888160
, 2, (RF
, RF_IF
), rd_rm
),
14642 cCL(coss
, e908100
, 2, (RF
, RF_IF
), rd_rm
),
14643 cCL(cossp
, e908120
, 2, (RF
, RF_IF
), rd_rm
),
14644 cCL(cossm
, e908140
, 2, (RF
, RF_IF
), rd_rm
),
14645 cCL(cossz
, e908160
, 2, (RF
, RF_IF
), rd_rm
),
14646 cCL(cosd
, e908180
, 2, (RF
, RF_IF
), rd_rm
),
14647 cCL(cosdp
, e9081a0
, 2, (RF
, RF_IF
), rd_rm
),
14648 cCL(cosdm
, e9081c0
, 2, (RF
, RF_IF
), rd_rm
),
14649 cCL(cosdz
, e9081e0
, 2, (RF
, RF_IF
), rd_rm
),
14650 cCL(cose
, e988100
, 2, (RF
, RF_IF
), rd_rm
),
14651 cCL(cosep
, e988120
, 2, (RF
, RF_IF
), rd_rm
),
14652 cCL(cosem
, e988140
, 2, (RF
, RF_IF
), rd_rm
),
14653 cCL(cosez
, e988160
, 2, (RF
, RF_IF
), rd_rm
),
14655 cCL(tans
, ea08100
, 2, (RF
, RF_IF
), rd_rm
),
14656 cCL(tansp
, ea08120
, 2, (RF
, RF_IF
), rd_rm
),
14657 cCL(tansm
, ea08140
, 2, (RF
, RF_IF
), rd_rm
),
14658 cCL(tansz
, ea08160
, 2, (RF
, RF_IF
), rd_rm
),
14659 cCL(tand
, ea08180
, 2, (RF
, RF_IF
), rd_rm
),
14660 cCL(tandp
, ea081a0
, 2, (RF
, RF_IF
), rd_rm
),
14661 cCL(tandm
, ea081c0
, 2, (RF
, RF_IF
), rd_rm
),
14662 cCL(tandz
, ea081e0
, 2, (RF
, RF_IF
), rd_rm
),
14663 cCL(tane
, ea88100
, 2, (RF
, RF_IF
), rd_rm
),
14664 cCL(tanep
, ea88120
, 2, (RF
, RF_IF
), rd_rm
),
14665 cCL(tanem
, ea88140
, 2, (RF
, RF_IF
), rd_rm
),
14666 cCL(tanez
, ea88160
, 2, (RF
, RF_IF
), rd_rm
),
14668 cCL(asns
, eb08100
, 2, (RF
, RF_IF
), rd_rm
),
14669 cCL(asnsp
, eb08120
, 2, (RF
, RF_IF
), rd_rm
),
14670 cCL(asnsm
, eb08140
, 2, (RF
, RF_IF
), rd_rm
),
14671 cCL(asnsz
, eb08160
, 2, (RF
, RF_IF
), rd_rm
),
14672 cCL(asnd
, eb08180
, 2, (RF
, RF_IF
), rd_rm
),
14673 cCL(asndp
, eb081a0
, 2, (RF
, RF_IF
), rd_rm
),
14674 cCL(asndm
, eb081c0
, 2, (RF
, RF_IF
), rd_rm
),
14675 cCL(asndz
, eb081e0
, 2, (RF
, RF_IF
), rd_rm
),
14676 cCL(asne
, eb88100
, 2, (RF
, RF_IF
), rd_rm
),
14677 cCL(asnep
, eb88120
, 2, (RF
, RF_IF
), rd_rm
),
14678 cCL(asnem
, eb88140
, 2, (RF
, RF_IF
), rd_rm
),
14679 cCL(asnez
, eb88160
, 2, (RF
, RF_IF
), rd_rm
),
14681 cCL(acss
, ec08100
, 2, (RF
, RF_IF
), rd_rm
),
14682 cCL(acssp
, ec08120
, 2, (RF
, RF_IF
), rd_rm
),
14683 cCL(acssm
, ec08140
, 2, (RF
, RF_IF
), rd_rm
),
14684 cCL(acssz
, ec08160
, 2, (RF
, RF_IF
), rd_rm
),
14685 cCL(acsd
, ec08180
, 2, (RF
, RF_IF
), rd_rm
),
14686 cCL(acsdp
, ec081a0
, 2, (RF
, RF_IF
), rd_rm
),
14687 cCL(acsdm
, ec081c0
, 2, (RF
, RF_IF
), rd_rm
),
14688 cCL(acsdz
, ec081e0
, 2, (RF
, RF_IF
), rd_rm
),
14689 cCL(acse
, ec88100
, 2, (RF
, RF_IF
), rd_rm
),
14690 cCL(acsep
, ec88120
, 2, (RF
, RF_IF
), rd_rm
),
14691 cCL(acsem
, ec88140
, 2, (RF
, RF_IF
), rd_rm
),
14692 cCL(acsez
, ec88160
, 2, (RF
, RF_IF
), rd_rm
),
14694 cCL(atns
, ed08100
, 2, (RF
, RF_IF
), rd_rm
),
14695 cCL(atnsp
, ed08120
, 2, (RF
, RF_IF
), rd_rm
),
14696 cCL(atnsm
, ed08140
, 2, (RF
, RF_IF
), rd_rm
),
14697 cCL(atnsz
, ed08160
, 2, (RF
, RF_IF
), rd_rm
),
14698 cCL(atnd
, ed08180
, 2, (RF
, RF_IF
), rd_rm
),
14699 cCL(atndp
, ed081a0
, 2, (RF
, RF_IF
), rd_rm
),
14700 cCL(atndm
, ed081c0
, 2, (RF
, RF_IF
), rd_rm
),
14701 cCL(atndz
, ed081e0
, 2, (RF
, RF_IF
), rd_rm
),
14702 cCL(atne
, ed88100
, 2, (RF
, RF_IF
), rd_rm
),
14703 cCL(atnep
, ed88120
, 2, (RF
, RF_IF
), rd_rm
),
14704 cCL(atnem
, ed88140
, 2, (RF
, RF_IF
), rd_rm
),
14705 cCL(atnez
, ed88160
, 2, (RF
, RF_IF
), rd_rm
),
14707 cCL(urds
, ee08100
, 2, (RF
, RF_IF
), rd_rm
),
14708 cCL(urdsp
, ee08120
, 2, (RF
, RF_IF
), rd_rm
),
14709 cCL(urdsm
, ee08140
, 2, (RF
, RF_IF
), rd_rm
),
14710 cCL(urdsz
, ee08160
, 2, (RF
, RF_IF
), rd_rm
),
14711 cCL(urdd
, ee08180
, 2, (RF
, RF_IF
), rd_rm
),
14712 cCL(urddp
, ee081a0
, 2, (RF
, RF_IF
), rd_rm
),
14713 cCL(urddm
, ee081c0
, 2, (RF
, RF_IF
), rd_rm
),
14714 cCL(urddz
, ee081e0
, 2, (RF
, RF_IF
), rd_rm
),
14715 cCL(urde
, ee88100
, 2, (RF
, RF_IF
), rd_rm
),
14716 cCL(urdep
, ee88120
, 2, (RF
, RF_IF
), rd_rm
),
14717 cCL(urdem
, ee88140
, 2, (RF
, RF_IF
), rd_rm
),
14718 cCL(urdez
, ee88160
, 2, (RF
, RF_IF
), rd_rm
),
14720 cCL(nrms
, ef08100
, 2, (RF
, RF_IF
), rd_rm
),
14721 cCL(nrmsp
, ef08120
, 2, (RF
, RF_IF
), rd_rm
),
14722 cCL(nrmsm
, ef08140
, 2, (RF
, RF_IF
), rd_rm
),
14723 cCL(nrmsz
, ef08160
, 2, (RF
, RF_IF
), rd_rm
),
14724 cCL(nrmd
, ef08180
, 2, (RF
, RF_IF
), rd_rm
),
14725 cCL(nrmdp
, ef081a0
, 2, (RF
, RF_IF
), rd_rm
),
14726 cCL(nrmdm
, ef081c0
, 2, (RF
, RF_IF
), rd_rm
),
14727 cCL(nrmdz
, ef081e0
, 2, (RF
, RF_IF
), rd_rm
),
14728 cCL(nrme
, ef88100
, 2, (RF
, RF_IF
), rd_rm
),
14729 cCL(nrmep
, ef88120
, 2, (RF
, RF_IF
), rd_rm
),
14730 cCL(nrmem
, ef88140
, 2, (RF
, RF_IF
), rd_rm
),
14731 cCL(nrmez
, ef88160
, 2, (RF
, RF_IF
), rd_rm
),
14733 cCL(adfs
, e000100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14734 cCL(adfsp
, e000120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14735 cCL(adfsm
, e000140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14736 cCL(adfsz
, e000160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14737 cCL(adfd
, e000180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14738 cCL(adfdp
, e0001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14739 cCL(adfdm
, e0001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14740 cCL(adfdz
, e0001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14741 cCL(adfe
, e080100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14742 cCL(adfep
, e080120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14743 cCL(adfem
, e080140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14744 cCL(adfez
, e080160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14746 cCL(sufs
, e200100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14747 cCL(sufsp
, e200120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14748 cCL(sufsm
, e200140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14749 cCL(sufsz
, e200160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14750 cCL(sufd
, e200180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14751 cCL(sufdp
, e2001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14752 cCL(sufdm
, e2001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14753 cCL(sufdz
, e2001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14754 cCL(sufe
, e280100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14755 cCL(sufep
, e280120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14756 cCL(sufem
, e280140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14757 cCL(sufez
, e280160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14759 cCL(rsfs
, e300100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14760 cCL(rsfsp
, e300120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14761 cCL(rsfsm
, e300140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14762 cCL(rsfsz
, e300160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14763 cCL(rsfd
, e300180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14764 cCL(rsfdp
, e3001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14765 cCL(rsfdm
, e3001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14766 cCL(rsfdz
, e3001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14767 cCL(rsfe
, e380100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14768 cCL(rsfep
, e380120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14769 cCL(rsfem
, e380140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14770 cCL(rsfez
, e380160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14772 cCL(mufs
, e100100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14773 cCL(mufsp
, e100120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14774 cCL(mufsm
, e100140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14775 cCL(mufsz
, e100160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14776 cCL(mufd
, e100180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14777 cCL(mufdp
, e1001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14778 cCL(mufdm
, e1001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14779 cCL(mufdz
, e1001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14780 cCL(mufe
, e180100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14781 cCL(mufep
, e180120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14782 cCL(mufem
, e180140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14783 cCL(mufez
, e180160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14785 cCL(dvfs
, e400100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14786 cCL(dvfsp
, e400120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14787 cCL(dvfsm
, e400140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14788 cCL(dvfsz
, e400160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14789 cCL(dvfd
, e400180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14790 cCL(dvfdp
, e4001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14791 cCL(dvfdm
, e4001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14792 cCL(dvfdz
, e4001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14793 cCL(dvfe
, e480100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14794 cCL(dvfep
, e480120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14795 cCL(dvfem
, e480140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14796 cCL(dvfez
, e480160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14798 cCL(rdfs
, e500100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14799 cCL(rdfsp
, e500120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14800 cCL(rdfsm
, e500140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14801 cCL(rdfsz
, e500160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14802 cCL(rdfd
, e500180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14803 cCL(rdfdp
, e5001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14804 cCL(rdfdm
, e5001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14805 cCL(rdfdz
, e5001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14806 cCL(rdfe
, e580100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14807 cCL(rdfep
, e580120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14808 cCL(rdfem
, e580140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14809 cCL(rdfez
, e580160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14811 cCL(pows
, e600100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14812 cCL(powsp
, e600120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14813 cCL(powsm
, e600140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14814 cCL(powsz
, e600160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14815 cCL(powd
, e600180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14816 cCL(powdp
, e6001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14817 cCL(powdm
, e6001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14818 cCL(powdz
, e6001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14819 cCL(powe
, e680100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14820 cCL(powep
, e680120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14821 cCL(powem
, e680140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14822 cCL(powez
, e680160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14824 cCL(rpws
, e700100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14825 cCL(rpwsp
, e700120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14826 cCL(rpwsm
, e700140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14827 cCL(rpwsz
, e700160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14828 cCL(rpwd
, e700180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14829 cCL(rpwdp
, e7001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14830 cCL(rpwdm
, e7001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14831 cCL(rpwdz
, e7001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14832 cCL(rpwe
, e780100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14833 cCL(rpwep
, e780120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14834 cCL(rpwem
, e780140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14835 cCL(rpwez
, e780160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14837 cCL(rmfs
, e800100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14838 cCL(rmfsp
, e800120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14839 cCL(rmfsm
, e800140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14840 cCL(rmfsz
, e800160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14841 cCL(rmfd
, e800180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14842 cCL(rmfdp
, e8001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14843 cCL(rmfdm
, e8001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14844 cCL(rmfdz
, e8001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14845 cCL(rmfe
, e880100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14846 cCL(rmfep
, e880120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14847 cCL(rmfem
, e880140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14848 cCL(rmfez
, e880160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14850 cCL(fmls
, e900100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14851 cCL(fmlsp
, e900120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14852 cCL(fmlsm
, e900140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14853 cCL(fmlsz
, e900160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14854 cCL(fmld
, e900180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14855 cCL(fmldp
, e9001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14856 cCL(fmldm
, e9001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14857 cCL(fmldz
, e9001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14858 cCL(fmle
, e980100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14859 cCL(fmlep
, e980120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14860 cCL(fmlem
, e980140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14861 cCL(fmlez
, e980160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14863 cCL(fdvs
, ea00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14864 cCL(fdvsp
, ea00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14865 cCL(fdvsm
, ea00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14866 cCL(fdvsz
, ea00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14867 cCL(fdvd
, ea00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14868 cCL(fdvdp
, ea001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14869 cCL(fdvdm
, ea001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14870 cCL(fdvdz
, ea001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14871 cCL(fdve
, ea80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14872 cCL(fdvep
, ea80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14873 cCL(fdvem
, ea80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14874 cCL(fdvez
, ea80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14876 cCL(frds
, eb00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14877 cCL(frdsp
, eb00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14878 cCL(frdsm
, eb00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14879 cCL(frdsz
, eb00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14880 cCL(frdd
, eb00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14881 cCL(frddp
, eb001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14882 cCL(frddm
, eb001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14883 cCL(frddz
, eb001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14884 cCL(frde
, eb80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14885 cCL(frdep
, eb80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14886 cCL(frdem
, eb80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14887 cCL(frdez
, eb80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14889 cCL(pols
, ec00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14890 cCL(polsp
, ec00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14891 cCL(polsm
, ec00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14892 cCL(polsz
, ec00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14893 cCL(pold
, ec00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14894 cCL(poldp
, ec001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14895 cCL(poldm
, ec001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14896 cCL(poldz
, ec001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14897 cCL(pole
, ec80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14898 cCL(polep
, ec80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14899 cCL(polem
, ec80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14900 cCL(polez
, ec80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
14902 cCE(cmf
, e90f110
, 2, (RF
, RF_IF
), fpa_cmp
),
14903 C3E(cmfe
, ed0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
14904 cCE(cnf
, eb0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
14905 C3E(cnfe
, ef0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
14907 cCL(flts
, e000110
, 2, (RF
, RR
), rn_rd
),
14908 cCL(fltsp
, e000130
, 2, (RF
, RR
), rn_rd
),
14909 cCL(fltsm
, e000150
, 2, (RF
, RR
), rn_rd
),
14910 cCL(fltsz
, e000170
, 2, (RF
, RR
), rn_rd
),
14911 cCL(fltd
, e000190
, 2, (RF
, RR
), rn_rd
),
14912 cCL(fltdp
, e0001b0
, 2, (RF
, RR
), rn_rd
),
14913 cCL(fltdm
, e0001d0
, 2, (RF
, RR
), rn_rd
),
14914 cCL(fltdz
, e0001f0
, 2, (RF
, RR
), rn_rd
),
14915 cCL(flte
, e080110
, 2, (RF
, RR
), rn_rd
),
14916 cCL(fltep
, e080130
, 2, (RF
, RR
), rn_rd
),
14917 cCL(fltem
, e080150
, 2, (RF
, RR
), rn_rd
),
14918 cCL(fltez
, e080170
, 2, (RF
, RR
), rn_rd
),
14920 /* The implementation of the FIX instruction is broken on some
14921 assemblers, in that it accepts a precision specifier as well as a
14922 rounding specifier, despite the fact that this is meaningless.
14923 To be more compatible, we accept it as well, though of course it
14924 does not set any bits. */
14925 cCE(fix
, e100110
, 2, (RR
, RF
), rd_rm
),
14926 cCL(fixp
, e100130
, 2, (RR
, RF
), rd_rm
),
14927 cCL(fixm
, e100150
, 2, (RR
, RF
), rd_rm
),
14928 cCL(fixz
, e100170
, 2, (RR
, RF
), rd_rm
),
14929 cCL(fixsp
, e100130
, 2, (RR
, RF
), rd_rm
),
14930 cCL(fixsm
, e100150
, 2, (RR
, RF
), rd_rm
),
14931 cCL(fixsz
, e100170
, 2, (RR
, RF
), rd_rm
),
14932 cCL(fixdp
, e100130
, 2, (RR
, RF
), rd_rm
),
14933 cCL(fixdm
, e100150
, 2, (RR
, RF
), rd_rm
),
14934 cCL(fixdz
, e100170
, 2, (RR
, RF
), rd_rm
),
14935 cCL(fixep
, e100130
, 2, (RR
, RF
), rd_rm
),
14936 cCL(fixem
, e100150
, 2, (RR
, RF
), rd_rm
),
14937 cCL(fixez
, e100170
, 2, (RR
, RF
), rd_rm
),
14939 /* Instructions that were new with the real FPA, call them V2. */
14941 #define ARM_VARIANT &fpu_fpa_ext_v2
14942 cCE(lfm
, c100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
14943 cCL(lfmfd
, c900200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
14944 cCL(lfmea
, d100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
14945 cCE(sfm
, c000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
14946 cCL(sfmfd
, d000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
14947 cCL(sfmea
, c800200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
14950 #define ARM_VARIANT &fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
14951 /* Moves and type conversions. */
14952 cCE(fcpys
, eb00a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
14953 cCE(fmrs
, e100a10
, 2, (RR
, RVS
), vfp_reg_from_sp
),
14954 cCE(fmsr
, e000a10
, 2, (RVS
, RR
), vfp_sp_from_reg
),
14955 cCE(fmstat
, ef1fa10
, 0, (), noargs
),
14956 cCE(fsitos
, eb80ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
14957 cCE(fuitos
, eb80a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
14958 cCE(ftosis
, ebd0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
14959 cCE(ftosizs
, ebd0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
14960 cCE(ftouis
, ebc0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
14961 cCE(ftouizs
, ebc0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
14962 cCE(fmrx
, ef00a10
, 2, (RR
, RVC
), rd_rn
),
14963 cCE(vmrs
, ef00a10
, 2, (APSR_RR
, RVC
), vmrs
),
14964 cCE(fmxr
, ee00a10
, 2, (RVC
, RR
), rn_rd
),
14965 cCE(vmsr
, ee00a10
, 2, (RVC
, RR
), rn_rd
),
14967 /* Memory operations. */
14968 cCE(flds
, d100a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
14969 cCE(fsts
, d000a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
14970 cCE(fldmias
, c900a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmia
),
14971 cCE(fldmfds
, c900a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmia
),
14972 cCE(fldmdbs
, d300a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmdb
),
14973 cCE(fldmeas
, d300a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmdb
),
14974 cCE(fldmiax
, c900b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmia
),
14975 cCE(fldmfdx
, c900b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmia
),
14976 cCE(fldmdbx
, d300b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmdb
),
14977 cCE(fldmeax
, d300b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmdb
),
14978 cCE(fstmias
, c800a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmia
),
14979 cCE(fstmeas
, c800a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmia
),
14980 cCE(fstmdbs
, d200a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmdb
),
14981 cCE(fstmfds
, d200a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmdb
),
14982 cCE(fstmiax
, c800b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmia
),
14983 cCE(fstmeax
, c800b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmia
),
14984 cCE(fstmdbx
, d200b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmdb
),
14985 cCE(fstmfdx
, d200b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmdb
),
14987 /* Monadic operations. */
14988 cCE(fabss
, eb00ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
14989 cCE(fnegs
, eb10a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
14990 cCE(fsqrts
, eb10ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
14992 /* Dyadic operations. */
14993 cCE(fadds
, e300a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
14994 cCE(fsubs
, e300a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
14995 cCE(fmuls
, e200a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
14996 cCE(fdivs
, e800a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
14997 cCE(fmacs
, e000a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
14998 cCE(fmscs
, e100a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
14999 cCE(fnmuls
, e200a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
15000 cCE(fnmacs
, e000a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
15001 cCE(fnmscs
, e100a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
15004 cCE(fcmps
, eb40a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
15005 cCE(fcmpzs
, eb50a40
, 1, (RVS
), vfp_sp_compare_z
),
15006 cCE(fcmpes
, eb40ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
15007 cCE(fcmpezs
, eb50ac0
, 1, (RVS
), vfp_sp_compare_z
),
15010 #define ARM_VARIANT &fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
15011 /* Moves and type conversions. */
15012 cCE(fcpyd
, eb00b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
15013 cCE(fcvtds
, eb70ac0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
15014 cCE(fcvtsd
, eb70bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
15015 cCE(fmdhr
, e200b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
15016 cCE(fmdlr
, e000b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
15017 cCE(fmrdh
, e300b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
15018 cCE(fmrdl
, e100b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
15019 cCE(fsitod
, eb80bc0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
15020 cCE(fuitod
, eb80b40
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
15021 cCE(ftosid
, ebd0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
15022 cCE(ftosizd
, ebd0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
15023 cCE(ftouid
, ebc0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
15024 cCE(ftouizd
, ebc0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
15025 cCE(fcvtshp
, 0b60600, 2, (RNQ
, RVD
), vfp_sp_hp_cvt
),
15026 cCE(fcvthps
, 0b60700, 2, (RVD
, RNQ
), vfp_hp_sp_cvt
),
15027 cCE(fcvttshp
, eb30ac0
, 2, (RVS
, RVS
), vfp_t_sp_hp_cvt
),
15028 cCE(fcvtbshp
, eb30a40
, 2, (RVS
, RVS
), vfp_b_sp_hp_cvt
),
15029 cCE(fcvtthps
, eb20ac0
, 2, (RVS
, RVS
), vfp_t_hp_sp_cvt
),
15030 cCE(fcvtbhps
, eb20a40
, 2, (RVS
, RVS
), vfp_b_hp_sp_cvt
),
15032 /* Memory operations. */
15033 cCE(fldd
, d100b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
15034 cCE(fstd
, d000b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
15035 cCE(fldmiad
, c900b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmia
),
15036 cCE(fldmfdd
, c900b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmia
),
15037 cCE(fldmdbd
, d300b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmdb
),
15038 cCE(fldmead
, d300b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmdb
),
15039 cCE(fstmiad
, c800b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmia
),
15040 cCE(fstmead
, c800b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmia
),
15041 cCE(fstmdbd
, d200b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmdb
),
15042 cCE(fstmfdd
, d200b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmdb
),
15044 /* Monadic operations. */
15045 cCE(fabsd
, eb00bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
15046 cCE(fnegd
, eb10b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
15047 cCE(fsqrtd
, eb10bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
15049 /* Dyadic operations. */
15050 cCE(faddd
, e300b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
15051 cCE(fsubd
, e300b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
15052 cCE(fmuld
, e200b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
15053 cCE(fdivd
, e800b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
15054 cCE(fmacd
, e000b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
15055 cCE(fmscd
, e100b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
15056 cCE(fnmuld
, e200b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
15057 cCE(fnmacd
, e000b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
15058 cCE(fnmscd
, e100b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
15061 cCE(fcmpd
, eb40b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
15062 cCE(fcmpzd
, eb50b40
, 1, (RVD
), vfp_dp_rd
),
15063 cCE(fcmped
, eb40bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
15064 cCE(fcmpezd
, eb50bc0
, 1, (RVD
), vfp_dp_rd
),
15067 #define ARM_VARIANT &fpu_vfp_ext_v2
15068 cCE(fmsrr
, c400a10
, 3, (VRSLST
, RR
, RR
), vfp_sp2_from_reg2
),
15069 cCE(fmrrs
, c500a10
, 3, (RR
, RR
, VRSLST
), vfp_reg2_from_sp2
),
15070 cCE(fmdrr
, c400b10
, 3, (RVD
, RR
, RR
), vfp_dp_rm_rd_rn
),
15071 cCE(fmrrd
, c500b10
, 3, (RR
, RR
, RVD
), vfp_dp_rd_rn_rm
),
15073 /* Instructions which may belong to either the Neon or VFP instruction sets.
15074 Individual encoder functions perform additional architecture checks. */
15076 #define ARM_VARIANT &fpu_vfp_ext_v1xd
15077 #undef THUMB_VARIANT
15078 #define THUMB_VARIANT &fpu_vfp_ext_v1xd
15079 /* These mnemonics are unique to VFP. */
15080 NCE(vsqrt
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_sqrt
),
15081 NCE(vdiv
, 0, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_div
),
15082 nCE(vnmul
, vnmul
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
15083 nCE(vnmla
, vnmla
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
15084 nCE(vnmls
, vnmls
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
15085 nCE(vcmp
, vcmp
, 2, (RVSD
, RVSD_I0
), vfp_nsyn_cmp
),
15086 nCE(vcmpe
, vcmpe
, 2, (RVSD
, RVSD_I0
), vfp_nsyn_cmp
),
15087 NCE(vpush
, 0, 1, (VRSDLST
), vfp_nsyn_push
),
15088 NCE(vpop
, 0, 1, (VRSDLST
), vfp_nsyn_pop
),
15089 NCE(vcvtr
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_cvtr
),
15091 /* Mnemonics shared by Neon and VFP. */
15092 nCEF(vmul
, vmul
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mul
),
15093 nCEF(vmla
, vmla
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
15094 nCEF(vmls
, vmls
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
15096 nCEF(vadd
, vadd
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_addsub_if_i
),
15097 nCEF(vsub
, vsub
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_addsub_if_i
),
15099 NCEF(vabs
, 1b10300
, 2, (RNSDQ
, RNSDQ
), neon_abs_neg
),
15100 NCEF(vneg
, 1b10380
, 2, (RNSDQ
, RNSDQ
), neon_abs_neg
),
15102 NCE(vldm
, c900b00
, 2, (RRw
, VRSDLST
), neon_ldm_stm
),
15103 NCE(vldmia
, c900b00
, 2, (RRw
, VRSDLST
), neon_ldm_stm
),
15104 NCE(vldmdb
, d100b00
, 2, (RRw
, VRSDLST
), neon_ldm_stm
),
15105 NCE(vstm
, c800b00
, 2, (RRw
, VRSDLST
), neon_ldm_stm
),
15106 NCE(vstmia
, c800b00
, 2, (RRw
, VRSDLST
), neon_ldm_stm
),
15107 NCE(vstmdb
, d000b00
, 2, (RRw
, VRSDLST
), neon_ldm_stm
),
15108 NCE(vldr
, d100b00
, 2, (RVSD
, ADDRGLDC
), neon_ldr_str
),
15109 NCE(vstr
, d000b00
, 2, (RVSD
, ADDRGLDC
), neon_ldr_str
),
15111 nCEF(vcvt
, vcvt
, 3, (RNSDQ
, RNSDQ
, oI32b
), neon_cvt
),
15112 nCEF(vcvtt
, vcvtt
, 2, (RVS
, RVS
), neon_cvtt
),
15113 nCEF(vcvtb
, vcvtt
, 2, (RVS
, RVS
), neon_cvtb
),
15115 /* NOTE: All VMOV encoding is special-cased! */
15116 NCE(vmov
, 0, 1, (VMOV
), neon_mov
),
15117 NCE(vmovq
, 0, 1, (VMOV
), neon_mov
),
15119 #undef THUMB_VARIANT
15120 #define THUMB_VARIANT &fpu_neon_ext_v1
15122 #define ARM_VARIANT &fpu_neon_ext_v1
15123 /* Data processing with three registers of the same length. */
15124 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
15125 NUF(vaba
, 0000710, 3, (RNDQ
, RNDQ
, RNDQ
), neon_dyadic_i_su
),
15126 NUF(vabaq
, 0000710, 3, (RNQ
, RNQ
, RNQ
), neon_dyadic_i_su
),
15127 NUF(vhadd
, 0000000, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
15128 NUF(vhaddq
, 0000000, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
15129 NUF(vrhadd
, 0000100, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
15130 NUF(vrhaddq
, 0000100, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
15131 NUF(vhsub
, 0000200, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
15132 NUF(vhsubq
, 0000200, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
15133 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
15134 NUF(vqadd
, 0000010, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
15135 NUF(vqaddq
, 0000010, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
15136 NUF(vqsub
, 0000210, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
15137 NUF(vqsubq
, 0000210, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
15138 NUF(vrshl
, 0000500, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
15139 NUF(vrshlq
, 0000500, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
15140 NUF(vqrshl
, 0000510, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
15141 NUF(vqrshlq
, 0000510, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
15142 /* If not immediate, fall back to neon_dyadic_i64_su.
15143 shl_imm should accept I8 I16 I32 I64,
15144 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
15145 nUF(vshl
, vshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_shl_imm
),
15146 nUF(vshlq
, vshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_shl_imm
),
15147 nUF(vqshl
, vqshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_qshl_imm
),
15148 nUF(vqshlq
, vqshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_qshl_imm
),
15149 /* Logic ops, types optional & ignored. */
15150 nUF(vand
, vand
, 2, (RNDQ
, NILO
), neon_logic
),
15151 nUF(vandq
, vand
, 2, (RNQ
, NILO
), neon_logic
),
15152 nUF(vbic
, vbic
, 2, (RNDQ
, NILO
), neon_logic
),
15153 nUF(vbicq
, vbic
, 2, (RNQ
, NILO
), neon_logic
),
15154 nUF(vorr
, vorr
, 2, (RNDQ
, NILO
), neon_logic
),
15155 nUF(vorrq
, vorr
, 2, (RNQ
, NILO
), neon_logic
),
15156 nUF(vorn
, vorn
, 2, (RNDQ
, NILO
), neon_logic
),
15157 nUF(vornq
, vorn
, 2, (RNQ
, NILO
), neon_logic
),
15158 nUF(veor
, veor
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_logic
),
15159 nUF(veorq
, veor
, 3, (RNQ
, oRNQ
, RNQ
), neon_logic
),
15160 /* Bitfield ops, untyped. */
15161 NUF(vbsl
, 1100110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
15162 NUF(vbslq
, 1100110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
15163 NUF(vbit
, 1200110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
15164 NUF(vbitq
, 1200110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
15165 NUF(vbif
, 1300110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
15166 NUF(vbifq
, 1300110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
15167 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
15168 nUF(vabd
, vabd
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
15169 nUF(vabdq
, vabd
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
15170 nUF(vmax
, vmax
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
15171 nUF(vmaxq
, vmax
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
15172 nUF(vmin
, vmin
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
15173 nUF(vminq
, vmin
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
15174 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
15175 back to neon_dyadic_if_su. */
15176 nUF(vcge
, vcge
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
15177 nUF(vcgeq
, vcge
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
15178 nUF(vcgt
, vcgt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
15179 nUF(vcgtq
, vcgt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
15180 nUF(vclt
, vclt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
15181 nUF(vcltq
, vclt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
15182 nUF(vcle
, vcle
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
15183 nUF(vcleq
, vcle
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
15184 /* Comparison. Type I8 I16 I32 F32. */
15185 nUF(vceq
, vceq
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_ceq
),
15186 nUF(vceqq
, vceq
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_ceq
),
15187 /* As above, D registers only. */
15188 nUF(vpmax
, vpmax
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
15189 nUF(vpmin
, vpmin
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
15190 /* Int and float variants, signedness unimportant. */
15191 nUF(vmlaq
, vmla
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
15192 nUF(vmlsq
, vmls
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
15193 nUF(vpadd
, vpadd
, 3, (RND
, oRND
, RND
), neon_dyadic_if_i_d
),
15194 /* Add/sub take types I8 I16 I32 I64 F32. */
15195 nUF(vaddq
, vadd
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
15196 nUF(vsubq
, vsub
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
15197 /* vtst takes sizes 8, 16, 32. */
15198 NUF(vtst
, 0000810, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_tst
),
15199 NUF(vtstq
, 0000810, 3, (RNQ
, oRNQ
, RNQ
), neon_tst
),
15200 /* VMUL takes I8 I16 I32 F32 P8. */
15201 nUF(vmulq
, vmul
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mul
),
15202 /* VQD{R}MULH takes S16 S32. */
15203 nUF(vqdmulh
, vqdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
15204 nUF(vqdmulhq
, vqdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
15205 nUF(vqrdmulh
, vqrdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
15206 nUF(vqrdmulhq
, vqrdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
15207 NUF(vacge
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
15208 NUF(vacgeq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
15209 NUF(vacgt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
15210 NUF(vacgtq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
15211 NUF(vaclt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
15212 NUF(vacltq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
15213 NUF(vacle
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
15214 NUF(vacleq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
15215 NUF(vrecps
, 0000f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
15216 NUF(vrecpsq
, 0000f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
15217 NUF(vrsqrts
, 0200f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
15218 NUF(vrsqrtsq
, 0200f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
15220 /* Two address, int/float. Types S8 S16 S32 F32. */
15221 NUF(vabsq
, 1b10300
, 2, (RNQ
, RNQ
), neon_abs_neg
),
15222 NUF(vnegq
, 1b10380
, 2, (RNQ
, RNQ
), neon_abs_neg
),
15224 /* Data processing with two registers and a shift amount. */
15225 /* Right shifts, and variants with rounding.
15226 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
15227 NUF(vshr
, 0800010, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
15228 NUF(vshrq
, 0800010, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
15229 NUF(vrshr
, 0800210, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
15230 NUF(vrshrq
, 0800210, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
15231 NUF(vsra
, 0800110, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
15232 NUF(vsraq
, 0800110, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
15233 NUF(vrsra
, 0800310, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
15234 NUF(vrsraq
, 0800310, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
15235 /* Shift and insert. Sizes accepted 8 16 32 64. */
15236 NUF(vsli
, 1800510, 3, (RNDQ
, oRNDQ
, I63
), neon_sli
),
15237 NUF(vsliq
, 1800510, 3, (RNQ
, oRNQ
, I63
), neon_sli
),
15238 NUF(vsri
, 1800410, 3, (RNDQ
, oRNDQ
, I64
), neon_sri
),
15239 NUF(vsriq
, 1800410, 3, (RNQ
, oRNQ
, I64
), neon_sri
),
15240 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
15241 NUF(vqshlu
, 1800610, 3, (RNDQ
, oRNDQ
, I63
), neon_qshlu_imm
),
15242 NUF(vqshluq
, 1800610, 3, (RNQ
, oRNQ
, I63
), neon_qshlu_imm
),
15243 /* Right shift immediate, saturating & narrowing, with rounding variants.
15244 Types accepted S16 S32 S64 U16 U32 U64. */
15245 NUF(vqshrn
, 0800910, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
15246 NUF(vqrshrn
, 0800950, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
15247 /* As above, unsigned. Types accepted S16 S32 S64. */
15248 NUF(vqshrun
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
15249 NUF(vqrshrun
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
15250 /* Right shift narrowing. Types accepted I16 I32 I64. */
15251 NUF(vshrn
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
15252 NUF(vrshrn
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
15253 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
15254 nUF(vshll
, vshll
, 3, (RNQ
, RND
, I32
), neon_shll
),
15255 /* CVT with optional immediate for fixed-point variant. */
15256 nUF(vcvtq
, vcvt
, 3, (RNQ
, RNQ
, oI32b
), neon_cvt
),
15258 nUF(vmvn
, vmvn
, 2, (RNDQ
, RNDQ_IMVNb
), neon_mvn
),
15259 nUF(vmvnq
, vmvn
, 2, (RNQ
, RNDQ_IMVNb
), neon_mvn
),
15261 /* Data processing, three registers of different lengths. */
15262 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
15263 NUF(vabal
, 0800500, 3, (RNQ
, RND
, RND
), neon_abal
),
15264 NUF(vabdl
, 0800700, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
15265 NUF(vaddl
, 0800000, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
15266 NUF(vsubl
, 0800200, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
15267 /* If not scalar, fall back to neon_dyadic_long.
15268 Vector types as above, scalar types S16 S32 U16 U32. */
15269 nUF(vmlal
, vmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
15270 nUF(vmlsl
, vmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
15271 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
15272 NUF(vaddw
, 0800100, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
15273 NUF(vsubw
, 0800300, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
15274 /* Dyadic, narrowing insns. Types I16 I32 I64. */
15275 NUF(vaddhn
, 0800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
15276 NUF(vraddhn
, 1800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
15277 NUF(vsubhn
, 0800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
15278 NUF(vrsubhn
, 1800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
15279 /* Saturating doubling multiplies. Types S16 S32. */
15280 nUF(vqdmlal
, vqdmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
15281 nUF(vqdmlsl
, vqdmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
15282 nUF(vqdmull
, vqdmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
15283 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
15284 S16 S32 U16 U32. */
15285 nUF(vmull
, vmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_vmull
),
15287 /* Extract. Size 8. */
15288 NUF(vext
, 0b00000, 4, (RNDQ
, oRNDQ
, RNDQ
, I15
), neon_ext
),
15289 NUF(vextq
, 0b00000, 4, (RNQ
, oRNQ
, RNQ
, I15
), neon_ext
),
15291 /* Two registers, miscellaneous. */
15292 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
15293 NUF(vrev64
, 1b00000
, 2, (RNDQ
, RNDQ
), neon_rev
),
15294 NUF(vrev64q
, 1b00000
, 2, (RNQ
, RNQ
), neon_rev
),
15295 NUF(vrev32
, 1b00080
, 2, (RNDQ
, RNDQ
), neon_rev
),
15296 NUF(vrev32q
, 1b00080
, 2, (RNQ
, RNQ
), neon_rev
),
15297 NUF(vrev16
, 1b00100
, 2, (RNDQ
, RNDQ
), neon_rev
),
15298 NUF(vrev16q
, 1b00100
, 2, (RNQ
, RNQ
), neon_rev
),
15299 /* Vector replicate. Sizes 8 16 32. */
15300 nCE(vdup
, vdup
, 2, (RNDQ
, RR_RNSC
), neon_dup
),
15301 nCE(vdupq
, vdup
, 2, (RNQ
, RR_RNSC
), neon_dup
),
15302 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
15303 NUF(vmovl
, 0800a10
, 2, (RNQ
, RND
), neon_movl
),
15304 /* VMOVN. Types I16 I32 I64. */
15305 nUF(vmovn
, vmovn
, 2, (RND
, RNQ
), neon_movn
),
15306 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
15307 nUF(vqmovn
, vqmovn
, 2, (RND
, RNQ
), neon_qmovn
),
15308 /* VQMOVUN. Types S16 S32 S64. */
15309 nUF(vqmovun
, vqmovun
, 2, (RND
, RNQ
), neon_qmovun
),
15310 /* VZIP / VUZP. Sizes 8 16 32. */
15311 NUF(vzip
, 1b20180
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
15312 NUF(vzipq
, 1b20180
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
15313 NUF(vuzp
, 1b20100
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
15314 NUF(vuzpq
, 1b20100
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
15315 /* VQABS / VQNEG. Types S8 S16 S32. */
15316 NUF(vqabs
, 1b00700
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
15317 NUF(vqabsq
, 1b00700
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
15318 NUF(vqneg
, 1b00780
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
15319 NUF(vqnegq
, 1b00780
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
15320 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
15321 NUF(vpadal
, 1b00600
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
15322 NUF(vpadalq
, 1b00600
, 2, (RNQ
, RNQ
), neon_pair_long
),
15323 NUF(vpaddl
, 1b00200
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
15324 NUF(vpaddlq
, 1b00200
, 2, (RNQ
, RNQ
), neon_pair_long
),
15325 /* Reciprocal estimates. Types U32 F32. */
15326 NUF(vrecpe
, 1b30400
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
15327 NUF(vrecpeq
, 1b30400
, 2, (RNQ
, RNQ
), neon_recip_est
),
15328 NUF(vrsqrte
, 1b30480
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
15329 NUF(vrsqrteq
, 1b30480
, 2, (RNQ
, RNQ
), neon_recip_est
),
15330 /* VCLS. Types S8 S16 S32. */
15331 NUF(vcls
, 1b00400
, 2, (RNDQ
, RNDQ
), neon_cls
),
15332 NUF(vclsq
, 1b00400
, 2, (RNQ
, RNQ
), neon_cls
),
15333 /* VCLZ. Types I8 I16 I32. */
15334 NUF(vclz
, 1b00480
, 2, (RNDQ
, RNDQ
), neon_clz
),
15335 NUF(vclzq
, 1b00480
, 2, (RNQ
, RNQ
), neon_clz
),
15336 /* VCNT. Size 8. */
15337 NUF(vcnt
, 1b00500
, 2, (RNDQ
, RNDQ
), neon_cnt
),
15338 NUF(vcntq
, 1b00500
, 2, (RNQ
, RNQ
), neon_cnt
),
15339 /* Two address, untyped. */
15340 NUF(vswp
, 1b20000
, 2, (RNDQ
, RNDQ
), neon_swp
),
15341 NUF(vswpq
, 1b20000
, 2, (RNQ
, RNQ
), neon_swp
),
15342 /* VTRN. Sizes 8 16 32. */
15343 nUF(vtrn
, vtrn
, 2, (RNDQ
, RNDQ
), neon_trn
),
15344 nUF(vtrnq
, vtrn
, 2, (RNQ
, RNQ
), neon_trn
),
15346 /* Table lookup. Size 8. */
15347 NUF(vtbl
, 1b00800
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
15348 NUF(vtbx
, 1b00840
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
15350 #undef THUMB_VARIANT
15351 #define THUMB_VARIANT &fpu_vfp_v3_or_neon_ext
15353 #define ARM_VARIANT &fpu_vfp_v3_or_neon_ext
15354 /* Neon element/structure load/store. */
15355 nUF(vld1
, vld1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
15356 nUF(vst1
, vst1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
15357 nUF(vld2
, vld2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
15358 nUF(vst2
, vst2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
15359 nUF(vld3
, vld3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
15360 nUF(vst3
, vst3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
15361 nUF(vld4
, vld4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
15362 nUF(vst4
, vst4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
15364 #undef THUMB_VARIANT
15365 #define THUMB_VARIANT &fpu_vfp_ext_v3
15367 #define ARM_VARIANT &fpu_vfp_ext_v3
15368 cCE(fconsts
, eb00a00
, 2, (RVS
, I255
), vfp_sp_const
),
15369 cCE(fconstd
, eb00b00
, 2, (RVD
, I255
), vfp_dp_const
),
15370 cCE(fshtos
, eba0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
15371 cCE(fshtod
, eba0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
15372 cCE(fsltos
, eba0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
15373 cCE(fsltod
, eba0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
15374 cCE(fuhtos
, ebb0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
15375 cCE(fuhtod
, ebb0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
15376 cCE(fultos
, ebb0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
15377 cCE(fultod
, ebb0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
15378 cCE(ftoshs
, ebe0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
15379 cCE(ftoshd
, ebe0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
15380 cCE(ftosls
, ebe0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
15381 cCE(ftosld
, ebe0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
15382 cCE(ftouhs
, ebf0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
15383 cCE(ftouhd
, ebf0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
15384 cCE(ftouls
, ebf0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
15385 cCE(ftould
, ebf0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
15387 #undef THUMB_VARIANT
15389 #define ARM_VARIANT &arm_cext_xscale /* Intel XScale extensions. */
15390 cCE(mia
, e200010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
15391 cCE(miaph
, e280010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
15392 cCE(miabb
, e2c0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
15393 cCE(miabt
, e2d0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
15394 cCE(miatb
, e2e0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
15395 cCE(miatt
, e2f0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
15396 cCE(mar
, c400000
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mar
),
15397 cCE(mra
, c500000
, 3, (RRnpc
, RRnpc
, RXA
), xsc_mra
),
15400 #define ARM_VARIANT &arm_cext_iwmmxt /* Intel Wireless MMX technology. */
15401 cCE(tandcb
, e13f130
, 1, (RR
), iwmmxt_tandorc
),
15402 cCE(tandch
, e53f130
, 1, (RR
), iwmmxt_tandorc
),
15403 cCE(tandcw
, e93f130
, 1, (RR
), iwmmxt_tandorc
),
15404 cCE(tbcstb
, e400010
, 2, (RIWR
, RR
), rn_rd
),
15405 cCE(tbcsth
, e400050
, 2, (RIWR
, RR
), rn_rd
),
15406 cCE(tbcstw
, e400090
, 2, (RIWR
, RR
), rn_rd
),
15407 cCE(textrcb
, e130170
, 2, (RR
, I7
), iwmmxt_textrc
),
15408 cCE(textrch
, e530170
, 2, (RR
, I7
), iwmmxt_textrc
),
15409 cCE(textrcw
, e930170
, 2, (RR
, I7
), iwmmxt_textrc
),
15410 cCE(textrmub
, e100070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
15411 cCE(textrmuh
, e500070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
15412 cCE(textrmuw
, e900070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
15413 cCE(textrmsb
, e100078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
15414 cCE(textrmsh
, e500078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
15415 cCE(textrmsw
, e900078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
15416 cCE(tinsrb
, e600010
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
15417 cCE(tinsrh
, e600050
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
15418 cCE(tinsrw
, e600090
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
15419 cCE(tmcr
, e000110
, 2, (RIWC_RIWG
, RR
), rn_rd
),
15420 cCE(tmcrr
, c400000
, 3, (RIWR
, RR
, RR
), rm_rd_rn
),
15421 cCE(tmia
, e200010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
15422 cCE(tmiaph
, e280010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
15423 cCE(tmiabb
, e2c0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
15424 cCE(tmiabt
, e2d0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
15425 cCE(tmiatb
, e2e0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
15426 cCE(tmiatt
, e2f0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
15427 cCE(tmovmskb
, e100030
, 2, (RR
, RIWR
), rd_rn
),
15428 cCE(tmovmskh
, e500030
, 2, (RR
, RIWR
), rd_rn
),
15429 cCE(tmovmskw
, e900030
, 2, (RR
, RIWR
), rd_rn
),
15430 cCE(tmrc
, e100110
, 2, (RR
, RIWC_RIWG
), rd_rn
),
15431 cCE(tmrrc
, c500000
, 3, (RR
, RR
, RIWR
), rd_rn_rm
),
15432 cCE(torcb
, e13f150
, 1, (RR
), iwmmxt_tandorc
),
15433 cCE(torch
, e53f150
, 1, (RR
), iwmmxt_tandorc
),
15434 cCE(torcw
, e93f150
, 1, (RR
), iwmmxt_tandorc
),
15435 cCE(waccb
, e0001c0
, 2, (RIWR
, RIWR
), rd_rn
),
15436 cCE(wacch
, e4001c0
, 2, (RIWR
, RIWR
), rd_rn
),
15437 cCE(waccw
, e8001c0
, 2, (RIWR
, RIWR
), rd_rn
),
15438 cCE(waddbss
, e300180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15439 cCE(waddb
, e000180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15440 cCE(waddbus
, e100180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15441 cCE(waddhss
, e700180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15442 cCE(waddh
, e400180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15443 cCE(waddhus
, e500180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15444 cCE(waddwss
, eb00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15445 cCE(waddw
, e800180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15446 cCE(waddwus
, e900180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15447 cCE(waligni
, e000020
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_waligni
),
15448 cCE(walignr0
, e800020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15449 cCE(walignr1
, e900020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15450 cCE(walignr2
, ea00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15451 cCE(walignr3
, eb00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15452 cCE(wand
, e200000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15453 cCE(wandn
, e300000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15454 cCE(wavg2b
, e800000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15455 cCE(wavg2br
, e900000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15456 cCE(wavg2h
, ec00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15457 cCE(wavg2hr
, ed00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15458 cCE(wcmpeqb
, e000060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15459 cCE(wcmpeqh
, e400060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15460 cCE(wcmpeqw
, e800060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15461 cCE(wcmpgtub
, e100060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15462 cCE(wcmpgtuh
, e500060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15463 cCE(wcmpgtuw
, e900060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15464 cCE(wcmpgtsb
, e300060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15465 cCE(wcmpgtsh
, e700060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15466 cCE(wcmpgtsw
, eb00060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15467 cCE(wldrb
, c100000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
15468 cCE(wldrh
, c500000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
15469 cCE(wldrw
, c100100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
15470 cCE(wldrd
, c500100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
15471 cCE(wmacs
, e600100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15472 cCE(wmacsz
, e700100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15473 cCE(wmacu
, e400100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15474 cCE(wmacuz
, e500100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15475 cCE(wmadds
, ea00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15476 cCE(wmaddu
, e800100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15477 cCE(wmaxsb
, e200160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15478 cCE(wmaxsh
, e600160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15479 cCE(wmaxsw
, ea00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15480 cCE(wmaxub
, e000160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15481 cCE(wmaxuh
, e400160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15482 cCE(wmaxuw
, e800160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15483 cCE(wminsb
, e300160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15484 cCE(wminsh
, e700160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15485 cCE(wminsw
, eb00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15486 cCE(wminub
, e100160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15487 cCE(wminuh
, e500160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15488 cCE(wminuw
, e900160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15489 cCE(wmov
, e000000
, 2, (RIWR
, RIWR
), iwmmxt_wmov
),
15490 cCE(wmulsm
, e300100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15491 cCE(wmulsl
, e200100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15492 cCE(wmulum
, e100100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15493 cCE(wmulul
, e000100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15494 cCE(wor
, e000000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15495 cCE(wpackhss
, e700080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15496 cCE(wpackhus
, e500080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15497 cCE(wpackwss
, eb00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15498 cCE(wpackwus
, e900080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15499 cCE(wpackdss
, ef00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15500 cCE(wpackdus
, ed00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15501 cCE(wrorh
, e700040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
15502 cCE(wrorhg
, e700148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
15503 cCE(wrorw
, eb00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
15504 cCE(wrorwg
, eb00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
15505 cCE(wrord
, ef00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
15506 cCE(wrordg
, ef00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
15507 cCE(wsadb
, e000120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15508 cCE(wsadbz
, e100120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15509 cCE(wsadh
, e400120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15510 cCE(wsadhz
, e500120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15511 cCE(wshufh
, e0001e0
, 3, (RIWR
, RIWR
, I255
), iwmmxt_wshufh
),
15512 cCE(wsllh
, e500040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
15513 cCE(wsllhg
, e500148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
15514 cCE(wsllw
, e900040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
15515 cCE(wsllwg
, e900148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
15516 cCE(wslld
, ed00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
15517 cCE(wslldg
, ed00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
15518 cCE(wsrah
, e400040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
15519 cCE(wsrahg
, e400148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
15520 cCE(wsraw
, e800040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
15521 cCE(wsrawg
, e800148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
15522 cCE(wsrad
, ec00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
15523 cCE(wsradg
, ec00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
15524 cCE(wsrlh
, e600040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
15525 cCE(wsrlhg
, e600148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
15526 cCE(wsrlw
, ea00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
15527 cCE(wsrlwg
, ea00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
15528 cCE(wsrld
, ee00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
15529 cCE(wsrldg
, ee00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
15530 cCE(wstrb
, c000000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
15531 cCE(wstrh
, c400000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
15532 cCE(wstrw
, c000100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
15533 cCE(wstrd
, c400100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
15534 cCE(wsubbss
, e3001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15535 cCE(wsubb
, e0001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15536 cCE(wsubbus
, e1001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15537 cCE(wsubhss
, e7001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15538 cCE(wsubh
, e4001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15539 cCE(wsubhus
, e5001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15540 cCE(wsubwss
, eb001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15541 cCE(wsubw
, e8001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15542 cCE(wsubwus
, e9001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15543 cCE(wunpckehub
,e0000c0
, 2, (RIWR
, RIWR
), rd_rn
),
15544 cCE(wunpckehuh
,e4000c0
, 2, (RIWR
, RIWR
), rd_rn
),
15545 cCE(wunpckehuw
,e8000c0
, 2, (RIWR
, RIWR
), rd_rn
),
15546 cCE(wunpckehsb
,e2000c0
, 2, (RIWR
, RIWR
), rd_rn
),
15547 cCE(wunpckehsh
,e6000c0
, 2, (RIWR
, RIWR
), rd_rn
),
15548 cCE(wunpckehsw
,ea000c0
, 2, (RIWR
, RIWR
), rd_rn
),
15549 cCE(wunpckihb
, e1000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15550 cCE(wunpckihh
, e5000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15551 cCE(wunpckihw
, e9000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15552 cCE(wunpckelub
,e0000e0
, 2, (RIWR
, RIWR
), rd_rn
),
15553 cCE(wunpckeluh
,e4000e0
, 2, (RIWR
, RIWR
), rd_rn
),
15554 cCE(wunpckeluw
,e8000e0
, 2, (RIWR
, RIWR
), rd_rn
),
15555 cCE(wunpckelsb
,e2000e0
, 2, (RIWR
, RIWR
), rd_rn
),
15556 cCE(wunpckelsh
,e6000e0
, 2, (RIWR
, RIWR
), rd_rn
),
15557 cCE(wunpckelsw
,ea000e0
, 2, (RIWR
, RIWR
), rd_rn
),
15558 cCE(wunpckilb
, e1000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15559 cCE(wunpckilh
, e5000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15560 cCE(wunpckilw
, e9000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15561 cCE(wxor
, e100000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15562 cCE(wzero
, e300000
, 1, (RIWR
), iwmmxt_wzero
),
15565 #define ARM_VARIANT &arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
15566 cCE(torvscb
, e13f190
, 1, (RR
), iwmmxt_tandorc
),
15567 cCE(torvsch
, e53f190
, 1, (RR
), iwmmxt_tandorc
),
15568 cCE(torvscw
, e93f190
, 1, (RR
), iwmmxt_tandorc
),
15569 cCE(wabsb
, e2001c0
, 2, (RIWR
, RIWR
), rd_rn
),
15570 cCE(wabsh
, e6001c0
, 2, (RIWR
, RIWR
), rd_rn
),
15571 cCE(wabsw
, ea001c0
, 2, (RIWR
, RIWR
), rd_rn
),
15572 cCE(wabsdiffb
, e1001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15573 cCE(wabsdiffh
, e5001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15574 cCE(wabsdiffw
, e9001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15575 cCE(waddbhusl
, e2001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15576 cCE(waddbhusm
, e6001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15577 cCE(waddhc
, e600180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15578 cCE(waddwc
, ea00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15579 cCE(waddsubhx
, ea001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15580 cCE(wavg4
, e400000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15581 cCE(wavg4r
, e500000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15582 cCE(wmaddsn
, ee00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15583 cCE(wmaddsx
, eb00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15584 cCE(wmaddun
, ec00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15585 cCE(wmaddux
, e900100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15586 cCE(wmerge
, e000080
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_wmerge
),
15587 cCE(wmiabb
, e0000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15588 cCE(wmiabt
, e1000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15589 cCE(wmiatb
, e2000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15590 cCE(wmiatt
, e3000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15591 cCE(wmiabbn
, e4000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15592 cCE(wmiabtn
, e5000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15593 cCE(wmiatbn
, e6000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15594 cCE(wmiattn
, e7000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15595 cCE(wmiawbb
, e800120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15596 cCE(wmiawbt
, e900120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15597 cCE(wmiawtb
, ea00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15598 cCE(wmiawtt
, eb00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15599 cCE(wmiawbbn
, ec00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15600 cCE(wmiawbtn
, ed00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15601 cCE(wmiawtbn
, ee00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15602 cCE(wmiawttn
, ef00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15603 cCE(wmulsmr
, ef00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15604 cCE(wmulumr
, ed00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15605 cCE(wmulwumr
, ec000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15606 cCE(wmulwsmr
, ee000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15607 cCE(wmulwum
, ed000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15608 cCE(wmulwsm
, ef000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15609 cCE(wmulwl
, eb000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15610 cCE(wqmiabb
, e8000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15611 cCE(wqmiabt
, e9000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15612 cCE(wqmiatb
, ea000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15613 cCE(wqmiatt
, eb000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15614 cCE(wqmiabbn
, ec000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15615 cCE(wqmiabtn
, ed000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15616 cCE(wqmiatbn
, ee000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15617 cCE(wqmiattn
, ef000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15618 cCE(wqmulm
, e100080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15619 cCE(wqmulmr
, e300080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15620 cCE(wqmulwm
, ec000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15621 cCE(wqmulwmr
, ee000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15622 cCE(wsubaddhx
, ed001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15625 #define ARM_VARIANT &arm_cext_maverick /* Cirrus Maverick instructions. */
15626 cCE(cfldrs
, c100400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
15627 cCE(cfldrd
, c500400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
15628 cCE(cfldr32
, c100500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
15629 cCE(cfldr64
, c500500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
15630 cCE(cfstrs
, c000400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
15631 cCE(cfstrd
, c400400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
15632 cCE(cfstr32
, c000500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
15633 cCE(cfstr64
, c400500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
15634 cCE(cfmvsr
, e000450
, 2, (RMF
, RR
), rn_rd
),
15635 cCE(cfmvrs
, e100450
, 2, (RR
, RMF
), rd_rn
),
15636 cCE(cfmvdlr
, e000410
, 2, (RMD
, RR
), rn_rd
),
15637 cCE(cfmvrdl
, e100410
, 2, (RR
, RMD
), rd_rn
),
15638 cCE(cfmvdhr
, e000430
, 2, (RMD
, RR
), rn_rd
),
15639 cCE(cfmvrdh
, e100430
, 2, (RR
, RMD
), rd_rn
),
15640 cCE(cfmv64lr
, e000510
, 2, (RMDX
, RR
), rn_rd
),
15641 cCE(cfmvr64l
, e100510
, 2, (RR
, RMDX
), rd_rn
),
15642 cCE(cfmv64hr
, e000530
, 2, (RMDX
, RR
), rn_rd
),
15643 cCE(cfmvr64h
, e100530
, 2, (RR
, RMDX
), rd_rn
),
15644 cCE(cfmval32
, e200440
, 2, (RMAX
, RMFX
), rd_rn
),
15645 cCE(cfmv32al
, e100440
, 2, (RMFX
, RMAX
), rd_rn
),
15646 cCE(cfmvam32
, e200460
, 2, (RMAX
, RMFX
), rd_rn
),
15647 cCE(cfmv32am
, e100460
, 2, (RMFX
, RMAX
), rd_rn
),
15648 cCE(cfmvah32
, e200480
, 2, (RMAX
, RMFX
), rd_rn
),
15649 cCE(cfmv32ah
, e100480
, 2, (RMFX
, RMAX
), rd_rn
),
15650 cCE(cfmva32
, e2004a0
, 2, (RMAX
, RMFX
), rd_rn
),
15651 cCE(cfmv32a
, e1004a0
, 2, (RMFX
, RMAX
), rd_rn
),
15652 cCE(cfmva64
, e2004c0
, 2, (RMAX
, RMDX
), rd_rn
),
15653 cCE(cfmv64a
, e1004c0
, 2, (RMDX
, RMAX
), rd_rn
),
15654 cCE(cfmvsc32
, e2004e0
, 2, (RMDS
, RMDX
), mav_dspsc
),
15655 cCE(cfmv32sc
, e1004e0
, 2, (RMDX
, RMDS
), rd
),
15656 cCE(cfcpys
, e000400
, 2, (RMF
, RMF
), rd_rn
),
15657 cCE(cfcpyd
, e000420
, 2, (RMD
, RMD
), rd_rn
),
15658 cCE(cfcvtsd
, e000460
, 2, (RMD
, RMF
), rd_rn
),
15659 cCE(cfcvtds
, e000440
, 2, (RMF
, RMD
), rd_rn
),
15660 cCE(cfcvt32s
, e000480
, 2, (RMF
, RMFX
), rd_rn
),
15661 cCE(cfcvt32d
, e0004a0
, 2, (RMD
, RMFX
), rd_rn
),
15662 cCE(cfcvt64s
, e0004c0
, 2, (RMF
, RMDX
), rd_rn
),
15663 cCE(cfcvt64d
, e0004e0
, 2, (RMD
, RMDX
), rd_rn
),
15664 cCE(cfcvts32
, e100580
, 2, (RMFX
, RMF
), rd_rn
),
15665 cCE(cfcvtd32
, e1005a0
, 2, (RMFX
, RMD
), rd_rn
),
15666 cCE(cftruncs32
,e1005c0
, 2, (RMFX
, RMF
), rd_rn
),
15667 cCE(cftruncd32
,e1005e0
, 2, (RMFX
, RMD
), rd_rn
),
15668 cCE(cfrshl32
, e000550
, 3, (RMFX
, RMFX
, RR
), mav_triple
),
15669 cCE(cfrshl64
, e000570
, 3, (RMDX
, RMDX
, RR
), mav_triple
),
15670 cCE(cfsh32
, e000500
, 3, (RMFX
, RMFX
, I63s
), mav_shift
),
15671 cCE(cfsh64
, e200500
, 3, (RMDX
, RMDX
, I63s
), mav_shift
),
15672 cCE(cfcmps
, e100490
, 3, (RR
, RMF
, RMF
), rd_rn_rm
),
15673 cCE(cfcmpd
, e1004b0
, 3, (RR
, RMD
, RMD
), rd_rn_rm
),
15674 cCE(cfcmp32
, e100590
, 3, (RR
, RMFX
, RMFX
), rd_rn_rm
),
15675 cCE(cfcmp64
, e1005b0
, 3, (RR
, RMDX
, RMDX
), rd_rn_rm
),
15676 cCE(cfabss
, e300400
, 2, (RMF
, RMF
), rd_rn
),
15677 cCE(cfabsd
, e300420
, 2, (RMD
, RMD
), rd_rn
),
15678 cCE(cfnegs
, e300440
, 2, (RMF
, RMF
), rd_rn
),
15679 cCE(cfnegd
, e300460
, 2, (RMD
, RMD
), rd_rn
),
15680 cCE(cfadds
, e300480
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
15681 cCE(cfaddd
, e3004a0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
15682 cCE(cfsubs
, e3004c0
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
15683 cCE(cfsubd
, e3004e0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
15684 cCE(cfmuls
, e100400
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
15685 cCE(cfmuld
, e100420
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
15686 cCE(cfabs32
, e300500
, 2, (RMFX
, RMFX
), rd_rn
),
15687 cCE(cfabs64
, e300520
, 2, (RMDX
, RMDX
), rd_rn
),
15688 cCE(cfneg32
, e300540
, 2, (RMFX
, RMFX
), rd_rn
),
15689 cCE(cfneg64
, e300560
, 2, (RMDX
, RMDX
), rd_rn
),
15690 cCE(cfadd32
, e300580
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
15691 cCE(cfadd64
, e3005a0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
15692 cCE(cfsub32
, e3005c0
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
15693 cCE(cfsub64
, e3005e0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
15694 cCE(cfmul32
, e100500
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
15695 cCE(cfmul64
, e100520
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
15696 cCE(cfmac32
, e100540
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
15697 cCE(cfmsc32
, e100560
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
15698 cCE(cfmadd32
, e000600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
15699 cCE(cfmsub32
, e100600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
15700 cCE(cfmadda32
, e200600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
15701 cCE(cfmsuba32
, e300600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
15704 #undef THUMB_VARIANT
/* MD interface: bits in the object file. */

/* Turn an integer of n bytes (in val) into a stream of bytes appropriate
   for use in the a.out file, and store them in the array pointed to by buf.
   This knows about the endian-ness of the target machine and does
   THE RIGHT THING, whatever it is.  Possible values for n are 1 (byte),
   2 (short) and 4 (long).  Floating-point numbers are put out as a series
   of LITTLENUMS (shorts, here at least). */
15741 md_number_to_chars (char * buf
, signed_expr_t val
, int n
)
15743 if (target_big_endian
)
15744 number_to_chars_bigendian (buf
, val
, n
);
15746 number_to_chars_littleendian (buf
, val
, n
);
15750 md_chars_to_number (char * buf
, int n
)
15753 unsigned char * where
= (unsigned char *) buf
;
15755 if (target_big_endian
)
15760 result
|= (*where
++ & 255);
15768 result
|= (where
[n
] & 255);
15775 /* MD interface: Sections. */
15777 /* Estimate the size of a frag before relaxing. Assume everything fits in
15781 md_estimate_size_before_relax (fragS
* fragp
,
15782 int segtype ATTRIBUTE_UNUSED
)
/* FIXME: it looks like the old way "relaxation" is done, in relax_section()
   in layout.c, will need to change in order to drive the ARM "relaxation". */
/* Relaxation table for the generic relax machinery.  Deliberately a single
   zeroed entry: per the FIXME above, this port does not (yet) drive ARM
   relaxation through md_relax_table — presumably the definition is kept so
   generic code referencing the table still links; TODO confirm. */
15790 const relax_typeS md_relax_table
[] = { {0} };
15792 /* Convert a machine dependent frag. */
15795 md_convert_frag (/* bfd *abfd, segT asec ATTRIBUTE_UNUSED, */ fragS
*fragp
)
15806 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
15809 old_op
= bfd_get_16(abfd
, buf
);
15811 old_op
= md_chars_to_number(buf
, THUMB_SIZE
);
15813 if (fragp
->fr_symbol
) {
15814 exp
.X_op
= O_symbol
;
15815 exp
.X_add_symbol
= fragp
->fr_symbol
;
15817 exp
.X_op
= O_constant
;
15819 exp
.X_add_number
= fragp
->fr_offset
;
15820 opcode
= fragp
->fr_subtype
;
15823 case T_MNEM_ldr_pc
:
15824 case T_MNEM_ldr_pc2
:
15825 case T_MNEM_ldr_sp
:
15826 case T_MNEM_str_sp
:
15833 if (fragp
->fr_var
== 4)
15835 insn
= THUMB_OP32(opcode
);
15836 if ((old_op
>> 12) == 4 || (old_op
>> 12) == 9)
15838 insn
|= (old_op
& 0x700) << 4;
15842 insn
|= (old_op
& 7) << 12;
15843 insn
|= (old_op
& 0x38) << 13;
15845 insn
|= 0x00000c00;
15846 put_thumb32_insn (buf
, insn
);
15847 reloc_type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
15851 reloc_type
= BFD_RELOC_ARM_THUMB_OFFSET
;
15853 pc_rel
= (opcode
== T_MNEM_ldr_pc2
);
15856 if (fragp
->fr_var
== 4)
15858 insn
= THUMB_OP32 (opcode
);
15859 insn
|= (old_op
& 0xf0) << 4;
15860 put_thumb32_insn (buf
, insn
);
15861 reloc_type
= BFD_RELOC_ARM_T32_ADD_PC12
;
15865 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
15866 exp
.X_add_number
-= 4;
15874 if (fragp
->fr_var
== 4)
15876 int r0off
= (opcode
== T_MNEM_mov
15877 || opcode
== T_MNEM_movs
) ? 0 : 8;
15878 insn
= THUMB_OP32 (opcode
);
15879 insn
= (insn
& 0xe1ffffff) | 0x10000000;
15880 insn
|= (old_op
& 0x700) << r0off
;
15881 put_thumb32_insn (buf
, insn
);
15882 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
15886 reloc_type
= BFD_RELOC_ARM_THUMB_IMM
;
15891 if (fragp
->fr_var
== 4)
15893 insn
= THUMB_OP32(opcode
);
15894 put_thumb32_insn (buf
, insn
);
15895 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
15898 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
15902 if (fragp
->fr_var
== 4)
15904 insn
= THUMB_OP32(opcode
);
15905 insn
|= (old_op
& 0xf00) << 14;
15906 put_thumb32_insn (buf
, insn
);
15907 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
15910 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
15913 case T_MNEM_add_sp
:
15914 case T_MNEM_add_pc
:
15915 case T_MNEM_inc_sp
:
15916 case T_MNEM_dec_sp
:
15917 if (fragp
->fr_var
== 4)
15919 /* ??? Choose between add and addw. */
15920 insn
= THUMB_OP32 (opcode
);
15921 insn
|= (old_op
& 0xf0) << 4;
15922 put_thumb32_insn (buf
, insn
);
15923 if (opcode
== T_MNEM_add_pc
)
15924 reloc_type
= BFD_RELOC_ARM_T32_IMM12
;
15926 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
15929 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
15937 if (fragp
->fr_var
== 4)
15939 insn
= THUMB_OP32 (opcode
);
15940 insn
|= (old_op
& 0xf0) << 4;
15941 insn
|= (old_op
& 0xf) << 16;
15942 put_thumb32_insn (buf
, insn
);
15943 if (insn
& (1 << 20))
15944 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
15946 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
15949 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
15955 fixp
= fix_new_exp (fragp
, fragp
->fr_fix
, fragp
->fr_var
, &exp
, pc_rel
,
15956 /* HACK GUESS, pcrel_reloc */ FALSE
,
15958 fixp
->fx_file
= fragp
->fr_file
;
15959 fixp
->fx_line
= fragp
->fr_line
;
15960 fragp
->fr_fix
+= fragp
->fr_var
;
15963 /* Return the size of a relaxable immediate operand instruction.
15964 SHIFT and SIZE specify the form of the allowable immediate. */
15966 relax_immediate (fragS
*fragp
, int size
, int shift
)
15972 /* ??? Should be able to do better than this. */
15973 if (fragp
->fr_symbol
)
15976 low
= (1 << shift
) - 1;
15977 mask
= (1 << (shift
+ size
)) - (1 << shift
);
15978 offset
= fragp
->fr_offset
;
15979 /* Force misaligned offsets to 32-bit variant. */
15982 if (offset
& ~mask
)
15987 /* Get the address of a symbol during relaxation. */
15989 relaxed_symbol_addr(fragS
*fragp
, int32_t stretch
)
15995 sym
= fragp
->fr_symbol
;
15996 sym_frag
= symbol_get_frag (sym
);
15997 know (S_GET_SEGMENT (sym
) != absolute_section
15998 || sym_frag
== &zero_address_frag
);
16000 addr
= S_GET_VALUE (sym
) + fragp
->fr_offset
;
16002 addr
= fragp
->fr_symbol
->sy_nlist
.n_value
+
16003 fragp
->fr_symbol
->sy_frag
->fr_address
+
16007 /* If frag has yet to be reached on this pass, assume it will
16008 move by STRETCH just as we did. If this is not so, it will
16009 be because some frag between grows, and that will force
16013 && sym_frag
->relax_marker
!= fragp
->relax_marker
)
16019 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
16022 relax_adr (fragS
*fragp
, /* HACK asection *sec, */ int32_t stretch
)
16027 /* Assume worst case for symbols not known to be in the same section. */
16028 if (!S_IS_DEFINED(fragp
->fr_symbol
)
16030 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
16035 val
= relaxed_symbol_addr(fragp
, stretch
);
16036 addr
= fragp
->fr_address
+ fragp
->fr_fix
;
16037 addr
= (addr
+ 4) & ~3;
16038 /* Force misaligned targets to 32-bit variant. */
16042 if (val
< 0 || val
> 1020)
16047 /* Return the size of a relaxable add/sub immediate instruction. */
16049 relax_addsub (fragS
*fragp
/* HACK , asection *sec */)
16054 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
16056 op
= bfd_get_16(sec
->owner
, buf
);
16058 op
= md_chars_to_number(buf
, THUMB_SIZE
);
16060 if ((op
& 0xf) == ((op
>> 4) & 0xf))
16061 return relax_immediate (fragp
, 8, 0);
16063 return relax_immediate (fragp
, 3, 0);
16067 /* Return the size of a relaxable branch instruction. BITS is the
16068 size of the offset field in the narrow instruction. */
16071 relax_branch (fragS
*fragp
, int nsect
, int bits
, int32_t stretch
)
16077 /* Assume worst case for symbols not known to be in the same section. */
16078 if (!S_IS_DEFINED(fragp
->fr_symbol
) ||
16079 (fragp
->fr_symbol
->sy_nlist
.n_type
& N_TYPE
) != N_SECT
||
16080 fragp
->fr_symbol
->sy_nlist
.n_sect
!= nsect
)
16083 val
= relaxed_symbol_addr(fragp
, stretch
);
16084 addr
= fragp
->fr_address
+ fragp
->fr_fix
+ 4;
16087 /* Offset is a signed value *2 */
16089 if (val
>= limit
|| val
< -limit
)
16095 /* Relax a machine dependent frag. This returns the amount by which
16096 the current size of the frag should change. */
16099 arm_relax_frag (int nsect
, fragS
*fragp
, int32_t stretch
)
16104 oldsize
= fragp
->fr_var
;
16105 switch (fragp
->fr_subtype
)
16107 case T_MNEM_ldr_pc2
:
16108 newsize
= relax_adr(fragp
, /* HACK sec, */ stretch
);
16110 case T_MNEM_ldr_pc
:
16111 case T_MNEM_ldr_sp
:
16112 case T_MNEM_str_sp
:
16113 newsize
= relax_immediate(fragp
, 8, 2);
16117 newsize
= relax_immediate(fragp
, 5, 2);
16121 newsize
= relax_immediate(fragp
, 5, 1);
16125 newsize
= relax_immediate(fragp
, 5, 0);
16128 newsize
= relax_adr(fragp
, /* HACK sec, */ stretch
);
16134 newsize
= relax_immediate(fragp
, 8, 0);
16137 newsize
= relax_branch(fragp
, nsect
, 11, stretch
);
16140 newsize
= relax_branch(fragp
, nsect
, 8, stretch
);
16142 case T_MNEM_add_sp
:
16143 case T_MNEM_add_pc
:
16144 newsize
= relax_immediate (fragp
, 8, 2);
16146 case T_MNEM_inc_sp
:
16147 case T_MNEM_dec_sp
:
16148 newsize
= relax_immediate (fragp
, 7, 2);
16154 newsize
= relax_addsub (fragp
/*, HACK sec */);
16160 fragp
->fr_var
= newsize
;
16161 /* Freeze wide instructions that are at or before the same location as
16162 in the previous pass. This avoids infinite loops.
16163 Don't freeze them unconditionally because targets may be artificialy
16164 misaligned by the expansion of preceeding frags. */
16165 if (stretch
<= 0 && newsize
> 2)
16167 md_convert_frag (/* sec->owner, sec, */ fragp
);
16171 return newsize
- oldsize
;
16174 /* Round up a section size to the appropriate boundary. */
16177 md_section_align (segT segment ATTRIBUTE_UNUSED
,
16180 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
16181 if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
)
16183 /* For a.out, force the section size to be aligned. If we don't do
16184 this, BFD will align it for us, but it will not write out the
16185 final bytes of the section. This may be a bug in BFD, but it is
16186 easier to fix it here since that is how the other a.out targets
16190 align
= bfd_get_section_alignment (stdoutput
, segment
);
16191 size
= ((size
+ (1 << align
) - 1) & ((valueT
) -1 << align
));
16199 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
16200 of an rs_align_code fragment. */
16203 arm_handle_align (fragS
* fragP
)
16205 static char const arm_noop
[4] = { 0x00, 0x00, 0xa0, 0xe1 };
16206 static char const thumb_noop
[2] = { 0xc0, 0x46 };
16207 static char const arm_bigend_noop
[4] = { 0xe1, 0xa0, 0x00, 0x00 };
16208 static char const thumb_bigend_noop
[2] = { 0x46, 0xc0 };
16210 int bytes
, fix
, noop_size
;
16214 if (fragP
->fr_type
!= rs_align_code
)
16217 bytes
= fragP
->fr_next
->fr_address
- fragP
->fr_address
- fragP
->fr_fix
;
16218 p
= fragP
->fr_literal
+ fragP
->fr_fix
;
16221 if (bytes
> MAX_MEM_FOR_RS_ALIGN_CODE
)
16222 bytes
&= MAX_MEM_FOR_RS_ALIGN_CODE
;
16224 if (fragP
->tc_frag_data
)
16226 if (target_big_endian
)
16227 noop
= thumb_bigend_noop
;
16230 noop_size
= sizeof (thumb_noop
);
16234 if (target_big_endian
)
16235 noop
= arm_bigend_noop
;
16238 noop_size
= sizeof (arm_noop
);
16241 if (bytes
& (noop_size
- 1))
16243 fix
= bytes
& (noop_size
- 1);
16244 memset (p
, 0, fix
);
16249 while (bytes
>= noop_size
)
16251 memcpy (p
, noop
, noop_size
);
16253 bytes
-= noop_size
;
16257 fragP
->fr_fix
+= fix
;
16258 fragP
->fr_var
= noop_size
;
16261 /* Called from md_do_align. Used to create an alignment
16262 frag in a code section. */
16265 arm_frag_align_code (int n
, int max
)
16269 /* We assume that there will never be a requirement
16270 to support alignments greater than 32 bytes. */
16271 if (max
> MAX_MEM_FOR_RS_ALIGN_CODE
)
16272 as_fatal (_("alignments greater than 32 bytes not supported in .text sections."));
16274 p
= frag_var (rs_align_code
,
16275 MAX_MEM_FOR_RS_ALIGN_CODE
,
16277 (relax_substateT
) max
,
16284 /* Perform target specific initialisation of a frag. */
16287 arm_init_frag (fragS
* fragP
)
16289 /* Record whether this frag is in an ARM or a THUMB area. */
16290 fragP
->tc_frag_data
= thumb_mode
;
16292 #endif /* NOTYET */
16294 /* MD interface: Symbol and relocation handling. */
16296 /* Return the address within the segment that a PC-relative fixup is
16297 relative to. For ARM, PC-relative fixups applied to instructions
16298 are generally relative to the location of the fixup plus 8 bytes.
16299 Thumb branches are offset by 4, and Thumb loads relative to PC
16300 require special handling. */
16303 md_pcrel_from_section (fixS
* fixP
, segT seg
)
16305 offsetT base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
16308 /* If this is pc-relative and we are going to emit a relocation
16309 then we just want to put out any pipeline compensation that the linker
16310 will need. Otherwise we want to use the calculated base.
16311 For WinCE we skip the bias for externals as well, since this
16312 is how the MS ARM-CE assembler behaves and we want to be compatible. */
16314 && ((fixP
->fx_addsy
&& S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
16315 || (arm_force_relocation (fixP
)
16317 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
16321 #endif /* NOTYET */
16323 switch (fixP
->fx_r_type
)
16325 /* PC relative addressing on the Thumb is slightly odd as the
16326 bottom two bits of the PC are forced to zero for the
16327 calculation. This happens *after* application of the
16328 pipeline offset. However, Thumb adrl already adjusts for
16329 this, so we need not do it again. */
16330 case BFD_RELOC_ARM_THUMB_ADD
:
16333 case BFD_RELOC_ARM_THUMB_OFFSET
:
16334 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
16335 case BFD_RELOC_ARM_T32_ADD_PC12
:
16336 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
16337 return (base
+ 4) & ~3;
16339 /* Thumb branches are simply offset by +4. */
16340 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
16341 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
16342 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
16343 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
16344 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
16345 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
16346 case BFD_RELOC_THUMB_PCREL_BLX
:
16349 /* ARM mode branches are offset by +8. However, the Windows CE
16350 loader expects the relocation not to take this into account. */
16351 case BFD_RELOC_ARM_PCREL_BRANCH
:
16352 case BFD_RELOC_ARM_PCREL_CALL
:
16353 case BFD_RELOC_ARM_PCREL_JUMP
:
16354 case BFD_RELOC_ARM_PCREL_BLX
:
16355 case BFD_RELOC_ARM_PLT32
:
16357 /* When handling fixups immediately, because we have already
16358 discovered the value of a symbol, or the address of the frag involved
16359 we must account for the offset by +8, as the OS loader will never see the reloc.
16360 see fixup_segment() in write.c
16361 The S_IS_EXTERNAL test handles the case of global symbols.
16362 Those need the calculated base, not just the pipe compensation the linker will need. */
16364 && fixP
->fx_addsy
!= NULL
16365 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
16366 && (S_IS_EXTERNAL (fixP
->fx_addsy
) || !arm_force_relocation (fixP
)))
16373 /* ARM mode loads relative to PC are also offset by +8. Unlike
16374 branches, the Windows CE loader *does* expect the relocation
16375 to take this into account. */
16376 case BFD_RELOC_ARM_OFFSET_IMM
:
16377 case BFD_RELOC_ARM_OFFSET_IMM8
:
16378 case BFD_RELOC_ARM_HWLITERAL
:
16379 case BFD_RELOC_ARM_LITERAL
:
16380 case BFD_RELOC_ARM_CP_OFF_IMM
:
16384 /* Other PC-relative relocations are un-offset. */
16390 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
16391 Otherwise we have no need to default values of symbols. */
16394 md_undefined_symbol (char * name ATTRIBUTE_UNUSED
)
16397 if (name
[0] == '_' && name
[1] == 'G'
16398 && streq (name
, GLOBAL_OFFSET_TABLE_NAME
))
16402 if (symbol_find (name
))
16403 as_bad ("GOT already in the symbol table");
16405 GOT_symbol
= symbol_new (name
, undefined_section
,
16406 (valueT
) 0, & zero_address_frag
);
16416 /* Subroutine of md_apply_fix. Check to see if an immediate can be
16417 computed as two separate immediate values, added together. We
16418 already know that this value cannot be computed by just one ARM
16421 static unsigned int
16422 validate_immediate_twopart (unsigned int val
,
16423 unsigned int * highpart
)
16428 for (i
= 0; i
< 32; i
+= 2)
16429 if (((a
= rotate_left (val
, i
)) & 0xff) != 0)
16435 * highpart
= (a
>> 8) | ((i
+ 24) << 7);
16437 else if (a
& 0xff0000)
16439 if (a
& 0xff000000)
16441 * highpart
= (a
>> 16) | ((i
+ 16) << 7);
16445 assert (a
& 0xff000000);
16446 * highpart
= (a
>> 24) | ((i
+ 8) << 7);
16449 return (a
& 0xff) | (i
<< 7);
16456 validate_offset_imm (unsigned int val
, int hwse
)
16458 if ((hwse
&& val
> 255) || val
> 4095)
16463 /* Subroutine of md_apply_fix. Do those data_ops which can take a
16464 negative immediate constant by altering the instruction. A bit of
16469 by inverting the second operand, and
16472 by negating the second operand. */
16475 negate_data_op (uint32_t * instruction
,
16479 uint32_t negated
, inverted
;
16481 negated
= encode_arm_immediate (-value
);
16482 inverted
= encode_arm_immediate (~value
);
16484 op
= (*instruction
>> DATA_OP_SHIFT
) & 0xf;
16487 /* First negates. */
16488 case OPCODE_SUB
: /* ADD <-> SUB */
16489 new_inst
= OPCODE_ADD
;
16494 new_inst
= OPCODE_SUB
;
16498 case OPCODE_CMP
: /* CMP <-> CMN */
16499 new_inst
= OPCODE_CMN
;
16504 new_inst
= OPCODE_CMP
;
16508 /* Now Inverted ops. */
16509 case OPCODE_MOV
: /* MOV <-> MVN */
16510 new_inst
= OPCODE_MVN
;
16515 new_inst
= OPCODE_MOV
;
16519 case OPCODE_AND
: /* AND <-> BIC */
16520 new_inst
= OPCODE_BIC
;
16525 new_inst
= OPCODE_AND
;
16529 case OPCODE_ADC
: /* ADC <-> SBC */
16530 new_inst
= OPCODE_SBC
;
16535 new_inst
= OPCODE_ADC
;
16539 /* We cannot do anything. */
16544 if (value
== (unsigned) FAIL
)
16547 *instruction
&= OPCODE_MASK
;
16548 *instruction
|= new_inst
<< DATA_OP_SHIFT
;
16552 /* Like negate_data_op, but for Thumb-2. */
16554 static unsigned int
16555 thumb32_negate_data_op (offsetT
*instruction
, unsigned int value
)
16559 unsigned int negated
, inverted
;
16561 negated
= encode_thumb32_immediate (-value
);
16562 inverted
= encode_thumb32_immediate (~value
);
16564 rd
= (*instruction
>> 8) & 0xf;
16565 op
= (*instruction
>> T2_DATA_OP_SHIFT
) & 0xf;
16568 /* ADD <-> SUB. Includes CMP <-> CMN. */
16569 case T2_OPCODE_SUB
:
16570 new_inst
= T2_OPCODE_ADD
;
16574 case T2_OPCODE_ADD
:
16575 new_inst
= T2_OPCODE_SUB
;
16579 /* ORR <-> ORN. Includes MOV <-> MVN. */
16580 case T2_OPCODE_ORR
:
16581 new_inst
= T2_OPCODE_ORN
;
16585 case T2_OPCODE_ORN
:
16586 new_inst
= T2_OPCODE_ORR
;
16590 /* AND <-> BIC. TST has no inverted equivalent. */
16591 case T2_OPCODE_AND
:
16592 new_inst
= T2_OPCODE_BIC
;
16599 case T2_OPCODE_BIC
:
16600 new_inst
= T2_OPCODE_AND
;
16605 case T2_OPCODE_ADC
:
16606 new_inst
= T2_OPCODE_SBC
;
16610 case T2_OPCODE_SBC
:
16611 new_inst
= T2_OPCODE_ADC
;
16615 /* We cannot do anything. */
16620 if (value
== (unsigned int)FAIL
)
16623 *instruction
&= T2_OPCODE_MASK
;
16624 *instruction
|= new_inst
<< T2_DATA_OP_SHIFT
;
16628 /* Read a 32-bit thumb instruction from buf. */
16630 get_thumb32_insn (char * buf
)
16633 insn
= md_chars_to_number (buf
, THUMB_SIZE
) << 16;
16634 insn
|= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
16639 /* We usually want to set the low bit on the address of thumb function
16640 symbols. In particular .word foo - . should have the low bit set.
16641 Generic code tries to fold the difference of two symbols to
16642 a constant. Prevent this and force a relocation when the first symbols
16643 is a thumb function. */
16645 arm_optimize_expr (expressionS
*l
, operatorT op
, expressionS
*r
)
16648 if (op
== O_subtract
16649 && l
->X_op
== O_symbol
16650 && r
->X_op
== O_symbol
16651 && THUMB_IS_FUNC (l
->X_add_symbol
))
16653 l
->X_op
= O_subtract
;
16654 l
->X_op_symbol
= r
->X_add_symbol
;
16655 l
->X_add_number
-= r
->X_add_number
;
16658 #endif /* NOTYET */
16659 /* Process as normal. */
16664 md_apply_fix (fixS
* fixP
,
16668 offsetT value
= * valP
;
16670 unsigned int newimm
;
16673 char * buf
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
16675 assert (fixP
->fx_r_type
<= BFD_RELOC_UNUSED
);
16677 /* Note whether this will delete the relocation. */
16679 if (fixP
->fx_addsy
== 0 && !fixP
->fx_pcrel
)
16682 /* On a 64-bit host, silently truncate 'value' to 32 bits for
16683 consistency with the behavior on 32-bit hosts. Remember value
16685 value
&= 0xffffffff;
16686 value
^= 0x80000000;
16687 value
-= 0x80000000;
16690 fixP
->fx_addnumber
= value
;
16692 /* Same treatment for fixP->fx_offset. */
16693 fixP
->fx_offset
&= 0xffffffff;
16694 fixP
->fx_offset
^= 0x80000000;
16695 fixP
->fx_offset
-= 0x80000000;
16697 switch (fixP
->fx_r_type
)
16700 case BFD_RELOC_NONE
:
16701 /* This will need to go in the object file. */
16706 case BFD_RELOC_ARM_IMMEDIATE
:
16707 /* We claim that this fixup has been processed here,
16708 even if in fact we generate an error because we do
16709 not have a reloc for it, so tc_gen_reloc will reject it. */
16713 && ! S_IS_DEFINED (fixP
->fx_addsy
))
16715 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16716 _("undefined symbol %s used as an immediate value"),
16717 S_GET_NAME (fixP
->fx_addsy
));
16721 newimm
= encode_arm_immediate (value
);
16722 temp
= md_chars_to_number (buf
, INSN_SIZE
);
16724 /* If the instruction will fail, see if we can fix things up by
16725 changing the opcode. */
16726 if (newimm
== (unsigned int) FAIL
16727 && (newimm
= negate_data_op (&temp
, value
)) == (unsigned int) FAIL
)
16729 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16730 _("invalid constant (%x) after fixup"),
16735 newimm
|= (temp
& 0xfffff000);
16736 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
16739 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
16741 unsigned int highpart
= 0;
16742 unsigned int newinsn
= 0xe1a00000; /* nop. */
16744 newimm
= encode_arm_immediate (value
);
16745 temp
= md_chars_to_number (buf
, INSN_SIZE
);
16747 /* If the instruction will fail, see if we can fix things up by
16748 changing the opcode. */
16749 if (newimm
== (unsigned int) FAIL
16750 && (newimm
= negate_data_op (& temp
, value
)) == (unsigned int) FAIL
)
16752 /* No ? OK - try using two ADD instructions to generate
16754 newimm
= validate_immediate_twopart (value
, & highpart
);
16756 /* Yes - then make sure that the second instruction is
16758 if (newimm
!= (unsigned int) FAIL
)
16760 /* Still No ? Try using a negated value. */
16761 else if ((newimm
= validate_immediate_twopart (- value
, & highpart
)) != (unsigned int) FAIL
)
16762 temp
= newinsn
= (temp
& OPCODE_MASK
) | OPCODE_SUB
<< DATA_OP_SHIFT
;
16763 /* Otherwise - give up. */
16766 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16767 _("unable to compute ADRL instructions for PC offset of 0x%x"),
16772 /* Replace the first operand in the 2nd instruction (which
16773 is the PC) with the destination register. We have
16774 already added in the PC in the first instruction and we
16775 do not want to do it again. */
16776 newinsn
&= ~ 0xf0000;
16777 newinsn
|= ((newinsn
& 0x0f000) << 4);
16780 newimm
|= (temp
& 0xfffff000);
16781 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
16783 highpart
|= (newinsn
& 0xfffff000);
16784 md_number_to_chars (buf
+ INSN_SIZE
, (valueT
) highpart
, INSN_SIZE
);
16788 case BFD_RELOC_ARM_OFFSET_IMM
:
16790 if (!fixP
->fx_done
&& seg
->use_rela_p
)
16792 if (!fixP
->fx_done
&& 0)
16796 case BFD_RELOC_ARM_LITERAL
:
16802 if (validate_offset_imm (value
, 0) == FAIL
)
16804 if (fixP
->fx_r_type
== BFD_RELOC_ARM_LITERAL
)
16805 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16806 _("invalid literal constant: pool needs to be closer"));
16808 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16809 _("bad immediate value for offset (%d)"),
16814 newval
= md_chars_to_number (buf
, INSN_SIZE
);
16815 newval
&= 0xff7ff000;
16816 newval
|= value
| (sign
? INDEX_UP
: 0);
16817 md_number_to_chars (buf
, newval
, INSN_SIZE
);
16820 case BFD_RELOC_ARM_OFFSET_IMM8
:
16821 case BFD_RELOC_ARM_HWLITERAL
:
16827 if (validate_offset_imm (value
, 1) == FAIL
)
16829 if (fixP
->fx_r_type
== BFD_RELOC_ARM_HWLITERAL
)
16830 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16831 _("invalid literal constant: pool needs to be closer"));
16833 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16834 _("bad immediate value for 8-bit offset (%d)"),
16839 newval
= md_chars_to_number (buf
, INSN_SIZE
);
16840 newval
&= 0xff7ff0f0;
16841 newval
|= ((value
>> 4) << 8) | (value
& 0xf) | (sign
? INDEX_UP
: 0);
16842 md_number_to_chars (buf
, newval
, INSN_SIZE
);
16845 case BFD_RELOC_ARM_T32_OFFSET_U8
:
16846 if (value
< 0 || value
> 1020 || value
% 4 != 0)
16847 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16848 _("bad immediate value for offset (%d)"), (int32_t) value
);
16851 newval
= md_chars_to_number (buf
+2, THUMB_SIZE
);
16853 md_number_to_chars (buf
+2, newval
, THUMB_SIZE
);
16856 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
16857 /* This is a complicated relocation used for all varieties of Thumb32
16858 load/store instruction with immediate offset:
16860 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
16861 *4, optional writeback(W)
16862 (doubleword load/store)
16864 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
16865 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
16866 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
16867 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
16868 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
16870 Uppercase letters indicate bits that are already encoded at
16871 this point. Lowercase letters are our problem. For the
16872 second block of instructions, the secondary opcode nybble
16873 (bits 8..11) is present, and bit 23 is zero, even if this is
16874 a PC-relative operation. */
16875 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
16877 newval
|= md_chars_to_number (buf
+THUMB_SIZE
, THUMB_SIZE
);
16879 if ((newval
& 0xf0000000) == 0xe0000000)
16881 /* Doubleword load/store: 8-bit offset, scaled by 4. */
16883 newval
|= (1 << 23);
16886 if (value
% 4 != 0)
16888 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16889 _("offset not a multiple of 4"));
16895 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16896 _("offset out of range"));
16901 else if ((newval
& 0x000f0000) == 0x000f0000)
16903 /* PC-relative, 12-bit offset. */
16905 newval
|= (1 << 23);
16910 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16911 _("offset out of range"));
16916 else if ((newval
& 0x00000100) == 0x00000100)
16918 /* Writeback: 8-bit, +/- offset. */
16920 newval
|= (1 << 9);
16925 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16926 _("offset out of range"));
16931 else if ((newval
& 0x00000f00) == 0x00000e00)
16933 /* T-instruction: positive 8-bit offset. */
16934 if (value
< 0 || value
> 0xff)
16936 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16937 _("offset out of range"));
16945 /* Positive 12-bit or negative 8-bit offset. */
16949 newval
|= (1 << 23);
16959 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16960 _("offset out of range"));
16967 md_number_to_chars (buf
, (newval
>> 16) & 0xffff, THUMB_SIZE
);
16968 md_number_to_chars (buf
+ THUMB_SIZE
, newval
& 0xffff, THUMB_SIZE
);
16971 case BFD_RELOC_ARM_SHIFT_IMM
:
16972 newval
= md_chars_to_number (buf
, INSN_SIZE
);
16973 if (((uint32_t) value
) > 32
16975 && (((newval
& 0x60) == 0) || (newval
& 0x60) == 0x60)))
16977 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16978 _("shift expression is too large"));
16983 /* Shifts of zero must be done as lsl. */
16985 else if (value
== 32)
16987 newval
&= 0xfffff07f;
16988 newval
|= (value
& 0x1f) << 7;
16989 md_number_to_chars (buf
, newval
, INSN_SIZE
);
16992 case BFD_RELOC_ARM_T32_IMMEDIATE
:
16993 case BFD_RELOC_ARM_T32_ADD_IMM
:
16994 case BFD_RELOC_ARM_T32_IMM12
:
16995 case BFD_RELOC_ARM_T32_ADD_PC12
:
16996 /* We claim that this fixup has been processed here,
16997 even if in fact we generate an error because we do
16998 not have a reloc for it, so tc_gen_reloc will reject it. */
17002 && ! S_IS_DEFINED (fixP
->fx_addsy
))
17004 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17005 _("undefined symbol %s used as an immediate value"),
17006 S_GET_NAME (fixP
->fx_addsy
));
17010 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
17012 newval
|= md_chars_to_number (buf
+2, THUMB_SIZE
);
17015 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
17016 || fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
17018 newimm
= encode_thumb32_immediate (value
);
17019 if (newimm
== (unsigned int) FAIL
)
17020 newimm
= thumb32_negate_data_op (&newval
, value
);
17022 if (fixP
->fx_r_type
!= BFD_RELOC_ARM_T32_IMMEDIATE
17023 && newimm
== (unsigned int) FAIL
)
17025 /* Turn add/sum into addw/subw. */
17026 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
17027 newval
= (newval
& 0xfeffffff) | 0x02000000;
17029 /* 12 bit immediate for addw/subw. */
17033 newval
^= 0x00a00000;
17036 newimm
= (unsigned int) FAIL
;
17041 if (newimm
== (unsigned int)FAIL
)
17043 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17044 _("invalid constant (%x) after fixup"),
17049 newval
|= (newimm
& 0x800) << 15;
17050 newval
|= (newimm
& 0x700) << 4;
17051 newval
|= (newimm
& 0x0ff);
17053 md_number_to_chars (buf
, (valueT
) ((newval
>> 16) & 0xffff), THUMB_SIZE
);
17054 md_number_to_chars (buf
+2, (valueT
) (newval
& 0xffff), THUMB_SIZE
);
17057 case BFD_RELOC_ARM_SMC
:
17058 if (((uint32_t) value
) > 0xffff)
17059 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17060 _("invalid smc expression"));
17061 newval
= md_chars_to_number (buf
, INSN_SIZE
);
17062 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
17063 md_number_to_chars (buf
, newval
, INSN_SIZE
);
17066 case BFD_RELOC_ARM_SWI
:
17067 if (*((int*)fixP
->tc_fix_data
) != 0)
17069 if (((uint32_t) value
) > 0xff)
17070 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17071 _("invalid swi expression"));
17072 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
17074 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
17078 if (((uint32_t) value
) > 0x00ffffff)
17079 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17080 _("invalid swi expression"));
17081 newval
= md_chars_to_number (buf
, INSN_SIZE
);
17083 md_number_to_chars (buf
, newval
, INSN_SIZE
);
17087 case BFD_RELOC_ARM_MULTI
:
17088 if (((uint32_t) value
) > 0xffff)
17089 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17090 _("invalid expression in load/store multiple"));
17091 newval
= value
| md_chars_to_number (buf
, INSN_SIZE
);
17092 md_number_to_chars (buf
, newval
, INSN_SIZE
);
17096 case BFD_RELOC_ARM_PCREL_CALL
:
17097 newval
= md_chars_to_number (buf
, INSN_SIZE
);
17098 if ((newval
& 0xf0000000) == 0xf0000000)
17102 goto arm_branch_common
;
17104 case BFD_RELOC_ARM_PCREL_JUMP
:
17105 case BFD_RELOC_ARM_PLT32
:
17107 case BFD_RELOC_ARM_PCREL_BRANCH
:
17109 goto arm_branch_common
;
17111 case BFD_RELOC_ARM_PCREL_BLX
:
17114 /* Our linker wants the same reloc for bl and blx. */
17115 fixP
->fx_r_type
= BFD_RELOC_ARM_PCREL_BRANCH
;
17118 /* We are going to store value (shifted right by two) in the
17119 instruction, in a 24 bit, signed field. Bits 26 through 32 either
17120 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
17121 also be be clear. */
17123 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17124 _("misaligned branch destination"));
17125 if ((value
& (offsetT
)0xfe000000) != (offsetT
)0
17126 && (value
& (offsetT
)0xfe000000) != (offsetT
)0xfe000000)
17127 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17128 _("branch out of range"));
17131 if (fixP
->fx_done
|| !seg
->use_rela_p
)
17133 if (fixP
->fx_done
|| !0)
17136 newval
= md_chars_to_number (buf
, INSN_SIZE
);
17137 newval
|= (value
>> 2) & 0x00ffffff;
17138 /* Set the H bit on BLX instructions. */
17142 newval
|= 0x01000000;
17144 newval
&= ~0x01000000;
17146 md_number_to_chars (buf
, newval
, INSN_SIZE
);
17150 case BFD_RELOC_THUMB_PCREL_BRANCH7
: /* CBZ */
17151 /* CBZ can only branch forward. */
17153 /* Attempts to use CBZ to branch to the next instruction
17154 (which, strictly speaking, are prohibited) will be turned into
17157 FIXME: It may be better to remove the instruction completely and
17158 perform relaxation. */
17161 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
17162 newval
= 0xbf00; /* NOP encoding T1 */
17163 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
17168 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17169 _("branch out of range"));
17172 if (fixP
->fx_done
|| !seg
->use_rela_p
)
17174 if (fixP
->fx_done
|| !0)
17177 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
17178 newval
|= ((value
& 0x3e) << 2) | ((value
& 0x40) << 3);
17179 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
17184 case BFD_RELOC_THUMB_PCREL_BRANCH9
: /* Conditional branch. */
17185 if ((value
& ~0xff) && ((value
& ~0xff) != ~0xff))
17186 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17187 _("branch out of range"));
17190 if (fixP
->fx_done
|| !seg
->use_rela_p
)
17192 if (fixP
->fx_done
|| !0)
17195 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
17196 newval
|= (value
& 0x1ff) >> 1;
17197 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
17201 case BFD_RELOC_THUMB_PCREL_BRANCH12
: /* Unconditional branch. */
17202 if ((value
& ~0x7ff) && ((value
& ~0x7ff) != ~0x7ff))
17203 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17204 _("branch out of range"));
17207 if (fixP
->fx_done
|| !seg
->use_rela_p
)
17209 if (fixP
->fx_done
|| !0)
17212 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
17213 newval
|= (value
& 0xfff) >> 1;
17214 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
17218 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
17219 if ((value
& ~0x1fffff) && ((value
& ~0x1fffff) != ~0x1fffff))
17220 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17221 _("conditional branch out of range"));
17224 if (fixP
->fx_done
|| !seg
->use_rela_p
)
17226 if (fixP
->fx_done
|| !0)
17230 addressT S
, J1
, J2
, lo
, hi
;
17232 S
= (value
& 0x00100000) >> 20;
17233 J2
= (value
& 0x00080000) >> 19;
17234 J1
= (value
& 0x00040000) >> 18;
17235 hi
= (value
& 0x0003f000) >> 12;
17236 lo
= (value
& 0x00000ffe) >> 1;
17238 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
17239 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
17240 newval
|= (S
<< 10) | hi
;
17241 newval2
|= (J1
<< 13) | (J2
<< 11) | lo
;
17242 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
17243 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
17247 case BFD_RELOC_THUMB_PCREL_BLX
:
17248 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
17249 if ((value
& ~0x3fffff) && ((value
& ~0x3fffff) != ~0x3fffff))
17250 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17251 _("branch out of range"));
17253 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
17254 /* For a BLX instruction, make sure that the relocation is rounded up
17255 to a word boundary. This follows the semantics of the instruction
17256 which specifies that bit 1 of the target address will come from bit
17257 1 of the base address. */
17258 value
= (value
+ 2) & ~ 2;
17261 /* Our linker wants the same reloc for bl and blx. */
17262 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
17266 if (fixP
->fx_done
|| !seg
->use_rela_p
)
17268 if (fixP
->fx_done
|| !0)
17273 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
17274 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
17275 newval
|= (value
& 0x7fffff) >> 12;
17276 newval2
|= (value
& 0xfff) >> 1;
17277 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
17278 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
17282 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
17283 if ((value
& ~0x1ffffff) && ((value
& ~0x1ffffff) != ~0x1ffffff))
17284 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17285 _("branch out of range"));
17287 /* Our linker wants the same reloc for bl and blx. */
17288 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
17292 if (fixP
->fx_done
|| !seg
->use_rela_p
)
17294 if (fixP
->fx_done
|| !0)
17298 addressT S
, I1
, I2
, lo
, hi
;
17300 S
= (value
& 0x01000000) >> 24;
17301 I1
= (value
& 0x00800000) >> 23;
17302 I2
= (value
& 0x00400000) >> 22;
17303 hi
= (value
& 0x003ff000) >> 12;
17304 lo
= (value
& 0x00000ffe) >> 1;
17309 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
17310 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
17311 newval
|= (S
<< 10) | hi
;
17312 newval2
|= (I1
<< 13) | (I2
<< 11) | lo
;
17313 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
17314 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
17320 if (fixP
->fx_done
|| !seg
->use_rela_p
)
17322 if (fixP
->fx_done
|| !0)
17324 md_number_to_chars (buf
, value
, 1);
17329 if (fixP
->fx_done
|| !seg
->use_rela_p
)
17331 if (fixP
->fx_done
|| !0)
17333 md_number_to_chars (buf
, value
, 2);
17337 case BFD_RELOC_ARM_TLS_GD32
:
17338 case BFD_RELOC_ARM_TLS_LE32
:
17339 case BFD_RELOC_ARM_TLS_IE32
:
17340 case BFD_RELOC_ARM_TLS_LDM32
:
17341 case BFD_RELOC_ARM_TLS_LDO32
:
17342 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
17345 case BFD_RELOC_ARM_GOT32
:
17346 case BFD_RELOC_ARM_GOTOFF
:
17347 case BFD_RELOC_ARM_TARGET2
:
17349 if (fixP
->fx_done
|| !seg
->use_rela_p
)
17351 if (fixP
->fx_done
|| !0)
17353 md_number_to_chars (buf
, 0, 4);
17357 case BFD_RELOC_RVA
:
17359 case BFD_RELOC_ARM_TARGET1
:
17360 case BFD_RELOC_ARM_ROSEGREL32
:
17361 case BFD_RELOC_ARM_SBREL32
:
17362 case BFD_RELOC_32_PCREL
:
17364 case BFD_RELOC_32_SECREL
:
17367 if (fixP
->fx_done
|| !seg
->use_rela_p
)
17369 if (fixP
->fx_done
|| !0)
17372 /* For WinCE we only do this for pcrel fixups. */
17373 if (fixP
->fx_done
|| fixP
->fx_pcrel
)
17375 md_number_to_chars (buf
, value
, 4);
17379 case BFD_RELOC_ARM_PREL31
:
17381 if (fixP
->fx_done
|| !seg
->use_rela_p
)
17383 if (fixP
->fx_done
|| !0)
17386 newval
= md_chars_to_number (buf
, 4) & 0x80000000;
17387 if ((value
^ (value
>> 1)) & 0x40000000)
17389 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17390 _("rel31 relocation overflow"));
17392 newval
|= value
& 0x7fffffff;
17393 md_number_to_chars (buf
, newval
, 4);
17398 case BFD_RELOC_ARM_CP_OFF_IMM
:
17399 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
17400 if (value
< -1023 || value
> 1023 || (value
& 3))
17401 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17402 _("co-processor offset out of range"));
17407 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
17408 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
17409 newval
= md_chars_to_number (buf
, INSN_SIZE
);
17411 newval
= get_thumb32_insn (buf
);
17412 newval
&= 0xff7fff00;
17413 newval
|= (value
>> 2) | (sign
? INDEX_UP
: 0);
17414 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
17415 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
17416 md_number_to_chars (buf
, newval
, INSN_SIZE
);
17418 put_thumb32_insn (buf
, newval
);
17421 case BFD_RELOC_ARM_CP_OFF_IMM_S2
:
17422 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
:
17423 if (value
< -255 || value
> 255)
17424 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17425 _("co-processor offset out of range"));
17427 goto cp_off_common
;
17429 case BFD_RELOC_ARM_THUMB_OFFSET
:
17430 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
17431 /* Exactly what ranges, and where the offset is inserted depends
17432 on the type of instruction, we can establish this from the
17434 switch (newval
>> 12)
17436 case 4: /* PC load. */
17437 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
17438 forced to zero for these loads; md_pcrel_from has already
17439 compensated for this. */
17441 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17442 _("invalid offset, target not word aligned (0x%08x)"),
17443 (((uint32_t) fixP
->fx_frag
->fr_address
17444 + (uint32_t) fixP
->fx_where
) & ~3)
17445 + (uint32_t) value
);
17447 if (value
& ~0x3fc)
17448 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17449 _("invalid offset, value too big (0x%08x)"),
17452 newval
|= value
>> 2;
17455 case 9: /* SP load/store. */
17456 if (value
& ~0x3fc)
17457 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17458 _("invalid offset, value too big (0x%08x)"),
17460 newval
|= value
>> 2;
17463 case 6: /* Word load/store. */
17465 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17466 _("invalid offset, value too big (0x%08x)"),
17468 newval
|= value
<< 4; /* 6 - 2. */
17471 case 7: /* Byte load/store. */
17473 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17474 _("invalid offset, value too big (0x%08x)"),
17476 newval
|= value
<< 6;
17479 case 8: /* Halfword load/store. */
17481 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17482 _("invalid offset, value too big (0x%08x)"),
17484 newval
|= value
<< 5; /* 6 - 1. */
17488 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17489 "Unable to process relocation for thumb opcode: %x",
17490 (uint32_t) newval
);
17493 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
17496 case BFD_RELOC_ARM_THUMB_ADD
:
17497 /* This is a complicated relocation, since we use it for all of
17498 the following immediate relocations:
17502 9bit ADD/SUB SP word-aligned
17503 10bit ADD PC/SP word-aligned
17505 The type of instruction being processed is encoded in the
17512 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
17514 int rd
= (newval
>> 4) & 0xf;
17515 int rs
= newval
& 0xf;
17516 int subtract
= !!(newval
& 0x8000);
17518 /* Check for HI regs, only very restricted cases allowed:
17519 Adjusting SP, and using PC or SP to get an address. */
17520 if ((rd
> 7 && (rd
!= REG_SP
|| rs
!= REG_SP
))
17521 || (rs
> 7 && rs
!= REG_SP
&& rs
!= REG_PC
))
17522 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17523 _("invalid Hi register with immediate"));
17525 /* If value is negative, choose the opposite instruction. */
17529 subtract
= !subtract
;
17531 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17532 _("immediate value out of range"));
17537 if (value
& ~0x1fc)
17538 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17539 _("invalid immediate for stack address calculation"));
17540 newval
= subtract
? T_OPCODE_SUB_ST
: T_OPCODE_ADD_ST
;
17541 newval
|= value
>> 2;
17543 else if (rs
== REG_PC
|| rs
== REG_SP
)
17545 if (subtract
|| value
& ~0x3fc)
17546 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17547 _("invalid immediate for address calculation (value = 0x%08x)"),
17549 newval
= (rs
== REG_PC
? T_OPCODE_ADD_PC
: T_OPCODE_ADD_SP
);
17551 newval
|= value
>> 2;
17556 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17557 _("immediate value out of range"));
17558 newval
= subtract
? T_OPCODE_SUB_I8
: T_OPCODE_ADD_I8
;
17559 newval
|= (rd
<< 8) | value
;
17564 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17565 _("immediate value out of range"));
17566 newval
= subtract
? T_OPCODE_SUB_I3
: T_OPCODE_ADD_I3
;
17567 newval
|= rd
| (rs
<< 3) | (value
<< 6);
17570 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
17573 case BFD_RELOC_ARM_THUMB_IMM
:
17574 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
17575 if (value
< 0 || value
> 255)
17576 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17577 _("invalid immediate: %d is too large"),
17580 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
17583 case BFD_RELOC_ARM_THUMB_SHIFT
:
17584 /* 5bit shift value (0..32). LSL cannot take 32. */
17585 newval
= md_chars_to_number (buf
, THUMB_SIZE
) & 0xf83f;
17586 temp
= newval
& 0xf800;
17587 if (value
< 0 || value
> 32 || (value
== 32 && temp
== T_OPCODE_LSL_I
))
17588 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17589 _("invalid shift value: %d"), (int32_t) value
);
17590 /* Shifts of zero must be encoded as LSL. */
17592 newval
= (newval
& 0x003f) | T_OPCODE_LSL_I
;
17593 /* Shifts of 32 are encoded as zero. */
17594 else if (value
== 32)
17596 newval
|= value
<< 6;
17597 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
17601 case BFD_RELOC_VTABLE_INHERIT
:
17602 case BFD_RELOC_VTABLE_ENTRY
:
17607 case BFD_RELOC_ARM_MOVW
:
17608 case BFD_RELOC_ARM_MOVT
:
17609 case BFD_RELOC_ARM_THUMB_MOVW
:
17610 case BFD_RELOC_ARM_THUMB_MOVT
:
17612 if (fixP
->fx_done
|| !seg
->use_rela_p
)
17614 if (fixP
->fx_done
|| !0)
17617 /* REL format relocations are limited to a 16-bit addend. */
17618 if (!fixP
->fx_done
)
17620 if (value
< -0x1000 || value
> 0xffff)
17621 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17622 _("offset too big"));
17624 else if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
17625 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
17630 if (fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
17631 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
17633 newval
= get_thumb32_insn (buf
);
17634 newval
&= 0xfbf08f00;
17635 newval
|= (value
& 0xf000) << 4;
17636 newval
|= (value
& 0x0800) << 15;
17637 newval
|= (value
& 0x0700) << 4;
17638 newval
|= (value
& 0x00ff);
17639 put_thumb32_insn (buf
, newval
);
17643 newval
= md_chars_to_number (buf
, 4);
17644 newval
&= 0xfff0f000;
17645 newval
|= value
& 0x0fff;
17646 newval
|= (value
& 0xf000) << 4;
17647 md_number_to_chars (buf
, newval
, 4);
17652 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
17653 case BFD_RELOC_ARM_ALU_PC_G0
:
17654 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
17655 case BFD_RELOC_ARM_ALU_PC_G1
:
17656 case BFD_RELOC_ARM_ALU_PC_G2
:
17657 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
17658 case BFD_RELOC_ARM_ALU_SB_G0
:
17659 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
17660 case BFD_RELOC_ARM_ALU_SB_G1
:
17661 case BFD_RELOC_ARM_ALU_SB_G2
:
17662 assert (!fixP
->fx_done
);
17664 if (!seg
->use_rela_p
)
17670 bfd_vma encoded_addend
;
17671 bfd_vma addend_abs
= abs (value
);
17673 /* Check that the absolute value of the addend can be
17674 expressed as an 8-bit constant plus a rotation. */
17675 encoded_addend
= encode_arm_immediate (addend_abs
);
17676 if (encoded_addend
== (unsigned int) FAIL
)
17677 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17678 _("the offset 0x%08x is not representable"),
17679 (uint32_t)addend_abs
);
17681 /* Extract the instruction. */
17682 insn
= md_chars_to_number (buf
, INSN_SIZE
);
17684 /* If the addend is positive, use an ADD instruction.
17685 Otherwise use a SUB. Take care not to destroy the S bit. */
17686 insn
&= 0xff1fffff;
17692 /* Place the encoded addend into the first 12 bits of the
17694 insn
&= 0xfffff000;
17695 insn
|= encoded_addend
;
17697 /* Update the instruction. */
17698 md_number_to_chars (buf
, insn
, INSN_SIZE
);
17702 case BFD_RELOC_ARM_LDR_PC_G0
:
17703 case BFD_RELOC_ARM_LDR_PC_G1
:
17704 case BFD_RELOC_ARM_LDR_PC_G2
:
17705 case BFD_RELOC_ARM_LDR_SB_G0
:
17706 case BFD_RELOC_ARM_LDR_SB_G1
:
17707 case BFD_RELOC_ARM_LDR_SB_G2
:
17708 assert (!fixP
->fx_done
);
17710 if (!seg
->use_rela_p
)
17716 bfd_vma addend_abs
= abs (value
);
17718 /* Check that the absolute value of the addend can be
17719 encoded in 12 bits. */
17720 if (addend_abs
>= 0x1000)
17721 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17722 _("bad offset 0x%08x (only 12 bits available for the magnitude)"),
17723 (uint32_t)addend_abs
);
17725 /* Extract the instruction. */
17726 insn
= md_chars_to_number (buf
, INSN_SIZE
);
17728 /* If the addend is negative, clear bit 23 of the instruction.
17729 Otherwise set it. */
17731 insn
&= ~(1 << 23);
17735 /* Place the absolute value of the addend into the first 12 bits
17736 of the instruction. */
17737 insn
&= 0xfffff000;
17738 insn
|= addend_abs
;
17740 /* Update the instruction. */
17741 md_number_to_chars (buf
, insn
, INSN_SIZE
);
17745 case BFD_RELOC_ARM_LDRS_PC_G0
:
17746 case BFD_RELOC_ARM_LDRS_PC_G1
:
17747 case BFD_RELOC_ARM_LDRS_PC_G2
:
17748 case BFD_RELOC_ARM_LDRS_SB_G0
:
17749 case BFD_RELOC_ARM_LDRS_SB_G1
:
17750 case BFD_RELOC_ARM_LDRS_SB_G2
:
17751 assert (!fixP
->fx_done
);
17753 if (!seg
->use_rela_p
)
17759 bfd_vma addend_abs
= abs (value
);
17761 /* Check that the absolute value of the addend can be
17762 encoded in 8 bits. */
17763 if (addend_abs
>= 0x100)
17764 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17765 _("bad offset 0x%08x (only 8 bits available for the magnitude)"),
17766 (uint32_t)addend_abs
);
17768 /* Extract the instruction. */
17769 insn
= md_chars_to_number (buf
, INSN_SIZE
);
17771 /* If the addend is negative, clear bit 23 of the instruction.
17772 Otherwise set it. */
17774 insn
&= ~(1 << 23);
17778 /* Place the first four bits of the absolute value of the addend
17779 into the first 4 bits of the instruction, and the remaining
17780 four into bits 8 .. 11. */
17781 insn
&= 0xfffff0f0;
17782 insn
|= (addend_abs
& 0xf) | ((addend_abs
& 0xf0) << 4);
17784 /* Update the instruction. */
17785 md_number_to_chars (buf
, insn
, INSN_SIZE
);
17789 case BFD_RELOC_ARM_LDC_PC_G0
:
17790 case BFD_RELOC_ARM_LDC_PC_G1
:
17791 case BFD_RELOC_ARM_LDC_PC_G2
:
17792 case BFD_RELOC_ARM_LDC_SB_G0
:
17793 case BFD_RELOC_ARM_LDC_SB_G1
:
17794 case BFD_RELOC_ARM_LDC_SB_G2
:
17795 assert (!fixP
->fx_done
);
17797 if (!seg
->use_rela_p
)
17803 bfd_vma addend_abs
= abs (value
);
17805 /* Check that the absolute value of the addend is a multiple of
17806 four and, when divided by four, fits in 8 bits. */
17807 if (addend_abs
& 0x3)
17808 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17809 _("bad offset 0x%08x (must be word-aligned)"),
17810 (uint32_t)addend_abs
);
17812 if ((addend_abs
>> 2) > 0xff)
17813 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17814 _("bad offset 0x%08x (must be an 8-bit number of words)"),
17815 (uint32_t)addend_abs
);
17817 /* Extract the instruction. */
17818 insn
= md_chars_to_number (buf
, INSN_SIZE
);
17820 /* If the addend is negative, clear bit 23 of the instruction.
17821 Otherwise set it. */
17823 insn
&= ~(1 << 23);
17827 /* Place the addend (divided by four) into the first eight
17828 bits of the instruction. */
17829 insn
&= 0xfffffff0;
17830 insn
|= addend_abs
>> 2;
17832 /* Update the instruction. */
17833 md_number_to_chars (buf
, insn
, INSN_SIZE
);
17837 case BFD_RELOC_UNUSED
:
17839 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17840 _("bad relocation fixup type (%d)"), fixP
->fx_r_type
);
17843 /* Don't allow relocations to escape into the object file that aren't
17844 supported by the linker. */
17845 if (fixP
->fx_addsy
!= NULL
17846 && fixP
->fx_r_type
!= ARM_THUMB_RELOC_BR22
17847 && fixP
->fx_r_type
!= ARM_RELOC_BR24
)
17848 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17849 _("unsupported relocation on symbol %s"),
17850 S_GET_NAME (fixP
->fx_addsy
));
17855 arm_force_relocation (struct fix
* fixp
)
17857 #if defined (OBJ_COFF) && defined (TE_PE)
17858 if (fixp
->fx_r_type
== BFD_RELOC_RVA
)
17863 if (fixp
->fx_r_type
== BFD_RELOC_ARM_PCREL_BRANCH
17864 || fixp
->fx_r_type
== BFD_RELOC_ARM_PCREL_BLX
17865 || fixp
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
17866 || fixp
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BRANCH23
17867 || fixp
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BRANCH25
)
17869 if (fixp
->fx_addsy
!= NULL
)
17871 const char *name
= S_GET_NAME (fixp
->fx_addsy
);
17872 if (! flagseen
['L'] && name
&& name
[0] == 'L')
17879 /* Resolve these relocations even if the symbol is extern or weak. */
17880 if (fixp
->fx_r_type
== BFD_RELOC_ARM_IMMEDIATE
17881 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM
17882 || fixp
->fx_r_type
== BFD_RELOC_ARM_ADRL_IMMEDIATE
17883 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
17884 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
17885 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMM12
17886 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_PC12
)
17889 /* Always leave these relocations for the linker. */
17890 if ((fixp
->fx_r_type
>= BFD_RELOC_ARM_ALU_PC_G0_NC
17891 && fixp
->fx_r_type
<= BFD_RELOC_ARM_LDC_SB_G2
)
17892 || fixp
->fx_r_type
== BFD_RELOC_ARM_LDR_PC_G0
)
17896 /* Always generate relocations against function symbols. */
17897 if (fixp
->fx_r_type
== BFD_RELOC_32
17899 && (symbol_get_bfdsym (fixp
->fx_addsy
)->flags
& BSF_FUNCTION
))
17902 return generic_force_reloc (fixp
);
17908 /* MD interface: Finalization. */
17910 /* A good place to do this, although this was probably not intended
17911 for this kind of use. We need to dump the literal pool before
17912 references are made to a null symbol pointer. */
17918 literal_pool
* pool
;
17920 for (pool
= list_of_pools
; pool
; pool
= pool
->next
)
17922 /* Put it at the end of the relevent section. */
17923 subseg_set (pool
->section
, pool
->sub_section
);
17925 arm_elf_change_section ();
17930 #endif /* NOTYET */
17932 /* MD interface: Initialization. */
17935 set_constant_flonums (void)
17939 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
17940 if (atof_ieee ((char *) fp_const
[i
], 'x', fp_values
[i
]) == NULL
)
17944 /* Auto-select Thumb mode if it's the only available instruction set for the
17945 given architecture. */
17949 autoselect_thumb_from_cpu_variant (void)
17951 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
17952 opcode_select (16);
17954 #endif /* NOTYET */
17964 if ( (arm_ops_hsh
= hash_new ()) == NULL
17965 || (arm_cond_hsh
= hash_new ()) == NULL
17966 || (arm_shift_hsh
= hash_new ()) == NULL
17967 || (arm_psr_hsh
= hash_new ()) == NULL
17968 || (arm_v7m_psr_hsh
= hash_new ()) == NULL
17969 || (arm_reg_hsh
= hash_new ()) == NULL
17970 || (arm_reloc_hsh
= hash_new ()) == NULL
17971 || (arm_barrier_opt_hsh
= hash_new ()) == NULL
)
17972 as_fatal (_("virtual memory exhausted"));
17974 for (i
= 0; i
< sizeof (insns
) / sizeof (struct asm_opcode
); i
++)
17975 hash_insert (arm_ops_hsh
, insns
[i
].template, (PTR
) (insns
+ i
));
17976 for (i
= 0; i
< sizeof (conds
) / sizeof (struct asm_cond
); i
++)
17977 hash_insert (arm_cond_hsh
, conds
[i
].template, (PTR
) (conds
+ i
));
17978 for (i
= 0; i
< sizeof (shift_names
) / sizeof (struct asm_shift_name
); i
++)
17979 hash_insert (arm_shift_hsh
, shift_names
[i
].name
, (PTR
) (shift_names
+ i
));
17980 for (i
= 0; i
< sizeof (psrs
) / sizeof (struct asm_psr
); i
++)
17981 hash_insert (arm_psr_hsh
, psrs
[i
].template, (PTR
) (psrs
+ i
));
17982 for (i
= 0; i
< sizeof (v7m_psrs
) / sizeof (struct asm_psr
); i
++)
17983 hash_insert (arm_v7m_psr_hsh
, v7m_psrs
[i
].template, (PTR
) (v7m_psrs
+ i
));
17984 for (i
= 0; i
< sizeof (reg_names
) / sizeof (struct reg_entry
); i
++)
17985 hash_insert (arm_reg_hsh
, reg_names
[i
].name
, (PTR
) (reg_names
+ i
));
17987 i
< sizeof (barrier_opt_names
) / sizeof (struct asm_barrier_opt
);
17989 hash_insert (arm_barrier_opt_hsh
, barrier_opt_names
[i
].template,
17990 (PTR
) (barrier_opt_names
+ i
));
17992 for (i
= 0; i
< sizeof (reloc_names
) / sizeof (struct reloc_entry
); i
++)
17993 hash_insert (arm_reloc_hsh
, reloc_names
[i
].name
, (PTR
) (reloc_names
+ i
));
17996 set_constant_flonums ();
17999 /* Set the cpu variant based on the command-line options. We prefer
18000 -mcpu= over -march= if both are set (as for GCC); and we prefer
18001 -mfpu= over any other way of setting the floating point unit.
18002 Use of legacy options with new options are faulted. */
18005 if (mcpu_cpu_opt
|| march_cpu_opt
)
18006 as_bad (_("use of old and new-style options to set CPU type"));
18008 mcpu_cpu_opt
= legacy_cpu
;
18010 else if (!mcpu_cpu_opt
)
18011 mcpu_cpu_opt
= march_cpu_opt
;
18016 as_bad (_("use of old and new-style options to set FPU type"));
18018 mfpu_opt
= legacy_fpu
;
18020 else if (!mfpu_opt
)
18022 #if !(defined (TE_LINUX) || defined (TE_NetBSD) || defined (TE_VXWORKS))
18023 /* Some environments specify a default FPU. If they don't, infer it
18024 from the processor. */
18026 mfpu_opt
= mcpu_fpu_opt
;
18028 mfpu_opt
= march_fpu_opt
;
18030 mfpu_opt
= &fpu_default
;
18036 if (mcpu_cpu_opt
!= NULL
)
18037 mfpu_opt
= &fpu_default
;
18038 else if (mcpu_fpu_opt
!= NULL
&& ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt
, arm_ext_v5
))
18039 mfpu_opt
= &fpu_arch_vfp_v2
;
18041 mfpu_opt
= &fpu_arch_fpa
;
18047 mcpu_cpu_opt
= &cpu_default
;
18048 selected_cpu
= cpu_default
;
18052 selected_cpu
= *mcpu_cpu_opt
;
18054 mcpu_cpu_opt
= &arm_arch_any
;
18057 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
18059 autoselect_thumb_from_cpu_variant ();
18061 arm_arch_used
= thumb_arch_used
= arm_arch_none
;
18063 #if defined OBJ_COFF || defined OBJ_ELF
18065 unsigned int flags
= 0;
18067 #if defined OBJ_ELF
18068 flags
= meabi_flags
;
18070 switch (meabi_flags
)
18072 case EF_ARM_EABI_UNKNOWN
:
18074 /* Set the flags in the private structure. */
18075 if (uses_apcs_26
) flags
|= F_APCS26
;
18076 if (support_interwork
) flags
|= F_INTERWORK
;
18077 if (uses_apcs_float
) flags
|= F_APCS_FLOAT
;
18078 if (pic_code
) flags
|= F_PIC
;
18079 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_any_hard
))
18080 flags
|= F_SOFT_FLOAT
;
18082 switch (mfloat_abi_opt
)
18084 case ARM_FLOAT_ABI_SOFT
:
18085 case ARM_FLOAT_ABI_SOFTFP
:
18086 flags
|= F_SOFT_FLOAT
;
18089 case ARM_FLOAT_ABI_HARD
:
18090 if (flags
& F_SOFT_FLOAT
)
18091 as_bad (_("hard-float conflicts with specified fpu"));
18095 /* Using pure-endian doubles (even if soft-float). */
18096 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
18097 flags
|= F_VFP_FLOAT
;
18099 #if defined OBJ_ELF
18100 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_maverick
))
18101 flags
|= EF_ARM_MAVERICK_FLOAT
;
18104 case EF_ARM_EABI_VER4
:
18105 case EF_ARM_EABI_VER5
:
18106 /* No additional flags to set. */
18113 bfd_set_private_flags (stdoutput
, flags
);
18115 /* We have run out flags in the COFF header to encode the
18116 status of ATPCS support, so instead we create a dummy,
18117 empty, debug section called .arm.atpcs. */
18122 sec
= bfd_make_section (stdoutput
, ".arm.atpcs");
18126 bfd_set_section_flags
18127 (stdoutput
, sec
, SEC_READONLY
| SEC_DEBUGGING
/* | SEC_HAS_CONTENTS */);
18128 bfd_set_section_size (stdoutput
, sec
, 0);
18129 bfd_set_section_contents (stdoutput
, sec
, NULL
, 0, 0);
18135 /* Record the CPU type as well. */
18136 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
))
18137 mach
= bfd_mach_arm_iWMMXt2
;
18138 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt
))
18139 mach
= bfd_mach_arm_iWMMXt
;
18140 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_xscale
))
18141 mach
= bfd_mach_arm_XScale
;
18142 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_maverick
))
18143 mach
= bfd_mach_arm_ep9312
;
18144 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5e
))
18145 mach
= bfd_mach_arm_5TE
;
18146 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5
))
18148 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
18149 mach
= bfd_mach_arm_5T
;
18151 mach
= bfd_mach_arm_5
;
18153 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4
))
18155 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
18156 mach
= bfd_mach_arm_4T
;
18158 mach
= bfd_mach_arm_4
;
18160 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3m
))
18161 mach
= bfd_mach_arm_3M
;
18162 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3
))
18163 mach
= bfd_mach_arm_3
;
18164 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2s
))
18165 mach
= bfd_mach_arm_2a
;
18166 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2
))
18167 mach
= bfd_mach_arm_2
;
18169 mach
= bfd_mach_arm_unknown
;
18171 bfd_set_arch_mach (stdoutput
, TARGET_ARCH
, mach
);
18172 #endif /* NOTYET */
18174 if (force_cpusubtype_ALL
)
18176 cpu_variant
= arm_arch_full
;
18177 archflag_cpusubtype
= CPU_SUBTYPE_ARM_ALL
;
18180 switch (archflag_cpusubtype
)
18182 case CPU_SUBTYPE_ARM_V5TEJ
:
18184 static const arm_feature_set arm_arch_v5tej
= ARM_ARCH_V5TEJ
;
18185 cpu_variant
= arm_arch_v5tej
;
18188 case CPU_SUBTYPE_ARM_XSCALE
:
18190 static const arm_feature_set arm_arch_xscale
= ARM_ARCH_XSCALE
;
18191 cpu_variant
= arm_arch_xscale
;
18194 case CPU_SUBTYPE_ARM_V6
:
18196 static const arm_feature_set arm_arch_v6zk_vfp_v2
=
18197 ARM_FEATURE (ARM_AEXT_V6ZK
, FPU_VFP_V2
);
18198 cpu_variant
= arm_arch_v6zk_vfp_v2
;
18201 case CPU_SUBTYPE_ARM_V7
:
18203 static const arm_feature_set arm_arch_v7_vfp_v3_plus_neon_v1
=
18204 ARM_FEATURE (ARM_AEXT_V7_ARM
| ARM_EXT_V7M
| ARM_EXT_DIV
,
18205 FPU_VFP_V3
| FPU_NEON_EXT_V1
);
18206 cpu_variant
= arm_arch_v7_vfp_v3_plus_neon_v1
;
18213 * md_end() is called from main() in as.c after assembly ends. It is used
18214 * to allow target machine dependent clean up.
18222 * md_parse_option() is called from main() in as.c to parse target machine
18223 * dependent command line options. This routine returns 0 if it is passed an
18224 * option that is not recognized non-zero otherwise.
18240 * md_number_to_imm() is the target machine dependent routine that puts out
18241 * a binary value of size 4, 2, or 1 bytes into the specified buffer with
18242 * reguard to a possible relocation entry (the fixP->fx_r_type field in the fixS
18243 * structure pointed to by fixP) for the section with the ordinal nsect. This
18244 * is done in the target machine's byte sex using it's relocation types.
18245 * In this case the byte order is little endian.
18249 unsigned char *buf
,
18257 signed_target_addr_t newval
;
18260 if(fixP
->fx_r_type
== NO_RELOC
||
18261 fixP
->fx_r_type
== ARM_RELOC_VANILLA
){
18264 *buf
++ = val
& 0xff;
18265 *buf
++ = (val
>> 8) & 0xff;
18266 *buf
++ = (val
>> 16) & 0xff;
18267 *buf
++ = (val
>> 24) & 0xff;
18268 *buf
++ = (val
>> 32) & 0xff;
18269 *buf
++ = (val
>> 40) & 0xff;
18270 *buf
++ = (val
>> 48) & 0xff;
18271 *buf
++ = (val
>> 56) & 0xff;
18274 *buf
++ = val
& 0xff;
18275 *buf
++ = (val
>> 8) & 0xff;
18276 *buf
++ = (val
>> 16) & 0xff;
18277 *buf
++ = (val
>> 24) & 0xff;
18280 *buf
++ = val
& 0xff;
18281 *buf
++ = (val
>> 8) & 0xff;
18291 switch(fixP
->fx_r_type
){
18293 case ARM_RELOC_BR24
:
18295 /* GUESS this should be 4 not 8 which seems to disassemble correctly for local
18296 defined labels. But this seems to be off by 8 for external undefined labels
18297 and the target address is not 0 but 0xfffffff8 */
18299 if((val
& 0xfc000000) && ((val
& 0xfc000000) != 0xfc000000)){
18300 layout_file
= fixP
->file
;
18301 layout_line
= fixP
->line
;
18302 as_warn("Fixup of %u too large for field width of 26 bits",val
);
18304 if((val
& 0x3) != 0){
18305 layout_file
= fixP
->file
;
18306 layout_line
= fixP
->line
;
18307 as_warn("Fixup of %u is not to a 4 byte address", val
);
18309 buf
[0] = (val
>> 2) & 0xff;
18310 buf
[1] = (val
>> 10) & 0xff;
18311 buf
[2] = (val
>> 18) & 0xff;
18312 /* buf[3] has the opcode part of the instruction */
18315 /* Code taken and modified from tc-arm.c md_apply_fix3() line 11473 */
18316 case BFD_RELOC_ARM_OFFSET_IMM
:
18317 /* GUESS this needs 4 added to val. Which then better matches what the FSF GAS
18318 produces for an ldr instruction */
18326 if(validate_offset_imm(val
, 0) == FAIL
){
18327 layout_file
= fixP
->file
;
18328 layout_line
= fixP
->line
;
18329 as_warn("bad immediate value for offset (%d)", (int32_t) val
);
18333 newval
= (buf
[3] << 24) | (buf
[2] << 16) | (buf
[1] << 8) | buf
[0];
18335 newval
&= 0xff7ff000;
18336 newval
|= val
| (sign
? INDEX_UP
: 0);
18338 *buf
++ = newval
& 0xff;
18339 *buf
++ = (newval
>> 8) & 0xff;
18340 *buf
++ = (newval
>> 16) & 0xff;
18341 *buf
++ = (newval
>> 24) & 0xff;
18346 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
18349 valueT newval
= val
;
18351 /* Die if we have more bytes than md_apply_fix3 knows how to
18353 if (sizeof (valueT
) < nbytes
)
18356 md_apply_fix (fixP
, &newval
, now_seg
);