1 /* tc-arm.c -- Assemble for the ARM
2 Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
3 2004, 2005, 2006, 2007, 2008, 2009
4 Free Software Foundation, Inc.
5 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
6 Modified by David Taylor (dtaylor@armltd.co.uk)
7 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
8 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
9 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
11 This file is part of GAS, the GNU Assembler.
13 GAS is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 3, or (at your option)
   any later version.
18 GAS is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
23 You should have received a copy of the GNU General Public License
24 along with GAS; see the file COPYING. If not, write to the Free
25 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
   02110-1301, USA. */
32 #include "safe-ctype.h"
36 #include "opcode/arm.h"
40 #include "dw2gencfi.h"
43 #include "dwarf2dbg.h"
46 /* Must be at least the size of the largest unwind opcode (currently two). */
47 #define ARM_OPCODE_CHUNK_SIZE 8
49 /* This structure holds the unwinding state. */
54   symbolS *       table_entry;
55   symbolS *       personality_routine;
56   int             personality_index;
57   /* The segment containing the function. */
60   /* Opcodes generated from this function. */
61   unsigned char * opcodes;
64   /* The number of bytes pushed to the stack. */
66   /* We don't add stack adjustment opcodes immediately so that we can merge
67      multiple adjustments. We can also omit the final adjustment
68      when using a frame pointer. */
69   offsetT         pending_offset;
70   /* These two fields are set by both unwind_movsp and unwind_setfp. They
71      hold the reg+offset to use when restoring sp from a frame pointer. */
74   /* Nonzero if an unwind_setfp directive has been seen. */
76   /* Nonzero if the last opcode restores sp from fp_reg. */
77   unsigned        sp_restored:1;
80 /* Bit N indicates that an R_ARM_NONE relocation has been output for
81    __aeabi_unwind_cpp_prN already if set. This enables dependencies to be
82    emitted only once per section, to save unnecessary bloat. */
83 static unsigned int marked_pr_dependency = 0;
87 /* Results from operand parsing worker functions. */
91   PARSE_OPERAND_SUCCESS,
93   PARSE_OPERAND_FAIL_NO_BACKTRACK
94 } parse_operand_result;
103 /* Types of processor to assemble for. */
105 #if defined __XSCALE__
106 #define CPU_DEFAULT ARM_ARCH_XSCALE
108 #if defined __thumb__
109 #define CPU_DEFAULT ARM_ARCH_V5T
116 # define FPU_DEFAULT FPU_ARCH_FPA
117 # elif defined (TE_NetBSD)
119 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
121 /* Legacy a.out format. */
122 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
124 # elif defined (TE_VXWORKS)
125 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
127 /* For backwards compatibility, default to FPA. */
128 # define FPU_DEFAULT FPU_ARCH_FPA
130 #endif /* ifndef FPU_DEFAULT */
132 #define streq(a, b) (strcmp (a, b) == 0)
134 static arm_feature_set cpu_variant;
135 static arm_feature_set arm_arch_used;
136 static arm_feature_set thumb_arch_used;
138 /* Flags stored in private area of BFD structure. */
139 static int uses_apcs_26      = FALSE;
140 static int atpcs             = FALSE;
141 static int support_interwork = FALSE;
142 static int uses_apcs_float   = FALSE;
143 static int pic_code          = FALSE;
144 static int fix_v4bx          = FALSE;
145 /* Warn on using deprecated features. */
146 static int warn_on_deprecated = TRUE;
149 /* Variables that we set while parsing command-line options. Once all
150    options have been read we re-process these values to set the real
       assembly flags. */
152 static const arm_feature_set *legacy_cpu = NULL;
153 static const arm_feature_set *legacy_fpu = NULL;
155 static const arm_feature_set *mcpu_cpu_opt = NULL;
156 static const arm_feature_set *mcpu_fpu_opt = NULL;
157 static const arm_feature_set *march_cpu_opt = NULL;
158 static const arm_feature_set *march_fpu_opt = NULL;
159 static const arm_feature_set *mfpu_opt = NULL;
160 static const arm_feature_set *object_arch = NULL;
162 /* Constants for known architecture features. */
163 static const arm_feature_set fpu_default = FPU_DEFAULT;
164 static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
165 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
166 static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
167 static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
168 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
169 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
170 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
171 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
174 static const arm_feature_set cpu_default = CPU_DEFAULT;
177 static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0);
178 static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V2, 0);
179 static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0);
180 static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0);
181 static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0);
182 static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0);
183 static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0);
184 static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0);
185 static const arm_feature_set arm_ext_v4t_5 =
186   ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0);
187 static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0);
188 static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0);
189 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0);
190 static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0);
191 static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0);
192 static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0);
193 static const arm_feature_set arm_ext_v6z = ARM_FEATURE (ARM_EXT_V6Z, 0);
194 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0);
195 static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0);
196 static const arm_feature_set arm_ext_barrier = ARM_FEATURE (ARM_EXT_BARRIER, 0);
197 static const arm_feature_set arm_ext_msr = ARM_FEATURE (ARM_EXT_THUMB_MSR, 0);
198 static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0);
199 static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0);
200 static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0);
201 static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0);
202 static const arm_feature_set arm_ext_m =
203   ARM_FEATURE (ARM_EXT_V6M | ARM_EXT_V7M, 0);
205 static const arm_feature_set arm_arch_any = ARM_ANY;
206 static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1);
207 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
208 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
210 static const arm_feature_set arm_cext_iwmmxt2 =
211   ARM_FEATURE (0, ARM_CEXT_IWMMXT2);
212 static const arm_feature_set arm_cext_iwmmxt =
213   ARM_FEATURE (0, ARM_CEXT_IWMMXT);
214 static const arm_feature_set arm_cext_xscale =
215   ARM_FEATURE (0, ARM_CEXT_XSCALE);
216 static const arm_feature_set arm_cext_maverick =
217   ARM_FEATURE (0, ARM_CEXT_MAVERICK);
218 static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1);
219 static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2);
220 static const arm_feature_set fpu_vfp_ext_v1xd =
221   ARM_FEATURE (0, FPU_VFP_EXT_V1xD);
222 static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1);
223 static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2);
224 static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3);
225 static const arm_feature_set fpu_vfp_ext_d32 =
226   ARM_FEATURE (0, FPU_VFP_EXT_D32);
227 static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1);
228 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
229   ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
230 static const arm_feature_set fpu_neon_fp16 = ARM_FEATURE (0, FPU_NEON_FP16);
232 static int mfloat_abi_opt = -1;
233 /* Record user cpu selection for object attributes. */
234 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
235 /* Must be long enough to hold any of the names in arm_cpus. */
236 static char selected_cpu_name[16];
239 static int meabi_flags = EABI_DEFAULT;
241 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
244 static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
249   return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
254 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
255 symbolS * GOT_symbol;
258 /* 0: assemble for ARM,
259 1: assemble for Thumb,
260    2: assemble for Thumb even though target CPU does not support thumb
       instructions. */
262 static int thumb_mode = 0;
263 /* A value distinct from the possible values for thumb_mode that we
264 can use to record whether thumb_mode has been copied into the
265 tc_frag_data field of a frag. */
266 #define MODE_RECORDED (1 << 4)
268 /* Specifies the intrinsic IT insn behavior mode. */
269 enum implicit_it_mode
271   IMPLICIT_IT_MODE_NEVER  = 0x00,
272   IMPLICIT_IT_MODE_ARM    = 0x01,
273   IMPLICIT_IT_MODE_THUMB  = 0x02,
274   IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
276 static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
278 /* If unified_syntax is true, we are processing the new unified
279 ARM/Thumb syntax. Important differences from the old ARM mode:
281 - Immediate operands do not require a # prefix.
282 - Conditional affixes always appear at the end of the
283 instruction. (For backward compatibility, those instructions
284      that formerly had them in the middle, continue to accept them
         in that position.)
286    - The IT instruction may appear, and if it does is validated
287      against subsequent conditional affixes. It does not generate
         machine code.
290 Important differences from the old Thumb mode:
292 - Immediate operands do not require a # prefix.
293 - Most of the V6T2 instructions are only available in unified mode.
294 - The .N and .W suffixes are recognized and honored (it is an error
295 if they cannot be honored).
296 - All instructions set the flags if and only if they have an 's' affix.
297 - Conditional affixes may be used. They are validated against
298 preceding IT instructions. Unlike ARM mode, you cannot use a
299 conditional affix except in the scope of an IT instruction. */
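/* A rough illustration of the difference (example spellings, not taken from
   the comment above): the old divided ARM syntax writes

       addeqs  r0, r1, #1      @ condition in the middle, '#' required

   whereas unified syntax expects

       addseq  r0, r1, 1       @ condition affix at the end, '#' optional  */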
301 static bfd_boolean unified_syntax = FALSE;
316   enum neon_el_type type;
320 #define NEON_MAX_TYPE_ELS 4
324   struct neon_type_el el[NEON_MAX_TYPE_ELS];
328 enum it_instruction_type
333   IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
334                              if inside, should be the last one. */
335   NEUTRAL_IT_INSN,        /* This could be either inside or outside,
336                              i.e. BKPT and NOP. */
337   IT_INSN                 /* The IT insn has been parsed. */
343   unsigned long instruction;
347 /* "uncond_value" is set to the value in place of the conditional field in
348 unconditional versions of the instruction, or -1 if nothing is
351   struct neon_type vectype;
352 /* Set to the opcode if the instruction needs relaxation.
353    Zero if the instruction is not relaxed. */
357   bfd_reloc_code_real_type type;
362   enum it_instruction_type it_insn_type;
368     struct neon_type_el vectype;
369     unsigned present    : 1;  /* Operand present. */
370     unsigned isreg      : 1;  /* Operand was a register. */
371     unsigned immisreg   : 1;  /* .imm field is a second register. */
372     unsigned isscalar   : 1;  /* Operand is a (Neon) scalar. */
373     unsigned immisalign : 1;  /* Immediate is an alignment specifier. */
374     unsigned immisfloat : 1;  /* Immediate was parsed as a float. */
375     /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
376        instructions. This allows us to disambiguate ARM <-> vector insns. */
377     unsigned regisimm   : 1;  /* 64-bit immediate, reg forms high 32 bits. */
378     unsigned isvec      : 1;  /* Is a single, double or quad VFP/Neon reg. */
379     unsigned isquad     : 1;  /* Operand is Neon quad-precision register. */
380     unsigned issingle   : 1;  /* Operand is VFP single-precision register. */
381     unsigned hasreloc   : 1;  /* Operand has relocation suffix. */
382     unsigned writeback  : 1;  /* Operand has trailing ! */
383     unsigned preind     : 1;  /* Preindexed address. */
384     unsigned postind    : 1;  /* Postindexed address. */
385     unsigned negative   : 1;  /* Index register was negated. */
386     unsigned shifted    : 1;  /* Shift applied to operation. */
387     unsigned shift_kind : 3;  /* Shift operation (enum shift_kind). */
391 static struct arm_it inst;
393 #define NUM_FLOAT_VALS 8
395 const char * fp_const[] =
397   "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
400 /* Number of littlenums required to hold an extended precision number. */
401 #define MAX_LITTLENUMS 6
403 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
413 #define CP_T_X 0x00008000
414 #define CP_T_Y 0x00400000
416 #define CONDS_BIT 0x00100000
417 #define LOAD_BIT 0x00100000
419 #define DOUBLE_LOAD_FLAG 0x00000001
423 const char * template;
427 #define COND_ALWAYS 0xE
431 const char * template;
435 struct asm_barrier_opt
437 const char * template;
441 /* The bit that distinguishes CPSR and SPSR. */
442 #define SPSR_BIT (1 << 22)
444 /* The individual PSR flag bits. */
445 #define PSR_c (1 << 16)
446 #define PSR_x (1 << 17)
447 #define PSR_s (1 << 18)
448 #define PSR_f (1 << 19)
453   bfd_reloc_code_real_type reloc;
458   VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
459   VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
464   VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
467 /* Bits for DEFINED field in neon_typed_alias. */
468 #define NTA_HASTYPE  1
469 #define NTA_HASINDEX 2
471 struct neon_typed_alias
473   unsigned char defined;
475   struct neon_type_el eltype;
478 /* ARM register categories. This includes coprocessor numbers and various
479 architecture extensions' registers. */
505 /* Structure for a hash table entry for a register.
506 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
507 information which states whether a vector type or index is specified (for a
508 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
512   unsigned char number;
514   unsigned char builtin;
515   struct neon_typed_alias * neon;
518 /* Diagnostics used when we don't get a register of the expected type. */
519 const char * const reg_expected_msgs[] =
521 N_("ARM register expected"),
522 N_("bad or missing co-processor number"),
523 N_("co-processor register expected"),
524 N_("FPA register expected"),
525 N_("VFP single precision register expected"),
526 N_("VFP/Neon double precision register expected"),
527 N_("Neon quad precision register expected"),
528 N_("VFP single or double precision register expected"),
529 N_("Neon double or quad precision register expected"),
530 N_("VFP single, double or Neon quad precision register expected"),
531 N_("VFP system register expected"),
532 N_("Maverick MVF register expected"),
533 N_("Maverick MVD register expected"),
534 N_("Maverick MVFX register expected"),
535 N_("Maverick MVDX register expected"),
536 N_("Maverick MVAX register expected"),
537 N_("Maverick DSPSC register expected"),
538 N_("iWMMXt data register expected"),
539 N_("iWMMXt control register expected"),
540 N_("iWMMXt scalar register expected"),
541 N_("XScale accumulator register expected"),
544 /* Some well known registers that we refer to directly elsewhere. */
549 /* ARM instructions take 4 bytes in the object file, Thumb instructions
       take 2 bytes. */
555 /* Basic string to match. */
556 const char * template;
558 /* Parameters to instruction. */
559   unsigned char operands[8];
561   /* Conditional tag - see opcode_lookup. */
562   unsigned int tag : 4;
564   /* Basic instruction code. */
565   unsigned int avalue : 28;
567   /* Thumb-format instruction code. */
570   /* Which architecture variant provides this instruction. */
571   const arm_feature_set * avariant;
572   const arm_feature_set * tvariant;
574   /* Function to call to encode instruction in ARM format. */
575   void (* aencode) (void);
577   /* Function to call to encode instruction in Thumb format. */
578   void (* tencode) (void);
581 /* Defines for various bits that we will want to toggle. */
582 #define INST_IMMEDIATE 0x02000000
583 #define OFFSET_REG 0x02000000
584 #define HWOFFSET_IMM 0x00400000
585 #define SHIFT_BY_REG 0x00000010
586 #define PRE_INDEX 0x01000000
587 #define INDEX_UP 0x00800000
588 #define WRITE_BACK 0x00200000
589 #define LDM_TYPE_2_OR_3 0x00400000
590 #define CPSI_MMOD 0x00020000
592 #define LITERAL_MASK 0xf000f000
593 #define OPCODE_MASK 0xfe1fffff
594 #define V4_STR_BIT 0x00000020
596 #define T2_SUBS_PC_LR 0xf3de8f00
598 #define DATA_OP_SHIFT 21
600 #define T2_OPCODE_MASK 0xfe1fffff
601 #define T2_DATA_OP_SHIFT 21
603 /* Codes to distinguish the arithmetic instructions. */
614 #define OPCODE_CMP 10
615 #define OPCODE_CMN 11
616 #define OPCODE_ORR 12
617 #define OPCODE_MOV 13
618 #define OPCODE_BIC 14
619 #define OPCODE_MVN 15
621 #define T2_OPCODE_AND 0
622 #define T2_OPCODE_BIC 1
623 #define T2_OPCODE_ORR 2
624 #define T2_OPCODE_ORN 3
625 #define T2_OPCODE_EOR 4
626 #define T2_OPCODE_ADD 8
627 #define T2_OPCODE_ADC 10
628 #define T2_OPCODE_SBC 11
629 #define T2_OPCODE_SUB 13
630 #define T2_OPCODE_RSB 14
632 #define T_OPCODE_MUL 0x4340
633 #define T_OPCODE_TST 0x4200
634 #define T_OPCODE_CMN 0x42c0
635 #define T_OPCODE_NEG 0x4240
636 #define T_OPCODE_MVN 0x43c0
638 #define T_OPCODE_ADD_R3 0x1800
639 #define T_OPCODE_SUB_R3 0x1a00
640 #define T_OPCODE_ADD_HI 0x4400
641 #define T_OPCODE_ADD_ST 0xb000
642 #define T_OPCODE_SUB_ST 0xb080
643 #define T_OPCODE_ADD_SP 0xa800
644 #define T_OPCODE_ADD_PC 0xa000
645 #define T_OPCODE_ADD_I8 0x3000
646 #define T_OPCODE_SUB_I8 0x3800
647 #define T_OPCODE_ADD_I3 0x1c00
648 #define T_OPCODE_SUB_I3 0x1e00
650 #define T_OPCODE_ASR_R 0x4100
651 #define T_OPCODE_LSL_R 0x4080
652 #define T_OPCODE_LSR_R 0x40c0
653 #define T_OPCODE_ROR_R 0x41c0
654 #define T_OPCODE_ASR_I 0x1000
655 #define T_OPCODE_LSL_I 0x0000
656 #define T_OPCODE_LSR_I 0x0800
658 #define T_OPCODE_MOV_I8 0x2000
659 #define T_OPCODE_CMP_I8 0x2800
660 #define T_OPCODE_CMP_LR 0x4280
661 #define T_OPCODE_MOV_HR 0x4600
662 #define T_OPCODE_CMP_HR 0x4500
664 #define T_OPCODE_LDR_PC 0x4800
665 #define T_OPCODE_LDR_SP 0x9800
666 #define T_OPCODE_STR_SP 0x9000
667 #define T_OPCODE_LDR_IW 0x6800
668 #define T_OPCODE_STR_IW 0x6000
669 #define T_OPCODE_LDR_IH 0x8800
670 #define T_OPCODE_STR_IH 0x8000
671 #define T_OPCODE_LDR_IB 0x7800
672 #define T_OPCODE_STR_IB 0x7000
673 #define T_OPCODE_LDR_RW 0x5800
674 #define T_OPCODE_STR_RW 0x5000
675 #define T_OPCODE_LDR_RH 0x5a00
676 #define T_OPCODE_STR_RH 0x5200
677 #define T_OPCODE_LDR_RB 0x5c00
678 #define T_OPCODE_STR_RB 0x5400
680 #define T_OPCODE_PUSH 0xb400
681 #define T_OPCODE_POP 0xbc00
683 #define T_OPCODE_BRANCH 0xe000
685 #define THUMB_SIZE 2 /* Size of thumb instruction. */
686 #define THUMB_PP_PC_LR 0x0100
687 #define THUMB_LOAD_BIT 0x0800
688 #define THUMB2_LOAD_BIT 0x00100000
690 #define BAD_ARGS _("bad arguments to instruction")
691 #define BAD_SP _("r13 not allowed here")
692 #define BAD_PC _("r15 not allowed here")
693 #define BAD_COND _("instruction cannot be conditional")
694 #define BAD_OVERLAP _("registers may not be the same")
695 #define BAD_HIREG _("lo register required")
696 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
697 #define BAD_ADDR_MODE _("instruction does not accept this addressing mode")
698 #define BAD_BRANCH _("branch must be last instruction in IT block")
699 #define BAD_NOT_IT _("instruction not allowed in IT block")
700 #define BAD_FPU _("selected FPU does not support instruction")
701 #define BAD_OUT_IT _("thumb conditional instruction should be in IT block")
702 #define BAD_IT_COND _("incorrect condition in IT block")
703 #define BAD_IT_IT _("IT falling in the range of a previous IT block")
704 #define MISSING_FNSTART _("missing .fnstart before unwinding directive")
706 static struct hash_control * arm_ops_hsh;
707 static struct hash_control * arm_cond_hsh;
708 static struct hash_control * arm_shift_hsh;
709 static struct hash_control * arm_psr_hsh;
710 static struct hash_control * arm_v7m_psr_hsh;
711 static struct hash_control * arm_reg_hsh;
712 static struct hash_control * arm_reloc_hsh;
713 static struct hash_control * arm_barrier_opt_hsh;
715 /* Stuff needed to resolve the label ambiguity
724 symbolS *  last_label_seen;
725 static int label_is_thumb_function_name = FALSE;
727 /* Literal pool structure. Held on a per-section
728 and per-sub-section basis. */
730 #define MAX_LITERAL_POOL_SIZE 1024
731 typedef struct literal_pool
733   expressionS    literals[MAX_LITERAL_POOL_SIZE];
734   unsigned int   next_free_entry;
739   struct literal_pool * next;
742 /* Pointer to a linked list of literal pools. */
743 literal_pool * list_of_pools = NULL;
746 # define now_it seg_info (now_seg)->tc_segment_info_data.current_it
748 static struct current_it now_it;
752 now_it_compatible (int cond)
754   return (cond & ~1) == (now_it.cc & ~1);
758 conditional_insn (void)
760   return inst.cond != COND_ALWAYS;
763 static int in_it_block (void);
765 static int handle_it_state (void);
767 static void force_automatic_it_block_close (void);
769 static void it_fsm_post_encode (void);
771 #define set_it_insn_type(type) \
774 inst.it_insn_type = type; \
775 if (handle_it_state () == FAIL) \
780 #define set_it_insn_type_nonvoid(type, failret) \
783 inst.it_insn_type = type; \
784 if (handle_it_state () == FAIL) \
789 #define set_it_insn_type_last() \
792 if (inst.cond == COND_ALWAYS) \
793 set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
795 set_it_insn_type (INSIDE_IT_LAST_INSN); \
801 /* This array holds the chars that always start a comment. If the
802 pre-processor is disabled, these aren't very useful. */
803 const char comment_chars[] = "@";
805 /* This array holds the chars that only start a comment at the beginning of
806 a line. If the line seems to have the form '# 123 filename'
807 .line and .file directives will appear in the pre-processed output. */
808 /* Note that input_file.c hand checks for '#' at the beginning of the
809 first line of the input file. This is because the compiler outputs
810 #NO_APP at the beginning of its output. */
811 /* Also note that comments like this one will always work. */
812 const char line_comment_chars[] = "#";
814 const char line_separator_chars[] = ";";
816 /* Chars that can be used to separate mant
817 from exp in floating point numbers. */
818 const char EXP_CHARS[] = "eE";
820 /* Chars that mean this number is a floating point constant. */
824 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
826 /* Prefix characters that indicate the start of an immediate
       value. */
828 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
830 /* Separator character handling. */
832 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
835 skip_past_char (char ** str, char c)
846 #define skip_past_comma(str) skip_past_char (str, ',')
848 /* Arithmetic expressions (possibly involving symbols). */
850 /* Return TRUE if anything in the expression is a bignum. */
853 walk_no_bignums (symbolS * sp)
855   if (symbol_get_value_expression (sp)->X_op == O_big)
858   if (symbol_get_value_expression (sp)->X_add_symbol)
860     return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
861             || (symbol_get_value_expression (sp)->X_op_symbol
862                 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
868 static int in_my_get_expression = 0;
870 /* Third argument to my_get_expression. */
871 #define GE_NO_PREFIX 0
872 #define GE_IMM_PREFIX 1
873 #define GE_OPT_PREFIX 2
874 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
875 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
876 #define GE_OPT_PREFIX_BIG 3
879 my_get_expression (expressionS * ep, char ** str, int prefix_mode)
884   /* In unified syntax, all prefixes are optional. */
886     prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
891     case GE_NO_PREFIX: break;
893       if (!is_immediate_prefix (**str))
895           inst.error = _("immediate expression requires a # prefix");
901     case GE_OPT_PREFIX_BIG:
902       if (is_immediate_prefix (**str))
908   memset (ep, 0, sizeof (expressionS));
910   save_in = input_line_pointer;
911   input_line_pointer = *str;
912   in_my_get_expression = 1;
913   seg = expression (ep);
914   in_my_get_expression = 0;
916   if (ep->X_op == O_illegal)
918       /* We found a bad expression in md_operand(). */
919       *str = input_line_pointer;
920       input_line_pointer = save_in;
921       if (inst.error == NULL)
922         inst.error = _("bad expression");
927   if (seg != absolute_section
928       && seg != text_section
929       && seg != data_section
930       && seg != bss_section
931       && seg != undefined_section)
933       inst.error = _("bad segment");
934       *str = input_line_pointer;
935       input_line_pointer = save_in;
940   /* Get rid of any bignums now, so that we don't generate an error for which
941      we can't establish a line number later on. Big numbers are never valid
942      in instructions, which is where this routine is always called. */
943   if (prefix_mode != GE_OPT_PREFIX_BIG
944       && (ep->X_op == O_big
946           && (walk_no_bignums (ep->X_add_symbol)
948               && walk_no_bignums (ep->X_op_symbol))))))
950       inst.error = _("invalid constant");
951       *str = input_line_pointer;
952       input_line_pointer = save_in;
956   *str = input_line_pointer;
957   input_line_pointer = save_in;
961 /* Turn a string in input_line_pointer into a floating point constant
962 of type TYPE, and store the appropriate bytes in *LITP. The number
963 of LITTLENUMS emitted is stored in *SIZEP. An error message is
964 returned, or NULL on OK.
966    Note that fp constants aren't represented in the normal way on the ARM.
967 In big endian mode, things are as expected. However, in little endian
968 mode fp constants are big-endian word-wise, and little-endian byte-wise
969 within the words. For example, (double) 1.1 in big endian mode is
970 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
971 the byte sequence 99 99 f1 3f 9a 99 99 99.
973 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
976 md_atof (int type, char * litP, int * sizeP)
979   LITTLENUM_TYPE words[MAX_LITTLENUMS];
1011 return _("Unrecognized or unsupported floating point constant");
1014   t = atof_ieee (input_line_pointer, type, words);
1016   input_line_pointer = t;
1017   *sizeP = prec * sizeof (LITTLENUM_TYPE);
1019   if (target_big_endian)
1021       for (i = 0; i < prec; i++)
1023           md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1024           litP += sizeof (LITTLENUM_TYPE);
1029       if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
1030         for (i = prec - 1; i >= 0; i--)
1032             md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1033             litP += sizeof (LITTLENUM_TYPE);
1036         /* For a 4 byte float the order of elements in `words' is 1 0.
1037            For an 8 byte float the order is 1 0 3 2. */
1038         for (i = 0; i < prec; i += 2)
1040             md_number_to_chars (litP, (valueT) words[i + 1],
1041                                 sizeof (LITTLENUM_TYPE));
1042             md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
1043                                 (valueT) words[i], sizeof (LITTLENUM_TYPE));
1044             litP += 2 * sizeof (LITTLENUM_TYPE);
1051 /* We handle all bad expressions here, so that we can report the faulty
1052 instruction in the error message. */
1054 md_operand (expressionS * expr)
1056   if (in_my_get_expression)
1057     expr->X_op = O_illegal;
1060 /* Immediate values. */
1062 /* Generic immediate-value read function for use in directives.
1063 Accepts anything that 'expression' can fold to a constant.
1064 *val receives the number. */
1067 immediate_for_directive (int *val)
1070   exp.X_op = O_illegal;
1072   if (is_immediate_prefix (*input_line_pointer))
1074       input_line_pointer++;
1078   if (exp.X_op != O_constant)
1080       as_bad (_("expected #constant"));
1081       ignore_rest_of_line ();
1084   *val = exp.X_add_number;
1089 /* Register parsing. */
1091 /* Generic register parser. CCP points to what should be the
1092 beginning of a register name. If it is indeed a valid register
1093 name, advance CCP over it and return the reg_entry structure;
1094 otherwise return NULL. Does not issue diagnostics. */
1096 static struct reg_entry *
1097 arm_reg_parse_multi (char **ccp)
1101   struct reg_entry *reg;
1103 #ifdef REGISTER_PREFIX
1104   if (*start != REGISTER_PREFIX)
1108 #ifdef OPTIONAL_REGISTER_PREFIX
1109   if (*start == OPTIONAL_REGISTER_PREFIX)
1114   if (!ISALPHA (*p) || !is_name_beginner (*p))
1119   while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1121   reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1131 arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
1132                     enum arm_reg_type type)
1134 /* Alternative syntaxes are accepted for a few register classes. */
1141 /* Generic coprocessor register names are allowed for these. */
1142 if (reg
&& reg
->type
== REG_TYPE_CN
)
1147 /* For backward compatibility, a bare number is valid here. */
1149 unsigned long processor
= strtoul (start
, ccp
, 10);
1150 if (*ccp
!= start
&& processor
<= 15)
1154 case REG_TYPE_MMXWC
:
1155 /* WC includes WCG. ??? I'm not sure this is true for all
1156 instructions that take WC registers. */
1157 if (reg
&& reg
->type
== REG_TYPE_MMXWCG
)
1168 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1169 return value is the register number or FAIL. */
1172 arm_reg_parse (char **ccp, enum arm_reg_type type)
1175   struct reg_entry *reg = arm_reg_parse_multi (ccp);
1178   /* Do not allow a scalar (reg+index) to parse as a register. */
1179   if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1182   if (reg && reg->type == type)
1185   if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1192 /* Parse a Neon type specifier. *STR should point at the leading '.'
1193 character. Does no verification at this stage that the type fits the opcode
1200 Can all be legally parsed by this function.
1202 Fills in neon_type struct pointer with parsed information, and updates STR
1203 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1204 type, FAIL if not. */
1207 parse_neon_type (struct neon_type *type, char **str)
1214 while (type
->elems
< NEON_MAX_TYPE_ELS
)
1216 enum neon_el_type thistype
= NT_untyped
;
1217 unsigned thissize
= -1u;
1224 /* Just a size without an explicit type. */
1228 switch (TOLOWER (*ptr
))
1230 case 'i': thistype
= NT_integer
; break;
1231 case 'f': thistype
= NT_float
; break;
1232 case 'p': thistype
= NT_poly
; break;
1233 case 's': thistype
= NT_signed
; break;
1234 case 'u': thistype
= NT_unsigned
; break;
1236 thistype
= NT_float
;
1241 as_bad (_("unexpected character `%c' in type specifier"), *ptr
);
1247 /* .f is an abbreviation for .f32. */
1248 if (thistype
== NT_float
&& !ISDIGIT (*ptr
))
1253 thissize
= strtoul (ptr
, &ptr
, 10);
1255 if (thissize
!= 8 && thissize
!= 16 && thissize
!= 32
1258 as_bad (_("bad size %d in type specifier"), thissize
);
1266 type
->el
[type
->elems
].type
= thistype
;
1267 type
->el
[type
->elems
].size
= thissize
;
1272 /* Empty/missing type is not a successful parse. */
1273 if (type
->elems
== 0)
1281 /* Errors may be set multiple times during parsing or bit encoding
1282 (particularly in the Neon bits), but usually the earliest error which is set
1283 will be the most meaningful. Avoid overwriting it with later (cascading)
1284 errors by calling this function. */
1287 first_error (const char *err
)
1293 /* Parse a single type, e.g. ".s32", leading period included. */
1295 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1298 struct neon_type optype
;
1302 if (parse_neon_type (&optype
, &str
) == SUCCESS
)
1304 if (optype
.elems
== 1)
1305 *vectype
= optype
.el
[0];
1308 first_error (_("only one type should be specified for operand"));
1314 first_error (_("vector type expected"));
1326 /* Special meanings for indices (which have a range of 0-7), which will fit into
1329 #define NEON_ALL_LANES 15
1330 #define NEON_INTERLEAVE_LANES 14
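/* For instance (illustrative forms): a to-all-lanes list such as
   "vld1.8 {d0[]}, [r0]" is recorded with index NEON_ALL_LANES, while an
   un-indexed structure list such as "vld4.8 {d0-d3}, [r0]" uses
   NEON_INTERLEAVE_LANES.  */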
1332 /* Parse either a register or a scalar, with an optional type. Return the
1333 register number, and optionally fill in the actual type of the register
1334 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1335 type/index information in *TYPEINFO. */
1338 parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
1339                            enum arm_reg_type *rtype,
1340                            struct neon_typed_alias *typeinfo)
1343 struct reg_entry
*reg
= arm_reg_parse_multi (&str
);
1344 struct neon_typed_alias atype
;
1345 struct neon_type_el parsetype
;
1349 atype
.eltype
.type
= NT_invtype
;
1350 atype
.eltype
.size
= -1;
1352 /* Try alternate syntax for some types of register. Note these are mutually
1353 exclusive with the Neon syntax extensions. */
1356 int altreg
= arm_reg_alt_syntax (&str
, *ccp
, reg
, type
);
1364 /* Undo polymorphism when a set of register types may be accepted. */
1365 if ((type
== REG_TYPE_NDQ
1366 && (reg
->type
== REG_TYPE_NQ
|| reg
->type
== REG_TYPE_VFD
))
1367 || (type
== REG_TYPE_VFSD
1368 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
))
1369 || (type
== REG_TYPE_NSDQ
1370 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
1371 || reg
->type
== REG_TYPE_NQ
))
1372 || (type
== REG_TYPE_MMXWC
1373 && (reg
->type
== REG_TYPE_MMXWCG
)))
1376 if (type
!= reg
->type
)
1382 if (parse_neon_operand_type (&parsetype
, &str
) == SUCCESS
)
1384 if ((atype
.defined
& NTA_HASTYPE
) != 0)
1386 first_error (_("can't redefine type for operand"));
1389 atype
.defined
|= NTA_HASTYPE
;
1390 atype
.eltype
= parsetype
;
1393 if (skip_past_char (&str
, '[') == SUCCESS
)
1395 if (type
!= REG_TYPE_VFD
)
1397 first_error (_("only D registers may be indexed"));
1401 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1403 first_error (_("can't change index for operand"));
1407 atype
.defined
|= NTA_HASINDEX
;
1409 if (skip_past_char (&str
, ']') == SUCCESS
)
1410 atype
.index
= NEON_ALL_LANES
;
1415 my_get_expression (&exp
, &str
, GE_NO_PREFIX
);
1417 if (exp
.X_op
!= O_constant
)
1419 first_error (_("constant expression required"));
1423 if (skip_past_char (&str
, ']') == FAIL
)
1426 atype
.index
= exp
.X_add_number
;
1441 /* Like arm_reg_parse, but allow the following extra features:
1442 - If RTYPE is non-zero, return the (possibly restricted) type of the
1443 register (e.g. Neon double or quad reg when either has been requested).
1444 - If this is a Neon vector type with additional type information, fill
1445 in the struct pointed to by VECTYPE (if non-NULL).
1446 This function will fault on encountering a scalar. */
1449 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1450                      enum arm_reg_type *rtype, struct neon_type_el *vectype)
1452 struct neon_typed_alias atype
;
1454 int reg
= parse_typed_reg_or_scalar (&str
, type
, rtype
, &atype
);
1459 /* Do not allow a scalar (reg+index) to parse as a register. */
1460 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1462 first_error (_("register operand expected, but got scalar"));
1467 *vectype
= atype
.eltype
;
1474 #define NEON_SCALAR_REG(X) ((X) >> 4)
1475 #define NEON_SCALAR_INDEX(X) ((X) & 15)
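/* Worked example (illustrative): parse_scalar below encodes the scalar d5[2]
   as 5 * 16 + 2 = 0x52, so NEON_SCALAR_REG (0x52) == 5 and
   NEON_SCALAR_INDEX (0x52) == 2.  */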
1477 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1478 have enough information to be able to do a good job bounds-checking. So, we
1479 just do easy checks here, and do further checks later. */
1482 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1486 struct neon_typed_alias atype
;
1488 reg
= parse_typed_reg_or_scalar (&str
, REG_TYPE_VFD
, NULL
, &atype
);
1490 if (reg
== FAIL
|| (atype
.defined
& NTA_HASINDEX
) == 0)
1493 if (atype
.index
== NEON_ALL_LANES
)
1495 first_error (_("scalar must have an index"));
1498 else if (atype
.index
>= 64 / elsize
)
1500 first_error (_("scalar index out of range"));
1505   *type = atype.eltype;
1509   return reg * 16 + atype.index;
1512 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
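/* For example (illustrative): the list {r0-r3, lr} produces the bitmask
   0x400f -- bit N is set for each rN present, with lr counted as r14.  */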
1515 parse_reg_list (char ** strp)
1517   char * str = * strp;
1521 /* We come back here if we get ranges concatenated by '+' or '|'. */
1536 if ((reg
= arm_reg_parse (&str
, REG_TYPE_RN
)) == FAIL
)
1538 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
1548 first_error (_("bad range in register list"));
1552 for (i
= cur_reg
+ 1; i
< reg
; i
++)
1554 if (range
& (1 << i
))
1556 (_("Warning: duplicated register (r%d) in register list"),
1564 if (range
& (1 << reg
))
1565 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1567 else if (reg
<= cur_reg
)
1568 as_tsktsk (_("Warning: register range not in ascending order"));
1573 while (skip_past_comma (&str
) != FAIL
1574 || (in_range
= 1, *str
++ == '-'));
1579 first_error (_("missing `}'"));
1587 if (my_get_expression (&expr
, &str
, GE_NO_PREFIX
))
1590 if (expr
.X_op
== O_constant
)
1592 if (expr
.X_add_number
1593 != (expr
.X_add_number
& 0x0000ffff))
1595 inst
.error
= _("invalid register mask");
1599 if ((range
& expr
.X_add_number
) != 0)
1601 int regno
= range
& expr
.X_add_number
;
1604 regno
= (1 << regno
) - 1;
1606 (_("Warning: duplicated register (r%d) in register list"),
1610 range
|= expr
.X_add_number
;
1614 if (inst
.reloc
.type
!= 0)
1616 inst
.error
= _("expression too complex");
1620 memcpy (&inst
.reloc
.exp
, &expr
, sizeof (expressionS
));
1621 inst
.reloc
.type
= BFD_RELOC_ARM_MULTI
;
1622 inst
.reloc
.pc_rel
= 0;
1626 if (*str
== '|' || *str
== '+')
1632 while (another_range
);
1638 /* Types of registers in a list. */
1647 /* Parse a VFP register list. If the string is invalid return FAIL.
1648 Otherwise return the number of registers, and set PBASE to the first
1649 register. Parses registers of type ETYPE.
1650 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1651 - Q registers can be used to specify pairs of D registers
1652 - { } can be omitted from around a singleton register list
1653 FIXME: This is not implemented, as it would require backtracking in
1656 This could be done (the meaning isn't really ambiguous), but doesn't
1657 fit in well with the current parsing framework.
1658 - 32 D registers may be used (also true for VFPv3).
1659    FIXME: Types are ignored in these register lists, which is probably a
        bug. */
1663 parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
1668 enum arm_reg_type regtype
= 0;
1672 unsigned long mask
= 0;
1677 inst
.error
= _("expecting {");
1686 regtype
= REG_TYPE_VFS
;
1691 regtype
= REG_TYPE_VFD
;
1694 case REGLIST_NEON_D
:
1695 regtype
= REG_TYPE_NDQ
;
1699 if (etype
!= REGLIST_VFP_S
)
1701 /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */
1702 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
1706 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
1709 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
1716 base_reg
= max_regs
;
1720 int setmask
= 1, addregs
= 1;
1722 new_base
= arm_typed_reg_parse (&str
, regtype
, ®type
, NULL
);
1724 if (new_base
== FAIL
)
1726 first_error (_(reg_expected_msgs
[regtype
]));
1730 if (new_base
>= max_regs
)
1732 first_error (_("register out of range in list"));
1736 /* Note: a value of 2 * n is returned for the register Q<n>. */
1737 if (regtype
== REG_TYPE_NQ
)
1743 if (new_base
< base_reg
)
1744 base_reg
= new_base
;
1746 if (mask
& (setmask
<< new_base
))
1748 first_error (_("invalid register list"));
1752 if ((mask
>> new_base
) != 0 && ! warned
)
1754 as_tsktsk (_("register list not in ascending order"));
1758 mask
|= setmask
<< new_base
;
1761 if (*str
== '-') /* We have the start of a range expression */
1767 if ((high_range
= arm_typed_reg_parse (&str
, regtype
, NULL
, NULL
))
1770 inst
.error
= gettext (reg_expected_msgs
[regtype
]);
1774 if (high_range
>= max_regs
)
1776 first_error (_("register out of range in list"));
1780 if (regtype
== REG_TYPE_NQ
)
1781 high_range
= high_range
+ 1;
1783 if (high_range
<= new_base
)
1785 inst
.error
= _("register range not in ascending order");
1789 for (new_base
+= addregs
; new_base
<= high_range
; new_base
+= addregs
)
1791 if (mask
& (setmask
<< new_base
))
1793 inst
.error
= _("invalid register list");
1797 mask
|= setmask
<< new_base
;
1802 while (skip_past_comma (&str
) != FAIL
);
1806 /* Sanity check -- should have raised a parse error above. */
1807 if (count
== 0 || count
> max_regs
)
1812 /* Final test -- the registers must be consecutive. */
1814 for (i
= 0; i
< count
; i
++)
1816 if ((mask
& (1u << i
)) == 0)
1818 inst
.error
= _("non-contiguous register range");
1828 /* True if two alias types are the same. */
1831 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1839 if (a
->defined
!= b
->defined
)
1842 if ((a
->defined
& NTA_HASTYPE
) != 0
1843 && (a
->eltype
.type
!= b
->eltype
.type
1844 || a
->eltype
.size
!= b
->eltype
.size
))
1847 if ((a
->defined
& NTA_HASINDEX
) != 0
1848 && (a
->index
!= b
->index
))
1854 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1855 The base register is put in *PBASE.
1856 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1858 The register stride (minus one) is put in bit 4 of the return value.
1859 Bits [6:5] encode the list length (minus one).
1860 The type of the list elements is put in *ELTYPE, if non-NULL. */
1862 #define NEON_LANE(X) ((X) & 0xf)
1863 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1864 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
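/* Worked example (illustrative): "{d0-d3}" with no lane parses as
   lane = NEON_INTERLEAVE_LANES (14), stride 1 and length 4, giving the
   return value 14 | (0 << 4) | (3 << 5) = 0x6e; the macros above then
   recover 14, 1 and 4 respectively.  */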
1867 parse_neon_el_struct_list (char **str, unsigned *pbase,
1868                            struct neon_type_el *eltype)
1875 int leading_brace
= 0;
1876 enum arm_reg_type rtype
= REG_TYPE_NDQ
;
1878 const char *const incr_error
= _("register stride must be 1 or 2");
1879 const char *const type_error
= _("mismatched element/structure types in list");
1880 struct neon_typed_alias firsttype
;
1882 if (skip_past_char (&ptr
, '{') == SUCCESS
)
1887 struct neon_typed_alias atype
;
1888 int getreg
= parse_typed_reg_or_scalar (&ptr
, rtype
, &rtype
, &atype
);
1892 first_error (_(reg_expected_msgs
[rtype
]));
1899 if (rtype
== REG_TYPE_NQ
)
1906 else if (reg_incr
== -1)
1908 reg_incr
= getreg
- base_reg
;
1909 if (reg_incr
< 1 || reg_incr
> 2)
1911 first_error (_(incr_error
));
1915 else if (getreg
!= base_reg
+ reg_incr
* count
)
1917 first_error (_(incr_error
));
1921 if (! neon_alias_types_same (&atype
, &firsttype
))
1923 first_error (_(type_error
));
1927 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
1931 struct neon_typed_alias htype
;
1932 int hireg
, dregs
= (rtype
== REG_TYPE_NQ
) ? 2 : 1;
1934 lane
= NEON_INTERLEAVE_LANES
;
1935 else if (lane
!= NEON_INTERLEAVE_LANES
)
1937 first_error (_(type_error
));
1942 else if (reg_incr
!= 1)
1944 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
1948 hireg
= parse_typed_reg_or_scalar (&ptr
, rtype
, NULL
, &htype
);
1951 first_error (_(reg_expected_msgs
[rtype
]));
1954 if (! neon_alias_types_same (&htype
, &firsttype
))
1956 first_error (_(type_error
));
1959 count
+= hireg
+ dregs
- getreg
;
1963 /* If we're using Q registers, we can't use [] or [n] syntax. */
1964 if (rtype
== REG_TYPE_NQ
)
1970 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1974 else if (lane
!= atype
.index
)
1976 first_error (_(type_error
));
1980 else if (lane
== -1)
1981 lane
= NEON_INTERLEAVE_LANES
;
1982 else if (lane
!= NEON_INTERLEAVE_LANES
)
1984 first_error (_(type_error
));
1989 while ((count
!= 1 || leading_brace
) && skip_past_comma (&ptr
) != FAIL
);
1991 /* No lane set by [x]. We must be interleaving structures. */
1993 lane
= NEON_INTERLEAVE_LANES
;
1996 if (lane
== -1 || base_reg
== -1 || count
< 1 || count
> 4
1997 || (count
> 1 && reg_incr
== -1))
1999 first_error (_("error parsing element/structure list"));
2003 if ((count
> 1 || leading_brace
) && skip_past_char (&ptr
, '}') == FAIL
)
2005 first_error (_("expected }"));
2013 *eltype
= firsttype
.eltype
;
2018 return lane
| ((reg_incr
- 1) << 4) | ((count
- 1) << 5);
2021 /* Parse an explicit relocation suffix on an expression. This is
2022 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2023 arm_reloc_hsh contains no entries, so this function can only
2024 succeed if there is no () after the word. Returns -1 on error,
2025 BFD_RELOC_UNUSED if there wasn't any suffix. */
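/* For example (ELF only, illustrative): in ".word sym(GOTOFF)" the
   parenthesised word "GOTOFF" is the relocation suffix parsed here.  */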
2027 parse_reloc (char **str)
2029   struct reloc_entry *r;
2033 return BFD_RELOC_UNUSED
;
2038 while (*q
&& *q
!= ')' && *q
!= ',')
2043 if ((r
= hash_find_n (arm_reloc_hsh
, p
, q
- p
)) == NULL
)
2050 /* Directives: register aliases. */
2052 static struct reg_entry *
2053 insert_reg_alias (char *str, int number, int type)
2055 struct reg_entry
*new;
2058 if ((new = hash_find (arm_reg_hsh
, str
)) != 0)
2061 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str
);
2063 /* Only warn about a redefinition if it's not defined as the
2065 else if (new->number
!= number
|| new->type
!= type
)
2066 as_warn (_("ignoring redefinition of register alias '%s'"), str
);
2071 name
= xstrdup (str
);
2072 new = xmalloc (sizeof (struct reg_entry
));
2075 new->number
= number
;
2077 new->builtin
= FALSE
;
2080 if (hash_insert (arm_reg_hsh
, name
, (void *) new))
2087 insert_neon_reg_alias (char *str, int number, int type,
2088                        struct neon_typed_alias *atype)
2090 struct reg_entry
*reg
= insert_reg_alias (str
, number
, type
);
2094 first_error (_("attempt to redefine typed alias"));
2100 reg
->neon
= xmalloc (sizeof (struct neon_typed_alias
));
2101 *reg
->neon
= *atype
;
2105 /* Look for the .req directive. This is of the form:
2107 new_register_name .req existing_register_name
2109 If we find one, or if it looks sufficiently like one that we want to
2110 handle any error here, return TRUE. Otherwise return FALSE. */
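/* For example: "acc .req r4" makes "acc" usable wherever r4 would be.  */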
2113 create_register_alias (char * newname, char *p)
2115 struct reg_entry
*old
;
2116 char *oldname
, *nbuf
;
2119 /* The input scrubber ensures that whitespace after the mnemonic is
2120 collapsed to single spaces. */
2122 if (strncmp (oldname
, " .req ", 6) != 0)
2126 if (*oldname
== '\0')
2129 old
= hash_find (arm_reg_hsh
, oldname
);
2132 as_warn (_("unknown register '%s' -- .req ignored"), oldname
);
2136 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2137 the desired alias name, and p points to its end. If not, then
2138 the desired alias name is in the global original_case_string. */
2139 #ifdef TC_CASE_SENSITIVE
2142 newname
= original_case_string
;
2143 nlen
= strlen (newname
);
2146 nbuf
= alloca (nlen
+ 1);
2147 memcpy (nbuf
, newname
, nlen
);
2150 /* Create aliases under the new name as stated; an all-lowercase
2151 version of the new name; and an all-uppercase version of the new
2153 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) != NULL
)
2155 for (p
= nbuf
; *p
; p
++)
2158 if (strncmp (nbuf
, newname
, nlen
))
2160 /* If this attempt to create an additional alias fails, do not bother
2161 trying to create the all-lower case alias. We will fail and issue
2162 a second, duplicate error message. This situation arises when the
2163 programmer does something like:
2166 The second .req creates the "Foo" alias but then fails to create
2167      the artificial FOO alias because it has already been created by the
          first .req. */
2169 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) == NULL
)
2173 for (p
= nbuf
; *p
; p
++)
2176 if (strncmp (nbuf
, newname
, nlen
))
2177 insert_reg_alias (nbuf
, old
->number
, old
->type
);
2183 /* Create a Neon typed/indexed register alias using directives, e.g.:
2188 These typed registers can be used instead of the types specified after the
2189 Neon mnemonic, so long as all operands given have types. Types can also be
2190 specified directly, e.g.:
2191 vadd d0.s32, d1.s32, d2.s32 */
2194 create_neon_reg_alias (char *newname, char *p)
2196 enum arm_reg_type basetype
;
2197 struct reg_entry
*basereg
;
2198 struct reg_entry mybasereg
;
2199 struct neon_type ntype
;
2200 struct neon_typed_alias typeinfo
;
2201 char *namebuf
, *nameend
;
2204 typeinfo
.defined
= 0;
2205 typeinfo
.eltype
.type
= NT_invtype
;
2206 typeinfo
.eltype
.size
= -1;
2207 typeinfo
.index
= -1;
2211 if (strncmp (p
, " .dn ", 5) == 0)
2212 basetype
= REG_TYPE_VFD
;
2213 else if (strncmp (p
, " .qn ", 5) == 0)
2214 basetype
= REG_TYPE_NQ
;
2223 basereg
= arm_reg_parse_multi (&p
);
2225 if (basereg
&& basereg
->type
!= basetype
)
2227 as_bad (_("bad type for register"));
2231 if (basereg
== NULL
)
2234 /* Try parsing as an integer. */
2235 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2236 if (exp
.X_op
!= O_constant
)
2238 as_bad (_("expression must be constant"));
2241 basereg
= &mybasereg
;
2242 basereg
->number
= (basetype
== REG_TYPE_NQ
) ? exp
.X_add_number
* 2
2248 typeinfo
= *basereg
->neon
;
2250 if (parse_neon_type (&ntype
, &p
) == SUCCESS
)
2252 /* We got a type. */
2253 if (typeinfo
.defined
& NTA_HASTYPE
)
2255 as_bad (_("can't redefine the type of a register alias"));
2259 typeinfo
.defined
|= NTA_HASTYPE
;
2260 if (ntype
.elems
!= 1)
2262 as_bad (_("you must specify a single type only"));
2265 typeinfo
.eltype
= ntype
.el
[0];
2268 if (skip_past_char (&p
, '[') == SUCCESS
)
2271 /* We got a scalar index. */
2273 if (typeinfo
.defined
& NTA_HASINDEX
)
2275 as_bad (_("can't redefine the index of a scalar alias"));
2279 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2281 if (exp
.X_op
!= O_constant
)
2283 as_bad (_("scalar index must be constant"));
2287 typeinfo
.defined
|= NTA_HASINDEX
;
2288 typeinfo
.index
= exp
.X_add_number
;
2290 if (skip_past_char (&p
, ']') == FAIL
)
2292 as_bad (_("expecting ]"));
2297 namelen
= nameend
- newname
;
2298 namebuf
= alloca (namelen
+ 1);
2299 strncpy (namebuf
, newname
, namelen
);
2300 namebuf
[namelen
] = '\0';
2302 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2303 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2305 /* Insert name in all uppercase. */
2306 for (p
= namebuf
; *p
; p
++)
2309 if (strncmp (namebuf
, newname
, namelen
))
2310 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2311 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2313 /* Insert name in all lowercase. */
2314 for (p
= namebuf
; *p
; p
++)
2317 if (strncmp (namebuf
, newname
, namelen
))
2318 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2319 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2324 /* Should never be called, as .req goes between the alias and the
2325 register name, not at the beginning of the line. */
2328 s_req (int a ATTRIBUTE_UNUSED)
2330 as_bad (_("invalid syntax for .req directive"));
2334 s_dn (int a ATTRIBUTE_UNUSED)
2336 as_bad (_("invalid syntax for .dn directive"));
2340 s_qn (int a ATTRIBUTE_UNUSED)
2342 as_bad (_("invalid syntax for .qn directive"));
2345 /* The .unreq directive deletes an alias which was previously defined
2346 by .req. For example:
2352 s_unreq (int a ATTRIBUTE_UNUSED)
2357 name
= input_line_pointer
;
2359 while (*input_line_pointer
!= 0
2360 && *input_line_pointer
!= ' '
2361 && *input_line_pointer
!= '\n')
2362 ++input_line_pointer
;
2364 saved_char
= *input_line_pointer
;
2365 *input_line_pointer
= 0;
2368 as_bad (_("invalid syntax for .unreq directive"));
2371 struct reg_entry
*reg
= hash_find (arm_reg_hsh
, name
);
2374 as_bad (_("unknown register alias '%s'"), name
);
2375 else if (reg
->builtin
)
2376 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
2383 hash_delete (arm_reg_hsh
, name
, FALSE
);
2384 free ((char *) reg
->name
);
2389 /* Also locate the all upper case and all lower case versions.
2390 Do not complain if we cannot find one or the other as it
2391 was probably deleted above. */
2393 nbuf
= strdup (name
);
2394 for (p
= nbuf
; *p
; p
++)
2396 reg
= hash_find (arm_reg_hsh
, nbuf
);
2399 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2400 free ((char *) reg
->name
);
2406 for (p
= nbuf
; *p
; p
++)
2408 reg
= hash_find (arm_reg_hsh
, nbuf
);
2411 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2412 free ((char *) reg
->name
);
2422 *input_line_pointer
= saved_char
;
2423 demand_empty_rest_of_line ();
2426 /* Directives: Instruction set selection. */
2429 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2430 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2431 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2432 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
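/* For example (illustrative): a section holding ARM code, then a literal
   pool, then Thumb code gets $a, $d and $t mapping symbols at the three
   transition offsets.  */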
2434 static enum mstate mapstate = MAP_UNDEFINED;
2436 /* Create a new mapping symbol for the transition to STATE. */
2439 make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
2442 const char * symname
;
2449 type
= BSF_NO_FLAGS
;
2453 type
= BSF_NO_FLAGS
;
2457 type
= BSF_NO_FLAGS
;
2463 symbolP
= symbol_new (symname
, now_seg
, value
, frag
);
2464 symbol_get_bfdsym (symbolP
)->flags
|= type
| BSF_LOCAL
;
2469 THUMB_SET_FUNC (symbolP
, 0);
2470 ARM_SET_THUMB (symbolP
, 0);
2471 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2475 THUMB_SET_FUNC (symbolP
, 1);
2476 ARM_SET_THUMB (symbolP
, 1);
2477 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2485 /* Save the mapping symbols for future reference. Also check that
2486 we do not place two mapping symbols at the same offset within a
2487 frag. We'll handle overlap between frags in
2488 check_mapping_symbols. */
2491 know (frag
->tc_frag_data
.first_map
== NULL
);
2492 frag
->tc_frag_data
.first_map
= symbolP
;
2494 if (frag
->tc_frag_data
.last_map
!= NULL
)
2495 know (S_GET_VALUE (frag
->tc_frag_data
.last_map
) < S_GET_VALUE (symbolP
));
2496 frag
->tc_frag_data
.last_map
= symbolP
;
2499 /* We must sometimes convert a region marked as code to data during
2500 code alignment, if an odd number of bytes have to be padded. The
2501 code mapping symbol is pushed to an aligned address. */
2504 insert_data_mapping_symbol (enum mstate state,
2505                             valueT value, fragS *frag, offsetT bytes)
2507 /* If there was already a mapping symbol, remove it. */
2508 if (frag
->tc_frag_data
.last_map
!= NULL
2509 && S_GET_VALUE (frag
->tc_frag_data
.last_map
) == frag
->fr_address
+ value
)
2511 symbolS
*symp
= frag
->tc_frag_data
.last_map
;
2515 know (frag
->tc_frag_data
.first_map
== symp
);
2516 frag
->tc_frag_data
.first_map
= NULL
;
2518 frag
->tc_frag_data
.last_map
= NULL
;
2519 symbol_remove (symp
, &symbol_rootP
, &symbol_lastP
);
2522 make_mapping_symbol (MAP_DATA
, value
, frag
);
2523 make_mapping_symbol (state
, value
+ bytes
, frag
);
2526 static void mapping_state_2 (enum mstate state, int max_chars);
2528 /* Set the mapping state to STATE. Only call this when about to
2529 emit some STATE bytes to the file. */
2532 mapping_state (enum mstate state)
2534 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
2536 if (mapstate
== state
)
2537 /* The mapping symbol has already been emitted.
2538 There is nothing else to do. */
2540 else if (TRANSITION (MAP_UNDEFINED
, MAP_DATA
))
2541 /* This case will be evaluated later in the next else. */
2543 else if (TRANSITION (MAP_UNDEFINED
, MAP_ARM
)
2544 || TRANSITION (MAP_UNDEFINED
, MAP_THUMB
))
2546 /* Only add the symbol if the offset is > 0:
2547 if we're at the first frag, check it's size > 0;
2548 if we're not at the first frag, then for sure
2549 the offset is > 0. */
2550 struct frag
* const frag_first
= seg_info (now_seg
)->frchainP
->frch_root
;
2551 const int add_symbol
= (frag_now
!= frag_first
) || (frag_now_fix () > 0);
2554 make_mapping_symbol (MAP_DATA
, (valueT
) 0, frag_first
);
2557 mapping_state_2 (state
, 0);
2561 /* Same as mapping_state, but MAX_CHARS bytes have already been
2562 allocated. Put the mapping symbol that far back. */
2565 mapping_state_2 (enum mstate state, int max_chars)
2567 if (mapstate
== state
)
2568 /* The mapping symbol has already been emitted.
2569 There is nothing else to do. */
2573 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= state
;
2574 make_mapping_symbol (state
, (valueT
) frag_now_fix () - max_chars
, frag_now
);
2577 #define mapping_state(x) /* nothing */
2578 #define mapping_state_2(x, y) /* nothing */
2581 /* Find the real, Thumb encoded start of a Thumb function. */
2585 find_real_start (symbolS * symbolP)
2588   const char * name = S_GET_NAME (symbolP);
2589   symbolS * new_target;
2591 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2592 #define STUB_NAME ".real_start_of"
2597 /* The compiler may generate BL instructions to local labels because
2598 it needs to perform a branch to a far away location. These labels
2599 do not have a corresponding ".real_start_of" label. We check
2600 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2601 the ".real_start_of" convention for nonlocal branches. */
2602 if (S_IS_LOCAL (symbolP
) || name
[0] == '.')
2605 real_start
= ACONCAT ((STUB_NAME
, name
, NULL
));
2606 new_target
= symbol_find (real_start
);
2608 if (new_target
== NULL
)
2610 as_warn (_("Failed to find real start of function: %s\n"), name
);
2611 new_target
= symbolP
;
2619 opcode_select (int width
)
2626 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
2627 as_bad (_("selected processor does not support THUMB opcodes"));
2630 /* No need to force the alignment, since we will have been
2631 coming from ARM mode, which is word-aligned. */
2632 record_alignment (now_seg
, 1);
2639 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
2640 as_bad (_("selected processor does not support ARM opcodes"));
2645 frag_align (2, 0, 0);
2647 record_alignment (now_seg
, 1);
2652 as_bad (_("invalid instruction size selected (%d)"), width
);
2657 s_arm (int ignore ATTRIBUTE_UNUSED
)
2660 demand_empty_rest_of_line ();
2664 s_thumb (int ignore ATTRIBUTE_UNUSED
)
2667 demand_empty_rest_of_line ();
2671 s_code (int unused ATTRIBUTE_UNUSED
)
2675 temp
= get_absolute_expression ();
2680 opcode_select (temp
);
2684 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp
);
2689 s_force_thumb (int ignore ATTRIBUTE_UNUSED
)
2691 /* If we are not already in thumb mode go into it, EVEN if
2692 the target processor does not support thumb instructions.
2693 This is used by gcc/config/arm/lib1funcs.asm for example
2694 to compile interworking support functions even if the
2695 target processor should not support interworking. */
2699 record_alignment (now_seg
, 1);
2702 demand_empty_rest_of_line ();
2706 s_thumb_func (int ignore ATTRIBUTE_UNUSED
)
2710 /* The following label is the name/address of the start of a Thumb function.
2711 We need to know this for the interworking support. */
2712 label_is_thumb_function_name
= TRUE
;
2715 /* Perform a .set directive, but also mark the alias as
2716 being a thumb function. */
2719 s_thumb_set (int equiv
)
2721 /* XXX the following is a duplicate of the code for s_set() in read.c
2722 We cannot just call that code as we need to get at the symbol that
2729 /* Especial apologies for the random logic:
2730 This just grew, and could be parsed much more simply!
2732 name
= input_line_pointer
;
2733 delim
= get_symbol_end ();
2734 end_name
= input_line_pointer
;
2737 if (*input_line_pointer
!= ',')
2740 as_bad (_("expected comma after name \"%s\""), name
);
2742 ignore_rest_of_line ();
2746 input_line_pointer
++;
2749 if (name
[0] == '.' && name
[1] == '\0')
2751 /* XXX - this should not happen to .thumb_set. */
2755 if ((symbolP
= symbol_find (name
)) == NULL
2756 && (symbolP
= md_undefined_symbol (name
)) == NULL
)
2759 /* When doing symbol listings, play games with dummy fragments living
2760 outside the normal fragment chain to record the file and line info
2762 if (listing
& LISTING_SYMBOLS
)
2764 extern struct list_info_struct
* listing_tail
;
2765 fragS
* dummy_frag
= xmalloc (sizeof (fragS
));
2767 memset (dummy_frag
, 0, sizeof (fragS
));
2768 dummy_frag
->fr_type
= rs_fill
;
2769 dummy_frag
->line
= listing_tail
;
2770 symbolP
= symbol_new (name
, undefined_section
, 0, dummy_frag
);
2771 dummy_frag
->fr_symbol
= symbolP
;
2775 symbolP
= symbol_new (name
, undefined_section
, 0, &zero_address_frag
);
2778 /* "set" symbols are local unless otherwise specified. */
2779 SF_SET_LOCAL (symbolP
);
2780 #endif /* OBJ_COFF */
2781 } /* Make a new symbol. */
2783 symbol_table_insert (symbolP
);
2788 && S_IS_DEFINED (symbolP
)
2789 && S_GET_SEGMENT (symbolP
) != reg_section
)
2790 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP
));
2792 pseudo_set (symbolP
);
2794 demand_empty_rest_of_line ();
2796 /* XXX Now we come to the Thumb specific bit of code. */
2798 THUMB_SET_FUNC (symbolP
, 1);
2799 ARM_SET_THUMB (symbolP
, 1);
2800 #if defined OBJ_ELF || defined OBJ_COFF
2801 ARM_SET_INTERWORK (symbolP
, support_interwork
);
/* Directives: Mode selection.  */

/* .syntax [unified|divided] - choose the new unified syntax
   (same for Arm and Thumb encoding, modulo slight differences in what
   can be represented) or the old divergent syntax for each mode.  */

static void
s_syntax (int unused ATTRIBUTE_UNUSED)
{
  char *name, delim;

  name = input_line_pointer;
  delim = get_symbol_end ();

  if (!strcasecmp (name, "unified"))
    unified_syntax = TRUE;
  else if (!strcasecmp (name, "divided"))
    unified_syntax = FALSE;
  else
    {
      as_bad (_("unrecognized syntax mode \"%s\""), name);
      return;
    }
  *input_line_pointer = delim;
  demand_empty_rest_of_line ();
}
2831 /* Directives: sectioning and alignment. */
2833 /* Same as s_align_ptwo but align 0 => align 2. */
2836 s_align (int unused ATTRIBUTE_UNUSED
)
2841 long max_alignment
= 15;
2843 temp
= get_absolute_expression ();
2844 if (temp
> max_alignment
)
2845 as_bad (_("alignment too large: %d assumed"), temp
= max_alignment
);
2848 as_bad (_("alignment negative. 0 assumed."));
2852 if (*input_line_pointer
== ',')
2854 input_line_pointer
++;
2855 temp_fill
= get_absolute_expression ();
2867 /* Only make a frag if we HAVE to. */
2868 if (temp
&& !need_pass_2
)
2870 if (!fill_p
&& subseg_text_p (now_seg
))
2871 frag_align_code (temp
, 0);
2873 frag_align (temp
, (int) temp_fill
, 0);
2875 demand_empty_rest_of_line ();
2877 record_alignment (now_seg
, temp
);
2881 s_bss (int ignore ATTRIBUTE_UNUSED
)
2883 /* We don't support putting frags in the BSS segment, we fake it by
2884 marking in_bss, then looking at s_skip for clues. */
2885 subseg_set (bss_section
, 0);
2886 demand_empty_rest_of_line ();
2888 #ifdef md_elf_section_change_hook
2889 md_elf_section_change_hook ();
2894 s_even (int ignore ATTRIBUTE_UNUSED
)
2896 /* Never make frag if expect extra pass. */
2898 frag_align (1, 0, 0);
2900 record_alignment (now_seg
, 1);
2902 demand_empty_rest_of_line ();
/* Directives: Literal pools.  */

static literal_pool *
find_literal_pool (void)
{
  literal_pool * pool;

  for (pool = list_of_pools; pool != NULL; pool = pool->next)
    {
      if (pool->section == now_seg
          && pool->sub_section == now_subseg)
        break;
    }

  return pool;
}

static literal_pool *
find_or_make_literal_pool (void)
{
  /* Next literal pool ID number.  */
  static unsigned int latest_pool_num = 1;
  literal_pool *      pool;

  pool = find_literal_pool ();

  if (pool == NULL)
    {
      /* Create a new pool.  */
      pool = xmalloc (sizeof (* pool));
      if (! pool)
        return NULL;

      pool->next_free_entry = 0;
      pool->section         = now_seg;
      pool->sub_section     = now_subseg;
      pool->next            = list_of_pools;
      pool->symbol          = NULL;

      /* Add it to the list.  */
      list_of_pools = pool;
    }

  /* New pools, and emptied pools, will have a NULL symbol.  */
  if (pool->symbol == NULL)
    {
      pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
                                    (valueT) 0, &zero_address_frag);
      pool->id = latest_pool_num ++;
    }

  /* Done.  */
  return pool;
}
2960 /* Add the literal in the global 'inst'
2961 structure to the relevant literal pool. */
2964 add_to_lit_pool (void)
2966 literal_pool
* pool
;
2969 pool
= find_or_make_literal_pool ();
2971 /* Check if this literal value is already in the pool. */
2972 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
2974 if ((pool
->literals
[entry
].X_op
== inst
.reloc
.exp
.X_op
)
2975 && (inst
.reloc
.exp
.X_op
== O_constant
)
2976 && (pool
->literals
[entry
].X_add_number
2977 == inst
.reloc
.exp
.X_add_number
)
2978 && (pool
->literals
[entry
].X_unsigned
2979 == inst
.reloc
.exp
.X_unsigned
))
2982 if ((pool
->literals
[entry
].X_op
== inst
.reloc
.exp
.X_op
)
2983 && (inst
.reloc
.exp
.X_op
== O_symbol
)
2984 && (pool
->literals
[entry
].X_add_number
2985 == inst
.reloc
.exp
.X_add_number
)
2986 && (pool
->literals
[entry
].X_add_symbol
2987 == inst
.reloc
.exp
.X_add_symbol
)
2988 && (pool
->literals
[entry
].X_op_symbol
2989 == inst
.reloc
.exp
.X_op_symbol
))
2993 /* Do we need to create a new entry? */
2994 if (entry
== pool
->next_free_entry
)
2996 if (entry
>= MAX_LITERAL_POOL_SIZE
)
2998 inst
.error
= _("literal pool overflow");
3002 pool
->literals
[entry
] = inst
.reloc
.exp
;
3003 pool
->next_free_entry
+= 1;
3006 inst
.reloc
.exp
.X_op
= O_symbol
;
3007 inst
.reloc
.exp
.X_add_number
= ((int) entry
) * 4;
3008 inst
.reloc
.exp
.X_add_symbol
= pool
->symbol
;
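  /* Added note (illustrative, not from the original source): each pool
     entry is one word, so the literal is addressed as pool->symbol plus
     entry * 4; e.g. the third distinct literal placed in a pool ends up
     8 bytes past the pool's start symbol.  */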
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value. Thats what these functions do.  */

static void
symbol_locate (symbolS *    symbolP,
               const char * name,     /* It is copied, the caller can modify.  */
               segT         segment,  /* Segment identifier (SEG_<something>).  */
               valueT       valu,     /* Symbol value.  */
               fragS *      frag)     /* Associated fragment.  */
{
  unsigned int name_length;
  char * preserved_copy_of_name;

  name_length = strlen (name) + 1;   /* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
3066 s_ltorg (int ignored ATTRIBUTE_UNUSED
)
3069 literal_pool
* pool
;
3072 pool
= find_literal_pool ();
3074 || pool
->symbol
== NULL
3075 || pool
->next_free_entry
== 0)
3078 mapping_state (MAP_DATA
);
3080 /* Align pool as you have word accesses.
3081 Only make a frag if we have to. */
3083 frag_align (2, 0, 0);
3085 record_alignment (now_seg
, 2);
3087 sprintf (sym_name
, "$$lit_\002%x", pool
->id
);
3089 symbol_locate (pool
->symbol
, sym_name
, now_seg
,
3090 (valueT
) frag_now_fix (), frag_now
);
3091 symbol_table_insert (pool
->symbol
);
3093 ARM_SET_THUMB (pool
->symbol
, thumb_mode
);
3095 #if defined OBJ_COFF || defined OBJ_ELF
3096 ARM_SET_INTERWORK (pool
->symbol
, support_interwork
);
3099 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3100 /* First output the expression in the instruction to the pool. */
3101 emit_expr (&(pool
->literals
[entry
]), 4); /* .word */
3103 /* Mark the pool as empty. */
3104 pool
->next_free_entry
= 0;
3105 pool
->symbol
= NULL
;
3109 /* Forward declarations for functions below, in the MD interface
3111 static void fix_new_arm (fragS
*, int, short, expressionS
*, int, int);
3112 static valueT
create_unwind_entry (int);
3113 static void start_unwind_section (const segT
, int);
3114 static void add_unwind_opcode (valueT
, int);
3115 static void flush_pending_unwind (void);
3117 /* Directives: Data. */
3120 s_arm_elf_cons (int nbytes
)
3124 #ifdef md_flush_pending_output
3125 md_flush_pending_output ();
3128 if (is_it_end_of_statement ())
3130 demand_empty_rest_of_line ();
3134 #ifdef md_cons_align
3135 md_cons_align (nbytes
);
3138 mapping_state (MAP_DATA
);
3142 char *base
= input_line_pointer
;
3146 if (exp
.X_op
!= O_symbol
)
3147 emit_expr (&exp
, (unsigned int) nbytes
);
3150 char *before_reloc
= input_line_pointer
;
3151 reloc
= parse_reloc (&input_line_pointer
);
3154 as_bad (_("unrecognized relocation suffix"));
3155 ignore_rest_of_line ();
3158 else if (reloc
== BFD_RELOC_UNUSED
)
3159 emit_expr (&exp
, (unsigned int) nbytes
);
3162 reloc_howto_type
*howto
= bfd_reloc_type_lookup (stdoutput
, reloc
);
3163 int size
= bfd_get_reloc_size (howto
);
3165 if (reloc
== BFD_RELOC_ARM_PLT32
)
3167 as_bad (_("(plt) is only valid on branch targets"));
3168 reloc
= BFD_RELOC_UNUSED
;
3173 as_bad (_("%s relocations do not fit in %d bytes"),
3174 howto
->name
, nbytes
);
3177 /* We've parsed an expression stopping at O_symbol.
3178 But there may be more expression left now that we
3179 have parsed the relocation marker. Parse it again.
3180 XXX Surely there is a cleaner way to do this. */
3181 char *p
= input_line_pointer
;
3183 char *save_buf
= alloca (input_line_pointer
- base
);
3184 memcpy (save_buf
, base
, input_line_pointer
- base
);
3185 memmove (base
+ (input_line_pointer
- before_reloc
),
3186 base
, before_reloc
- base
);
3188 input_line_pointer
= base
+ (input_line_pointer
-before_reloc
);
3190 memcpy (base
, save_buf
, p
- base
);
3192 offset
= nbytes
- size
;
3193 p
= frag_more ((int) nbytes
);
3194 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
+ offset
,
3195 size
, &exp
, 0, reloc
);
3200 while (*input_line_pointer
++ == ',');
3202 /* Put terminator back into stream. */
3203 input_line_pointer
--;
3204 demand_empty_rest_of_line ();
/* Emit an expression containing a 32-bit thumb instruction.
   Implementation based on put_thumb32_insn.  */

static void
emit_thumb32_expr (expressionS * exp)
{
  expressionS exp_high = *exp;

  exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
  emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
  exp->X_add_number &= 0xffff;
  emit_expr (exp, (unsigned int) THUMB_SIZE);
}
3221 /* Guess the instruction size based on the opcode. */
3224 thumb_insn_size (int opcode
)
3226 if ((unsigned int) opcode
< 0xe800u
)
3228 else if ((unsigned int) opcode
>= 0xe8000000u
)
3235 emit_insn (expressionS
*exp
, int nbytes
)
3239 if (exp
->X_op
== O_constant
)
3244 size
= thumb_insn_size (exp
->X_add_number
);
3248 if (size
== 2 && (unsigned int)exp
->X_add_number
> 0xffffu
)
3250 as_bad (_(".inst.n operand too big. "\
3251 "Use .inst.w instead"));
3256 if (now_it
.state
== AUTOMATIC_IT_BLOCK
)
3257 set_it_insn_type_nonvoid (OUTSIDE_IT_INSN
, 0);
3259 set_it_insn_type_nonvoid (NEUTRAL_IT_INSN
, 0);
3261 if (thumb_mode
&& (size
> THUMB_SIZE
) && !target_big_endian
)
3262 emit_thumb32_expr (exp
);
3264 emit_expr (exp
, (unsigned int) size
);
3266 it_fsm_post_encode ();
3270 as_bad (_("cannot determine Thumb instruction size. " \
3271 "Use .inst.n/.inst.w instead"));
3274 as_bad (_("constant expression required"));
3279 /* Like s_arm_elf_cons but do not use md_cons_align and
3280 set the mapping state to MAP_ARM/MAP_THUMB. */
3283 s_arm_elf_inst (int nbytes
)
3285 if (is_it_end_of_statement ())
3287 demand_empty_rest_of_line ();
3291 /* Calling mapping_state () here will not change ARM/THUMB,
3292 but will ensure not to be in DATA state. */
3295 mapping_state (MAP_THUMB
);
3300 as_bad (_("width suffixes are invalid in ARM mode"));
3301 ignore_rest_of_line ();
3307 mapping_state (MAP_ARM
);
3316 if (! emit_insn (& exp
, nbytes
))
3318 ignore_rest_of_line ();
3322 while (*input_line_pointer
++ == ',');
3324 /* Put terminator back into stream. */
3325 input_line_pointer
--;
3326 demand_empty_rest_of_line ();
3329 /* Parse a .rel31 directive. */
3332 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED
)
3339 if (*input_line_pointer
== '1')
3340 highbit
= 0x80000000;
3341 else if (*input_line_pointer
!= '0')
3342 as_bad (_("expected 0 or 1"));
3344 input_line_pointer
++;
3345 if (*input_line_pointer
!= ',')
3346 as_bad (_("missing comma"));
3347 input_line_pointer
++;
3349 #ifdef md_flush_pending_output
3350 md_flush_pending_output ();
3353 #ifdef md_cons_align
3357 mapping_state (MAP_DATA
);
3362 md_number_to_chars (p
, highbit
, 4);
3363 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 1,
3364 BFD_RELOC_ARM_PREL31
);
3366 demand_empty_rest_of_line ();
3369 /* Directives: AEABI stack-unwind tables. */
3371 /* Parse an unwind_fnstart directive. Simply records the current location. */
3374 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED
)
3376 demand_empty_rest_of_line ();
3377 if (unwind
.proc_start
)
3379 as_bad (_("duplicate .fnstart directive"));
3383 /* Mark the start of the function. */
3384 unwind
.proc_start
= expr_build_dot ();
3386 /* Reset the rest of the unwind info. */
3387 unwind
.opcode_count
= 0;
3388 unwind
.table_entry
= NULL
;
3389 unwind
.personality_routine
= NULL
;
3390 unwind
.personality_index
= -1;
3391 unwind
.frame_size
= 0;
3392 unwind
.fp_offset
= 0;
3393 unwind
.fp_reg
= REG_SP
;
3395 unwind
.sp_restored
= 0;
3399 /* Parse a handlerdata directive. Creates the exception handling table entry
3400 for the function. */
3403 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED
)
3405 demand_empty_rest_of_line ();
3406 if (!unwind
.proc_start
)
3407 as_bad (MISSING_FNSTART
);
3409 if (unwind
.table_entry
)
3410 as_bad (_("duplicate .handlerdata directive"));
3412 create_unwind_entry (1);
3415 /* Parse an unwind_fnend directive. Generates the index table entry. */
3418 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED
)
3424 demand_empty_rest_of_line ();
3426 if (!unwind
.proc_start
)
3428 as_bad (_(".fnend directive without .fnstart"));
3432 /* Add eh table entry. */
3433 if (unwind
.table_entry
== NULL
)
3434 val
= create_unwind_entry (0);
3438 /* Add index table entry. This is two words. */
3439 start_unwind_section (unwind
.saved_seg
, 1);
3440 frag_align (2, 0, 0);
3441 record_alignment (now_seg
, 2);
3443 ptr
= frag_more (8);
3444 where
= frag_now_fix () - 8;
3446 /* Self relative offset of the function start. */
3447 fix_new (frag_now
, where
, 4, unwind
.proc_start
, 0, 1,
3448 BFD_RELOC_ARM_PREL31
);
3450 /* Indicate dependency on EHABI-defined personality routines to the
3451 linker, if it hasn't been done already. */
3452 if (unwind
.personality_index
>= 0 && unwind
.personality_index
< 3
3453 && !(marked_pr_dependency
& (1 << unwind
.personality_index
)))
3455 static const char *const name
[] =
3457 "__aeabi_unwind_cpp_pr0",
3458 "__aeabi_unwind_cpp_pr1",
3459 "__aeabi_unwind_cpp_pr2"
3461 symbolS
*pr
= symbol_find_or_make (name
[unwind
.personality_index
]);
3462 fix_new (frag_now
, where
, 0, pr
, 0, 1, BFD_RELOC_NONE
);
3463 marked_pr_dependency
|= 1 << unwind
.personality_index
;
3464 seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
3465 = marked_pr_dependency
;
3469 /* Inline exception table entry. */
3470 md_number_to_chars (ptr
+ 4, val
, 4);
3472 /* Self relative offset of the table entry. */
3473 fix_new (frag_now
, where
+ 4, 4, unwind
.table_entry
, 0, 1,
3474 BFD_RELOC_ARM_PREL31
);
3476 /* Restore the original section. */
3477 subseg_set (unwind
.saved_seg
, unwind
.saved_subseg
);
3479 unwind
.proc_start
= NULL
;
3483 /* Parse an unwind_cantunwind directive. */
3486 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED
)
3488 demand_empty_rest_of_line ();
3489 if (!unwind
.proc_start
)
3490 as_bad (MISSING_FNSTART
);
3492 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3493 as_bad (_("personality routine specified for cantunwind frame"));
3495 unwind
.personality_index
= -2;
3499 /* Parse a personalityindex directive. */
3502 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED
)
3506 if (!unwind
.proc_start
)
3507 as_bad (MISSING_FNSTART
);
3509 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3510 as_bad (_("duplicate .personalityindex directive"));
3514 if (exp
.X_op
!= O_constant
3515 || exp
.X_add_number
< 0 || exp
.X_add_number
> 15)
3517 as_bad (_("bad personality routine number"));
3518 ignore_rest_of_line ();
3522 unwind
.personality_index
= exp
.X_add_number
;
3524 demand_empty_rest_of_line ();
3528 /* Parse a personality directive. */
3531 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED
)
3535 if (!unwind
.proc_start
)
3536 as_bad (MISSING_FNSTART
);
3538 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3539 as_bad (_("duplicate .personality directive"));
3541 name
= input_line_pointer
;
3542 c
= get_symbol_end ();
3543 p
= input_line_pointer
;
3544 unwind
.personality_routine
= symbol_find_or_make (name
);
3546 demand_empty_rest_of_line ();
3550 /* Parse a directive saving core registers. */
3553 s_arm_unwind_save_core (void)
3559 range
= parse_reg_list (&input_line_pointer
);
3562 as_bad (_("expected register list"));
3563 ignore_rest_of_line ();
3567 demand_empty_rest_of_line ();
3569 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3570 into .unwind_save {..., sp...}. We aren't bothered about the value of
3571 ip because it is clobbered by calls. */
3572 if (unwind
.sp_restored
&& unwind
.fp_reg
== 12
3573 && (range
& 0x3000) == 0x1000)
3575 unwind
.opcode_count
--;
3576 unwind
.sp_restored
= 0;
3577 range
= (range
| 0x2000) & ~0x1000;
3578 unwind
.pending_offset
= 0;
3584 /* See if we can use the short opcodes. These pop a block of up to 8
3585 registers starting with r4, plus maybe r14. */
3586 for (n
= 0; n
< 8; n
++)
3588 /* Break at the first non-saved register. */
3589 if ((range
& (1 << (n
+ 4))) == 0)
3592 /* See if there are any other bits set. */
3593 if (n
== 0 || (range
& (0xfff0 << n
) & 0xbff0) != 0)
3595 /* Use the long form. */
3596 op
= 0x8000 | ((range
>> 4) & 0xfff);
3597 add_unwind_opcode (op
, 2);
3601 /* Use the short form. */
3603 op
= 0xa8; /* Pop r14. */
3605 op
= 0xa0; /* Do not pop r14. */
3607 add_unwind_opcode (op
, 1);
3614 op
= 0xb100 | (range
& 0xf);
3615 add_unwind_opcode (op
, 2);
3618 /* Record the number of bytes pushed. */
3619 for (n
= 0; n
< 16; n
++)
3621 if (range
& (1 << n
))
3622 unwind
.frame_size
+= 4;
3627 /* Parse a directive saving FPA registers. */
3630 s_arm_unwind_save_fpa (int reg
)
3636 /* Get Number of registers to transfer. */
3637 if (skip_past_comma (&input_line_pointer
) != FAIL
)
3640 exp
.X_op
= O_illegal
;
3642 if (exp
.X_op
!= O_constant
)
3644 as_bad (_("expected , <constant>"));
3645 ignore_rest_of_line ();
3649 num_regs
= exp
.X_add_number
;
3651 if (num_regs
< 1 || num_regs
> 4)
3653 as_bad (_("number of registers must be in the range [1:4]"));
3654 ignore_rest_of_line ();
3658 demand_empty_rest_of_line ();
3663 op
= 0xb4 | (num_regs
- 1);
3664 add_unwind_opcode (op
, 1);
3669 op
= 0xc800 | (reg
<< 4) | (num_regs
- 1);
3670 add_unwind_opcode (op
, 2);
3672 unwind
.frame_size
+= num_regs
* 12;
3676 /* Parse a directive saving VFP registers for ARMv6 and above. */
3679 s_arm_unwind_save_vfp_armv6 (void)
3684 int num_vfpv3_regs
= 0;
3685 int num_regs_below_16
;
3687 count
= parse_vfp_reg_list (&input_line_pointer
, &start
, REGLIST_VFP_D
);
3690 as_bad (_("expected register list"));
3691 ignore_rest_of_line ();
3695 demand_empty_rest_of_line ();
3697 /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
3698 than FSTMX/FLDMX-style ones). */
3700 /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31. */
3702 num_vfpv3_regs
= count
;
3703 else if (start
+ count
> 16)
3704 num_vfpv3_regs
= start
+ count
- 16;
3706 if (num_vfpv3_regs
> 0)
3708 int start_offset
= start
> 16 ? start
- 16 : 0;
3709 op
= 0xc800 | (start_offset
<< 4) | (num_vfpv3_regs
- 1);
3710 add_unwind_opcode (op
, 2);
3713 /* Generate opcode for registers numbered in the range 0 .. 15. */
3714 num_regs_below_16
= num_vfpv3_regs
> 0 ? 16 - (int) start
: count
;
3715 gas_assert (num_regs_below_16
+ num_vfpv3_regs
== count
);
3716 if (num_regs_below_16
> 0)
3718 op
= 0xc900 | (start
<< 4) | (num_regs_below_16
- 1);
3719 add_unwind_opcode (op
, 2);
3722 unwind
.frame_size
+= count
* 8;
3726 /* Parse a directive saving VFP registers for pre-ARMv6. */
3729 s_arm_unwind_save_vfp (void)
3735 count
= parse_vfp_reg_list (&input_line_pointer
, ®
, REGLIST_VFP_D
);
3738 as_bad (_("expected register list"));
3739 ignore_rest_of_line ();
3743 demand_empty_rest_of_line ();
3748 op
= 0xb8 | (count
- 1);
3749 add_unwind_opcode (op
, 1);
3754 op
= 0xb300 | (reg
<< 4) | (count
- 1);
3755 add_unwind_opcode (op
, 2);
3757 unwind
.frame_size
+= count
* 8 + 4;
3761 /* Parse a directive saving iWMMXt data registers. */
3764 s_arm_unwind_save_mmxwr (void)
3772 if (*input_line_pointer
== '{')
3773 input_line_pointer
++;
3777 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
3781 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
3786 as_tsktsk (_("register list not in ascending order"));
3789 if (*input_line_pointer
== '-')
3791 input_line_pointer
++;
3792 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
3795 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
3798 else if (reg
>= hi_reg
)
3800 as_bad (_("bad register range"));
3803 for (; reg
< hi_reg
; reg
++)
3807 while (skip_past_comma (&input_line_pointer
) != FAIL
);
3809 if (*input_line_pointer
== '}')
3810 input_line_pointer
++;
3812 demand_empty_rest_of_line ();
3814 /* Generate any deferred opcodes because we're going to be looking at
3816 flush_pending_unwind ();
3818 for (i
= 0; i
< 16; i
++)
3820 if (mask
& (1 << i
))
3821 unwind
.frame_size
+= 8;
3824 /* Attempt to combine with a previous opcode. We do this because gcc
3825 likes to output separate unwind directives for a single block of
3827 if (unwind
.opcode_count
> 0)
3829 i
= unwind
.opcodes
[unwind
.opcode_count
- 1];
3830 if ((i
& 0xf8) == 0xc0)
3833 /* Only merge if the blocks are contiguous. */
3836 if ((mask
& 0xfe00) == (1 << 9))
3838 mask
|= ((1 << (i
+ 11)) - 1) & 0xfc00;
3839 unwind
.opcode_count
--;
3842 else if (i
== 6 && unwind
.opcode_count
>= 2)
3844 i
= unwind
.opcodes
[unwind
.opcode_count
- 2];
3848 op
= 0xffff << (reg
- 1);
3850 && ((mask
& op
) == (1u << (reg
- 1))))
3852 op
= (1 << (reg
+ i
+ 1)) - 1;
3853 op
&= ~((1 << reg
) - 1);
3855 unwind
.opcode_count
-= 2;
3862 /* We want to generate opcodes in the order the registers have been
3863 saved, ie. descending order. */
3864 for (reg
= 15; reg
>= -1; reg
--)
3866 /* Save registers in blocks. */
3868 || !(mask
& (1 << reg
)))
3870 /* We found an unsaved reg. Generate opcodes to save the
3877 op
= 0xc0 | (hi_reg
- 10);
3878 add_unwind_opcode (op
, 1);
3883 op
= 0xc600 | ((reg
+ 1) << 4) | ((hi_reg
- reg
) - 1);
3884 add_unwind_opcode (op
, 2);
3893 ignore_rest_of_line ();
3897 s_arm_unwind_save_mmxwcg (void)
3904 if (*input_line_pointer
== '{')
3905 input_line_pointer
++;
3909 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
3913 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
3919 as_tsktsk (_("register list not in ascending order"));
3922 if (*input_line_pointer
== '-')
3924 input_line_pointer
++;
3925 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
3928 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
3931 else if (reg
>= hi_reg
)
3933 as_bad (_("bad register range"));
3936 for (; reg
< hi_reg
; reg
++)
3940 while (skip_past_comma (&input_line_pointer
) != FAIL
);
3942 if (*input_line_pointer
== '}')
3943 input_line_pointer
++;
3945 demand_empty_rest_of_line ();
3947 /* Generate any deferred opcodes because we're going to be looking at
3949 flush_pending_unwind ();
3951 for (reg
= 0; reg
< 16; reg
++)
3953 if (mask
& (1 << reg
))
3954 unwind
.frame_size
+= 4;
3957 add_unwind_opcode (op
, 2);
3960 ignore_rest_of_line ();
3964 /* Parse an unwind_save directive.
3965 If the argument is non-zero, this is a .vsave directive. */
3968 s_arm_unwind_save (int arch_v6
)
3971 struct reg_entry
*reg
;
3972 bfd_boolean had_brace
= FALSE
;
3974 if (!unwind
.proc_start
)
3975 as_bad (MISSING_FNSTART
);
3977 /* Figure out what sort of save we have. */
3978 peek
= input_line_pointer
;
3986 reg
= arm_reg_parse_multi (&peek
);
3990 as_bad (_("register expected"));
3991 ignore_rest_of_line ();
4000 as_bad (_("FPA .unwind_save does not take a register list"));
4001 ignore_rest_of_line ();
4004 input_line_pointer
= peek
;
4005 s_arm_unwind_save_fpa (reg
->number
);
4008 case REG_TYPE_RN
: s_arm_unwind_save_core (); return;
4011 s_arm_unwind_save_vfp_armv6 ();
4013 s_arm_unwind_save_vfp ();
4015 case REG_TYPE_MMXWR
: s_arm_unwind_save_mmxwr (); return;
4016 case REG_TYPE_MMXWCG
: s_arm_unwind_save_mmxwcg (); return;
4019 as_bad (_(".unwind_save does not support this kind of register"));
4020 ignore_rest_of_line ();
4025 /* Parse an unwind_movsp directive. */
4028 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED
)
4034 if (!unwind
.proc_start
)
4035 as_bad (MISSING_FNSTART
);
4037 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4040 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_RN
]));
4041 ignore_rest_of_line ();
4045 /* Optional constant. */
4046 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4048 if (immediate_for_directive (&offset
) == FAIL
)
4054 demand_empty_rest_of_line ();
4056 if (reg
== REG_SP
|| reg
== REG_PC
)
4058 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4062 if (unwind
.fp_reg
!= REG_SP
)
4063 as_bad (_("unexpected .unwind_movsp directive"));
4065 /* Generate opcode to restore the value. */
4067 add_unwind_opcode (op
, 1);
4069 /* Record the information for later. */
4070 unwind
.fp_reg
= reg
;
4071 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4072 unwind
.sp_restored
= 1;
4075 /* Parse an unwind_pad directive. */
4078 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED
)
4082 if (!unwind
.proc_start
)
4083 as_bad (MISSING_FNSTART
);
4085 if (immediate_for_directive (&offset
) == FAIL
)
4090 as_bad (_("stack increment must be multiple of 4"));
4091 ignore_rest_of_line ();
4095 /* Don't generate any opcodes, just record the details for later. */
4096 unwind
.frame_size
+= offset
;
4097 unwind
.pending_offset
+= offset
;
4099 demand_empty_rest_of_line ();
4102 /* Parse an unwind_setfp directive. */
4105 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED
)
4111 if (!unwind
.proc_start
)
4112 as_bad (MISSING_FNSTART
);
4114 fp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4115 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4118 sp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4120 if (fp_reg
== FAIL
|| sp_reg
== FAIL
)
4122 as_bad (_("expected <reg>, <reg>"));
4123 ignore_rest_of_line ();
4127 /* Optional constant. */
4128 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4130 if (immediate_for_directive (&offset
) == FAIL
)
4136 demand_empty_rest_of_line ();
4138 if (sp_reg
!= REG_SP
&& sp_reg
!= unwind
.fp_reg
)
4140 as_bad (_("register must be either sp or set by a previous"
4141 "unwind_movsp directive"));
4145 /* Don't generate any opcodes, just record the information for later. */
4146 unwind
.fp_reg
= fp_reg
;
4148 if (sp_reg
== REG_SP
)
4149 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4151 unwind
.fp_offset
-= offset
;
4154 /* Parse an unwind_raw directive. */
4157 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED
)
4160 /* This is an arbitrary limit. */
4161 unsigned char op
[16];
4164 if (!unwind
.proc_start
)
4165 as_bad (MISSING_FNSTART
);
4168 if (exp
.X_op
== O_constant
4169 && skip_past_comma (&input_line_pointer
) != FAIL
)
4171 unwind
.frame_size
+= exp
.X_add_number
;
4175 exp
.X_op
= O_illegal
;
4177 if (exp
.X_op
!= O_constant
)
4179 as_bad (_("expected <offset>, <opcode>"));
4180 ignore_rest_of_line ();
4186 /* Parse the opcode. */
4191 as_bad (_("unwind opcode too long"));
4192 ignore_rest_of_line ();
4194 if (exp
.X_op
!= O_constant
|| exp
.X_add_number
& ~0xff)
4196 as_bad (_("invalid unwind opcode"));
4197 ignore_rest_of_line ();
4200 op
[count
++] = exp
.X_add_number
;
4202 /* Parse the next byte. */
4203 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4209 /* Add the opcode bytes in reverse order. */
4211 add_unwind_opcode (op
[count
], 1);
4213 demand_empty_rest_of_line ();
4217 /* Parse a .eabi_attribute directive. */
4220 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED
)
4222 int tag
= s_vendor_attribute (OBJ_ATTR_PROC
);
4224 if (tag
< NUM_KNOWN_OBJ_ATTRIBUTES
)
4225 attributes_set_explicitly
[tag
] = 1;
4227 #endif /* OBJ_ELF */
4229 static void s_arm_arch (int);
4230 static void s_arm_object_arch (int);
4231 static void s_arm_cpu (int);
4232 static void s_arm_fpu (int);
4237 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
4244 if (exp
.X_op
== O_symbol
)
4245 exp
.X_op
= O_secrel
;
4247 emit_expr (&exp
, 4);
4249 while (*input_line_pointer
++ == ',');
4251 input_line_pointer
--;
4252 demand_empty_rest_of_line ();
4256 /* This table describes all the machine specific pseudo-ops the assembler
4257 has to support. The fields are:
4258 pseudo-op name without dot
4259 function to call to execute this pseudo-op
4260 Integer arg to pass to the function. */
4262 const pseudo_typeS md_pseudo_table
[] =
4264 /* Never called because '.req' does not start a line. */
4265 { "req", s_req
, 0 },
4266 /* Following two are likewise never called. */
4269 { "unreq", s_unreq
, 0 },
4270 { "bss", s_bss
, 0 },
4271 { "align", s_align
, 0 },
4272 { "arm", s_arm
, 0 },
4273 { "thumb", s_thumb
, 0 },
4274 { "code", s_code
, 0 },
4275 { "force_thumb", s_force_thumb
, 0 },
4276 { "thumb_func", s_thumb_func
, 0 },
4277 { "thumb_set", s_thumb_set
, 0 },
4278 { "even", s_even
, 0 },
4279 { "ltorg", s_ltorg
, 0 },
4280 { "pool", s_ltorg
, 0 },
4281 { "syntax", s_syntax
, 0 },
4282 { "cpu", s_arm_cpu
, 0 },
4283 { "arch", s_arm_arch
, 0 },
4284 { "object_arch", s_arm_object_arch
, 0 },
4285 { "fpu", s_arm_fpu
, 0 },
4287 { "word", s_arm_elf_cons
, 4 },
4288 { "long", s_arm_elf_cons
, 4 },
4289 { "inst.n", s_arm_elf_inst
, 2 },
4290 { "inst.w", s_arm_elf_inst
, 4 },
4291 { "inst", s_arm_elf_inst
, 0 },
4292 { "rel31", s_arm_rel31
, 0 },
4293 { "fnstart", s_arm_unwind_fnstart
, 0 },
4294 { "fnend", s_arm_unwind_fnend
, 0 },
4295 { "cantunwind", s_arm_unwind_cantunwind
, 0 },
4296 { "personality", s_arm_unwind_personality
, 0 },
4297 { "personalityindex", s_arm_unwind_personalityindex
, 0 },
4298 { "handlerdata", s_arm_unwind_handlerdata
, 0 },
4299 { "save", s_arm_unwind_save
, 0 },
4300 { "vsave", s_arm_unwind_save
, 1 },
4301 { "movsp", s_arm_unwind_movsp
, 0 },
4302 { "pad", s_arm_unwind_pad
, 0 },
4303 { "setfp", s_arm_unwind_setfp
, 0 },
4304 { "unwind_raw", s_arm_unwind_raw
, 0 },
4305 { "eabi_attribute", s_arm_eabi_attribute
, 0 },
4309 /* These are used for dwarf. */
4313 /* These are used for dwarf2. */
4314 { "file", (void (*) (int)) dwarf2_directive_file
, 0 },
4315 { "loc", dwarf2_directive_loc
, 0 },
4316 { "loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0 },
4318 { "extend", float_cons
, 'x' },
4319 { "ldouble", float_cons
, 'x' },
4320 { "packed", float_cons
, 'p' },
4322 {"secrel32", pe_directive_secrel
, 0},
/* Parser functions used exclusively in instruction operands.  */

/* Generic immediate-value read function for use in insn parsing.
   STR points to the beginning of the immediate (the leading #);
   VAL receives the value; if the value is outside [MIN, MAX]
   issue an error.  PREFIX_OPT is true if the immediate prefix is
   optional.  */

static int
parse_immediate (char **str, int *val, int min, int max,
                 bfd_boolean prefix_opt)
{
  expressionS exp;
  my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
  if (exp.X_op != O_constant)
    {
      inst.error = _("constant expression required");
      return FAIL;
    }

  if (exp.X_add_number < min || exp.X_add_number > max)
    {
      inst.error = _("immediate value out of range");
      return FAIL;
    }

  *val = exp.X_add_number;
  return SUCCESS;
}
4357 /* Less-generic immediate-value read function with the possibility of loading a
4358 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4359 instructions. Puts the result directly in inst.operands[i]. */
4362 parse_big_immediate (char **str
, int i
)
4367 my_get_expression (&exp
, &ptr
, GE_OPT_PREFIX_BIG
);
4369 if (exp
.X_op
== O_constant
)
4371 inst
.operands
[i
].imm
= exp
.X_add_number
& 0xffffffff;
4372 /* If we're on a 64-bit host, then a 64-bit number can be returned using
4373 O_constant. We have to be careful not to break compilation for
4374 32-bit X_add_number, though. */
4375 if ((exp
.X_add_number
& ~0xffffffffl
) != 0)
4377 /* X >> 32 is illegal if sizeof (exp.X_add_number) == 4. */
4378 inst
.operands
[i
].reg
= ((exp
.X_add_number
>> 16) >> 16) & 0xffffffff;
4379 inst
.operands
[i
].regisimm
= 1;
4382 else if (exp
.X_op
== O_big
4383 && LITTLENUM_NUMBER_OF_BITS
* exp
.X_add_number
> 32
4384 && LITTLENUM_NUMBER_OF_BITS
* exp
.X_add_number
<= 64)
4386 unsigned parts
= 32 / LITTLENUM_NUMBER_OF_BITS
, j
, idx
= 0;
4387 /* Bignums have their least significant bits in
4388 generic_bignum[0]. Make sure we put 32 bits in imm and
4389 32 bits in reg, in a (hopefully) portable way. */
4390 gas_assert (parts
!= 0);
4391 inst
.operands
[i
].imm
= 0;
4392 for (j
= 0; j
< parts
; j
++, idx
++)
4393 inst
.operands
[i
].imm
|= generic_bignum
[idx
]
4394 << (LITTLENUM_NUMBER_OF_BITS
* j
);
4395 inst
.operands
[i
].reg
= 0;
4396 for (j
= 0; j
< parts
; j
++, idx
++)
4397 inst
.operands
[i
].reg
|= generic_bignum
[idx
]
4398 << (LITTLENUM_NUMBER_OF_BITS
* j
);
4399 inst
.operands
[i
].regisimm
= 1;
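      /* Added note (illustrative, not from the original source): whether
         the value arrives as a 64-bit O_constant or as a bignum, the low
         32 bits end up in .imm and the high 32 bits in .reg; e.g.
         0x0123456789abcdef yields .imm = 0x89abcdef and .reg = 0x01234567,
         with .regisimm marking that .reg carries immediate bits rather
         than a register number.  */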
4409 /* Returns the pseudo-register number of an FPA immediate constant,
4410 or FAIL if there isn't a valid constant here. */
4413 parse_fpa_immediate (char ** str
)
4415 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
4421 /* First try and match exact strings, this is to guarantee
4422 that some formats will work even for cross assembly. */
4424 for (i
= 0; fp_const
[i
]; i
++)
4426 if (strncmp (*str
, fp_const
[i
], strlen (fp_const
[i
])) == 0)
4430 *str
+= strlen (fp_const
[i
]);
4431 if (is_end_of_line
[(unsigned char) **str
])
4437 /* Just because we didn't get a match doesn't mean that the constant
4438 isn't valid, just that it is in a format that we don't
4439 automatically recognize. Try parsing it with the standard
4440 expression routines. */
4442 memset (words
, 0, MAX_LITTLENUMS
* sizeof (LITTLENUM_TYPE
));
4444 /* Look for a raw floating point number. */
4445 if ((save_in
= atof_ieee (*str
, 'x', words
)) != NULL
4446 && is_end_of_line
[(unsigned char) *save_in
])
4448 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
4450 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
4452 if (words
[j
] != fp_values
[i
][j
])
4456 if (j
== MAX_LITTLENUMS
)
4464 /* Try and parse a more complex expression, this will probably fail
4465 unless the code uses a floating point prefix (eg "0f"). */
4466 save_in
= input_line_pointer
;
4467 input_line_pointer
= *str
;
4468 if (expression (&exp
) == absolute_section
4469 && exp
.X_op
== O_big
4470 && exp
.X_add_number
< 0)
4472 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4474 if (gen_to_words (words
, 5, (long) 15) == 0)
4476 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
4478 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
4480 if (words
[j
] != fp_values
[i
][j
])
4484 if (j
== MAX_LITTLENUMS
)
4486 *str
= input_line_pointer
;
4487 input_line_pointer
= save_in
;
4494 *str
= input_line_pointer
;
4495 input_line_pointer
= save_in
;
4496 inst
.error
= _("invalid FPA immediate expression");
/* Returns 1 if a number has "quarter-precision" float format
   0baBbbbbbc defgh000 00000000 00000000.  */

static int
is_quarter_float (unsigned imm)
{
  int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
  return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
}
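/* Added note (illustrative, not from the original source): 1.0f is
   0x3f800000; bit 29 is set, so bs is 0x3e000000, (imm & 0x7e000000)
   equals bs and the low 19 bits are zero, hence the check succeeds.
   2.5f (0x40200000) also qualifies, while 1.1f (0x3f8ccccd) does not
   because its low mantissa bits are nonzero.  */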
4510 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4511 0baBbbbbbc defgh000 00000000 00000000.
4512 The zero and minus-zero cases need special handling, since they can't be
4513 encoded in the "quarter-precision" float format, but can nonetheless be
4514 loaded as integer constants. */
4517 parse_qfloat_immediate (char **ccp
, int *immed
)
4521 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
4522 int found_fpchar
= 0;
4524 skip_past_char (&str
, '#');
4526 /* We must not accidentally parse an integer as a floating-point number. Make
4527 sure that the value we parse is not an integer by checking for special
4528 characters '.' or 'e'.
4529 FIXME: This is a horrible hack, but doing better is tricky because type
4530 information isn't in a very usable state at parse time. */
4532 skip_whitespace (fpnum
);
4534 if (strncmp (fpnum
, "0x", 2) == 0)
4538 for (; *fpnum
!= '\0' && *fpnum
!= ' ' && *fpnum
!= '\n'; fpnum
++)
4539 if (*fpnum
== '.' || *fpnum
== 'e' || *fpnum
== 'E')
4549 if ((str
= atof_ieee (str
, 's', words
)) != NULL
)
4551 unsigned fpword
= 0;
4554 /* Our FP word must be 32 bits (single-precision FP). */
4555 for (i
= 0; i
< 32 / LITTLENUM_NUMBER_OF_BITS
; i
++)
4557 fpword
<<= LITTLENUM_NUMBER_OF_BITS
;
4561 if (is_quarter_float (fpword
) || (fpword
& 0x7fffffff) == 0)
4574 /* Shift operands. */
4577 SHIFT_LSL
, SHIFT_LSR
, SHIFT_ASR
, SHIFT_ROR
, SHIFT_RRX
4580 struct asm_shift_name
4583 enum shift_kind kind
;
4586 /* Third argument to parse_shift. */
4587 enum parse_shift_mode
4589 NO_SHIFT_RESTRICT
, /* Any kind of shift is accepted. */
4590 SHIFT_IMMEDIATE
, /* Shift operand must be an immediate. */
4591 SHIFT_LSL_OR_ASR_IMMEDIATE
, /* Shift must be LSL or ASR immediate. */
4592 SHIFT_ASR_IMMEDIATE
, /* Shift must be ASR immediate. */
4593 SHIFT_LSL_IMMEDIATE
, /* Shift must be LSL immediate. */
4596 /* Parse a <shift> specifier on an ARM data processing instruction.
4597 This has three forms:
4599 (LSL|LSR|ASL|ASR|ROR) Rs
4600 (LSL|LSR|ASL|ASR|ROR) #imm
4603 Note that ASL is assimilated to LSL in the instruction encoding, and
4604 RRX to ROR #0 (which cannot be written as such). */
4607 parse_shift (char **str
, int i
, enum parse_shift_mode mode
)
4609 const struct asm_shift_name
*shift_name
;
4610 enum shift_kind shift
;
4615 for (p
= *str
; ISALPHA (*p
); p
++)
4620 inst
.error
= _("shift expression expected");
4624 shift_name
= hash_find_n (arm_shift_hsh
, *str
, p
- *str
);
4626 if (shift_name
== NULL
)
4628 inst
.error
= _("shift expression expected");
4632 shift
= shift_name
->kind
;
4636 case NO_SHIFT_RESTRICT
:
4637 case SHIFT_IMMEDIATE
: break;
4639 case SHIFT_LSL_OR_ASR_IMMEDIATE
:
4640 if (shift
!= SHIFT_LSL
&& shift
!= SHIFT_ASR
)
4642 inst
.error
= _("'LSL' or 'ASR' required");
4647 case SHIFT_LSL_IMMEDIATE
:
4648 if (shift
!= SHIFT_LSL
)
4650 inst
.error
= _("'LSL' required");
4655 case SHIFT_ASR_IMMEDIATE
:
4656 if (shift
!= SHIFT_ASR
)
4658 inst
.error
= _("'ASR' required");
4666 if (shift
!= SHIFT_RRX
)
4668 /* Whitespace can appear here if the next thing is a bare digit. */
4669 skip_whitespace (p
);
4671 if (mode
== NO_SHIFT_RESTRICT
4672 && (reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
4674 inst
.operands
[i
].imm
= reg
;
4675 inst
.operands
[i
].immisreg
= 1;
4677 else if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
4680 inst
.operands
[i
].shift_kind
= shift
;
4681 inst
.operands
[i
].shifted
= 1;
4686 /* Parse a <shifter_operand> for an ARM data processing instruction:
4689 #<immediate>, <rotate>
4693 where <shift> is defined by parse_shift above, and <rotate> is a
4694 multiple of 2 between 0 and 30. Validation of immediate operands
4695 is deferred to md_apply_fix. */
4698 parse_shifter_operand (char **str
, int i
)
4703 if ((value
= arm_reg_parse (str
, REG_TYPE_RN
)) != FAIL
)
4705 inst
.operands
[i
].reg
= value
;
4706 inst
.operands
[i
].isreg
= 1;
4708 /* parse_shift will override this if appropriate */
4709 inst
.reloc
.exp
.X_op
= O_constant
;
4710 inst
.reloc
.exp
.X_add_number
= 0;
4712 if (skip_past_comma (str
) == FAIL
)
4715 /* Shift operation on register. */
4716 return parse_shift (str
, i
, NO_SHIFT_RESTRICT
);
4719 if (my_get_expression (&inst
.reloc
.exp
, str
, GE_IMM_PREFIX
))
4722 if (skip_past_comma (str
) == SUCCESS
)
4724 /* #x, y -- ie explicit rotation by Y. */
4725 if (my_get_expression (&expr
, str
, GE_NO_PREFIX
))
4728 if (expr
.X_op
!= O_constant
|| inst
.reloc
.exp
.X_op
!= O_constant
)
4730 inst
.error
= _("constant expression expected");
4734 value
= expr
.X_add_number
;
4735 if (value
< 0 || value
> 30 || value
% 2 != 0)
4737 inst
.error
= _("invalid rotation");
4740 if (inst
.reloc
.exp
.X_add_number
< 0 || inst
.reloc
.exp
.X_add_number
> 255)
4742 inst
.error
= _("invalid constant");
4746 /* Convert to decoded value. md_apply_fix will put it back. */
4747 inst
.reloc
.exp
.X_add_number
4748 = (((inst
.reloc
.exp
.X_add_number
<< (32 - value
))
4749 | (inst
.reloc
.exp
.X_add_number
>> value
)) & 0xffffffff);
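          /* Added note (illustrative, not from the original source): the
             immediate/rotation pair is folded into the constant it
             denotes, e.g. "#0xff, 8" becomes
             (0xff << 24) | (0xff >> 8) = 0xff000000, and md_apply_fix
             later re-derives an 8-bit value and rotation when encoding
             the instruction.  */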
4752 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
4753 inst
.reloc
.pc_rel
= 0;
4757 /* Group relocation information. Each entry in the table contains the
4758 textual name of the relocation as may appear in assembler source
4759 and must end with a colon.
4760 Along with this textual name are the relocation codes to be used if
4761 the corresponding instruction is an ALU instruction (ADD or SUB only),
4762 an LDR, an LDRS, or an LDC. */
4764 struct group_reloc_table_entry
4775 /* Varieties of non-ALU group relocation. */
4782 static struct group_reloc_table_entry group_reloc_table
[] =
4783 { /* Program counter relative: */
4785 BFD_RELOC_ARM_ALU_PC_G0_NC
, /* ALU */
4790 BFD_RELOC_ARM_ALU_PC_G0
, /* ALU */
4791 BFD_RELOC_ARM_LDR_PC_G0
, /* LDR */
4792 BFD_RELOC_ARM_LDRS_PC_G0
, /* LDRS */
4793 BFD_RELOC_ARM_LDC_PC_G0
}, /* LDC */
4795 BFD_RELOC_ARM_ALU_PC_G1_NC
, /* ALU */
4800 BFD_RELOC_ARM_ALU_PC_G1
, /* ALU */
4801 BFD_RELOC_ARM_LDR_PC_G1
, /* LDR */
4802 BFD_RELOC_ARM_LDRS_PC_G1
, /* LDRS */
4803 BFD_RELOC_ARM_LDC_PC_G1
}, /* LDC */
4805 BFD_RELOC_ARM_ALU_PC_G2
, /* ALU */
4806 BFD_RELOC_ARM_LDR_PC_G2
, /* LDR */
4807 BFD_RELOC_ARM_LDRS_PC_G2
, /* LDRS */
4808 BFD_RELOC_ARM_LDC_PC_G2
}, /* LDC */
4809 /* Section base relative */
4811 BFD_RELOC_ARM_ALU_SB_G0_NC
, /* ALU */
4816 BFD_RELOC_ARM_ALU_SB_G0
, /* ALU */
4817 BFD_RELOC_ARM_LDR_SB_G0
, /* LDR */
4818 BFD_RELOC_ARM_LDRS_SB_G0
, /* LDRS */
4819 BFD_RELOC_ARM_LDC_SB_G0
}, /* LDC */
4821 BFD_RELOC_ARM_ALU_SB_G1_NC
, /* ALU */
4826 BFD_RELOC_ARM_ALU_SB_G1
, /* ALU */
4827 BFD_RELOC_ARM_LDR_SB_G1
, /* LDR */
4828 BFD_RELOC_ARM_LDRS_SB_G1
, /* LDRS */
4829 BFD_RELOC_ARM_LDC_SB_G1
}, /* LDC */
4831 BFD_RELOC_ARM_ALU_SB_G2
, /* ALU */
4832 BFD_RELOC_ARM_LDR_SB_G2
, /* LDR */
4833 BFD_RELOC_ARM_LDRS_SB_G2
, /* LDRS */
4834 BFD_RELOC_ARM_LDC_SB_G2
} }; /* LDC */
/* Given the address of a pointer pointing to the textual name of a group
   relocation as may appear in assembler source, attempt to find its details
   in group_reloc_table.  The pointer will be updated to the character after
   the trailing colon.  On failure, FAIL will be returned; SUCCESS
   otherwise.  On success, *entry will be updated to point at the relevant
   group_reloc_table entry.  */

static int
find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
{
  unsigned int i;

  for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
    {
      int length = strlen (group_reloc_table[i].name);

      if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
          && (*str)[length] == ':')
        {
          *out = &group_reloc_table[i];
          *str += (length + 1);
          return SUCCESS;
        }
    }

  return FAIL;
}
/* Parse a <shifter_operand> for an ARM data processing instruction
   (as for parse_shifter_operand) where group relocations are allowed:

      #<immediate>
      #<immediate>, <rotate>
      #:<group_reloc>:<expression>
      <Rm>
      <Rm>, <shift>

   where <group_reloc> is one of the strings defined in group_reloc_table.
   The hashes are optional.

   Everything else is as for parse_shifter_operand.  */

static parse_operand_result
parse_shifter_operand_group_reloc (char **str, int i)
{
  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a group relocation.
     If we don't, punt the whole lot to parse_shifter_operand.  */

  if (((*str)[0] == '#' && (*str)[1] == ':')
      || (*str)[0] == ':')
    {
      struct group_reloc_table_entry *entry;

      if ((*str)[0] == '#')
        (*str) += 2;
      else
        (*str)++;

      /* Try to parse a group relocation.  Anything else is an error.  */
      if (find_group_reloc_table_entry (str, &entry) == FAIL)
        {
          inst.error = _("unknown group relocation");
          return PARSE_OPERAND_FAIL_NO_BACKTRACK;
        }

      /* We now have the group relocation table entry corresponding to
         the name in the assembler source.  Next, we parse the expression.  */
      if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
        return PARSE_OPERAND_FAIL_NO_BACKTRACK;

      /* Record the relocation type (always the ALU variant here).  */
      inst.reloc.type = entry->alu_code;
      gas_assert (inst.reloc.type != 0);

      return PARSE_OPERAND_SUCCESS;
    }
  else
    return parse_shifter_operand (str, i) == SUCCESS
           ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;

  /* Never reached.  */
  return PARSE_OPERAND_SUCCESS;
}
4919 /* Parse all forms of an ARM address expression. Information is written
4920 to inst.operands[i] and/or inst.reloc.
4922 Preindexed addressing (.preind=1):
4924 [Rn, #offset] .reg=Rn .reloc.exp=offset
4925 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4926 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4927 .shift_kind=shift .reloc.exp=shift_imm
4929 These three may have a trailing ! which causes .writeback to be set also.
4931 Postindexed addressing (.postind=1, .writeback=1):
4933 [Rn], #offset .reg=Rn .reloc.exp=offset
4934 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4935 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4936 .shift_kind=shift .reloc.exp=shift_imm
4938 Unindexed addressing (.preind=0, .postind=0):
4940 [Rn], {option} .reg=Rn .imm=option .immisreg=0
4944 [Rn]{!} shorthand for [Rn,#0]{!}
4945 =immediate .isreg=0 .reloc.exp=immediate
4946 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
4948 It is the caller's responsibility to check for addressing modes not
4949 supported by the instruction, and to set inst.reloc.type. */
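/* Added note (illustrative, not from the original source): for
   "[r0, #4]!" the parser below sets .reg=0, .preind=1 and .writeback=1
   and leaves the offset 4 in inst.reloc.exp; for "[r0], r1" it sets
   .reg=0, .imm=1, .immisreg=1, .postind=1 and .writeback=1, as described
   in the table above.  */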
4951 static parse_operand_result
4952 parse_address_main (char **str
, int i
, int group_relocations
,
4953 group_reloc_type group_type
)
4958 if (skip_past_char (&p
, '[') == FAIL
)
4960 if (skip_past_char (&p
, '=') == FAIL
)
4962 /* bare address - translate to PC-relative offset */
4963 inst
.reloc
.pc_rel
= 1;
4964 inst
.operands
[i
].reg
= REG_PC
;
4965 inst
.operands
[i
].isreg
= 1;
4966 inst
.operands
[i
].preind
= 1;
4968 /* else a load-constant pseudo op, no special treatment needed here */
4970 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
4971 return PARSE_OPERAND_FAIL
;
4974 return PARSE_OPERAND_SUCCESS
;
4977 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
4979 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
4980 return PARSE_OPERAND_FAIL
;
4982 inst
.operands
[i
].reg
= reg
;
4983 inst
.operands
[i
].isreg
= 1;
4985 if (skip_past_comma (&p
) == SUCCESS
)
4987 inst
.operands
[i
].preind
= 1;
4990 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
4992 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
4994 inst
.operands
[i
].imm
= reg
;
4995 inst
.operands
[i
].immisreg
= 1;
4997 if (skip_past_comma (&p
) == SUCCESS
)
4998 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
4999 return PARSE_OPERAND_FAIL
;
5001 else if (skip_past_char (&p
, ':') == SUCCESS
)
5003 /* FIXME: '@' should be used here, but it's filtered out by generic
5004 code before we get to see it here. This may be subject to
5007 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
5008 if (exp
.X_op
!= O_constant
)
5010 inst
.error
= _("alignment must be constant");
5011 return PARSE_OPERAND_FAIL
;
5013 inst
.operands
[i
].imm
= exp
.X_add_number
<< 8;
5014 inst
.operands
[i
].immisalign
= 1;
5015 /* Alignments are not pre-indexes. */
5016 inst
.operands
[i
].preind
= 0;
5020 if (inst
.operands
[i
].negative
)
5022 inst
.operands
[i
].negative
= 0;
5026 if (group_relocations
5027 && ((*p
== '#' && *(p
+ 1) == ':') || *p
== ':'))
5029 struct group_reloc_table_entry
*entry
;
5031 /* Skip over the #: or : sequence. */
5037 /* Try to parse a group relocation. Anything else is an
5039 if (find_group_reloc_table_entry (&p
, &entry
) == FAIL
)
5041 inst
.error
= _("unknown group relocation");
5042 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5045 /* We now have the group relocation table entry corresponding to
5046 the name in the assembler source. Next, we parse the
5048 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
5049 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5051 /* Record the relocation type. */
5055 inst
.reloc
.type
= entry
->ldr_code
;
5059 inst
.reloc
.type
= entry
->ldrs_code
;
5063 inst
.reloc
.type
= entry
->ldc_code
;
5070 if (inst
.reloc
.type
== 0)
5072 inst
.error
= _("this group relocation is not allowed on this instruction");
5073 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5077 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
5078 return PARSE_OPERAND_FAIL
;
5082 if (skip_past_char (&p
, ']') == FAIL
)
5084 inst
.error
= _("']' expected");
5085 return PARSE_OPERAND_FAIL
;
5088 if (skip_past_char (&p
, '!') == SUCCESS
)
5089 inst
.operands
[i
].writeback
= 1;
5091 else if (skip_past_comma (&p
) == SUCCESS
)
5093 if (skip_past_char (&p
, '{') == SUCCESS
)
5095 /* [Rn], {expr} - unindexed, with option */
5096 if (parse_immediate (&p
, &inst
.operands
[i
].imm
,
5097 0, 255, TRUE
) == FAIL
)
5098 return PARSE_OPERAND_FAIL
;
5100 if (skip_past_char (&p
, '}') == FAIL
)
5102 inst
.error
= _("'}' expected at end of 'option' field");
5103 return PARSE_OPERAND_FAIL
;
5105 if (inst
.operands
[i
].preind
)
5107 inst
.error
= _("cannot combine index with option");
5108 return PARSE_OPERAND_FAIL
;
5111 return PARSE_OPERAND_SUCCESS
;
5115 inst
.operands
[i
].postind
= 1;
5116 inst
.operands
[i
].writeback
= 1;
5118 if (inst
.operands
[i
].preind
)
5120 inst
.error
= _("cannot combine pre- and post-indexing");
5121 return PARSE_OPERAND_FAIL
;
5125 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
5127 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5129 /* We might be using the immediate for alignment already. If we
5130 are, OR the register number into the low-order bits. */
5131 if (inst
.operands
[i
].immisalign
)
5132 inst
.operands
[i
].imm
|= reg
;
5134 inst
.operands
[i
].imm
= reg
;
5135 inst
.operands
[i
].immisreg
= 1;
5137 if (skip_past_comma (&p
) == SUCCESS
)
5138 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
5139 return PARSE_OPERAND_FAIL
;
5143 if (inst
.operands
[i
].negative
)
5145 inst
.operands
[i
].negative
= 0;
5148 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
5149 return PARSE_OPERAND_FAIL
;
5154 /* If at this point neither .preind nor .postind is set, we have a
5155 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
5156 if (inst
.operands
[i
].preind
== 0 && inst
.operands
[i
].postind
== 0)
5158 inst
.operands
[i
].preind
= 1;
5159 inst
.reloc
.exp
.X_op
= O_constant
;
5160 inst
.reloc
.exp
.X_add_number
= 0;
5163 return PARSE_OPERAND_SUCCESS
;
5167 parse_address (char **str
, int i
)
5169 return parse_address_main (str
, i
, 0, 0) == PARSE_OPERAND_SUCCESS
5173 static parse_operand_result
5174 parse_address_group_reloc (char **str
, int i
, group_reloc_type type
)
5176 return parse_address_main (str
, i
, 1, type
);
5179 /* Parse an operand for a MOVW or MOVT instruction. */
5181 parse_half (char **str
)
5186 skip_past_char (&p
, '#');
5187 if (strncasecmp (p
, ":lower16:", 9) == 0)
5188 inst
.reloc
.type
= BFD_RELOC_ARM_MOVW
;
5189 else if (strncasecmp (p
, ":upper16:", 9) == 0)
5190 inst
.reloc
.type
= BFD_RELOC_ARM_MOVT
;
5192 if (inst
.reloc
.type
!= BFD_RELOC_UNUSED
)
5195 skip_whitespace (p
);
5198 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
5201 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
5203 if (inst
.reloc
.exp
.X_op
!= O_constant
)
5205 inst
.error
= _("constant expression expected");
5208 if (inst
.reloc
.exp
.X_add_number
< 0
5209 || inst
.reloc
.exp
.X_add_number
> 0xffff)
5211 inst
.error
= _("immediate value out of range");
5219 /* Miscellaneous. */
5221 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
5222 or a bitmask suitable to be or-ed into the ARM msr instruction. */
5224 parse_psr (char **str
)
5227 unsigned long psr_field
;
5228 const struct asm_psr
*psr
;
5231 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
5232 feature for ease of use and backwards compatibility. */
5234 if (strncasecmp (p
, "SPSR", 4) == 0)
5235 psr_field
= SPSR_BIT
;
5236 else if (strncasecmp (p
, "CPSR", 4) == 0)
5243 while (ISALNUM (*p
) || *p
== '_');
5245 psr
= hash_find_n (arm_v7m_psr_hsh
, start
, p
- start
);
5256 /* A suffix follows. */
5262 while (ISALNUM (*p
) || *p
== '_');
5264 psr
= hash_find_n (arm_psr_hsh
, start
, p
- start
);
5268 psr_field
|= psr
->field
;
5273 goto error
; /* Garbage after "[CS]PSR". */
5275 psr_field
|= (PSR_c
| PSR_f
);
5281 inst
.error
= _("flag for {c}psr instruction expected");
/* Parse the flags argument to CPSI[ED].  Returns FAIL on error, or a
   value suitable for splatting into the AIF field of the instruction.  */
parse_cps_flags (char **str)
      case '\0': case ',':
      case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
      case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
      case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
	inst.error = _("unrecognized CPS flag");

  if (saw_a_flag == 0)
      inst.error = _("missing CPS flags");
/* Parse an endian specifier ("BE" or "LE", case insensitive);
   returns 0 for big-endian, 1 for little-endian, FAIL for an error.  */
parse_endian_specifier (char **str)
  if (strncasecmp (s, "BE", 2))
  else if (strncasecmp (s, "LE", 2))
      inst.error = _("valid endian specifiers are be or le");

  if (ISALNUM (s[2]) || s[2] == '_')
      inst.error = _("valid endian specifiers are be or le");

  return little_endian;
/* Parse a rotation specifier: ROR #0, #8, #16, #24.  *val receives a
   value suitable for poking into the rotate field of an sxt or sxta
   instruction, or FAIL on error.  */
parse_ror (char **str)
  if (strncasecmp (s, "ROR", 3) == 0)
      inst.error = _("missing rotation field after comma");

  if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)

    case 0:  *str = s; return 0x0;
    case 8:  *str = s; return 0x1;
    case 16: *str = s; return 0x2;
    case 24: *str = s; return 0x3;

      inst.error = _("rotation can only be 0, 8, 16, or 24");
/* Parse a conditional code (from conds[] below).  The value returned is in the
   range 0 .. 14, or FAIL.  */
parse_cond (char **str)
  const struct asm_cond *c;

  /* Condition codes are always 2 characters, so matching up to
     3 characters is sufficient.  */
  while (ISALPHA (*q) && n < 3)
    cond[n] = TOLOWER (*q);

  c = hash_find_n (arm_cond_hsh, cond, n);
    inst.error = _("condition required");

/* Parse an option for a barrier instruction.  Returns the encoding for the
   option, or FAIL.  */
parse_barrier (char **str)
  const struct asm_barrier_opt *o;

  while (ISALPHA (*q))
  o = hash_find_n (arm_barrier_opt_hsh, p, q - p);
/* Parse the operands of a table branch instruction.  Similar to a memory
   operand.  */
parse_tb (char **str)
  if (skip_past_char (&p, '[') == FAIL)
      inst.error = _("'[' expected");

  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
  inst.operands[0].reg = reg;

  if (skip_past_comma (&p) == FAIL)
      inst.error = _("',' expected");

  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
  inst.operands[0].imm = reg;

  if (skip_past_comma (&p) == SUCCESS)
      if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
      if (inst.reloc.exp.X_add_number != 1)
	  inst.error = _("invalid shift");
      inst.operands[0].shifted = 1;

  if (skip_past_char (&p, ']') == FAIL)
      inst.error = _("']' expected");
/* Parse the operands of a Neon VMOV instruction.  See do_neon_mov for more
   information on the types the operands can take and how they are encoded.
   Up to four operands may be read; this function handles setting the
   ".present" field for each read operand itself.
   Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
   else returns FAIL.  */
parse_neon_mov (char **str, int *which_operand)
  int i = *which_operand, val;
  enum arm_reg_type rtype;
  struct neon_type_el optype;

  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
      inst.operands[i].reg = val;
      inst.operands[i].isscalar = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
      /* Cases 0, 1, 2, 3, 5 (D only).  */
      if (skip_past_comma (&ptr) == FAIL)
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
      inst.operands[i].isvec = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
	     Case 13: VMOV <Sd>, <Rm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_NQ)
	      first_error (_("can't use Neon quad register here"));
	  else if (rtype != REG_TYPE_VFS)
	      if (skip_past_comma (&ptr) == FAIL)
	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
	     Case 1: VMOV<c><q> <Dd>, <Dm>
	     Case 8: VMOV.F32 <Sd>, <Sm>
	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].isvec = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (skip_past_comma (&ptr) == SUCCESS)
	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;

	      if (skip_past_comma (&ptr) == FAIL)
	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
	     Case 10: VMOV.F32 <Sd>, #<imm>
	     Case 11: VMOV.F64 <Dd>, #<imm>  */
	  inst.operands[i].immisfloat = 1;
      else if (parse_big_immediate (&ptr, i) == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 1;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
	      first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_VFS)
	      if (skip_past_comma (&ptr) == FAIL)
	      if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
		  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].isvec = 1;
	      inst.operands[i].issingle = 1;
	      inst.operands[i].vectype = optype;
	      inst.operands[i].present = 1;
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i++].present = 1;

    first_error (_("parse error"));

  /* Successfully parsed the operands.  Update args.  */
  first_error (_("expected comma"));
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
/* Matcher codes for parse_operands.  */
enum operand_parse_code
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,	/* Neon double precision register (0..31) */
  OP_RNQ,	/* Neon quad precision register */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RNDQ,	/* Neon double or quad precision register */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,	/* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_REGLST,	/* ARM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,	/* VFP single or double-precision register list (& quad) */
  OP_NRDLST,	/* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,	/* Neon element/structure list */

  OP_NILO,	/* Neon immediate/logic operands 2 or 2+3. (VBIC, VORR...) */
  OP_RNDQ_I0,	/* Neon D or Q reg, or immediate zero. */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero. */
  OP_RR_RNSC,	/* ARM reg or Neon scalar. */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar. */
  OP_RNDQ_RNSC,	/* Neon D or Q reg, or Neon scalar. */
  OP_RND_RNSC,	/* Neon D reg, or Neon scalar. */
  OP_VMOV,	/* Neon VMOV operands. */
  OP_RNDQ_IMVNb, /* Neon D or Q reg, or immediate good for VMVN. */
  OP_RNDQ_I63b,	/* Neon D or Q reg, or immediate for shift. */
  OP_RIWR_I32z,	/* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2. */

  OP_I0,	/* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		 0 .. 15 */
  OP_I16,	/*		 1 .. 16 */
  OP_I16z,	/*		 0 .. 16 */
  OP_I31,	/*		 0 .. 31 */
  OP_I31w,	/*		 0 .. 31, optional trailing ! */
  OP_I32,	/*		 1 .. 32 */
  OP_I32z,	/*		 0 .. 32 */
  OP_I63,	/*		 0 .. 63 */
  OP_I63s,	/*	       -64 .. 63 */
  OP_I64,	/*		 1 .. 64 */
  OP_I64z,	/*		 0 .. 64 */
  OP_I255,	/*		 0 .. 255 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,	/* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc. */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_PSR,	/* CPSR/SPSR mask for msr */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch. */

  OP_RVC_PSR,	/* CPSR/SPSR mask for msr, or VFP control register. */
  OP_APSR_RR,	/* ARM register or "APSR_nzcv". */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc suff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC, /* iWMMXt R or C reg */
  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */

  /* Optional operands.  */
  OP_oI7b,	/* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	/*				0 .. 31 */
  OP_oI32b,	/*				1 .. 32 */
  OP_oIffffb,	/*				0 .. 65535 */
  OP_oI255c,	/*	  curly-brace enclosed, 0 .. 255 */

  OP_oRR,	/* ARM register */
  OP_oRRnpc,	/* ARM register, not the PC */
  OP_oRRw,	/* ARM register, not r15, optional trailing ! */
  OP_oRND,	/* Optional Neon double precision register */
  OP_oRNQ,	/* Optional Neon quad precision register */
  OP_oRNDQ,	/* Optional Neon double or quad precision register */
  OP_oRNSDQ,	/* Optional single, double or quad precision vector register */
  OP_oSHll,	/* LSL immediate */
  OP_oSHar,	/* ASR immediate */
  OP_oSHllar,	/* LSL or ASR immediate */
  OP_oROR,	/* ROR 0/8/16/24 */
  OP_oBARRIER,	/* Option argument for a barrier instruction. */

  OP_FIRST_OPTIONAL = OP_oI7b
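
/* Note (added commentary, not from the original sources): every OP_o*
   code above is deliberately placed at or after OP_FIRST_OPTIONAL, so
   parse_operands below can recognise an optional operand purely by
   comparing the matcher code against OP_FIRST_OPTIONAL and record a
   backtrack point before attempting to parse it.  */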
/* Generic instruction operand parser.  This does no encoding and no
   semantic validation; it merely squirrels values away in the inst
   structure.  Returns SUCCESS or FAIL depending on whether the
   specified grammar matched.  */
parse_operands (char *str, const unsigned char *pattern)
  unsigned const char *upat = pattern;
  char *backtrack_pos = 0;
  const char *backtrack_error = 0;
  int i, val, backtrack_index = 0;
  enum arm_reg_type rtype;
  parse_operand_result result;

#define po_char_or_fail(chr)					\
      if (skip_past_char (&str, chr) == FAIL)			\

#define po_reg_or_fail(regtype)					\
      val = arm_typed_reg_parse (& str, regtype, & rtype,	\
				 & inst.operands[i].vectype);	\
	  first_error (_(reg_expected_msgs[regtype]));		\
      inst.operands[i].reg = val;				\
      inst.operands[i].isreg = 1;				\
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
      inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
			     || rtype == REG_TYPE_VFD		\
			     || rtype == REG_TYPE_NQ);		\

#define po_reg_or_goto(regtype, label)				\
      val = arm_typed_reg_parse (& str, regtype, & rtype,	\
				 & inst.operands[i].vectype);	\
      inst.operands[i].reg = val;				\
      inst.operands[i].isreg = 1;				\
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
      inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
			     || rtype == REG_TYPE_VFD		\
			     || rtype == REG_TYPE_NQ);		\

#define po_imm_or_fail(min, max, popt)				\
      if (parse_immediate (&str, &val, min, max, popt) == FAIL)	\
      inst.operands[i].imm = val;				\

#define po_scalar_or_goto(elsz, label)				\
      val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
      inst.operands[i].reg = val;				\
      inst.operands[i].isscalar = 1;				\

#define po_misc_or_fail(expr)					\

#define po_misc_or_fail_no_backtrack(expr)			\
      if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)		\
	backtrack_pos = 0;					\
      if (result != PARSE_OPERAND_SUCCESS)			\
  skip_whitespace (str);

  for (i = 0; upat[i] != OP_stop; i++)
      if (upat[i] >= OP_FIRST_OPTIONAL)
	  /* Remember where we are in case we need to backtrack.  */
	  gas_assert (!backtrack_pos);
	  backtrack_pos = str;
	  backtrack_error = inst.error;
	  backtrack_index = i;

      if (i > 0 && (i > 1 || inst.operands[0].present))
	po_char_or_fail (',');

	case OP_RR:   po_reg_or_fail (REG_TYPE_RN);	  break;
	case OP_RCP:  po_reg_or_fail (REG_TYPE_CP);	  break;
	case OP_RCN:  po_reg_or_fail (REG_TYPE_CN);	  break;
	case OP_RF:   po_reg_or_fail (REG_TYPE_FN);	  break;
	case OP_RVS:  po_reg_or_fail (REG_TYPE_VFS);	  break;
	case OP_RVD:  po_reg_or_fail (REG_TYPE_VFD);	  break;
	case OP_RND:  po_reg_or_fail (REG_TYPE_VFD);	  break;
	  po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
	  /* Also accept generic coprocessor regs for unknown registers.  */
	  po_reg_or_fail (REG_TYPE_CN);
	case OP_RMF:  po_reg_or_fail (REG_TYPE_MVF);	  break;
	case OP_RMD:  po_reg_or_fail (REG_TYPE_MVD);	  break;
	case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX);	  break;
	case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX);	  break;
	case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX);	  break;
	case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC);	  break;
	case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR);	  break;
	case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC);	  break;
	case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG);	  break;
	case OP_RXA:  po_reg_or_fail (REG_TYPE_XSCALE);	  break;
	case OP_RNQ:  po_reg_or_fail (REG_TYPE_NQ);	  break;
	case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ);	  break;
	case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD);	  break;
	case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ);	  break;

	/* Neon scalar.  Using an element size of 8 means that some invalid
	   scalars are accepted here, so deal with those in later code.  */
	case OP_RNSC: po_scalar_or_goto (8, failure);	  break;

	/* WARNING: We can expand to two operands here.  This has the potential
	   to totally confuse the backtracking mechanism!  It will be OK at
	   least as long as we don't try to use optional args as well,
	   however.  */
	  po_reg_or_goto (REG_TYPE_NDQ, try_imm);
	  inst.operands[i].present = 1;
	  skip_past_comma (&str);
	  po_reg_or_goto (REG_TYPE_NDQ, one_reg_only);
	  /* Optional register operand was omitted.  Unfortunately, it's in
	     operands[i-1] and we need it to be in inst.operands[i].  Fix that
	     here (this is a bit grotty).  */
	  inst.operands[i] = inst.operands[i-1];
	  inst.operands[i-1].present = 0;
	  /* There's a possibility of getting a 64-bit immediate here, so
	     we need special handling.  */
	  if (parse_big_immediate (&str, i) == FAIL)
	      inst.error = _("immediate value is out of range");

	  po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
	  po_imm_or_fail (0, 0, TRUE);

	  po_reg_or_goto (REG_TYPE_VFSD, try_imm0);

	  po_scalar_or_goto (8, try_rr);
	  po_reg_or_fail (REG_TYPE_RN);

	  po_scalar_or_goto (8, try_nsdq);
	  po_reg_or_fail (REG_TYPE_NSDQ);

	  po_scalar_or_goto (8, try_ndq);
	  po_reg_or_fail (REG_TYPE_NDQ);

	  po_scalar_or_goto (8, try_vfd);
	  po_reg_or_fail (REG_TYPE_VFD);

	  /* WARNING: parse_neon_mov can move the operand counter, i.  If we're
	     not careful then bad things might happen.  */
	  po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);

	  po_reg_or_goto (REG_TYPE_NDQ, try_mvnimm);
	  /* There's a possibility of getting a 64-bit immediate here, so
	     we need special handling.  */
	  if (parse_big_immediate (&str, i) == FAIL)
	      inst.error = _("immediate value is out of range");

	  po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
	  po_imm_or_fail (0, 63, TRUE);

	  po_char_or_fail ('[');
	  po_reg_or_fail (REG_TYPE_RN);
	  po_char_or_fail (']');

	  po_reg_or_fail (REG_TYPE_RN);
	  if (skip_past_char (&str, '!') == SUCCESS)
	    inst.operands[i].writeback = 1;

	case OP_I7:	 po_imm_or_fail (  0,	   7, FALSE);	break;
	case OP_I15:	 po_imm_or_fail (  0,	  15, FALSE);	break;
	case OP_I16:	 po_imm_or_fail (  1,	  16, FALSE);	break;
	case OP_I16z:	 po_imm_or_fail (  0,	  16, FALSE);	break;
	case OP_I31:	 po_imm_or_fail (  0,	  31, FALSE);	break;
	case OP_I32:	 po_imm_or_fail (  1,	  32, FALSE);	break;
	case OP_I32z:	 po_imm_or_fail (  0,	  32, FALSE);	break;
	case OP_I63s:	 po_imm_or_fail (-64,	  63, FALSE);	break;
	case OP_I63:	 po_imm_or_fail (  0,	  63, FALSE);	break;
	case OP_I64:	 po_imm_or_fail (  1,	  64, FALSE);	break;
	case OP_I64z:	 po_imm_or_fail (  0,	  64, FALSE);	break;
	case OP_I255:	 po_imm_or_fail (  0,	 255, FALSE);	break;

	case OP_I4b:	 po_imm_or_fail (  1,	   4, TRUE);	break;
	case OP_I7b:	 po_imm_or_fail (  0,	   7, TRUE);	break;
	case OP_I15b:	 po_imm_or_fail (  0,	  15, TRUE);	break;
	case OP_I31b:	 po_imm_or_fail (  0,	  31, TRUE);	break;
	case OP_oI32b:	 po_imm_or_fail (  1,	  32, TRUE);	break;
	case OP_oIffffb: po_imm_or_fail (  0, 0xffff, TRUE);	break;

	/* Immediate variants */
	  po_char_or_fail ('{');
	  po_imm_or_fail (0, 255, TRUE);
	  po_char_or_fail ('}');

	  /* The expression parser chokes on a trailing !, so we have
	     to find it first and zap it.  */
	  while (*s && *s != ',')
	  inst.operands[i].writeback = 1;
	  po_imm_or_fail (0, 31, TRUE);

	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
	  if (inst.reloc.exp.X_op == O_symbol)
	      val = parse_reloc (&str);
		  inst.error = _("unrecognized relocation suffix");
	      else if (val != BFD_RELOC_UNUSED)
		  inst.operands[i].imm = val;
		  inst.operands[i].hasreloc = 1;

	  /* Operand for MOVW or MOVT.  */
	  po_misc_or_fail (parse_half (&str));

	  /* Register or expression.  */
	case OP_RR_EXr:	  po_reg_or_goto (REG_TYPE_RN, EXPr); break;
	case OP_RR_EXi:	  po_reg_or_goto (REG_TYPE_RN, EXPi); break;

	  /* Register or immediate.  */
	case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0);   break;
	I0:		  po_imm_or_fail (0, 0, FALSE);	      break;

	case OP_RF_IF:	  po_reg_or_goto (REG_TYPE_FN, IF);   break;
	  if (!is_immediate_prefix (*str))
	  val = parse_fpa_immediate (&str);
	  /* FPA immediates are encoded as registers 8-15.
	     parse_fpa_immediate has already applied the offset.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;

	case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
	I32z:		  po_imm_or_fail (0, 32, FALSE);	  break;

	  /* Two kinds of register.  */
	    struct reg_entry *rege = arm_reg_parse_multi (&str);
		|| (rege->type != REG_TYPE_MMXWR
		    && rege->type != REG_TYPE_MMXWC
		    && rege->type != REG_TYPE_MMXWCG))
		inst.error = _("iWMMXt data or control register expected");
	    inst.operands[i].reg = rege->number;
	    inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);

	    struct reg_entry *rege = arm_reg_parse_multi (&str);
		|| (rege->type != REG_TYPE_MMXWC
		    && rege->type != REG_TYPE_MMXWCG))
		inst.error = _("iWMMXt control register expected");
	    inst.operands[i].reg = rege->number;
	    inst.operands[i].isreg = 1;

	case OP_CPSF:	 val = parse_cps_flags (&str);		break;
	case OP_ENDI:	 val = parse_endian_specifier (&str);	break;
	case OP_oROR:	 val = parse_ror (&str);		break;
	case OP_PSR:	 val = parse_psr (&str);		break;
	case OP_COND:	 val = parse_cond (&str);		break;
	case OP_oBARRIER:val = parse_barrier (&str);		break;

	  po_reg_or_goto (REG_TYPE_VFC, try_psr);
	  inst.operands[i].isvec = 1;  /* Mark VFP control reg as vector.  */
	  val = parse_psr (&str);

	  po_reg_or_goto (REG_TYPE_RN, try_apsr);
	  /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
	     instructions).  */
	  if (strncasecmp (str, "APSR_", 5) == 0)
		case 'c': found = (found & 1) ? 16 : found | 1; break;
		case 'n': found = (found & 2) ? 16 : found | 2; break;
		case 'z': found = (found & 4) ? 16 : found | 4; break;
		case 'v': found = (found & 8) ? 16 : found | 8; break;
		default: found = 16;
	      inst.operands[i].isvec = 1;

	  po_misc_or_fail (parse_tb (&str));

	  /* Register lists.  */
	  val = parse_reg_list (&str);
	      inst.operands[1].writeback = 1;

	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);

	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);

	  /* Allow Q registers too.  */
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
	  inst.operands[i].issingle = 1;

	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,

	  val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
					   &inst.operands[i].vectype);

	  /* Addressing modes */
	  po_misc_or_fail (parse_address (&str, i));
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDR));
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDRS));
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDC));
	  po_misc_or_fail (parse_shifter_operand (&str, i));
	  po_misc_or_fail_no_backtrack (
	    parse_shifter_operand_group_reloc (&str, i));
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));

	  as_fatal (_("unhandled operand code %d"), upat[i]);

      /* Various value-based sanity checks and shared operations.  We
	 do not signal immediate failures for the register constraints;
	 this allows a syntax error to take precedence.  */
	  if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
	    inst.error = BAD_PC;

	  inst.operands[i].imm = val;

      /* If we get here, this operand was successfully parsed.  */
      inst.operands[i].present = 1;

      inst.error = BAD_ARGS;

  /* The parse routine should already have set inst.error, but set a
     default here just in case.  */
    inst.error = _("syntax error");

  /* Do not backtrack over a trailing optional argument that
     absorbed some text.  We will only fail again, with the
     'garbage following instruction' error message, which is
     probably less helpful than the current one.  */
  if (backtrack_index == i && backtrack_pos != str
      && upat[i+1] == OP_stop)
	inst.error = _("syntax error");

  /* Try again, skipping the optional argument at backtrack_pos.  */
  str = backtrack_pos;
  inst.error = backtrack_error;
  inst.operands[backtrack_index].present = 0;
  i = backtrack_index;

  /* Check that we have parsed all the arguments.  */
  if (*str != '\0' && !inst.error)
    inst.error = _("garbage following instruction");

  return inst.error ? FAIL : SUCCESS;

#undef po_char_or_fail
#undef po_reg_or_fail
#undef po_reg_or_goto
#undef po_imm_or_fail
#undef po_scalar_or_fail
/* Shorthand macro for instruction encoding functions issuing errors.  */
#define constraint(expr, err)				\

/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
   instructions are unpredictable if these registers are used.  This
   is the BadReg predicate in ARM's Thumb-2 documentation.  */
#define reject_bad_reg(reg)					\
    if (reg == REG_SP || reg == REG_PC)				\
	inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC;		\

/* If REG is R13 (the stack pointer), warn that its use is
   deprecated.  */
#define warn_deprecated_sp(reg)					\
      if (warn_on_deprecated && reg == REG_SP)			\
	as_warn (_("use of r13 is deprecated"));		\

/* Functions for operand encoding.  ARM, then Thumb.  */

#define rotate_left(v, n) (v << n | v >> (32 - n))

/* If VAL can be encoded in the immediate field of an ARM instruction,
   return the encoded form.  Otherwise, return FAIL.  */
encode_arm_immediate (unsigned int val)
  for (i = 0; i < 32; i += 2)
    if ((a = rotate_left (val, i)) <= 0xff)
      return a | (i << 7); /* 12-bit pack: [shift-cnt,const].  */

/* If VAL can be encoded in the immediate field of a Thumb32 instruction,
   return the encoded form.  Otherwise, return FAIL.  */
encode_thumb32_immediate (unsigned int val)
  for (i = 1; i <= 24; i++)
      if ((val & ~(0xff << i)) == 0)
	return ((val >> i) & 0x7f) | ((32 - i) << 7);

  if (val == ((a << 16) | a))
  if (val == ((a << 24) | (a << 16) | (a << 8) | a))
  if (val == ((a << 16) | a))
    return 0x200 | (a >> 8);
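
/* Worked examples (added commentary, not from the original sources):

   An ARM (A32) modified immediate is an 8-bit constant rotated right by
   an even amount.  0x0003FC00 is 0xFF rotated right by 22, so
   encode_arm_immediate returns 0xBFF: imm8 = 0xFF, rotate field
   (bits 11:8) = 11, and the hardware recreates 0xFF ROR (2 * 11).
   A value such as 0x00012345 has no such form and yields FAIL.

   Thumb-2 (T32) modified immediates additionally allow the replicated
   byte patterns 0x00XY00XY, 0xXY00XY00 and 0xXYXYXYXY, which is what
   the pattern checks in encode_thumb32_immediate recognise.  */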
/* Encode a VFP SP or DP register number into inst.instruction.  */
encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
  if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	  ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
	  ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
	  first_error (_("D register out of range for selected VFP version"));

      inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
      inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
      inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
      inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
      inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
      inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
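
/* Illustrative example (added commentary, not from the original
   sources): a single-precision register splits its number into a 4-bit
   field plus one extra bit, e.g. s5 in the Sd position contributes
   (5 >> 1) << 12 and (5 & 1) << 22.  A double-precision register such
   as d17 in the Dd position instead contributes (17 & 15) << 12 with
   the high bit (17 >> 4) going to bit 22; d16-d31 are therefore only
   accepted when the selected FPU provides 32 double registers.  */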
/* Encode a <shift> in an ARM-format instruction.  The immediate,
   if any, is handled by md_apply_fix.  */
encode_arm_shift (int i)
  if (inst.operands[i].shift_kind == SHIFT_RRX)
    inst.instruction |= SHIFT_ROR << 5;
      inst.instruction |= inst.operands[i].shift_kind << 5;
      if (inst.operands[i].immisreg)
	  inst.instruction |= SHIFT_BY_REG;
	  inst.instruction |= inst.operands[i].imm << 8;
	inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;

encode_arm_shifter_operand (int i)
  if (inst.operands[i].isreg)
      inst.instruction |= inst.operands[i].reg;
      encode_arm_shift (i);
    inst.instruction |= INST_IMMEDIATE;
/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.  */
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
  gas_assert (inst.operands[i].isreg);
  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
	  inst.error = _("instruction does not accept preindexed addressing");
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;
  else if (inst.operands[i].postind)
      gas_assert (inst.operands[i].writeback);
      inst.instruction |= WRITE_BACK;
  else /* unindexed - only for coprocessor */
      inst.error = _("instruction does not accept unindexed addressing");

  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 2 load or store instruction.  If is_t is true,
   reject forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
      inst.instruction |= INST_IMMEDIATE;  /* yes, this is backwards */
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
	  if (inst.operands[i].shift_kind == SHIFT_RRX)
	    inst.instruction |= SHIFT_ROR << 5;
	      inst.instruction |= inst.operands[i].shift_kind << 5;
	      inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
  else /* immediate offset in inst.reloc */
      if (inst.reloc.type == BFD_RELOC_UNUSED)
	inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;

/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 3 load or store instruction.  Reject forms that
   cannot be used with such instructions.  If is_t is true, reject
   forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
      inst.error = _("instruction does not accept scaled register index");

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
  else /* immediate offset in inst.reloc */
      inst.instruction |= HWOFFSET_IMM;
      if (inst.reloc.type == BFD_RELOC_UNUSED)
	inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format instruction.  Reject all forms which cannot be encoded
   into a coprocessor load/store instruction.  If wb_ok is false,
   reject use of writeback; if unind_ok is false, reject use of
   unindexed addressing.  If reloc_override is not 0, use it instead
   of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
   (in which case it is preserved).  */
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
  inst.instruction |= inst.operands[i].reg << 16;

  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
      gas_assert (!inst.operands[i].writeback);
	  inst.error = _("instruction does not support unindexed addressing");
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
      if (inst.operands[i].reg == REG_PC)
	  inst.error = _("pc may not be used with write-back");
	  inst.error = _("instruction does not support writeback");
      inst.instruction |= WRITE_BACK;

    inst.reloc.type = reloc_override;
  else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
	   && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
      inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
/* inst.reloc.exp describes an "=expr" load pseudo-operation.
   Determine whether it can be performed with a move instruction; if
   it can, convert inst.instruction to that move instruction and
   return TRUE; if it can't, convert inst.instruction to a literal-pool
   load and return FALSE.  If this is not a valid thing to do in the
   current context, set inst.error and return TRUE.

   inst.operands[i] describes the destination register.  */
move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3)
  tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;

  if ((inst.instruction & tbit) == 0)
      inst.error = _("invalid pseudo operation");
  if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol)
      inst.error = _("constant expression expected");
  if (inst.reloc.exp.X_op == O_constant)
      if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
	  /* This can be done with a mov(1) instruction.  */
	  inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
	  inst.instruction |= inst.reloc.exp.X_add_number;

	  int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
	      /* This can be done with a mov instruction.  */
	      inst.instruction &= LITERAL_MASK;
	      inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
	      inst.instruction |= value & 0xfff;

	  value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
	      /* This can be done with a mvn instruction.  */
	      inst.instruction &= LITERAL_MASK;
	      inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
	      inst.instruction |= value & 0xfff;

  if (add_to_lit_pool () == FAIL)
      inst.error = _("literal pool insertion failed");
  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.reloc.pc_rel = 1;
  inst.reloc.type = (thumb_p
		     ? BFD_RELOC_ARM_THUMB_OFFSET
		     : (mode_3
			? BFD_RELOC_ARM_HWLITERAL
			: BFD_RELOC_ARM_LITERAL));
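
/* Usage example (added commentary, not from the original sources): for
   "ldr r0, =0x4000" the constant fits an ARM modified immediate, so the
   pseudo-op is rewritten as "mov r0, #0x4000"; "ldr r0, =0xffffff00"
   becomes "mvn r0, #0xff".  A constant such as 0x12345678 fits neither
   form, so it is placed in the literal pool and loaded PC-relative.  */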
/* Functions for instruction encoding, sorted by sub-architecture.
   First some generics; their names are taken from the conventional
   bit positions for register arguments in ARM format instructions.  */

  inst.instruction |= inst.operands[0].reg << 12;

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 12;

  unsigned Rn = inst.operands[2].reg;
  /* Enforce restrictions on SWP instruction.  */
  if ((inst.instruction & 0x0fbfffff) == 0x01000090)
    constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
		_("Rn must not overlap other operands"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= Rn << 16;

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;

  inst.instruction |= inst.operands[0].imm;

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
/* ARM instructions, in alphabetical order by function name (except
   that wrapper functions appear immediately after the function they
   call).  */

/* This is a pseudo-op of the form "adr rd, label" to be converted
   into a relative address of the form "add rd, pc, #label-.-8".  */
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 1;
  inst.reloc.exp.X_add_number -= 8;

/* This is a pseudo-op of the form "adrl rd, label" to be converted
   into a relative address of the form:
     add rd, pc, #low(label-.-8)
     add rd, rd, #high(label-.-8)  */
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
  inst.reloc.pc_rel = 1;
  inst.size = INSN_SIZE * 2;
  inst.reloc.exp.X_add_number -= 8;
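
/* Illustrative example (added commentary, not from the original
   sources): "adrl r0, target" reserves two instruction slots
   (inst.size = INSN_SIZE * 2) and is later fixed up into a pair such as

     add r0, pc, #(low part of target - . - 8)
     add r0, r0, #(high part of target - . - 8)

   with both adds becoming subs when the offset is negative, which is
   why only the relocation type differs from plain "adr".  */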
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_shifter_operand (2);
  if (inst.operands[0].present)
      constraint ((inst.instruction & 0xf0) != 0x40
		  && inst.operands[0].imm != 0xf,
		  _("bad barrier type"));
      inst.instruction |= inst.operands[0].imm;
    inst.instruction |= 0xf;

  unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 7;
  inst.instruction |= (msb - 1) << 16;

  /* #0 in second position is alternative syntax for bfc, which is
     the same instruction but with REG_PC in the Rm field.  */
  if (!inst.operands[1].isreg)
    inst.operands[1].reg = REG_PC;

  msb = inst.operands[2].imm + inst.operands[3].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 7;
  inst.instruction |= (msb - 1) << 16;

  constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
	      _("bit-field extends past end of register"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 7;
  inst.instruction |= (inst.operands[3].imm - 1) << 16;
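
/* Worked example (added commentary, not from the original sources):
   "bfi r0, r1, #4, #8" parses as lsb = 4, width = 8.  The encoder above
   stores lsb (4) at bit 7 and msb (lsb + width - 1 = 11) at bit 16,
   matching the architectural msb:lsb form of BFC/BFI.  For the extract
   instructions (SBFX/UBFX) the field at bit 16 is width - 1 instead.  */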
/* ARM V5 breakpoint instruction (argument parse)
     BKPT <16 bit unsigned immediate>
   Instruction is not conditional.
	The bit pattern given in insns[] has the COND_ALWAYS condition,
	and it is an error if the caller tried to override that.  */

  /* Top 12 of 16 bits to bits 19:8.  */
  inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;

  /* Bottom 4 of 16 bits to bits 3:0.  */
  inst.instruction |= inst.operands[0].imm & 0xf;

encode_branch (int default_reloc)
  if (inst.operands[0].hasreloc)
      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32,
		  _("the only suffix valid here is '(plt)'"));
      inst.reloc.type = BFD_RELOC_ARM_PLT32;
    inst.reloc.type = default_reloc;
  inst.reloc.pc_rel = 1;

  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);

  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);

/* ARM V5 branch-link-exchange instruction (argument parse)
     BLX <target_addr>		ie BLX(1)
     BLX{<condition>} <Rm>	ie BLX(2)
   Unfortunately, there are two different opcodes for this mnemonic.
   So, the insns[].value is not used, and the code here zaps values
	into inst.instruction.
   Also, the <target_addr> can be 25 bits, hence has its own reloc.  */

  if (inst.operands[0].isreg)
      /* Arg is a register; the opcode provided by insns[] is correct.
	 It is not illegal to do "blx pc", just useless.  */
      if (inst.operands[0].reg == REG_PC)
	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));

      inst.instruction |= inst.operands[0].reg;

      /* Arg is an address; this instruction cannot be executed
	 conditionally, and the opcode must be adjusted.
	 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
	 where we generate a BFD_RELOC_ARM_PCREL_CALL instead.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction = 0xfa000000;
      encode_branch (BFD_RELOC_ARM_PCREL_BLX);
  bfd_boolean want_reloc;

  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;
  /* Output R_ARM_V4BX relocations if this is an EABI object that looks
     like it is for ARMv4t or earlier.  */
  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
  if (object_arch && !ARM_CPU_HAS_FEATURE (*object_arch, arm_ext_v5))
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
    inst.reloc.type = BFD_RELOC_ARM_V4BX;

/* ARM v5TEJ.  Jump to Jazelle code.  */
  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bxj is not really useful"));

  inst.instruction |= inst.operands[0].reg;

/* Co-processor data operation:
      CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
      CDP2	<coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}	 */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 20;
  inst.instruction |= inst.operands[2].reg << 12;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[4].reg;
  inst.instruction |= inst.operands[5].imm << 5;
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_shifter_operand (1);

/* Transfer between coprocessor and ARM registers.
   MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
   No special properties.  */
  Rd = inst.operands[2].reg;
  if (inst.instruction == 0xee000010
      || inst.instruction == 0xfe000010)
    reject_bad_reg (Rd);
    constraint (Rd == REG_SP, BAD_SP);
  if (inst.instruction == 0xe000010)
    constraint (Rd == REG_PC, BAD_PC);

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 21;
  inst.instruction |= Rd << 12;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[4].reg;
  inst.instruction |= inst.operands[5].imm << 5;

/* Transfer between coprocessor register and pair of ARM registers.
   MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
   Two XScale instructions are special cases of these:
     MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
     MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
   Result unpredictable if Rd or Rn is R15.  */
  Rd = inst.operands[2].reg;
  Rn = inst.operands[3].reg;
    reject_bad_reg (Rd);
    reject_bad_reg (Rn);
    constraint (Rd == REG_PC, BAD_PC);
    constraint (Rn == REG_PC, BAD_PC);

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 4;
  inst.instruction |= Rd << 12;
  inst.instruction |= Rn << 16;
  inst.instruction |= inst.operands[4].reg;
  inst.instruction |= inst.operands[0].imm << 6;
  if (inst.operands[1].present)
      inst.instruction |= CPSI_MMOD;
      inst.instruction |= inst.operands[1].imm;

  inst.instruction |= inst.operands[0].imm;

  /* There is no IT instruction in ARM mode.  We
     process it to do the validation as if in
     thumb mode, just in case the code gets
     assembled for thumb using the unified syntax.  */
  set_it_insn_type (IT_INSN);
  now_it.mask = (inst.instruction & 0xf) | 0x10;
  now_it.cc = inst.operands[0].imm;

  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback.  */
      if (inst.instruction & LOAD_BIT)
	  /* Not allowed in LDM type 2.  */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types.  */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));

	  /* Not allowed for type 2.  */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list.  */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
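
/* Illustrative example (added commentary, not from the original
   sources): "ldmia r0!, {r0, r1}" triggers the "writeback of base
   register when in register list is UNPREDICTABLE" warning above,
   because the base r0 is both written back and reloaded from memory;
   "stmia r1!, {r0, r1}" warns because the written-back base is in the
   list but is not the lowest register in it.  */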
/* ARMv5TE load-consecutive (argument parse).  */
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first destination register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  if (inst.instruction & LOAD_BIT)
      /* encode_arm_addr_mode_3 will diagnose overlap between the base
	 register and the first register written; we have to diagnose
	 overlap between the base and the second register written here.  */
      if (inst.operands[2].reg == inst.operands[1].reg
	  && (inst.operands[2].writeback || inst.operands[2].postind))
	as_warn (_("base register written back, and overlaps "
		   "second destination register"));

      /* For an index-register load, the index register must not overlap the
	 destination (even if not write-back).  */
      else if (inst.operands[2].immisreg
	       && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
		   || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps destination register"));

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);

  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative
	      /* This can arise if the programmer has written
		 or if they have mistakenly used a register name as the last
		 operand.
		 It is very difficult to distinguish between these two cases
		 because "rX" might actually be a label.  ie the register
		 name has been occluded by a symbol of the same name.  So we
		 just generate a general 'bad addressing mode' type error
		 message and leave it up to the programmer to discover the
		 true cause and fix their mistake.  */
	      || (inst.operands[1].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.reloc.type = BFD_RELOC_UNUSED;
  constraint (inst.operands[0].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* If op 1 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;

  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/FALSE))
  encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);

  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);

/* Halfword and signed-byte load/store operations.  */
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/TRUE))
  encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);

  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
/* Co-processor register load/store.
   Format: <LDC|STC>{cond}[L] CP#,CRd,<address>  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 12;
  encode_arm_cp_address (2, TRUE, TRUE, 0);

  /* This restriction does not apply to mls (nor to mla in v6 or later).  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
      && !(inst.instruction & 0x00400000))
    as_tsktsk (_("Rd and Rm should be different in mla"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 12;

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_shifter_operand (1);

/* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>.  */
  top = (inst.instruction & 0x00400000) != 0;
  constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
	      _(":lower16: not allowed in this instruction"));
  constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
	      _(":upper16: not allowed in this instruction"));
  inst.instruction |= inst.operands[0].reg << 12;
  if (inst.reloc.type == BFD_RELOC_UNUSED)
      imm = inst.reloc.exp.X_add_number;
      /* The value is in two pieces: 0:11, 16:19.  */
      inst.instruction |= (imm & 0x00000fff);
      inst.instruction |= (imm & 0x0000f000) << 4;
7574 do_vfp_nsyn_mrs (void)
7576 if (inst
.operands
[0].isvec
)
7578 if (inst
.operands
[1].reg
!= 1)
7579 first_error (_("operand 1 must be FPSCR"));
7580 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
7581 memset (&inst
.operands
[1], '\0', sizeof (inst
.operands
[1]));
7582 do_vfp_nsyn_opcode ("fmstat");
7584 else if (inst
.operands
[1].isvec
)
7585 do_vfp_nsyn_opcode ("fmrx");
7593 do_vfp_nsyn_msr (void)
7595 if (inst
.operands
[0].isvec
)
7596 do_vfp_nsyn_opcode ("fmxr");
7606 if (do_vfp_nsyn_mrs () == SUCCESS
)
7609 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
7610 constraint ((inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
))
7612 _("'CPSR' or 'SPSR' expected"));
7613 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7614 inst
.instruction
|= (inst
.operands
[1].imm
& SPSR_BIT
);
7617 /* Two possible forms:
7618 "{C|S}PSR_<field>, Rm",
7619 "{C|S}PSR_f, #expression". */
7624 if (do_vfp_nsyn_msr () == SUCCESS
)
7627 inst
.instruction
|= inst
.operands
[0].imm
;
7628 if (inst
.operands
[1].isreg
)
7629 inst
.instruction
|= inst
.operands
[1].reg
;
7632 inst
.instruction
|= INST_IMMEDIATE
;
7633 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
7634 inst
.reloc
.pc_rel
= 0;
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;

  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("Rd and Rm should be different in mul"));

/* Long Multiply Parser
   UMULL RdLo, RdHi, Rm, Rs
   SMULL RdLo, RdHi, Rm, Rs
   UMLAL RdLo, RdHi, Rm, Rs
   SMLAL RdLo, RdHi, Rm, Rs.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  /* rdhi and rdlo must be different.  */
  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));

  /* rdhi, rdlo and rm must all be different before armv6.  */
  if ((inst.operands[0].reg == inst.operands[2].reg
       || inst.operands[1].reg == inst.operands[2].reg)
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("rdhi, rdlo and rm must all be different"));
7680 if (inst
.operands
[0].present
7681 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6k
))
7683 /* Architectural NOP hints are CPSR sets with no bits selected. */
7684 inst
.instruction
&= 0xf0000000;
7685 inst
.instruction
|= 0x0320f000;
7686 if (inst
.operands
[0].present
)
7687 inst
.instruction
|= inst
.operands
[0].imm
;
/* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
   PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
   Condition defaults to COND_ALWAYS.
   Error if Rd, Rn or Rm are R15.  */

static void
do_pkhbt (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  if (inst.operands[3].present)
    encode_arm_shift (3);
}

/* ARM V6 PKHTB (Argument Parse).  */

static void
do_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      /* If the shift specifier is omitted, turn the instruction
         into pkhbt rd, rm, rn.  */
      inst.instruction &= 0xfff00010;
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg;
      inst.instruction |= inst.operands[2].reg << 16;
    }
  else
    {
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[2].reg;
      encode_arm_shift (3);
    }
}
/* ARMv5TE: Preload-Cache

   PLD <addr_mode>

   Syntactically, like LDR with B=1, W=0, L=1.  */

static void
do_pld (void)
{
  constraint (!inst.operands[0].isreg,
              _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
              _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
              _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
              _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
}

/* ARMv7: PLI <addr_mode> */

static void
do_pli (void)
{
  constraint (!inst.operands[0].isreg,
              _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
              _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
              _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
              _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
  inst.instruction &= ~PRE_INDEX;
}

static void
do_push_pop (void)
{
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].reg = REG_SP;
  do_ldmstm ();
}
/* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
   word at the specified address and the following word
   respectively.
   Unconditionally executed.
   Error if Rn is R15.  */

static void
do_rfe (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
}

/* ARM V6 ssat (argument parse).  */

static void
do_ssat (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= (inst.operands[1].imm - 1) << 16;
  inst.instruction |= inst.operands[2].reg;

  if (inst.operands[3].present)
    encode_arm_shift (3);
}

/* ARM V6 usat (argument parse).  */

static void
do_usat (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 16;
  inst.instruction |= inst.operands[2].reg;

  if (inst.operands[3].present)
    encode_arm_shift (3);
}

/* ARM V6 ssat16 (argument parse).  */

static void
do_ssat16 (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= ((inst.operands[1].imm - 1) << 16);
  inst.instruction |= inst.operands[2].reg;
}

static void
do_usat16 (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 16;
  inst.instruction |= inst.operands[2].reg;
}
/* ARM V6 SETEND (argument parse).  Sets the E bit in the CPSR while
   preserving the other bits.

   setend <endian_specifier>, where <endian_specifier> is either
   BE or LE.  */

static void
do_setend (void)
{
  if (inst.operands[0].imm)
    inst.instruction |= 0x200;
}

static void
do_shift (void)
{
  unsigned int Rm = (inst.operands[1].present
                     ? inst.operands[1].reg
                     : inst.operands[0].reg);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= Rm;
  if (inst.operands[2].isreg)  /* Rd, {Rm,} Rs */
    {
      inst.instruction |= inst.operands[2].reg << 8;
      inst.instruction |= SHIFT_BY_REG;
    }
  else
    inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
}

static void
do_smc (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SMC;
  inst.reloc.pc_rel = 0;
}

static void
do_swi (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SWI;
  inst.reloc.pc_rel = 0;
}
/* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
   SMLAxy{cond} Rd,Rm,Rs,Rn
   SMLAWy{cond} Rd,Rm,Rs,Rn
   Error if any register is R15.  */

static void
do_smla (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 12;
}

/* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
   SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
   Error if any register is R15.
   Warning if Rdlo == Rdhi.  */

static void
do_smlal (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));
}

/* ARM V5E (El Segundo) signed-multiply (argument parse)
   SMULxy{cond} Rd,Rm,Rs
   Error if any register is R15.  */

static void
do_smul (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
}
/* ARM V6 srs (argument parse).  The variable fields in the encoding are
   the same for both ARM and Thumb-2.  */

static void
do_srs (void)
{
  int reg;

  if (inst.operands[0].present)
    {
      reg = inst.operands[0].reg;
      constraint (reg != REG_SP, _("SRS base register must be r13"));
    }
  else
    reg = REG_SP;

  inst.instruction |= reg << 16;
  inst.instruction |= inst.operands[1].imm;
  if (inst.operands[0].writeback || inst.operands[1].writeback)
    inst.instruction |= WRITE_BACK;
}

/* ARM V6 strex (argument parse).  */

static void
do_strex (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
              || inst.operands[2].postind || inst.operands[2].writeback
              || inst.operands[2].immisreg || inst.operands[2].shifted
              || inst.operands[2].negative
              /* See comment in do_ldrex().  */
              || (inst.operands[2].reg == REG_PC),
              BAD_ADDR_MODE);

  constraint (inst.operands[0].reg == inst.operands[1].reg
              || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  constraint (inst.reloc.exp.X_op != O_constant
              || inst.reloc.exp.X_add_number != 0,
              _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.reloc.type = BFD_RELOC_UNUSED;
}
static void
do_strexd (void)
{
  constraint (inst.operands[1].reg % 2 != 0,
              _("even register required"));
  constraint (inst.operands[2].present
              && inst.operands[2].reg != inst.operands[1].reg + 1,
              _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  constraint (inst.operands[0].reg == inst.operands[1].reg
              || inst.operands[0].reg == inst.operands[1].reg + 1
              || inst.operands[0].reg == inst.operands[3].reg,
              BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[3].reg << 16;
}

/* ARM V6 SXTAH extracts a 16-bit value from a register, sign
   extends it to 32-bits, and adds the result to a value in another
   register.  You can specify a rotation by 0, 8, 16, or 24 bits
   before extracting the 16-bit value.
   SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
   Condition defaults to COND_ALWAYS.
   Error if any register uses R15.  */

static void
do_sxtah (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 10;
}
/* ARM V6 SXTH.

   SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
   Condition defaults to COND_ALWAYS.
   Error if any register uses R15.  */

static void
do_sxth (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 10;
}
/* VFP instructions.  In a logical order: SP variant first, monad
   before dyad, arithmetic then move then load/store.  */

static void
do_vfp_sp_monadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}

static void
do_vfp_sp_dyadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}

static void
do_vfp_sp_compare_z (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}

static void
do_vfp_dp_sp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}

static void
do_vfp_sp_dp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}

static void
do_vfp_reg_from_sp (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
}

static void
do_vfp_reg2_from_sp2 (void)
{
  constraint (inst.operands[2].imm != 2,
              _("only two consecutive VFP SP registers allowed here"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}

static void
do_vfp_sp_from_reg (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
  inst.instruction |= inst.operands[1].reg << 12;
}

static void
do_vfp_sp2_from_reg2 (void)
{
  constraint (inst.operands[0].imm != 2,
              _("only two consecutive VFP SP registers allowed here"));
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}

static void
do_vfp_sp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}

static void
do_vfp_dp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
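/* Note added for clarity (not in the original sources): VFP_REG_Sd,
   VFP_REG_Sn and VFP_REG_Sm select which of the three register fields
   encode_arm_vfp_reg() fills; in the VFP encoding the low bit of a
   single-precision register number goes into the corresponding D/N/M
   bit and the remaining bits into the 4-bit Vd/Vn/Vm field.  */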
static void
vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA,
                _("this addressing mode requires base-register writeback"));
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
  inst.instruction |= inst.operands[1].imm;
}

static void
vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  int count;

  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
                _("this addressing mode requires base-register writeback"));

  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);

  count = inst.operands[1].imm << 1;
  if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
    count += 1;

  inst.instruction |= count;
}

static void
do_vfp_sp_ldstmia (void)
{
  vfp_sp_ldstm (VFP_LDSTMIA);
}

static void
do_vfp_sp_ldstmdb (void)
{
  vfp_sp_ldstm (VFP_LDSTMDB);
}

static void
do_vfp_dp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIA);
}

static void
do_vfp_dp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDB);
}

static void
do_vfp_xp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIAX);
}

static void
do_vfp_xp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDBX);
}
static void
do_vfp_dp_rd_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}

static void
do_vfp_dp_rn_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
}

static void
do_vfp_dp_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
}

static void
do_vfp_dp_rd_rn_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
}

static void
do_vfp_dp_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}

static void
do_vfp_dp_rm_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
}
/* VFPv3 instructions.  */

static void
do_vfp_sp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}

static void
do_vfp_dp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}

static void
vfp_conv (int srcsize)
{
  unsigned immbits = srcsize - inst.operands[1].imm;
  inst.instruction |= (immbits & 1) << 5;
  inst.instruction |= (immbits >> 1);
}

static void
do_vfp_sp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}

static void
do_vfp_dp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}

static void
do_vfp_sp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}

static void
do_vfp_dp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}
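/* Worked example, added for clarity (not in the original sources): for
   a 16-bit fixed-point conversion with 7 fraction bits, vfp_conv (16)
   computes immbits = 16 - 7 = 9, so bit 5 of the opcode receives
   9 & 1 = 1 and bits 0-3 receive 9 >> 1 = 4.  */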
/* FPA instructions.  Also in a logical order.  */

static void
do_fpa_cmp (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}

static void
do_fpa_ldmstm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X; break;
    case 2: inst.instruction |= CP_T_Y; break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    case 4: break;
    default: abort ();
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
         [Rn]{!}.  The instruction does not really support stacking or
         unstacking, so we have to emulate these by setting appropriate
         bits and offsets.  */
      constraint (inst.reloc.exp.X_op != O_constant
                  || inst.reloc.exp.X_add_number != 0,
                  _("this instruction does not support indexing"));

      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
        inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;

      if (!(inst.instruction & INDEX_UP))
        inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;

      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
        {
          inst.operands[2].preind = 0;
          inst.operands[2].postind = 1;
        }
    }

  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
/* iWMMXt instructions: strictly in alphabetical order.  */

static void
do_iwmmxt_tandorc (void)
{
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
}

static void
do_iwmmxt_textrc (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm;
}

static void
do_iwmmxt_textrm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].imm;
}

static void
do_iwmmxt_tinsr (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].imm;
}

static void
do_iwmmxt_tmia (void)
{
  inst.instruction |= inst.operands[0].reg << 5;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}

static void
do_iwmmxt_waligni (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 20;
}

static void
do_iwmmxt_wmerge (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 21;
}

static void
do_iwmmxt_wmov (void)
{
  /* WMOV rD, rN is an alias for WOR rD, rN, rN.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}

static void
do_iwmmxt_wldstbh (void)
{
  int reloc;
  inst.instruction |= inst.operands[0].reg << 12;
  if (thumb_mode)
    reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
  else
    reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
  encode_arm_cp_address (1, TRUE, FALSE, reloc);
}

static void
do_iwmmxt_wldstw (void)
{
  /* RIWR_RIWC clears .isreg for a control register.  */
  if (!inst.operands[0].isreg)
    {
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= 0xf0000000;
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}

static void
do_iwmmxt_wldstd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xf << 28);
      if (inst.operands[1].preind)
        inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
        inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
        inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.reloc.exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;
    }
  else
    encode_arm_cp_address (1, TRUE, FALSE, 0);
}

static void
do_iwmmxt_wshufh (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
  inst.instruction |= (inst.operands[2].imm & 0x0f);
}

static void
do_iwmmxt_wzero (void)
{
  /* WZERO reg is an alias for WANDN reg, reg, reg.  */
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[0].reg << 16;
}
static void
do_iwmmxt_wrwrwr_or_imm5 (void)
{
  if (inst.operands[2].isreg)
    do_rd_rn_rm ();
  else {
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
                _("immediate operand requires iWMMXt2"));
    do_rd_rn ();
    if (inst.operands[2].imm == 0)
      {
        switch ((inst.instruction >> 20) & 0xf)
          {
          case 4:
          case 5:
          case 6:
          case 7:
            /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
            inst.operands[2].imm = 16;
            inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
            break;
          case 8:
          case 9:
          case 10:
          case 11:
            /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
            inst.operands[2].imm = 32;
            inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
            break;
          case 12:
          case 13:
          case 14:
          case 15:
            {
              /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
              unsigned long wrn;
              wrn = (inst.instruction >> 16) & 0xf;
              inst.instruction &= 0xff0fff0f;
              inst.instruction |= wrn;
              /* Bail out here; the instruction is now assembled.  */
              return;
            }
          }
      }
    /* Map 32 -> 0, etc.  */
    inst.operands[2].imm &= 0x1f;
    inst.instruction |= (0xf << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
  }
}
/* Cirrus Maverick instructions.  Simple 2-, 3-, and 4-register
   operations first, then control, shift, and load/store.  */

/* Insns like "foo X,Y,Z".  */

static void
do_mav_triple (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}

/* Insns like "foo W,X,Y,Z".
    where W=MVAX[0:3] and X,Y,Z=MVFX[0:15].  */

static void
do_mav_quad (void)
{
  inst.instruction |= inst.operands[0].reg << 5;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.instruction |= inst.operands[3].reg;
}

/* cfmvsc32<cond> DSPSC,MVDX[15:0].  */

static void
do_mav_dspsc (void)
{
  inst.instruction |= inst.operands[1].reg << 12;
}

/* Maverick shift immediate instructions.
   cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
   cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0].  */

static void
do_mav_shift (void)
{
  int imm = inst.operands[2].imm;

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;

  /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
     Bits 5-7 of the insn should have bits 4-6 of the immediate.
     Bit 4 should be 0.  */
  imm = (imm & 0xf) | ((imm & 0x70) << 1);

  inst.instruction |= imm;
}
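/* Worked example, added for clarity (not in the original sources): a
   shift value of 0x45 (binary 1000101) becomes (0x45 & 0xf) = 0x5 in
   bits 0-3 and ((0x45 & 0x70) << 1) = 0x80 in bits 5-7, leaving bit 4
   clear as required.  */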
/* XScale instructions.  Also sorted arithmetic before move.  */

/* Xscale multiply-accumulate (argument parse)
     MIAxycc acc0,Rm,Rs.  */

static void
do_xsc_mia (void)
{
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}

/* Xscale move-accumulator-register (argument parse)

     MARcc   acc0,RdLo,RdHi.  */

static void
do_xsc_mar (void)
{
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}

/* Xscale move-register-accumulator (argument parse)

     MRAcc   RdLo,RdHi,acc0.  */

static void
do_xsc_mra (void)
{
  constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
}
/* Encoding functions relevant only to Thumb.  */

/* inst.operands[i] is a shifted-register operand; encode
   it into inst.instruction in the format used by Thumb32.  */

static void
encode_thumb32_shifted_operand (int i)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  constraint (inst.operands[i].immisreg,
              _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.reloc.exp.X_op != O_constant,
                  _("expression too complex"));

      constraint (value > 32
                  || (value == 32 && (shift == SHIFT_LSL
                                      || shift == SHIFT_ROR)),
                  _("shift expression is too large"));

      if (value == 0)
        shift = SHIFT_LSL;
      else if (value == 32)
        value = 0;

      inst.instruction |= shift << 4;
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
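/* Note added for clarity (not in the original sources): the shift
   amount is split as imm3:imm2 -- bits 2-4 of VALUE land in opcode
   bits 12-14 and bits 0-1 in bits 6-7 -- while the shift type sits in
   bits 4-5; an LSR/ASR by 32 is encoded as a zero amount, which is why
   VALUE is cleared for that case above.  */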
/* inst.operands[i] was set up by parse_address.  Encode it into a
   Thumb32 format load or store instruction.  Reject forms that cannot
   be used with such instructions.  If is_t is true, reject forms that
   cannot be used with a T instruction; if is_d is true, reject forms
   that cannot be used with a D instruction.  */

static void
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
{
  bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  constraint (!inst.operands[i].isreg,
              _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      constraint (is_pc, _("cannot use register index with PC-relative addressing"));
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
                  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
                  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
                  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
                  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
        {
          constraint (inst.reloc.exp.X_op != O_constant,
                      _("expression too complex"));
          constraint (inst.reloc.exp.X_add_number < 0
                      || inst.reloc.exp.X_add_number > 3,
                      _("shift out of range"));
          inst.instruction |= inst.reloc.exp.X_add_number << 4;
        }
      inst.reloc.type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      constraint (is_pc && inst.operands[i].writeback,
                  _("cannot use writeback with PC-relative addressing"));
      constraint (is_t && inst.operands[i].writeback,
                  _("cannot use writeback with this instruction"));

      if (is_d)
        {
          inst.instruction |= 0x01000000;
          if (inst.operands[i].writeback)
            inst.instruction |= 0x00200000;
        }
      else
        {
          inst.instruction |= 0x00000c00;
          if (inst.operands[i].writeback)
            inst.instruction |= 0x00000100;
        }
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      gas_assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
        inst.instruction |= 0x00200000;
      else
        inst.instruction |= 0x00000900;
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
/* Table of Thumb instructions which exist in both 16- and 32-bit
   encodings (the latter only in post-V6T2 cores).  The index is the
   value used in the insns table below.  When there is more than one
   possible 16-bit encoding for the instruction, this table always
   holds variant (1).
   Also contains several pseudo-instructions used during relaxation.  */
#define T16_32_TAB \
  X(adc,   4140, eb400000), \
  X(adcs,  4140, eb500000), \
  X(add,   1c00, eb000000), \
  X(adds,  1c00, eb100000), \
  X(addi,  0000, f1000000), \
  X(addis, 0000, f1100000), \
  X(add_pc,000f, f20f0000), \
  X(add_sp,000d, f10d0000), \
  X(adr,   000f, f20f0000), \
  X(and,   4000, ea000000), \
  X(ands,  4000, ea100000), \
  X(asr,   1000, fa40f000), \
  X(asrs,  1000, fa50f000), \
  X(b,     e000, f000b000), \
  X(bcond, d000, f0008000), \
  X(bic,   4380, ea200000), \
  X(bics,  4380, ea300000), \
  X(cmn,   42c0, eb100f00), \
  X(cmp,   2800, ebb00f00), \
  X(cpsie, b660, f3af8400), \
  X(cpsid, b670, f3af8600), \
  X(cpy,   4600, ea4f0000), \
  X(dec_sp,80dd, f1ad0d00), \
  X(eor,   4040, ea800000), \
  X(eors,  4040, ea900000), \
  X(inc_sp,00dd, f10d0d00), \
  X(ldmia, c800, e8900000), \
  X(ldr,   6800, f8500000), \
  X(ldrb,  7800, f8100000), \
  X(ldrh,  8800, f8300000), \
  X(ldrsb, 5600, f9100000), \
  X(ldrsh, 5e00, f9300000), \
  X(ldr_pc,4800, f85f0000), \
  X(ldr_pc2,4800, f85f0000), \
  X(ldr_sp,9800, f85d0000), \
  X(lsl,   0000, fa00f000), \
  X(lsls,  0000, fa10f000), \
  X(lsr,   0800, fa20f000), \
  X(lsrs,  0800, fa30f000), \
  X(mov,   2000, ea4f0000), \
  X(movs,  2000, ea5f0000), \
  X(mul,   4340, fb00f000), \
  X(muls,  4340, ffffffff), /* no 32b muls */ \
  X(mvn,   43c0, ea6f0000), \
  X(mvns,  43c0, ea7f0000), \
  X(neg,   4240, f1c00000), /* rsb #0 */ \
  X(negs,  4240, f1d00000), /* rsbs #0 */ \
  X(orr,   4300, ea400000), \
  X(orrs,  4300, ea500000), \
  X(pop,   bc00, e8bd0000), /* ldmia sp!,... */ \
  X(push,  b400, e92d0000), /* stmdb sp!,... */ \
  X(rev,   ba00, fa90f080), \
  X(rev16, ba40, fa90f090), \
  X(revsh, bac0, fa90f0b0), \
  X(ror,   41c0, fa60f000), \
  X(rors,  41c0, fa70f000), \
  X(sbc,   4180, eb600000), \
  X(sbcs,  4180, eb700000), \
  X(stmia, c000, e8800000), \
  X(str,   6000, f8400000), \
  X(strb,  7000, f8000000), \
  X(strh,  8000, f8200000), \
  X(str_sp,9000, f84d0000), \
  X(sub,   1e00, eba00000), \
  X(subs,  1e00, ebb00000), \
  X(subi,  8000, f1a00000), \
  X(subis, 8000, f1b00000), \
  X(sxtb,  b240, fa4ff080), \
  X(sxth,  b200, fa0ff080), \
  X(tst,   4200, ea100f00), \
  X(uxtb,  b2c0, fa5ff080), \
  X(uxth,  b280, fa1ff080), \
  X(nop,   bf00, f3af8000), \
  X(yield, bf10, f3af8001), \
  X(wfe,   bf20, f3af8002), \
  X(wfi,   bf30, f3af8003), \
  X(sev,   bf40, f3af8004),
/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM_##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
#undef X
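/* Illustrative note, added (not part of the original sources): with the
   definitions above, an entry such as X(adc, 4140, eb400000) expands to
   the enumerator T_MNEM_adc, to 0x4140 in thumb_op16[] and to
   0xeb400000 in thumb_op32[], so THUMB_OP16 (T_MNEM_adc) and
   THUMB_OP32 (T_MNEM_adc) yield the 16- and 32-bit encodings of the
   same mnemonic.  */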
8815 /* Thumb instruction encoders, in alphabetical order. */
8820 do_t_add_sub_w (void)
8824 Rd
= inst
.operands
[0].reg
;
8825 Rn
= inst
.operands
[1].reg
;
8827 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this is the
8828 SP-{plus,minute}-immediate form of the instruction. */
8829 reject_bad_reg (Rd
);
8831 inst
.instruction
|= (Rn
<< 16) | (Rd
<< 8);
8832 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMM12
;
8835 /* Parse an add or subtract instruction. We get here with inst.instruction
8836 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
8843 Rd
= inst
.operands
[0].reg
;
8844 Rs
= (inst
.operands
[1].present
8845 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
8846 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
8849 set_it_insn_type_last ();
8857 flags
= (inst
.instruction
== T_MNEM_adds
8858 || inst
.instruction
== T_MNEM_subs
);
8860 narrow
= !in_it_block ();
8862 narrow
= in_it_block ();
8863 if (!inst
.operands
[2].isreg
)
8867 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
8869 add
= (inst
.instruction
== T_MNEM_add
8870 || inst
.instruction
== T_MNEM_adds
);
8872 if (inst
.size_req
!= 4)
8874 /* Attempt to use a narrow opcode, with relaxation if
8876 if (Rd
== REG_SP
&& Rs
== REG_SP
&& !flags
)
8877 opcode
= add
? T_MNEM_inc_sp
: T_MNEM_dec_sp
;
8878 else if (Rd
<= 7 && Rs
== REG_SP
&& add
&& !flags
)
8879 opcode
= T_MNEM_add_sp
;
8880 else if (Rd
<= 7 && Rs
== REG_PC
&& add
&& !flags
)
8881 opcode
= T_MNEM_add_pc
;
8882 else if (Rd
<= 7 && Rs
<= 7 && narrow
)
8885 opcode
= add
? T_MNEM_addis
: T_MNEM_subis
;
8887 opcode
= add
? T_MNEM_addi
: T_MNEM_subi
;
8891 inst
.instruction
= THUMB_OP16(opcode
);
8892 inst
.instruction
|= (Rd
<< 4) | Rs
;
8893 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
8894 if (inst
.size_req
!= 2)
8895 inst
.relax
= opcode
;
8898 constraint (inst
.size_req
== 2, BAD_HIREG
);
8900 if (inst
.size_req
== 4
8901 || (inst
.size_req
!= 2 && !opcode
))
8905 constraint (add
, BAD_PC
);
8906 constraint (Rs
!= REG_LR
|| inst
.instruction
!= T_MNEM_subs
,
8907 _("only SUBS PC, LR, #const allowed"));
8908 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
8909 _("expression too complex"));
8910 constraint (inst
.reloc
.exp
.X_add_number
< 0
8911 || inst
.reloc
.exp
.X_add_number
> 0xff,
8912 _("immediate value out of range"));
8913 inst
.instruction
= T2_SUBS_PC_LR
8914 | inst
.reloc
.exp
.X_add_number
;
8915 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
8918 else if (Rs
== REG_PC
)
8920 /* Always use addw/subw. */
8921 inst
.instruction
= add
? 0xf20f0000 : 0xf2af0000;
8922 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMM12
;
8926 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8927 inst
.instruction
= (inst
.instruction
& 0xe1ffffff)
8930 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
8932 inst
.reloc
.type
= BFD_RELOC_ARM_T32_ADD_IMM
;
8934 inst
.instruction
|= Rd
<< 8;
8935 inst
.instruction
|= Rs
<< 16;
8940 Rn
= inst
.operands
[2].reg
;
8941 /* See if we can do this with a 16-bit instruction. */
8942 if (!inst
.operands
[2].shifted
&& inst
.size_req
!= 4)
8944 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
8949 inst
.instruction
= ((inst
.instruction
== T_MNEM_adds
8950 || inst
.instruction
== T_MNEM_add
)
8953 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
8957 if (inst
.instruction
== T_MNEM_add
&& (Rd
== Rs
|| Rd
== Rn
))
8959 /* Thumb-1 cores (except v6-M) require at least one high
8960 register in a narrow non flag setting add. */
8961 if (Rd
> 7 || Rn
> 7
8962 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
)
8963 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_msr
))
8970 inst
.instruction
= T_OPCODE_ADD_HI
;
8971 inst
.instruction
|= (Rd
& 8) << 4;
8972 inst
.instruction
|= (Rd
& 7);
8973 inst
.instruction
|= Rn
<< 3;
8979 constraint (Rd
== REG_PC
, BAD_PC
);
8980 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
8981 constraint (Rs
== REG_PC
, BAD_PC
);
8982 reject_bad_reg (Rn
);
8984 /* If we get here, it can't be done in 16 bits. */
8985 constraint (inst
.operands
[2].shifted
&& inst
.operands
[2].immisreg
,
8986 _("shift must be constant"));
8987 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8988 inst
.instruction
|= Rd
<< 8;
8989 inst
.instruction
|= Rs
<< 16;
8990 encode_thumb32_shifted_operand (2);
8995 constraint (inst
.instruction
== T_MNEM_adds
8996 || inst
.instruction
== T_MNEM_subs
,
8999 if (!inst
.operands
[2].isreg
) /* Rd, Rs, #imm */
9001 constraint ((Rd
> 7 && (Rd
!= REG_SP
|| Rs
!= REG_SP
))
9002 || (Rs
> 7 && Rs
!= REG_SP
&& Rs
!= REG_PC
),
9005 inst
.instruction
= (inst
.instruction
== T_MNEM_add
9007 inst
.instruction
|= (Rd
<< 4) | Rs
;
9008 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
9012 Rn
= inst
.operands
[2].reg
;
9013 constraint (inst
.operands
[2].shifted
, _("unshifted register required"));
9015 /* We now have Rd, Rs, and Rn set to registers. */
9016 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
9018 /* Can't do this for SUB. */
9019 constraint (inst
.instruction
== T_MNEM_sub
, BAD_HIREG
);
9020 inst
.instruction
= T_OPCODE_ADD_HI
;
9021 inst
.instruction
|= (Rd
& 8) << 4;
9022 inst
.instruction
|= (Rd
& 7);
9024 inst
.instruction
|= Rn
<< 3;
9026 inst
.instruction
|= Rs
<< 3;
9028 constraint (1, _("dest must overlap one source register"));
9032 inst
.instruction
= (inst
.instruction
== T_MNEM_add
9033 ? T_OPCODE_ADD_R3
: T_OPCODE_SUB_R3
);
9034 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
9044 Rd
= inst
.operands
[0].reg
;
9045 reject_bad_reg (Rd
);
9047 if (unified_syntax
&& inst
.size_req
== 0 && Rd
<= 7)
9049 /* Defer to section relaxation. */
9050 inst
.relax
= inst
.instruction
;
9051 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9052 inst
.instruction
|= Rd
<< 4;
9054 else if (unified_syntax
&& inst
.size_req
!= 2)
9056 /* Generate a 32-bit opcode. */
9057 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9058 inst
.instruction
|= Rd
<< 8;
9059 inst
.reloc
.type
= BFD_RELOC_ARM_T32_ADD_PC12
;
9060 inst
.reloc
.pc_rel
= 1;
9064 /* Generate a 16-bit opcode. */
9065 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9066 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
9067 inst
.reloc
.exp
.X_add_number
-= 4; /* PC relative adjust. */
9068 inst
.reloc
.pc_rel
= 1;
9070 inst
.instruction
|= Rd
<< 4;
9074 /* Arithmetic instructions for which there is just one 16-bit
9075 instruction encoding, and it allows only two low registers.
9076 For maximal compatibility with ARM syntax, we allow three register
9077 operands even when Thumb-32 instructions are not available, as long
9078 as the first two are identical. For instance, both "sbc r0,r1" and
9079 "sbc r0,r0,r1" are allowed. */
9085 Rd
= inst
.operands
[0].reg
;
9086 Rs
= (inst
.operands
[1].present
9087 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
9088 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
9089 Rn
= inst
.operands
[2].reg
;
9091 reject_bad_reg (Rd
);
9092 reject_bad_reg (Rs
);
9093 if (inst
.operands
[2].isreg
)
9094 reject_bad_reg (Rn
);
9098 if (!inst
.operands
[2].isreg
)
9100 /* For an immediate, we always generate a 32-bit opcode;
9101 section relaxation will shrink it later if possible. */
9102 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9103 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
9104 inst
.instruction
|= Rd
<< 8;
9105 inst
.instruction
|= Rs
<< 16;
9106 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
9112 /* See if we can do this with a 16-bit instruction. */
9113 if (THUMB_SETS_FLAGS (inst
.instruction
))
9114 narrow
= !in_it_block ();
9116 narrow
= in_it_block ();
9118 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
9120 if (inst
.operands
[2].shifted
)
9122 if (inst
.size_req
== 4)
9128 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9129 inst
.instruction
|= Rd
;
9130 inst
.instruction
|= Rn
<< 3;
9134 /* If we get here, it can't be done in 16 bits. */
9135 constraint (inst
.operands
[2].shifted
9136 && inst
.operands
[2].immisreg
,
9137 _("shift must be constant"));
9138 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9139 inst
.instruction
|= Rd
<< 8;
9140 inst
.instruction
|= Rs
<< 16;
9141 encode_thumb32_shifted_operand (2);
9146 /* On its face this is a lie - the instruction does set the
9147 flags. However, the only supported mnemonic in this mode
9149 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
9151 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
9152 _("unshifted register required"));
9153 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
9154 constraint (Rd
!= Rs
,
9155 _("dest and source1 must be the same register"));
9157 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9158 inst
.instruction
|= Rd
;
9159 inst
.instruction
|= Rn
<< 3;
9163 /* Similarly, but for instructions where the arithmetic operation is
9164 commutative, so we can allow either of them to be different from
9165 the destination operand in a 16-bit instruction. For instance, all
9166 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
9173 Rd
= inst
.operands
[0].reg
;
9174 Rs
= (inst
.operands
[1].present
9175 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
9176 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
9177 Rn
= inst
.operands
[2].reg
;
9179 reject_bad_reg (Rd
);
9180 reject_bad_reg (Rs
);
9181 if (inst
.operands
[2].isreg
)
9182 reject_bad_reg (Rn
);
9186 if (!inst
.operands
[2].isreg
)
9188 /* For an immediate, we always generate a 32-bit opcode;
9189 section relaxation will shrink it later if possible. */
9190 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9191 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
9192 inst
.instruction
|= Rd
<< 8;
9193 inst
.instruction
|= Rs
<< 16;
9194 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
9200 /* See if we can do this with a 16-bit instruction. */
9201 if (THUMB_SETS_FLAGS (inst
.instruction
))
9202 narrow
= !in_it_block ();
9204 narrow
= in_it_block ();
9206 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
9208 if (inst
.operands
[2].shifted
)
9210 if (inst
.size_req
== 4)
9217 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9218 inst
.instruction
|= Rd
;
9219 inst
.instruction
|= Rn
<< 3;
9224 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9225 inst
.instruction
|= Rd
;
9226 inst
.instruction
|= Rs
<< 3;
9231 /* If we get here, it can't be done in 16 bits. */
9232 constraint (inst
.operands
[2].shifted
9233 && inst
.operands
[2].immisreg
,
9234 _("shift must be constant"));
9235 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9236 inst
.instruction
|= Rd
<< 8;
9237 inst
.instruction
|= Rs
<< 16;
9238 encode_thumb32_shifted_operand (2);
9243 /* On its face this is a lie - the instruction does set the
9244 flags. However, the only supported mnemonic in this mode
9246 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
9248 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
9249 _("unshifted register required"));
9250 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
9252 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9253 inst
.instruction
|= Rd
;
9256 inst
.instruction
|= Rn
<< 3;
9258 inst
.instruction
|= Rs
<< 3;
9260 constraint (1, _("dest must overlap one source register"));
9267 if (inst
.operands
[0].present
)
9269 constraint ((inst
.instruction
& 0xf0) != 0x40
9270 && inst
.operands
[0].imm
!= 0xf,
9271 _("bad barrier type"));
9272 inst
.instruction
|= inst
.operands
[0].imm
;
9275 inst
.instruction
|= 0xf;
9282 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
9283 constraint (msb
> 32, _("bit-field extends past end of register"));
9284 /* The instruction encoding stores the LSB and MSB,
9285 not the LSB and width. */
9286 Rd
= inst
.operands
[0].reg
;
9287 reject_bad_reg (Rd
);
9288 inst
.instruction
|= Rd
<< 8;
9289 inst
.instruction
|= (inst
.operands
[1].imm
& 0x1c) << 10;
9290 inst
.instruction
|= (inst
.operands
[1].imm
& 0x03) << 6;
9291 inst
.instruction
|= msb
- 1;
9300 Rd
= inst
.operands
[0].reg
;
9301 reject_bad_reg (Rd
);
9303 /* #0 in second position is alternative syntax for bfc, which is
9304 the same instruction but with REG_PC in the Rm field. */
9305 if (!inst
.operands
[1].isreg
)
9309 Rn
= inst
.operands
[1].reg
;
9310 reject_bad_reg (Rn
);
9313 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
9314 constraint (msb
> 32, _("bit-field extends past end of register"));
9315 /* The instruction encoding stores the LSB and MSB,
9316 not the LSB and width. */
9317 inst
.instruction
|= Rd
<< 8;
9318 inst
.instruction
|= Rn
<< 16;
9319 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
9320 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
9321 inst
.instruction
|= msb
- 1;
9329 Rd
= inst
.operands
[0].reg
;
9330 Rn
= inst
.operands
[1].reg
;
9332 reject_bad_reg (Rd
);
9333 reject_bad_reg (Rn
);
9335 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
9336 _("bit-field extends past end of register"));
9337 inst
.instruction
|= Rd
<< 8;
9338 inst
.instruction
|= Rn
<< 16;
9339 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
9340 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
9341 inst
.instruction
|= inst
.operands
[3].imm
- 1;
9344 /* ARM V5 Thumb BLX (argument parse)
9345 BLX <target_addr> which is BLX(1)
9346 BLX <Rm> which is BLX(2)
9347 Unfortunately, there are two different opcodes for this mnemonic.
9348 So, the insns[].value is not used, and the code here zaps values
9349 into inst.instruction.
9351 ??? How to take advantage of the additional two bits of displacement
9352 available in Thumb32 mode? Need new relocation? */
9357 set_it_insn_type_last ();
9359 if (inst
.operands
[0].isreg
)
9361 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
9362 /* We have a register, so this is BLX(2). */
9363 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
9367 /* No register. This must be BLX(1). */
9368 inst
.instruction
= 0xf000e800;
9369 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BLX
;
9370 inst
.reloc
.pc_rel
= 1;
9381 set_it_insn_type (IF_INSIDE_IT_LAST_INSN
);
9385 /* Conditional branches inside IT blocks are encoded as unconditional
9392 if (cond
!= COND_ALWAYS
)
9393 opcode
= T_MNEM_bcond
;
9395 opcode
= inst
.instruction
;
9397 if (unified_syntax
&& inst
.size_req
== 4)
9399 inst
.instruction
= THUMB_OP32(opcode
);
9400 if (cond
== COND_ALWAYS
)
9401 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
9404 gas_assert (cond
!= 0xF);
9405 inst
.instruction
|= cond
<< 22;
9406 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
9411 inst
.instruction
= THUMB_OP16(opcode
);
9412 if (cond
== COND_ALWAYS
)
9413 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
9416 inst
.instruction
|= cond
<< 8;
9417 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
9419 /* Allow section relaxation. */
9420 if (unified_syntax
&& inst
.size_req
!= 2)
9421 inst
.relax
= opcode
;
9424 inst
.reloc
.pc_rel
= 1;
9430 constraint (inst
.cond
!= COND_ALWAYS
,
9431 _("instruction is always unconditional"));
9432 if (inst
.operands
[0].present
)
9434 constraint (inst
.operands
[0].imm
> 255,
9435 _("immediate value out of range"));
9436 inst
.instruction
|= inst
.operands
[0].imm
;
9437 set_it_insn_type (NEUTRAL_IT_INSN
);
9442 do_t_branch23 (void)
9444 set_it_insn_type_last ();
9445 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
9446 inst
.reloc
.pc_rel
= 1;
9448 #if defined(OBJ_COFF)
9449 /* If the destination of the branch is a defined symbol which does not have
9450 the THUMB_FUNC attribute, then we must be calling a function which has
9451 the (interfacearm) attribute. We look for the Thumb entry point to that
9452 function and change the branch to refer to that function instead. */
9453 if ( inst
.reloc
.exp
.X_op
== O_symbol
9454 && inst
.reloc
.exp
.X_add_symbol
!= NULL
9455 && S_IS_DEFINED (inst
.reloc
.exp
.X_add_symbol
)
9456 && ! THUMB_IS_FUNC (inst
.reloc
.exp
.X_add_symbol
))
9457 inst
.reloc
.exp
.X_add_symbol
=
9458 find_real_start (inst
.reloc
.exp
.X_add_symbol
);
9465 set_it_insn_type_last ();
9466 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
9467 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
9468 should cause the alignment to be checked once it is known. This is
9469 because BX PC only works if the instruction is word aligned. */
9477 set_it_insn_type_last ();
9478 Rm
= inst
.operands
[0].reg
;
9479 reject_bad_reg (Rm
);
9480 inst
.instruction
|= Rm
<< 16;
9489 Rd
= inst
.operands
[0].reg
;
9490 Rm
= inst
.operands
[1].reg
;
9492 reject_bad_reg (Rd
);
9493 reject_bad_reg (Rm
);
9495 inst
.instruction
|= Rd
<< 8;
9496 inst
.instruction
|= Rm
<< 16;
9497 inst
.instruction
|= Rm
;
9503 set_it_insn_type (OUTSIDE_IT_INSN
);
9504 inst
.instruction
|= inst
.operands
[0].imm
;
9510 set_it_insn_type (OUTSIDE_IT_INSN
);
9512 && (inst
.operands
[1].present
|| inst
.size_req
== 4)
9513 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6_notm
))
9515 unsigned int imod
= (inst
.instruction
& 0x0030) >> 4;
9516 inst
.instruction
= 0xf3af8000;
9517 inst
.instruction
|= imod
<< 9;
9518 inst
.instruction
|= inst
.operands
[0].imm
<< 5;
9519 if (inst
.operands
[1].present
)
9520 inst
.instruction
|= 0x100 | inst
.operands
[1].imm
;
9524 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
)
9525 && (inst
.operands
[0].imm
& 4),
9526 _("selected processor does not support 'A' form "
9527 "of this instruction"));
9528 constraint (inst
.operands
[1].present
|| inst
.size_req
== 4,
9529 _("Thumb does not support the 2-argument "
9530 "form of this instruction"));
9531 inst
.instruction
|= inst
.operands
[0].imm
;
9535 /* THUMB CPY instruction (argument parse). */
9540 if (inst
.size_req
== 4)
9542 inst
.instruction
= THUMB_OP32 (T_MNEM_mov
);
9543 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9544 inst
.instruction
|= inst
.operands
[1].reg
;
9548 inst
.instruction
|= (inst
.operands
[0].reg
& 0x8) << 4;
9549 inst
.instruction
|= (inst
.operands
[0].reg
& 0x7);
9550 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9557 set_it_insn_type (OUTSIDE_IT_INSN
);
9558 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
9559 inst
.instruction
|= inst
.operands
[0].reg
;
9560 inst
.reloc
.pc_rel
= 1;
9561 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH7
;
9567 inst
.instruction
|= inst
.operands
[0].imm
;
9573 unsigned Rd
, Rn
, Rm
;
9575 Rd
= inst
.operands
[0].reg
;
9576 Rn
= (inst
.operands
[1].present
9577 ? inst
.operands
[1].reg
: Rd
);
9578 Rm
= inst
.operands
[2].reg
;
9580 reject_bad_reg (Rd
);
9581 reject_bad_reg (Rn
);
9582 reject_bad_reg (Rm
);
9584 inst
.instruction
|= Rd
<< 8;
9585 inst
.instruction
|= Rn
<< 16;
9586 inst
.instruction
|= Rm
;
9592 if (unified_syntax
&& inst
.size_req
== 4)
9593 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9595 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9601 unsigned int cond
= inst
.operands
[0].imm
;
9603 set_it_insn_type (IT_INSN
);
9604 now_it
.mask
= (inst
.instruction
& 0xf) | 0x10;
9607 /* If the condition is a negative condition, invert the mask. */
9608 if ((cond
& 0x1) == 0x0)
9610 unsigned int mask
= inst
.instruction
& 0x000f;
9612 if ((mask
& 0x7) == 0)
9613 /* no conversion needed */;
9614 else if ((mask
& 0x3) == 0)
9616 else if ((mask
& 0x1) == 0)
9621 inst
.instruction
&= 0xfff0;
9622 inst
.instruction
|= mask
;
9625 inst
.instruction
|= cond
<< 4;
9628 /* Helper function used for both push/pop and ldm/stm. */
9630 encode_thumb2_ldmstm (int base
, unsigned mask
, bfd_boolean writeback
)
9634 load
= (inst
.instruction
& (1 << 20)) != 0;
9636 if (mask
& (1 << 13))
9637 inst
.error
= _("SP not allowed in register list");
9640 if (mask
& (1 << 15))
9642 if (mask
& (1 << 14))
9643 inst
.error
= _("LR and PC should not both be in register list");
9645 set_it_insn_type_last ();
9648 if ((mask
& (1 << base
)) != 0
9650 as_warn (_("base register should not be in register list "
9651 "when written back"));
9655 if (mask
& (1 << 15))
9656 inst
.error
= _("PC not allowed in register list");
9658 if (mask
& (1 << base
))
9659 as_warn (_("value stored for r%d is UNPREDICTABLE"), base
);
9662 if ((mask
& (mask
- 1)) == 0)
9664 /* Single register transfers implemented as str/ldr. */
9667 if (inst
.instruction
& (1 << 23))
9668 inst
.instruction
= 0x00000b04; /* ia! -> [base], #4 */
9670 inst
.instruction
= 0x00000d04; /* db! -> [base, #-4]! */
9674 if (inst
.instruction
& (1 << 23))
9675 inst
.instruction
= 0x00800000; /* ia -> [base] */
9677 inst
.instruction
= 0x00000c04; /* db -> [base, #-4] */
9680 inst
.instruction
|= 0xf8400000;
9682 inst
.instruction
|= 0x00100000;
9684 mask
= ffs (mask
) - 1;
9688 inst
.instruction
|= WRITE_BACK
;
9690 inst
.instruction
|= mask
;
9691 inst
.instruction
|= base
<< 16;
9697 /* This really doesn't seem worth it. */
9698 constraint (inst
.reloc
.type
!= BFD_RELOC_UNUSED
,
9699 _("expression too complex"));
9700 constraint (inst
.operands
[1].writeback
,
9701 _("Thumb load/store multiple does not support {reglist}^"));
9709 /* See if we can use a 16-bit instruction. */
9710 if (inst
.instruction
< 0xffff /* not ldmdb/stmdb */
9711 && inst
.size_req
!= 4
9712 && !(inst
.operands
[1].imm
& ~0xff))
9714 mask
= 1 << inst
.operands
[0].reg
;
9716 if (inst
.operands
[0].reg
<= 7
9717 && (inst
.instruction
== T_MNEM_stmia
9718 ? inst
.operands
[0].writeback
9719 : (inst
.operands
[0].writeback
9720 == !(inst
.operands
[1].imm
& mask
))))
9722 if (inst
.instruction
== T_MNEM_stmia
9723 && (inst
.operands
[1].imm
& mask
)
9724 && (inst
.operands
[1].imm
& (mask
- 1)))
9725 as_warn (_("value stored for r%d is UNPREDICTABLE"),
9726 inst
.operands
[0].reg
);
9728 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9729 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9730 inst
.instruction
|= inst
.operands
[1].imm
;
9733 else if (inst
.operands
[0] .reg
== REG_SP
9734 && inst
.operands
[0].writeback
)
9736 inst
.instruction
= THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
9737 ? T_MNEM_push
: T_MNEM_pop
);
9738 inst
.instruction
|= inst
.operands
[1].imm
;
9745 if (inst
.instruction
< 0xffff)
9746 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9748 encode_thumb2_ldmstm (inst
.operands
[0].reg
, inst
.operands
[1].imm
,
9749 inst
.operands
[0].writeback
);
9754 constraint (inst
.operands
[0].reg
> 7
9755 || (inst
.operands
[1].imm
& ~0xff), BAD_HIREG
);
9756 constraint (inst
.instruction
!= T_MNEM_ldmia
9757 && inst
.instruction
!= T_MNEM_stmia
,
9758 _("Thumb-2 instruction only valid in unified syntax"));
9759 if (inst
.instruction
== T_MNEM_stmia
)
9761 if (!inst
.operands
[0].writeback
)
9762 as_warn (_("this instruction will write back the base register"));
9763 if ((inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
))
9764 && (inst
.operands
[1].imm
& ((1 << inst
.operands
[0].reg
) - 1)))
9765 as_warn (_("value stored for r%d is UNPREDICTABLE"),
9766 inst
.operands
[0].reg
);
9770 if (!inst
.operands
[0].writeback
9771 && !(inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
9772 as_warn (_("this instruction will write back the base register"));
9773 else if (inst
.operands
[0].writeback
9774 && (inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
9775 as_warn (_("this instruction will not write back the base register"));
9778 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9779 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9780 inst
.instruction
|= inst
.operands
[1].imm
;
9787 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
9788 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
9789 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
9790 || inst
.operands
[1].negative
,
9793 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9794 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9795 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
9801 if (!inst
.operands
[1].present
)
9803 constraint (inst
.operands
[0].reg
== REG_LR
,
9804 _("r14 not allowed as first register "
9805 "when second register is omitted"));
9806 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
9808 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
,
9811 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9812 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
9813 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9819 unsigned long opcode
;
9822 if (inst
.operands
[0].isreg
9823 && !inst
.operands
[0].preind
9824 && inst
.operands
[0].reg
== REG_PC
)
9825 set_it_insn_type_last ();
9827 opcode
= inst
.instruction
;
9830 if (!inst
.operands
[1].isreg
)
9832 if (opcode
<= 0xffff)
9833 inst
.instruction
= THUMB_OP32 (opcode
);
9834 if (move_or_literal_pool (0, /*thumb_p=*/TRUE
, /*mode_3=*/FALSE
))
9837 if (inst
.operands
[1].isreg
9838 && !inst
.operands
[1].writeback
9839 && !inst
.operands
[1].shifted
&& !inst
.operands
[1].postind
9840 && !inst
.operands
[1].negative
&& inst
.operands
[0].reg
<= 7
9842 && inst
.size_req
!= 4)
9844 /* Insn may have a 16-bit form. */
9845 Rn
= inst
.operands
[1].reg
;
9846 if (inst
.operands
[1].immisreg
)
9848 inst
.instruction
= THUMB_OP16 (opcode
);
9850 if (Rn
<= 7 && inst
.operands
[1].imm
<= 7)
9853 else if ((Rn
<= 7 && opcode
!= T_MNEM_ldrsh
9854 && opcode
!= T_MNEM_ldrsb
)
9855 || ((Rn
== REG_PC
|| Rn
== REG_SP
) && opcode
== T_MNEM_ldr
)
9856 || (Rn
== REG_SP
&& opcode
== T_MNEM_str
))
9863 if (inst
.reloc
.pc_rel
)
9864 opcode
= T_MNEM_ldr_pc2
;
9866 opcode
= T_MNEM_ldr_pc
;
9870 if (opcode
== T_MNEM_ldr
)
9871 opcode
= T_MNEM_ldr_sp
;
9873 opcode
= T_MNEM_str_sp
;
9875 inst
.instruction
= inst
.operands
[0].reg
<< 8;
9879 inst
.instruction
= inst
.operands
[0].reg
;
9880 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9882 inst
.instruction
|= THUMB_OP16 (opcode
);
9883 if (inst
.size_req
== 2)
9884 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
9886 inst
.relax
= opcode
;
9890 /* Definitely a 32-bit variant. */
9891 inst
.instruction
= THUMB_OP32 (opcode
);
9892 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9893 encode_thumb32_addr_mode (1, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
9897 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
9899 if (inst
.instruction
== T_MNEM_ldrsh
|| inst
.instruction
== T_MNEM_ldrsb
)
9901 /* Only [Rn,Rm] is acceptable. */
9902 constraint (inst
.operands
[1].reg
> 7 || inst
.operands
[1].imm
> 7, BAD_HIREG
);
9903 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].immisreg
9904 || inst
.operands
[1].postind
|| inst
.operands
[1].shifted
9905 || inst
.operands
[1].negative
,
9906 _("Thumb does not support this addressing mode"));
9907 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9911 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9912 if (!inst
.operands
[1].isreg
)
9913 if (move_or_literal_pool (0, /*thumb_p=*/TRUE
, /*mode_3=*/FALSE
))
9916 constraint (!inst
.operands
[1].preind
9917 || inst
.operands
[1].shifted
9918 || inst
.operands
[1].writeback
,
9919 _("Thumb does not support this addressing mode"));
9920 if (inst
.operands
[1].reg
== REG_PC
|| inst
.operands
[1].reg
== REG_SP
)
9922 constraint (inst
.instruction
& 0x0600,
9923 _("byte or halfword not valid for base register"));
9924 constraint (inst
.operands
[1].reg
== REG_PC
9925 && !(inst
.instruction
& THUMB_LOAD_BIT
),
9926 _("r15 based store not allowed"));
9927 constraint (inst
.operands
[1].immisreg
,
9928 _("invalid base register for register offset"));
9930 if (inst
.operands
[1].reg
== REG_PC
)
9931 inst
.instruction
= T_OPCODE_LDR_PC
;
9932 else if (inst
.instruction
& THUMB_LOAD_BIT
)
9933 inst
.instruction
= T_OPCODE_LDR_SP
;
9935 inst
.instruction
= T_OPCODE_STR_SP
;
9937 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9938 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
9942 constraint (inst
.operands
[1].reg
> 7, BAD_HIREG
);
9943 if (!inst
.operands
[1].immisreg
)
9945 /* Immediate offset. */
9946 inst
.instruction
|= inst
.operands
[0].reg
;
9947 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9948 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
9952 /* Register offset. */
9953 constraint (inst
.operands
[1].imm
> 7, BAD_HIREG
);
9954 constraint (inst
.operands
[1].negative
,
9955 _("Thumb does not support this addressing mode"));
9958 switch (inst
.instruction
)
9960 case T_OPCODE_STR_IW
: inst
.instruction
= T_OPCODE_STR_RW
; break;
9961 case T_OPCODE_STR_IH
: inst
.instruction
= T_OPCODE_STR_RH
; break;
9962 case T_OPCODE_STR_IB
: inst
.instruction
= T_OPCODE_STR_RB
; break;
9963 case T_OPCODE_LDR_IW
: inst
.instruction
= T_OPCODE_LDR_RW
; break;
9964 case T_OPCODE_LDR_IH
: inst
.instruction
= T_OPCODE_LDR_RH
; break;
9965 case T_OPCODE_LDR_IB
: inst
.instruction
= T_OPCODE_LDR_RB
; break;
9966 case 0x5600 /* ldrsb */:
9967 case 0x5e00 /* ldrsh */: break;
9971 inst
.instruction
|= inst
.operands
[0].reg
;
9972 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9973 inst
.instruction
|= inst
.operands
[1].imm
<< 6;
static void
do_t_ldstd (void)
{
  if (!inst.operands[1].present)
    {
      inst.operands[1].reg = inst.operands[0].reg + 1;
      constraint (inst.operands[0].reg == REG_LR,
                  _("r14 not allowed here"));
    }
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 8;
  encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
}

static void
do_t_ldstt (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
}
static void
do_t_mla (void)
{
  unsigned Rd, Rn, Rm, Ra;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;
  Ra = inst.operands[3].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);
  reject_bad_reg (Ra);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
  inst.instruction |= Ra << 12;
}

static void
do_t_mlal (void)
{
  unsigned RdLo, RdHi, Rn, Rm;

  RdLo = inst.operands[0].reg;
  RdHi = inst.operands[1].reg;
  Rn = inst.operands[2].reg;
  Rm = inst.operands[3].reg;

  reject_bad_reg (RdLo);
  reject_bad_reg (RdHi);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= RdLo << 12;
  inst.instruction |= RdHi << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
}
10040 do_t_mov_cmp (void)
10044 Rn
= inst
.operands
[0].reg
;
10045 Rm
= inst
.operands
[1].reg
;
10048 set_it_insn_type_last ();
10050 if (unified_syntax
)
10052 int r0off
= (inst
.instruction
== T_MNEM_mov
10053 || inst
.instruction
== T_MNEM_movs
) ? 8 : 16;
10054 unsigned long opcode
;
10055 bfd_boolean narrow
;
10056 bfd_boolean low_regs
;
10058 low_regs
= (Rn
<= 7 && Rm
<= 7);
10059 opcode
= inst
.instruction
;
10060 if (in_it_block ())
10061 narrow
= opcode
!= T_MNEM_movs
;
10063 narrow
= opcode
!= T_MNEM_movs
|| low_regs
;
10064 if (inst
.size_req
== 4
10065 || inst
.operands
[1].shifted
)
10068 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
10069 if (opcode
== T_MNEM_movs
&& inst
.operands
[1].isreg
10070 && !inst
.operands
[1].shifted
10074 inst
.instruction
= T2_SUBS_PC_LR
;
10078 if (opcode
== T_MNEM_cmp
)
10080 constraint (Rn
== REG_PC
, BAD_PC
);
10083 /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
10085 warn_deprecated_sp (Rm
);
10086 /* R15 was documented as a valid choice for Rm in ARMv6,
10087 but as UNPREDICTABLE in ARMv7. ARM's proprietary
10088 tools reject R15, so we do too. */
10089 constraint (Rm
== REG_PC
, BAD_PC
);
10092 reject_bad_reg (Rm
);
10094 else if (opcode
== T_MNEM_mov
10095 || opcode
== T_MNEM_movs
)
10097 if (inst
.operands
[1].isreg
)
10099 if (opcode
== T_MNEM_movs
)
10101 reject_bad_reg (Rn
);
10102 reject_bad_reg (Rm
);
10104 else if ((Rn
== REG_SP
|| Rn
== REG_PC
)
10105 && (Rm
== REG_SP
|| Rm
== REG_PC
))
10106 reject_bad_reg (Rm
);
10109 reject_bad_reg (Rn
);
10112 if (!inst
.operands
[1].isreg
)
10114 /* Immediate operand. */
10115 if (!in_it_block () && opcode
== T_MNEM_mov
)
10117 if (low_regs
&& narrow
)
10119 inst
.instruction
= THUMB_OP16 (opcode
);
10120 inst
.instruction
|= Rn
<< 8;
10121 if (inst
.size_req
== 2)
10122 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_IMM
;
10124 inst
.relax
= opcode
;
10128 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10129 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
10130 inst
.instruction
|= Rn
<< r0off
;
10131 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
10134 else if (inst
.operands
[1].shifted
&& inst
.operands
[1].immisreg
10135 && (inst
.instruction
== T_MNEM_mov
10136 || inst
.instruction
== T_MNEM_movs
))
10138 /* Register shifts are encoded as separate shift instructions. */
10139 bfd_boolean flags
= (inst
.instruction
== T_MNEM_movs
);
10141 if (in_it_block ())
10146 if (inst
.size_req
== 4)
10149 if (!low_regs
|| inst
.operands
[1].imm
> 7)
10155 switch (inst
.operands
[1].shift_kind
)
10158 opcode
= narrow
? T_OPCODE_LSL_R
: THUMB_OP32 (T_MNEM_lsl
);
10161 opcode
= narrow
? T_OPCODE_ASR_R
: THUMB_OP32 (T_MNEM_asr
);
10164 opcode
= narrow
? T_OPCODE_LSR_R
: THUMB_OP32 (T_MNEM_lsr
);
10167 opcode
= narrow
? T_OPCODE_ROR_R
: THUMB_OP32 (T_MNEM_ror
);
10173 inst
.instruction
= opcode
;
10176 inst
.instruction
|= Rn
;
10177 inst
.instruction
|= inst
.operands
[1].imm
<< 3;
10182 inst
.instruction
|= CONDS_BIT
;
10184 inst
.instruction
|= Rn
<< 8;
10185 inst
.instruction
|= Rm
<< 16;
10186 inst
.instruction
|= inst
.operands
[1].imm
;
10191 /* Some mov with immediate shift have narrow variants.
10192 Register shifts are handled above. */
10193 if (low_regs
&& inst
.operands
[1].shifted
10194 && (inst
.instruction
== T_MNEM_mov
10195 || inst
.instruction
== T_MNEM_movs
))
10197 if (in_it_block ())
10198 narrow
= (inst
.instruction
== T_MNEM_mov
);
10200 narrow
= (inst
.instruction
== T_MNEM_movs
);
10205 switch (inst
.operands
[1].shift_kind
)
10207 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
10208 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
10209 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
10210 default: narrow
= FALSE
; break;
10216 inst
.instruction
|= Rn
;
10217 inst
.instruction
|= Rm
<< 3;
10218 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
10222 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10223 inst
.instruction
|= Rn
<< r0off
;
10224 encode_thumb32_shifted_operand (1);
10228 switch (inst
.instruction
)
10231 inst
.instruction
= T_OPCODE_MOV_HR
;
10232 inst
.instruction
|= (Rn
& 0x8) << 4;
10233 inst
.instruction
|= (Rn
& 0x7);
10234 inst
.instruction
|= Rm
<< 3;
10238 /* We know we have low registers at this point.
10239 Generate ADD Rd, Rs, #0. */
10240 inst
.instruction
= T_OPCODE_ADD_I3
;
10241 inst
.instruction
|= Rn
;
10242 inst
.instruction
|= Rm
<< 3;
10248 inst
.instruction
= T_OPCODE_CMP_LR
;
10249 inst
.instruction
|= Rn
;
10250 inst
.instruction
|= Rm
<< 3;
10254 inst
.instruction
= T_OPCODE_CMP_HR
;
10255 inst
.instruction
|= (Rn
& 0x8) << 4;
10256 inst
.instruction
|= (Rn
& 0x7);
10257 inst
.instruction
|= Rm
<< 3;
10264 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10265 if (inst
.operands
[1].isreg
)
10267 if (Rn
< 8 && Rm
< 8)
10269 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
10270 since a MOV instruction produces unpredictable results. */
10271 if (inst
.instruction
== T_OPCODE_MOV_I8
)
10272 inst
.instruction
= T_OPCODE_ADD_I3
;
10274 inst
.instruction
= T_OPCODE_CMP_LR
;
10276 inst
.instruction
|= Rn
;
10277 inst
.instruction
|= Rm
<< 3;
10281 if (inst
.instruction
== T_OPCODE_MOV_I8
)
10282 inst
.instruction
= T_OPCODE_MOV_HR
;
10284 inst
.instruction
= T_OPCODE_CMP_HR
;
10290 constraint (Rn
> 7,
10291 _("only lo regs allowed with immediate"));
10292 inst
.instruction
|= Rn
<< 8;
10293 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_IMM
;
static void
do_t_mov16 (void)
{
  unsigned Rd;
  bfd_vma imm;
  bfd_boolean top;

  top = (inst.instruction & 0x00800000) != 0;
  if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
    {
      constraint (top, _(":lower16: not allowed this instruction"));
      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
    }
  else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
    {
      constraint (!top, _(":upper16: not allowed this instruction"));
      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
    }

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  inst.instruction |= Rd << 8;
  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      imm = inst.reloc.exp.X_add_number;
      inst.instruction |= (imm & 0xf000) << 4;
      inst.instruction |= (imm & 0x0800) << 15;
      inst.instruction |= (imm & 0x0700) << 4;
      inst.instruction |= (imm & 0x00ff);
    }
}
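/* Added commentary (not part of the original source): the four ORs above
   scatter a 16-bit constant into the T32 MOVW/MOVT fields imm4:i:imm3:imm8.
   As a hypothetical example, a value of 0x1234 would contribute imm4=1
   (bits 16-19), i=0 (bit 26), imm3=2 (bits 12-14) and imm8=0x34 (bits 0-7).  */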
10331 do_t_mvn_tst (void)
10335 Rn
= inst
.operands
[0].reg
;
10336 Rm
= inst
.operands
[1].reg
;
10338 if (inst
.instruction
== T_MNEM_cmp
10339 || inst
.instruction
== T_MNEM_cmn
)
10340 constraint (Rn
== REG_PC
, BAD_PC
);
10342 reject_bad_reg (Rn
);
10343 reject_bad_reg (Rm
);
10345 if (unified_syntax
)
10347 int r0off
= (inst
.instruction
== T_MNEM_mvn
10348 || inst
.instruction
== T_MNEM_mvns
) ? 8 : 16;
10349 bfd_boolean narrow
;
10351 if (inst
.size_req
== 4
10352 || inst
.instruction
> 0xffff
10353 || inst
.operands
[1].shifted
10354 || Rn
> 7 || Rm
> 7)
10356 else if (inst
.instruction
== T_MNEM_cmn
)
10358 else if (THUMB_SETS_FLAGS (inst
.instruction
))
10359 narrow
= !in_it_block ();
10361 narrow
= in_it_block ();
10363 if (!inst
.operands
[1].isreg
)
10365 /* For an immediate, we always generate a 32-bit opcode;
10366 section relaxation will shrink it later if possible. */
10367 if (inst
.instruction
< 0xffff)
10368 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10369 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
10370 inst
.instruction
|= Rn
<< r0off
;
10371 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
10375 /* See if we can do this with a 16-bit instruction. */
10378 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10379 inst
.instruction
|= Rn
;
10380 inst
.instruction
|= Rm
<< 3;
10384 constraint (inst
.operands
[1].shifted
10385 && inst
.operands
[1].immisreg
,
10386 _("shift must be constant"));
10387 if (inst
.instruction
< 0xffff)
10388 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10389 inst
.instruction
|= Rn
<< r0off
;
10390 encode_thumb32_shifted_operand (1);
10396 constraint (inst
.instruction
> 0xffff
10397 || inst
.instruction
== T_MNEM_mvns
, BAD_THUMB32
);
10398 constraint (!inst
.operands
[1].isreg
|| inst
.operands
[1].shifted
,
10399 _("unshifted register required"));
10400 constraint (Rn
> 7 || Rm
> 7,
10403 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10404 inst
.instruction
|= Rn
;
10405 inst
.instruction
|= Rm
<< 3;
static void
do_t_mrs (void)
{
  unsigned Rd;
  int flags;

  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
  if (flags == 0)
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m),
                  _("selected processor does not support "
                    "requested special purpose register"));
    }
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
                  _("selected processor does not support "
                    "requested special purpose register"));
      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
      constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
                  _("'CPSR' or 'SPSR' expected"));
    }

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  inst.instruction |= Rd << 8;
  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= inst.operands[1].imm & 0xff;
}

static void
do_t_msr (void)
{
  int flags;
  unsigned Rn;

  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  constraint (!inst.operands[1].isreg,
              _("Thumb encoding does not support an immediate here"));
  flags = inst.operands[0].imm;
  if (flags & ~0xff)
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
                  _("selected processor does not support "
                    "requested special purpose register"));
    }
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m),
                  _("selected processor does not support "
                    "requested special purpose register"));
    }

  Rn = inst.operands[1].reg;
  reject_bad_reg (Rn);

  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & ~SPSR_BIT) >> 8;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= Rn << 16;
}
10481 bfd_boolean narrow
;
10482 unsigned Rd
, Rn
, Rm
;
10484 if (!inst
.operands
[2].present
)
10485 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
10487 Rd
= inst
.operands
[0].reg
;
10488 Rn
= inst
.operands
[1].reg
;
10489 Rm
= inst
.operands
[2].reg
;
10491 if (unified_syntax
)
10493 if (inst
.size_req
== 4
10499 else if (inst
.instruction
== T_MNEM_muls
)
10500 narrow
= !in_it_block ();
10502 narrow
= in_it_block ();
10506 constraint (inst
.instruction
== T_MNEM_muls
, BAD_THUMB32
);
10507 constraint (Rn
> 7 || Rm
> 7,
10514 /* 16-bit MULS/Conditional MUL. */
10515 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10516 inst
.instruction
|= Rd
;
10519 inst
.instruction
|= Rm
<< 3;
10521 inst
.instruction
|= Rn
<< 3;
10523 constraint (1, _("dest must overlap one source register"));
10527 constraint (inst
.instruction
!= T_MNEM_mul
,
10528 _("Thumb-2 MUL must not set flags"));
10530 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10531 inst
.instruction
|= Rd
<< 8;
10532 inst
.instruction
|= Rn
<< 16;
10533 inst
.instruction
|= Rm
<< 0;
10535 reject_bad_reg (Rd
);
10536 reject_bad_reg (Rn
);
10537 reject_bad_reg (Rm
);
static void
do_t_mull (void)
{
  unsigned RdLo, RdHi, Rn, Rm;

  RdLo = inst.operands[0].reg;
  RdHi = inst.operands[1].reg;
  Rn = inst.operands[2].reg;
  Rm = inst.operands[3].reg;

  reject_bad_reg (RdLo);
  reject_bad_reg (RdHi);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= RdLo << 12;
  inst.instruction |= RdHi << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;

  if (RdLo == RdHi)
    as_tsktsk (_("rdhi and rdlo must be different"));
}
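/* Added commentary: an equal RdHi/RdLo pair is reported with as_tsktsk,
   i.e. as a warning rather than a hard constraint failure, so assembly
   still succeeds for such operands.  */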
static void
do_t_nop (void)
{
  set_it_insn_type (NEUTRAL_IT_INSN);

  if (unified_syntax)
    {
      if (inst.size_req == 4 || inst.operands[0].imm > 15)
        {
          inst.instruction = THUMB_OP32 (inst.instruction);
          inst.instruction |= inst.operands[0].imm;
        }
      else
        {
          /* PR9722: Check for Thumb2 availability before
             generating a thumb2 nop instruction.  */
          if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2))
            {
              inst.instruction = THUMB_OP16 (inst.instruction);
              inst.instruction |= inst.operands[0].imm << 4;
            }
          else
            inst.instruction = 0x46c0;
        }
    }
  else
    {
      constraint (inst.operands[0].present,
                  _("Thumb does not support NOP with hints"));
      inst.instruction = 0x46c0;
    }
}
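/* Added commentary: 0x46c0 is the traditional Thumb-1 "mov r8, r8" idiom,
   used here as the fallback NOP when the selected core does not provide the
   Thumb-2 hint instructions (see the PR9722 note above).  */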
10601 if (unified_syntax
)
10603 bfd_boolean narrow
;
10605 if (THUMB_SETS_FLAGS (inst
.instruction
))
10606 narrow
= !in_it_block ();
10608 narrow
= in_it_block ();
10609 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
10611 if (inst
.size_req
== 4)
10616 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10617 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
10618 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10622 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10623 inst
.instruction
|= inst
.operands
[0].reg
;
10624 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
10629 constraint (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7,
10631 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
10633 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10634 inst
.instruction
|= inst
.operands
[0].reg
;
10635 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
static void
do_t_orn (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;

  reject_bad_reg (Rd);
  /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN.  */
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;

  if (!inst.operands[2].isreg)
    {
      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
    }
  else
    {
      unsigned Rm;

      Rm = inst.operands[2].reg;
      reject_bad_reg (Rm);

      constraint (inst.operands[2].shifted
                  && inst.operands[2].immisreg,
                  _("shift must be constant"));
      encode_thumb32_shifted_operand (2);
    }
}
static void
do_t_pkhbt (void)
{
  unsigned Rd, Rn, Rm;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
  if (inst.operands[3].present)
    {
      unsigned int val = inst.reloc.exp.X_add_number;
      constraint (inst.reloc.exp.X_op != O_constant,
                  _("expression too complex"));
      inst.instruction |= (val & 0x1c) << 10;
      inst.instruction |= (val & 0x03) << 6;
    }
}

static void
do_t_pkhtb (void)
{
  unsigned Rtmp;

  if (!inst.operands[3].present)
    {
      inst.instruction &= ~0x00000020;

      /* PR 10168.  Swap the Rm and Rn registers.  */
      Rtmp = inst.operands[1].reg;
      inst.operands[1].reg = inst.operands[2].reg;
      inst.operands[2].reg = Rtmp;
    }
  do_t_pkhbt ();
}

static void
do_t_pld (void)
{
  if (inst.operands[0].immisreg)
    reject_bad_reg (inst.operands[0].imm);

  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
}
static void
do_t_push_pop (void)
{
  unsigned mask;

  constraint (inst.operands[0].writeback,
              _("push/pop do not support {reglist}^"));
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
              _("expression too complex"));

  mask = inst.operands[0].imm;
  if ((mask & ~0xff) == 0)
    inst.instruction = THUMB_OP16 (inst.instruction) | mask;
  else if ((inst.instruction == T_MNEM_push
            && (mask & ~0xff) == 1 << REG_LR)
           || (inst.instruction == T_MNEM_pop
               && (mask & ~0xff) == 1 << REG_PC))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= THUMB_PP_PC_LR;
      inst.instruction |= mask & 0xff;
    }
  else if (unified_syntax)
    {
      inst.instruction = THUMB_OP32 (inst.instruction);
      encode_thumb2_ldmstm (13, mask, TRUE);
    }
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }
}
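/* Added illustration (hypothetical operands): "push {r0-r3, lr}" gives
   mask == 0x400f, so (mask & ~0xff) == 1 << REG_LR and the narrow 16-bit
   encoding with THUMB_PP_PC_LR is chosen; a list such as {r0, r8} cannot use
   either 16-bit form and falls through to encode_thumb2_ldmstm.  */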
static void
do_t_rbit (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rm << 16;
  inst.instruction |= Rm;
}

static void
do_t_rev (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  if (Rd <= 7 && Rm <= 7
      && inst.size_req != 4)
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm << 16;
      inst.instruction |= Rm;
    }
  else
    inst.error = BAD_HIREG;
}

static void
do_t_rrx (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rm;
}
10824 Rd
= inst
.operands
[0].reg
;
10825 Rs
= (inst
.operands
[1].present
10826 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
10827 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
10829 reject_bad_reg (Rd
);
10830 reject_bad_reg (Rs
);
10831 if (inst
.operands
[2].isreg
)
10832 reject_bad_reg (inst
.operands
[2].reg
);
10834 inst
.instruction
|= Rd
<< 8;
10835 inst
.instruction
|= Rs
<< 16;
10836 if (!inst
.operands
[2].isreg
)
10838 bfd_boolean narrow
;
10840 if ((inst
.instruction
& 0x00100000) != 0)
10841 narrow
= !in_it_block ();
10843 narrow
= in_it_block ();
10845 if (Rd
> 7 || Rs
> 7)
10848 if (inst
.size_req
== 4 || !unified_syntax
)
10851 if (inst
.reloc
.exp
.X_op
!= O_constant
10852 || inst
.reloc
.exp
.X_add_number
!= 0)
10855 /* Turn rsb #0 into 16-bit neg. We should probably do this via
10856 relaxation, but it doesn't seem worth the hassle. */
10859 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
10860 inst
.instruction
= THUMB_OP16 (T_MNEM_negs
);
10861 inst
.instruction
|= Rs
<< 3;
10862 inst
.instruction
|= Rd
;
10866 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
10867 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
10871 encode_thumb32_shifted_operand (2);
10877 set_it_insn_type (OUTSIDE_IT_INSN
);
10878 if (inst
.operands
[0].imm
)
10879 inst
.instruction
|= 0x8;
10885 if (!inst
.operands
[1].present
)
10886 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
10888 if (unified_syntax
)
10890 bfd_boolean narrow
;
10893 switch (inst
.instruction
)
10896 case T_MNEM_asrs
: shift_kind
= SHIFT_ASR
; break;
10898 case T_MNEM_lsls
: shift_kind
= SHIFT_LSL
; break;
10900 case T_MNEM_lsrs
: shift_kind
= SHIFT_LSR
; break;
10902 case T_MNEM_rors
: shift_kind
= SHIFT_ROR
; break;
10906 if (THUMB_SETS_FLAGS (inst
.instruction
))
10907 narrow
= !in_it_block ();
10909 narrow
= in_it_block ();
10910 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
10912 if (!inst
.operands
[2].isreg
&& shift_kind
== SHIFT_ROR
)
10914 if (inst
.operands
[2].isreg
10915 && (inst
.operands
[1].reg
!= inst
.operands
[0].reg
10916 || inst
.operands
[2].reg
> 7))
10918 if (inst
.size_req
== 4)
10921 reject_bad_reg (inst
.operands
[0].reg
);
10922 reject_bad_reg (inst
.operands
[1].reg
);
10926 if (inst
.operands
[2].isreg
)
10928 reject_bad_reg (inst
.operands
[2].reg
);
10929 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10930 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
10931 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10932 inst
.instruction
|= inst
.operands
[2].reg
;
10936 inst
.operands
[1].shifted
= 1;
10937 inst
.operands
[1].shift_kind
= shift_kind
;
10938 inst
.instruction
= THUMB_OP32 (THUMB_SETS_FLAGS (inst
.instruction
)
10939 ? T_MNEM_movs
: T_MNEM_mov
);
10940 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
10941 encode_thumb32_shifted_operand (1);
10942 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
10943 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
10948 if (inst
.operands
[2].isreg
)
10950 switch (shift_kind
)
10952 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_R
; break;
10953 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_R
; break;
10954 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_R
; break;
10955 case SHIFT_ROR
: inst
.instruction
= T_OPCODE_ROR_R
; break;
10959 inst
.instruction
|= inst
.operands
[0].reg
;
10960 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
10964 switch (shift_kind
)
10966 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
10967 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
10968 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
10971 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
10972 inst
.instruction
|= inst
.operands
[0].reg
;
10973 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
10979 constraint (inst
.operands
[0].reg
> 7
10980 || inst
.operands
[1].reg
> 7, BAD_HIREG
);
10981 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
10983 if (inst
.operands
[2].isreg
) /* Rd, {Rs,} Rn */
10985 constraint (inst
.operands
[2].reg
> 7, BAD_HIREG
);
10986 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
10987 _("source1 and dest must be same register"));
10989 switch (inst
.instruction
)
10991 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_R
; break;
10992 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_R
; break;
10993 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_R
; break;
10994 case T_MNEM_ror
: inst
.instruction
= T_OPCODE_ROR_R
; break;
10998 inst
.instruction
|= inst
.operands
[0].reg
;
10999 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
11003 switch (inst
.instruction
)
11005 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_I
; break;
11006 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_I
; break;
11007 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_I
; break;
11008 case T_MNEM_ror
: inst
.error
= _("ror #imm not supported"); return;
11011 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
11012 inst
.instruction
|= inst
.operands
[0].reg
;
11013 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
static void
do_t_simd (void)
{
  unsigned Rd, Rn, Rm;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
}

static void
do_t_smc (void)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  constraint (inst.reloc.exp.X_op != O_constant,
              _("expression too complex"));
  inst.reloc.type = BFD_RELOC_UNUSED;
  inst.instruction |= (value & 0xf000) >> 12;
  inst.instruction |= (value & 0x0ff0);
  inst.instruction |= (value & 0x000f) << 16;
}
static void
do_t_ssat_usat (int bias)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm - bias;
  inst.instruction |= Rn << 16;

  if (inst.operands[3].present)
    {
      offsetT shift_amount = inst.reloc.exp.X_add_number;

      inst.reloc.type = BFD_RELOC_UNUSED;

      constraint (inst.reloc.exp.X_op != O_constant,
                  _("expression too complex"));

      if (shift_amount != 0)
        {
          constraint (shift_amount > 31,
                      _("shift expression is too large"));

          if (inst.operands[3].shift_kind == SHIFT_ASR)
            inst.instruction |= 0x00200000;  /* sh bit.  */

          inst.instruction |= (shift_amount & 0x1c) << 10;
          inst.instruction |= (shift_amount & 0x03) << 6;
        }
    }
}

static void
do_t_ssat (void)
{
  do_t_ssat_usat (1);
}

static void
do_t_ssat16 (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm - 1;
  inst.instruction |= Rn << 16;
}
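/* Added commentary: ssat and usat share do_t_ssat_usat; the BIAS argument
   (1 for ssat, 0 for usat, see the callers) reflects the fact that only the
   signed variant stores its saturation width as width-1 in the encoding.  */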
static void
do_t_strex (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
              || inst.operands[2].postind || inst.operands[2].writeback
              || inst.operands[2].immisreg || inst.operands[2].shifted
              || inst.operands[2].negative,
              BAD_ADDR_MODE);

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
}

static void
do_t_strexd (void)
{
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[1].reg + 1;

  constraint (inst.operands[0].reg == inst.operands[1].reg
              || inst.operands[0].reg == inst.operands[2].reg
              || inst.operands[0].reg == inst.operands[3].reg
              || inst.operands[1].reg == inst.operands[2].reg,
              BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 16;
}
static void
do_t_sxtah (void)
{
  unsigned Rd, Rn, Rm;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
  inst.instruction |= inst.operands[3].imm << 4;
}
11165 Rd
= inst
.operands
[0].reg
;
11166 Rm
= inst
.operands
[1].reg
;
11168 reject_bad_reg (Rd
);
11169 reject_bad_reg (Rm
);
11171 if (inst
.instruction
<= 0xffff
11172 && inst
.size_req
!= 4
11173 && Rd
<= 7 && Rm
<= 7
11174 && (!inst
.operands
[2].present
|| inst
.operands
[2].imm
== 0))
11176 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11177 inst
.instruction
|= Rd
;
11178 inst
.instruction
|= Rm
<< 3;
11180 else if (unified_syntax
)
11182 if (inst
.instruction
<= 0xffff)
11183 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11184 inst
.instruction
|= Rd
<< 8;
11185 inst
.instruction
|= Rm
;
11186 inst
.instruction
|= inst
.operands
[2].imm
<< 4;
11190 constraint (inst
.operands
[2].present
&& inst
.operands
[2].imm
!= 0,
11191 _("Thumb encoding does not support rotation"));
11192 constraint (1, BAD_HIREG
);
static void
do_t_swi (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SWI;
}

static void
do_t_tb (void)
{
  unsigned Rn, Rm;
  int half;

  half = (inst.instruction & 0x10) != 0;
  set_it_insn_type_last ();
  constraint (inst.operands[0].immisreg,
              _("instruction requires register index"));

  Rn = inst.operands[0].reg;
  Rm = inst.operands[0].imm;

  constraint (Rn == REG_SP, BAD_SP);
  reject_bad_reg (Rm);

  constraint (!half && inst.operands[0].shifted,
              _("instruction does not allow shifted index"));
  inst.instruction |= (Rn << 16) | Rm;
}

static void
do_t_usat (void)
{
  do_t_ssat_usat (0);
}

static void
do_t_usat16 (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm;
  inst.instruction |= Rn << 16;
}
/* Neon instruction encoder helpers.  */

/* Encodings for the different types for various Neon opcodes.  */

/* An "invalid" code for the following tables.  */
#define N_INV -1u

struct neon_tab_entry
{
  unsigned integer;
  unsigned float_or_poly;
  unsigned scalar_or_imm;
};

/* Map overloaded Neon opcodes to their respective encodings.  */
#define NEON_ENC_TAB \
  X(vabd,     0x0000700, 0x1200d00, N_INV), \
  X(vmax,     0x0000600, 0x0000f00, N_INV), \
  X(vmin,     0x0000610, 0x0200f00, N_INV), \
  X(vpadd,    0x0000b10, 0x1000d00, N_INV), \
  X(vpmax,    0x0000a00, 0x1000f00, N_INV), \
  X(vpmin,    0x0000a10, 0x1200f00, N_INV), \
  X(vadd,     0x0000800, 0x0000d00, N_INV), \
  X(vsub,     0x1000800, 0x0200d00, N_INV), \
  X(vceq,     0x1000810, 0x0000e00, 0x1b10100), \
  X(vcge,     0x0000310, 0x1000e00, 0x1b10080), \
  X(vcgt,     0x0000300, 0x1200e00, 0x1b10000), \
  /* Register variants of the following two instructions are encoded as
     vcge / vcgt with the operands reversed.  */ \
  X(vclt,     0x0000300, 0x1200e00, 0x1b10200), \
  X(vcle,     0x0000310, 0x1000e00, 0x1b10180), \
  X(vmla,     0x0000900, 0x0000d10, 0x0800040), \
  X(vmls,     0x1000900, 0x0200d10, 0x0800440), \
  X(vmul,     0x0000910, 0x1000d10, 0x0800840), \
  X(vmull,    0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
  X(vmlal,    0x0800800, N_INV,     0x0800240), \
  X(vmlsl,    0x0800a00, N_INV,     0x0800640), \
  X(vqdmlal,  0x0800900, N_INV,     0x0800340), \
  X(vqdmlsl,  0x0800b00, N_INV,     0x0800740), \
  X(vqdmull,  0x0800d00, N_INV,     0x0800b40), \
  X(vqdmulh,  0x0000b00, N_INV,     0x0800c40), \
  X(vqrdmulh, 0x1000b00, N_INV,     0x0800d40), \
  X(vshl,     0x0000400, N_INV,     0x0800510), \
  X(vqshl,    0x0000410, N_INV,     0x0800710), \
  X(vand,     0x0000110, N_INV,     0x0800030), \
  X(vbic,     0x0100110, N_INV,     0x0800030), \
  X(veor,     0x1000110, N_INV,     N_INV), \
  X(vorn,     0x0300110, N_INV,     0x0800010), \
  X(vorr,     0x0200110, N_INV,     0x0800010), \
  X(vmvn,     0x1b00580, N_INV,     0x0800030), \
  X(vshll,    0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
  X(vcvt,     0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
  X(vdup,     0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
  X(vld1,     0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
  X(vst1,     0x0000000, 0x0800000, N_INV), \
  X(vld2,     0x0200100, 0x0a00100, 0x0a00d00), \
  X(vst2,     0x0000100, 0x0800100, N_INV), \
  X(vld3,     0x0200200, 0x0a00200, 0x0a00e00), \
  X(vst3,     0x0000200, 0x0800200, N_INV), \
  X(vld4,     0x0200300, 0x0a00300, 0x0a00f00), \
  X(vst4,     0x0000300, 0x0800300, N_INV), \
  X(vmovn,    0x1b20200, N_INV,     N_INV), \
  X(vtrn,     0x1b20080, N_INV,     N_INV), \
  X(vqmovn,   0x1b20200, N_INV,     N_INV), \
  X(vqmovun,  0x1b20240, N_INV,     N_INV), \
  X(vnmul,    0xe200a40, 0xe200b40, N_INV), \
  X(vnmla,    0xe000a40, 0xe000b40, N_INV), \
  X(vnmls,    0xe100a40, 0xe100b40, N_INV), \
  X(vcmp,     0xeb40a40, 0xeb40b40, N_INV), \
  X(vcmpz,    0xeb50a40, 0xeb50b40, N_INV), \
  X(vcmpe,    0xeb40ac0, 0xeb40bc0, N_INV), \
  X(vcmpez,   0xeb50ac0, 0xeb50bc0, N_INV)

enum neon_opc
{
#define X(OPC,I,F,S) N_MNEM_##OPC
NEON_ENC_TAB
#undef X
};

static const struct neon_tab_entry neon_enc_tab[] =
{
#define X(OPC,I,F,S) { (I), (F), (S) }
NEON_ENC_TAB
#undef X
};

#define NEON_ENC_INTEGER(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_SINGLE(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
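/* Added illustration: NEON_ENC_TAB is expanded twice through the X() macro,
   once into the N_MNEM_* enumerators and once into neon_enc_tab[]; e.g. the
   vadd row yields N_MNEM_vadd and the entry { 0x0000800, 0x0000d00, N_INV }
   holding its integer, float/poly and scalar/immediate base encodings.  */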
11347 /* Define shapes for instruction operands. The following mnemonic characters
11348 are used in this table:
11350 F - VFP S<n> register
11351 D - Neon D<n> register
11352 Q - Neon Q<n> register
11356 L - D<n> register list
11358 This table is used to generate various data:
11359 - enumerations of the form NS_DDR to be used as arguments to
11361 - a table classifying shapes into single, double, quad, mixed.
11362 - a table used to drive neon_select_shape. */
11364 #define NEON_SHAPE_DEF \
11365 X(3, (D, D, D), DOUBLE), \
11366 X(3, (Q, Q, Q), QUAD), \
11367 X(3, (D, D, I), DOUBLE), \
11368 X(3, (Q, Q, I), QUAD), \
11369 X(3, (D, D, S), DOUBLE), \
11370 X(3, (Q, Q, S), QUAD), \
11371 X(2, (D, D), DOUBLE), \
11372 X(2, (Q, Q), QUAD), \
11373 X(2, (D, S), DOUBLE), \
11374 X(2, (Q, S), QUAD), \
11375 X(2, (D, R), DOUBLE), \
11376 X(2, (Q, R), QUAD), \
11377 X(2, (D, I), DOUBLE), \
11378 X(2, (Q, I), QUAD), \
11379 X(3, (D, L, D), DOUBLE), \
11380 X(2, (D, Q), MIXED), \
11381 X(2, (Q, D), MIXED), \
11382 X(3, (D, Q, I), MIXED), \
11383 X(3, (Q, D, I), MIXED), \
11384 X(3, (Q, D, D), MIXED), \
11385 X(3, (D, Q, Q), MIXED), \
11386 X(3, (Q, Q, D), MIXED), \
11387 X(3, (Q, D, S), MIXED), \
11388 X(3, (D, Q, S), MIXED), \
11389 X(4, (D, D, D, I), DOUBLE), \
11390 X(4, (Q, Q, Q, I), QUAD), \
11391 X(2, (F, F), SINGLE), \
11392 X(3, (F, F, F), SINGLE), \
11393 X(2, (F, I), SINGLE), \
11394 X(2, (F, D), MIXED), \
11395 X(2, (D, F), MIXED), \
11396 X(3, (F, F, I), MIXED), \
11397 X(4, (R, R, F, F), SINGLE), \
11398 X(4, (F, F, R, R), SINGLE), \
11399 X(3, (D, R, R), DOUBLE), \
11400 X(3, (R, R, D), DOUBLE), \
11401 X(2, (S, R), SINGLE), \
11402 X(2, (R, S), SINGLE), \
11403 X(2, (F, R), SINGLE), \
11404 X(2, (R, F), SINGLE)
11406 #define S2(A,B) NS_##A##B
11407 #define S3(A,B,C) NS_##A##B##C
11408 #define S4(A,B,C,D) NS_##A##B##C##D
11410 #define X(N, L, C) S##N L
11423 enum neon_shape_class
11431 #define X(N, L, C) SC_##C
11433 static enum neon_shape_class neon_shape_class
[] =
11451 /* Register widths of above. */
11452 static unsigned neon_shape_el_size
[] =
11463 struct neon_shape_info
11466 enum neon_shape_el el
[NEON_MAX_TYPE_ELS
];
11469 #define S2(A,B) { SE_##A, SE_##B }
11470 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
11471 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
11473 #define X(N, L, C) { N, S##N L }
11475 static struct neon_shape_info neon_shape_tab
[] =
11485 /* Bit masks used in type checking given instructions.
11486 'N_EQK' means the type must be the same as (or based on in some way) the key
11487 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
11488 set, various other bits can be set as well in order to modify the meaning of
11489 the type constraint. */
11491 enum neon_type_mask
11514 N_KEY
= 0x1000000, /* Key element (main type specifier). */
11515 N_EQK
= 0x2000000, /* Given operand has the same type & size as the key. */
11516 N_VFP
= 0x4000000, /* VFP mode: operand size must match register width. */
11517 N_DBL
= 0x0000001, /* If N_EQK, this operand is twice the size. */
11518 N_HLF
= 0x0000002, /* If N_EQK, this operand is half the size. */
11519 N_SGN
= 0x0000004, /* If N_EQK, this operand is forced to be signed. */
11520 N_UNS
= 0x0000008, /* If N_EQK, this operand is forced to be unsigned. */
11521 N_INT
= 0x0000010, /* If N_EQK, this operand is forced to be integer. */
11522 N_FLT
= 0x0000020, /* If N_EQK, this operand is forced to be float. */
11523 N_SIZ
= 0x0000040, /* If N_EQK, this operand is forced to be size-only. */
11525 N_MAX_NONSPECIAL
= N_F64
11528 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
11530 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
11531 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
11532 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
11533 #define N_SUF_32 (N_SU_32 | N_F32)
11534 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
11535 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F32)
11537 /* Pass this as the first type argument to neon_check_type to ignore types
11539 #define N_IGNORE_TYPE (N_KEY | N_EQK)
11541 /* Select a "shape" for the current instruction (describing register types or
11542 sizes) from a list of alternatives. Return NS_NULL if the current instruction
11543 doesn't fit. For non-polymorphic shapes, checking is usually done as a
11544 function of operand parsing, so this function doesn't need to be called.
11545 Shapes should be listed in order of decreasing length. */
11547 static enum neon_shape
11548 neon_select_shape (enum neon_shape shape
, ...)
11551 enum neon_shape first_shape
= shape
;
11553 /* Fix missing optional operands. FIXME: we don't know at this point how
11554 many arguments we should have, so this makes the assumption that we have
11555 > 1. This is true of all current Neon opcodes, I think, but may not be
11556 true in the future. */
11557 if (!inst
.operands
[1].present
)
11558 inst
.operands
[1] = inst
.operands
[0];
11560 va_start (ap
, shape
);
11562 for (; shape
!= NS_NULL
; shape
= va_arg (ap
, int))
11567 for (j
= 0; j
< neon_shape_tab
[shape
].els
; j
++)
11569 if (!inst
.operands
[j
].present
)
11575 switch (neon_shape_tab
[shape
].el
[j
])
11578 if (!(inst
.operands
[j
].isreg
11579 && inst
.operands
[j
].isvec
11580 && inst
.operands
[j
].issingle
11581 && !inst
.operands
[j
].isquad
))
11586 if (!(inst
.operands
[j
].isreg
11587 && inst
.operands
[j
].isvec
11588 && !inst
.operands
[j
].isquad
11589 && !inst
.operands
[j
].issingle
))
11594 if (!(inst
.operands
[j
].isreg
11595 && !inst
.operands
[j
].isvec
))
11600 if (!(inst
.operands
[j
].isreg
11601 && inst
.operands
[j
].isvec
11602 && inst
.operands
[j
].isquad
11603 && !inst
.operands
[j
].issingle
))
11608 if (!(!inst
.operands
[j
].isreg
11609 && !inst
.operands
[j
].isscalar
))
11614 if (!(!inst
.operands
[j
].isreg
11615 && inst
.operands
[j
].isscalar
))
11629 if (shape
== NS_NULL
&& first_shape
!= NS_NULL
)
11630 first_error (_("invalid instruction shape"));
/* True if SHAPE is predominantly a quadword operation (most of the time, this
   means the Q bit should be set).  */

static int
neon_quad (enum neon_shape shape)
{
  return neon_shape_class[shape] == SC_QUAD;
}

static void
neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
                       unsigned *g_size)
{
  /* Allow modification to be made to types which are constrained to be
     based on the key element, based on bits set alongside N_EQK.  */
  if ((typebits & N_EQK) != 0)
    {
      if ((typebits & N_HLF) != 0)
        *g_size /= 2;
      else if ((typebits & N_DBL) != 0)
        *g_size *= 2;
      if ((typebits & N_SGN) != 0)
        *g_type = NT_signed;
      else if ((typebits & N_UNS) != 0)
        *g_type = NT_unsigned;
      else if ((typebits & N_INT) != 0)
        *g_type = NT_integer;
      else if ((typebits & N_FLT) != 0)
        *g_type = NT_float;
      else if ((typebits & N_SIZ) != 0)
        *g_type = NT_untyped;
    }
}

/* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
   operand type, i.e. the single type specified in a Neon instruction when it
   is the only one given.  */

static struct neon_type_el
neon_type_promote (struct neon_type_el *key, unsigned thisarg)
{
  struct neon_type_el dest = *key;

  gas_assert ((thisarg & N_EQK) != 0);

  neon_modify_type_size (thisarg, &dest.type, &dest.size);

  return dest;
}
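/* Added illustration (hedged example, not from the original source): an
   operand constrained as N_EQK | N_DBL takes the key type and doubles its
   size, so a key of S16 promotes to S32 with the signedness preserved, while
   N_EQK | N_UNS would instead force the promoted operand to be unsigned.  */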
11685 /* Convert Neon type and size into compact bitmask representation. */
11687 static enum neon_type_mask
11688 type_chk_of_el_type (enum neon_el_type type
, unsigned size
)
11695 case 8: return N_8
;
11696 case 16: return N_16
;
11697 case 32: return N_32
;
11698 case 64: return N_64
;
11706 case 8: return N_I8
;
11707 case 16: return N_I16
;
11708 case 32: return N_I32
;
11709 case 64: return N_I64
;
11717 case 16: return N_F16
;
11718 case 32: return N_F32
;
11719 case 64: return N_F64
;
11727 case 8: return N_P8
;
11728 case 16: return N_P16
;
11736 case 8: return N_S8
;
11737 case 16: return N_S16
;
11738 case 32: return N_S32
;
11739 case 64: return N_S64
;
11747 case 8: return N_U8
;
11748 case 16: return N_U16
;
11749 case 32: return N_U32
;
11750 case 64: return N_U64
;
11761 /* Convert compact Neon bitmask type representation to a type and size. Only
11762 handles the case where a single bit is set in the mask. */
11765 el_type_of_type_chk (enum neon_el_type
*type
, unsigned *size
,
11766 enum neon_type_mask mask
)
11768 if ((mask
& N_EQK
) != 0)
11771 if ((mask
& (N_S8
| N_U8
| N_I8
| N_8
| N_P8
)) != 0)
11773 else if ((mask
& (N_S16
| N_U16
| N_I16
| N_16
| N_P16
)) != 0)
11775 else if ((mask
& (N_S32
| N_U32
| N_I32
| N_32
| N_F32
)) != 0)
11777 else if ((mask
& (N_S64
| N_U64
| N_I64
| N_64
| N_F64
)) != 0)
11782 if ((mask
& (N_S8
| N_S16
| N_S32
| N_S64
)) != 0)
11784 else if ((mask
& (N_U8
| N_U16
| N_U32
| N_U64
)) != 0)
11785 *type
= NT_unsigned
;
11786 else if ((mask
& (N_I8
| N_I16
| N_I32
| N_I64
)) != 0)
11787 *type
= NT_integer
;
11788 else if ((mask
& (N_8
| N_16
| N_32
| N_64
)) != 0)
11789 *type
= NT_untyped
;
11790 else if ((mask
& (N_P8
| N_P16
)) != 0)
11792 else if ((mask
& (N_F32
| N_F64
)) != 0)
11800 /* Modify a bitmask of allowed types. This is only needed for type
11804 modify_types_allowed (unsigned allowed
, unsigned mods
)
11807 enum neon_el_type type
;
11813 for (i
= 1; i
<= N_MAX_NONSPECIAL
; i
<<= 1)
11815 if (el_type_of_type_chk (&type
, &size
, allowed
& i
) == SUCCESS
)
11817 neon_modify_type_size (mods
, &type
, &size
);
11818 destmask
|= type_chk_of_el_type (type
, size
);
11825 /* Check type and return type classification.
11826 The manual states (paraphrase): If one datatype is given, it indicates the
11828 - the second operand, if there is one
11829 - the operand, if there is no second operand
11830 - the result, if there are no operands.
11831 This isn't quite good enough though, so we use a concept of a "key" datatype
11832 which is set on a per-instruction basis, which is the one which matters when
11833 only one data type is written.
11834 Note: this function has side-effects (e.g. filling in missing operands). All
11835 Neon instructions should call it before performing bit encoding. */
11837 static struct neon_type_el
11838 neon_check_type (unsigned els
, enum neon_shape ns
, ...)
11841 unsigned i
, pass
, key_el
= 0;
11842 unsigned types
[NEON_MAX_TYPE_ELS
];
11843 enum neon_el_type k_type
= NT_invtype
;
11844 unsigned k_size
= -1u;
11845 struct neon_type_el badtype
= {NT_invtype
, -1};
11846 unsigned key_allowed
= 0;
11848 /* Optional registers in Neon instructions are always (not) in operand 1.
11849 Fill in the missing operand here, if it was omitted. */
11850 if (els
> 1 && !inst
.operands
[1].present
)
11851 inst
.operands
[1] = inst
.operands
[0];
11853 /* Suck up all the varargs. */
11855 for (i
= 0; i
< els
; i
++)
11857 unsigned thisarg
= va_arg (ap
, unsigned);
11858 if (thisarg
== N_IGNORE_TYPE
)
11863 types
[i
] = thisarg
;
11864 if ((thisarg
& N_KEY
) != 0)
11869 if (inst
.vectype
.elems
> 0)
11870 for (i
= 0; i
< els
; i
++)
11871 if (inst
.operands
[i
].vectype
.type
!= NT_invtype
)
11873 first_error (_("types specified in both the mnemonic and operands"));
11877 /* Duplicate inst.vectype elements here as necessary.
11878 FIXME: No idea if this is exactly the same as the ARM assembler,
11879 particularly when an insn takes one register and one non-register
11881 if (inst
.vectype
.elems
== 1 && els
> 1)
11884 inst
.vectype
.elems
= els
;
11885 inst
.vectype
.el
[key_el
] = inst
.vectype
.el
[0];
11886 for (j
= 0; j
< els
; j
++)
11888 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
11891 else if (inst
.vectype
.elems
== 0 && els
> 0)
11894 /* No types were given after the mnemonic, so look for types specified
11895 after each operand. We allow some flexibility here; as long as the
11896 "key" operand has a type, we can infer the others. */
11897 for (j
= 0; j
< els
; j
++)
11898 if (inst
.operands
[j
].vectype
.type
!= NT_invtype
)
11899 inst
.vectype
.el
[j
] = inst
.operands
[j
].vectype
;
11901 if (inst
.operands
[key_el
].vectype
.type
!= NT_invtype
)
11903 for (j
= 0; j
< els
; j
++)
11904 if (inst
.operands
[j
].vectype
.type
== NT_invtype
)
11905 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
11910 first_error (_("operand types can't be inferred"));
11914 else if (inst
.vectype
.elems
!= els
)
11916 first_error (_("type specifier has the wrong number of parts"));
11920 for (pass
= 0; pass
< 2; pass
++)
11922 for (i
= 0; i
< els
; i
++)
11924 unsigned thisarg
= types
[i
];
11925 unsigned types_allowed
= ((thisarg
& N_EQK
) != 0 && pass
!= 0)
11926 ? modify_types_allowed (key_allowed
, thisarg
) : thisarg
;
11927 enum neon_el_type g_type
= inst
.vectype
.el
[i
].type
;
11928 unsigned g_size
= inst
.vectype
.el
[i
].size
;
11930 /* Decay more-specific signed & unsigned types to sign-insensitive
11931 integer types if sign-specific variants are unavailable. */
11932 if ((g_type
== NT_signed
|| g_type
== NT_unsigned
)
11933 && (types_allowed
& N_SU_ALL
) == 0)
11934 g_type
= NT_integer
;
11936 /* If only untyped args are allowed, decay any more specific types to
11937 them. Some instructions only care about signs for some element
11938 sizes, so handle that properly. */
11939 if ((g_size
== 8 && (types_allowed
& N_8
) != 0)
11940 || (g_size
== 16 && (types_allowed
& N_16
) != 0)
11941 || (g_size
== 32 && (types_allowed
& N_32
) != 0)
11942 || (g_size
== 64 && (types_allowed
& N_64
) != 0))
11943 g_type
= NT_untyped
;
11947 if ((thisarg
& N_KEY
) != 0)
11951 key_allowed
= thisarg
& ~N_KEY
;
11956 if ((thisarg
& N_VFP
) != 0)
11958 enum neon_shape_el regshape
= neon_shape_tab
[ns
].el
[i
];
11959 unsigned regwidth
= neon_shape_el_size
[regshape
], match
;
11961 /* In VFP mode, operands must match register widths. If we
11962 have a key operand, use its width, else use the width of
11963 the current operand. */
11969 if (regwidth
!= match
)
11971 first_error (_("operand size must match register width"));
11976 if ((thisarg
& N_EQK
) == 0)
11978 unsigned given_type
= type_chk_of_el_type (g_type
, g_size
);
11980 if ((given_type
& types_allowed
) == 0)
11982 first_error (_("bad type in Neon instruction"));
11988 enum neon_el_type mod_k_type
= k_type
;
11989 unsigned mod_k_size
= k_size
;
11990 neon_modify_type_size (thisarg
, &mod_k_type
, &mod_k_size
);
11991 if (g_type
!= mod_k_type
|| g_size
!= mod_k_size
)
11993 first_error (_("inconsistent types in Neon instruction"));
12001 return inst
.vectype
.el
[key_el
];
12004 /* Neon-style VFP instruction forwarding. */
12006 /* Thumb VFP instructions have 0xE in the condition field. */
12009 do_vfp_cond_or_thumb (void)
12012 inst
.instruction
|= 0xe0000000;
12014 inst
.instruction
|= inst
.cond
<< 28;
12017 /* Look up and encode a simple mnemonic, for use as a helper function for the
12018 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
12019 etc. It is assumed that operand parsing has already been done, and that the
12020 operands are in the form expected by the given opcode (this isn't necessarily
12021 the same as the form in which they were parsed, hence some massaging must
12022 take place before this function is called).
12023 Checks current arch version against that in the looked-up opcode. */
12026 do_vfp_nsyn_opcode (const char *opname
)
12028 const struct asm_opcode
*opcode
;
12030 opcode
= hash_find (arm_ops_hsh
, opname
);
12035 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
,
12036 thumb_mode
? *opcode
->tvariant
: *opcode
->avariant
),
12041 inst
.instruction
= opcode
->tvalue
;
12042 opcode
->tencode ();
12046 inst
.instruction
= (inst
.cond
<< 28) | opcode
->avalue
;
12047 opcode
->aencode ();
12052 do_vfp_nsyn_add_sub (enum neon_shape rs
)
12054 int is_add
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vadd
;
12059 do_vfp_nsyn_opcode ("fadds");
12061 do_vfp_nsyn_opcode ("fsubs");
12066 do_vfp_nsyn_opcode ("faddd");
12068 do_vfp_nsyn_opcode ("fsubd");
12072 /* Check operand types to see if this is a VFP instruction, and if so call
12076 try_vfp_nsyn (int args
, void (*pfn
) (enum neon_shape
))
12078 enum neon_shape rs
;
12079 struct neon_type_el et
;
12084 rs
= neon_select_shape (NS_FF
, NS_DD
, NS_NULL
);
12085 et
= neon_check_type (2, rs
,
12086 N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
12090 rs
= neon_select_shape (NS_FFF
, NS_DDD
, NS_NULL
);
12091 et
= neon_check_type (3, rs
,
12092 N_EQK
| N_VFP
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
12099 if (et
.type
!= NT_invtype
)
12111 do_vfp_nsyn_mla_mls (enum neon_shape rs
)
12113 int is_mla
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vmla
;
12118 do_vfp_nsyn_opcode ("fmacs");
12120 do_vfp_nsyn_opcode ("fmscs");
12125 do_vfp_nsyn_opcode ("fmacd");
12127 do_vfp_nsyn_opcode ("fmscd");
12132 do_vfp_nsyn_mul (enum neon_shape rs
)
12135 do_vfp_nsyn_opcode ("fmuls");
12137 do_vfp_nsyn_opcode ("fmuld");
12141 do_vfp_nsyn_abs_neg (enum neon_shape rs
)
12143 int is_neg
= (inst
.instruction
& 0x80) != 0;
12144 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_VFP
| N_KEY
);
12149 do_vfp_nsyn_opcode ("fnegs");
12151 do_vfp_nsyn_opcode ("fabss");
12156 do_vfp_nsyn_opcode ("fnegd");
12158 do_vfp_nsyn_opcode ("fabsd");
12162 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
12163 insns belong to Neon, and are handled elsewhere. */
12166 do_vfp_nsyn_ldm_stm (int is_dbmode
)
12168 int is_ldm
= (inst
.instruction
& (1 << 20)) != 0;
12172 do_vfp_nsyn_opcode ("fldmdbs");
12174 do_vfp_nsyn_opcode ("fldmias");
12179 do_vfp_nsyn_opcode ("fstmdbs");
12181 do_vfp_nsyn_opcode ("fstmias");
12186 do_vfp_nsyn_sqrt (void)
12188 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_DD
, NS_NULL
);
12189 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
12192 do_vfp_nsyn_opcode ("fsqrts");
12194 do_vfp_nsyn_opcode ("fsqrtd");
12198 do_vfp_nsyn_div (void)
12200 enum neon_shape rs
= neon_select_shape (NS_FFF
, NS_DDD
, NS_NULL
);
12201 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
12202 N_F32
| N_F64
| N_KEY
| N_VFP
);
12205 do_vfp_nsyn_opcode ("fdivs");
12207 do_vfp_nsyn_opcode ("fdivd");
12211 do_vfp_nsyn_nmul (void)
12213 enum neon_shape rs
= neon_select_shape (NS_FFF
, NS_DDD
, NS_NULL
);
12214 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
12215 N_F32
| N_F64
| N_KEY
| N_VFP
);
12219 inst
.instruction
= NEON_ENC_SINGLE (inst
.instruction
);
12220 do_vfp_sp_dyadic ();
12224 inst
.instruction
= NEON_ENC_DOUBLE (inst
.instruction
);
12225 do_vfp_dp_rd_rn_rm ();
12227 do_vfp_cond_or_thumb ();
12231 do_vfp_nsyn_cmp (void)
12233 if (inst
.operands
[1].isreg
)
12235 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_DD
, NS_NULL
);
12236 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
12240 inst
.instruction
= NEON_ENC_SINGLE (inst
.instruction
);
12241 do_vfp_sp_monadic ();
12245 inst
.instruction
= NEON_ENC_DOUBLE (inst
.instruction
);
12246 do_vfp_dp_rd_rm ();
12251 enum neon_shape rs
= neon_select_shape (NS_FI
, NS_DI
, NS_NULL
);
12252 neon_check_type (2, rs
, N_F32
| N_F64
| N_KEY
| N_VFP
, N_EQK
);
12254 switch (inst
.instruction
& 0x0fffffff)
12257 inst
.instruction
+= N_MNEM_vcmpz
- N_MNEM_vcmp
;
12260 inst
.instruction
+= N_MNEM_vcmpez
- N_MNEM_vcmpe
;
12268 inst
.instruction
= NEON_ENC_SINGLE (inst
.instruction
);
12269 do_vfp_sp_compare_z ();
12273 inst
.instruction
= NEON_ENC_DOUBLE (inst
.instruction
);
12277 do_vfp_cond_or_thumb ();
12281 nsyn_insert_sp (void)
12283 inst
.operands
[1] = inst
.operands
[0];
12284 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
12285 inst
.operands
[0].reg
= REG_SP
;
12286 inst
.operands
[0].isreg
= 1;
12287 inst
.operands
[0].writeback
= 1;
12288 inst
.operands
[0].present
= 1;
12292 do_vfp_nsyn_push (void)
12295 if (inst
.operands
[1].issingle
)
12296 do_vfp_nsyn_opcode ("fstmdbs");
12298 do_vfp_nsyn_opcode ("fstmdbd");
12302 do_vfp_nsyn_pop (void)
12305 if (inst
.operands
[1].issingle
)
12306 do_vfp_nsyn_opcode ("fldmias");
12308 do_vfp_nsyn_opcode ("fldmiad");
/* Fix up Neon data-processing instructions, ORing in the correct bits for
   ARM mode or Thumb mode and moving the encoded bit 24 to bit 28.  */

static unsigned
neon_dp_fixup (unsigned i)
{
  if (thumb_mode)
    {
      /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode.  */
      if (i & (1 << 24))
        i |= 1 << 28;

      i &= ~(1 << 24);

      i |= 0xef000000;
    }
  else
    i |= 0xf2000000;

  return i;
}

/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3).  */

static unsigned
neon_logbits (unsigned x)
{
  return ffs (x) - 4;
}

#define LOW4(R) ((R) & 0xf)
#define HI1(R) (((R) >> 4) & 1)
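/* Added example: neon_logbits maps an element size onto the two-bit size
   field used by the Neon encodings: 8 -> 0, 16 -> 1, 32 -> 2, 64 -> 3.  */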
/* Encode insns with bit pattern:

  |28/24|23|22 |21 20|19 16|15 12|11    8|7|6|5|4|3  0|
  |  U  |x |D  |size | Rn  | Rd  |x x x x|N|Q|M|x| Rm |

  SIZE is passed in bits. -1 means size field isn't changed, in case it has a
  different meaning for some instruction.  */

static void
neon_three_same (int isquad, int ubit, int size)
{
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= (ubit != 0) << 24;
  if (size != -1)
    inst.instruction |= neon_logbits (size) << 20;

  inst.instruction = neon_dp_fixup (inst.instruction);
}

/* Encode instructions of the form:

  |28/24|23|22|21 20|19 18|17 16|15 12|11     7|6|5|4|3  0|
  |  U  |x |D |x  x |size |x  x | Rd  |x x x x x|Q|M|x| Rm |

  Don't write size if SIZE == -1.  */

static void
neon_two_same (int qbit, int ubit, int size)
{
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (qbit != 0) << 6;
  inst.instruction |= (ubit != 0) << 24;

  if (size != -1)
    inst.instruction |= neon_logbits (size) << 18;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
/* Neon instruction encoders, in approximate order of appearance.  */

static void
do_neon_dyadic_i_su (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_SU_32 | N_KEY);
  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
}

static void
do_neon_dyadic_i64_su (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_SU_ALL | N_KEY);
  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
}

static void
neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
                unsigned immbits)
{
  unsigned size = et.size >> 3;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= immbits << 16;
  inst.instruction |= (size >> 3) << 7;
  inst.instruction |= (size & 0x7) << 19;
  if (write_ubit)
    inst.instruction |= (uval != 0) << 24;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
12433 do_neon_shl_imm (void)
12435 if (!inst
.operands
[2].isreg
)
12437 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
12438 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_KEY
| N_I_ALL
);
12439 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
12440 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, inst
.operands
[2].imm
);
12444 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
12445 struct neon_type_el et
= neon_check_type (3, rs
,
12446 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
12449 /* VSHL/VQSHL 3-register variants have syntax such as:
12451 whereas other 3-register operations encoded by neon_three_same have
12454 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
12456 tmp
= inst
.operands
[2].reg
;
12457 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
12458 inst
.operands
[1].reg
= tmp
;
12459 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
12460 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
12465 do_neon_qshl_imm (void)
12467 if (!inst
.operands
[2].isreg
)
12469 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
12470 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
12472 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
12473 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
,
12474 inst
.operands
[2].imm
);
12478 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
12479 struct neon_type_el et
= neon_check_type (3, rs
,
12480 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
12483 /* See note in do_neon_shl_imm. */
12484 tmp
= inst
.operands
[2].reg
;
12485 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
12486 inst
.operands
[1].reg
= tmp
;
12487 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
12488 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
12493 do_neon_rshl (void)
12495 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
12496 struct neon_type_el et
= neon_check_type (3, rs
,
12497 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
12500 tmp
= inst
.operands
[2].reg
;
12501 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
12502 inst
.operands
[1].reg
= tmp
;
12503 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
12507 neon_cmode_for_logic_imm (unsigned immediate
, unsigned *immbits
, int size
)
12509 /* Handle .I8 pseudo-instructions. */
12512 /* Unfortunately, this will make everything apart from zero out-of-range.
12513 FIXME is this the intended semantics? There doesn't seem much point in
12514 accepting .I8 if so. */
12515 immediate
|= immediate
<< 8;
12521 if (immediate
== (immediate
& 0x000000ff))
12523 *immbits
= immediate
;
12526 else if (immediate
== (immediate
& 0x0000ff00))
12528 *immbits
= immediate
>> 8;
12531 else if (immediate
== (immediate
& 0x00ff0000))
12533 *immbits
= immediate
>> 16;
12536 else if (immediate
== (immediate
& 0xff000000))
12538 *immbits
= immediate
>> 24;
12541 if ((immediate
& 0xffff) != (immediate
>> 16))
12542 goto bad_immediate
;
12543 immediate
&= 0xffff;
12546 if (immediate
== (immediate
& 0x000000ff))
12548 *immbits
= immediate
;
12551 else if (immediate
== (immediate
& 0x0000ff00))
12553 *immbits
= immediate
>> 8;
12558 first_error (_("immediate value out of range"));
12562 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
12566 neon_bits_same_in_bytes (unsigned imm
)
12568 return ((imm
& 0x000000ff) == 0 || (imm
& 0x000000ff) == 0x000000ff)
12569 && ((imm
& 0x0000ff00) == 0 || (imm
& 0x0000ff00) == 0x0000ff00)
12570 && ((imm
& 0x00ff0000) == 0 || (imm
& 0x00ff0000) == 0x00ff0000)
12571 && ((imm
& 0xff000000) == 0 || (imm
& 0xff000000) == 0xff000000);
12574 /* For immediate of above form, return 0bABCD. */
12577 neon_squash_bits (unsigned imm
)
12579 return (imm
& 0x01) | ((imm
& 0x0100) >> 7) | ((imm
& 0x010000) >> 14)
12580 | ((imm
& 0x01000000) >> 21);
12583 /* Compress quarter-float representation to 0b...000 abcdefgh. */
12586 neon_qfloat_bits (unsigned imm
)
12588 return ((imm
>> 19) & 0x7f) | ((imm
>> 24) & 0x80);
12591 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
12592 the instruction. *OP is passed as the initial value of the op field, and
12593 may be set to a different value depending on the constant (i.e.
12594 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
12595 MVN). If the immediate looks like a repeated pattern then also
12596 try smaller element sizes. */
12599 neon_cmode_for_move_imm (unsigned immlo
, unsigned immhi
, int float_p
,
12600 unsigned *immbits
, int *op
, int size
,
12601 enum neon_el_type type
)
12603 /* Only permit float immediates (including 0.0/-0.0) if the operand type is
12605 if (type
== NT_float
&& !float_p
)
12608 if (type
== NT_float
&& is_quarter_float (immlo
) && immhi
== 0)
12610 if (size
!= 32 || *op
== 1)
12612 *immbits
= neon_qfloat_bits (immlo
);
12618 if (neon_bits_same_in_bytes (immhi
)
12619 && neon_bits_same_in_bytes (immlo
))
12623 *immbits
= (neon_squash_bits (immhi
) << 4)
12624 | neon_squash_bits (immlo
);
12629 if (immhi
!= immlo
)
12635 if (immlo
== (immlo
& 0x000000ff))
12640 else if (immlo
== (immlo
& 0x0000ff00))
12642 *immbits
= immlo
>> 8;
12645 else if (immlo
== (immlo
& 0x00ff0000))
12647 *immbits
= immlo
>> 16;
12650 else if (immlo
== (immlo
& 0xff000000))
12652 *immbits
= immlo
>> 24;
12655 else if (immlo
== ((immlo
& 0x0000ff00) | 0x000000ff))
12657 *immbits
= (immlo
>> 8) & 0xff;
12660 else if (immlo
== ((immlo
& 0x00ff0000) | 0x0000ffff))
12662 *immbits
= (immlo
>> 16) & 0xff;
12666 if ((immlo
& 0xffff) != (immlo
>> 16))
12673 if (immlo
== (immlo
& 0x000000ff))
12678 else if (immlo
== (immlo
& 0x0000ff00))
12680 *immbits
= immlo
>> 8;
12684 if ((immlo
& 0xff) != (immlo
>> 8))
12689 if (immlo
== (immlo
& 0x000000ff))
12691 /* Don't allow MVN with 8-bit immediate. */
12701 /* Write immediate bits [7:0] to the following locations:
12703 |28/24|23 19|18 16|15 4|3 0|
12704 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
12706 This function is used by VMOV/VMVN/VORR/VBIC. */
12709 neon_write_immbits (unsigned immbits
)
12711 inst
.instruction
|= immbits
& 0xf;
12712 inst
.instruction
|= ((immbits
>> 4) & 0x7) << 16;
12713 inst
.instruction
|= ((immbits
>> 7) & 0x1) << 24;
12716 /* Invert low-order SIZE bits of XHI:XLO. */
12719 neon_invert_size (unsigned *xlo
, unsigned *xhi
, int size
)
12721 unsigned immlo
= xlo
? *xlo
: 0;
12722 unsigned immhi
= xhi
? *xhi
: 0;
12727 immlo
= (~immlo
) & 0xff;
12731 immlo
= (~immlo
) & 0xffff;
12735 immhi
= (~immhi
) & 0xffffffff;
12736 /* fall through. */
12739 immlo
= (~immlo
) & 0xffffffff;
12754 do_neon_logic (void)
12756 if (inst
.operands
[2].present
&& inst
.operands
[2].isreg
)
12758 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
12759 neon_check_type (3, rs
, N_IGNORE_TYPE
);
12760 /* U bit and size field were set as part of the bitmask. */
12761 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
12762 neon_three_same (neon_quad (rs
), 0, -1);
12766 enum neon_shape rs
= neon_select_shape (NS_DI
, NS_QI
, NS_NULL
);
12767 struct neon_type_el et
= neon_check_type (2, rs
,
12768 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
12769 enum neon_opc opcode
= inst
.instruction
& 0x0fffffff;
12773 if (et
.type
== NT_invtype
)
12776 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
12778 immbits
= inst
.operands
[1].imm
;
12781 /* .i64 is a pseudo-op, so the immediate must be a repeating
12783 if (immbits
!= (inst
.operands
[1].regisimm
?
12784 inst
.operands
[1].reg
: 0))
12786 /* Set immbits to an invalid constant. */
12787 immbits
= 0xdeadbeef;
12794 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
12798 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
12802 /* Pseudo-instruction for VBIC. */
12803 neon_invert_size (&immbits
, 0, et
.size
);
12804 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
12808 /* Pseudo-instruction for VORR. */
12809 neon_invert_size (&immbits
, 0, et
.size
);
12810 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
12820 inst
.instruction
|= neon_quad (rs
) << 6;
12821 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12822 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12823 inst
.instruction
|= cmode
<< 8;
12824 neon_write_immbits (immbits
);
12826 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
12831 do_neon_bitfield (void)
12833 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
12834 neon_check_type (3, rs
, N_IGNORE_TYPE
);
12835 neon_three_same (neon_quad (rs
), 0, -1);
12839 neon_dyadic_misc (enum neon_el_type ubit_meaning
, unsigned types
,
12842 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
12843 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
| destbits
, N_EQK
,
12845 if (et
.type
== NT_float
)
12847 inst
.instruction
= NEON_ENC_FLOAT (inst
.instruction
);
12848 neon_three_same (neon_quad (rs
), 0, -1);
12852 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
12853 neon_three_same (neon_quad (rs
), et
.type
== ubit_meaning
, et
.size
);
12858 do_neon_dyadic_if_su (void)
12860 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
12864 do_neon_dyadic_if_su_d (void)
12866 /* This version only allow D registers, but that constraint is enforced during
12867 operand parsing so we don't need to do anything extra here. */
12868 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
12872 do_neon_dyadic_if_i_d (void)
12874 /* The "untyped" case can't happen. Do this to stop the "U" bit being
12875 affected if we specify unsigned args. */
12876 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
12879 enum vfp_or_neon_is_neon_bits
12882 NEON_CHECK_ARCH
= 2
12885 /* Call this function if an instruction which may have belonged to the VFP or
12886 Neon instruction sets, but turned out to be a Neon instruction (due to the
12887 operand types involved, etc.). We have to check and/or fix-up a couple of
12890 - Make sure the user hasn't attempted to make a Neon instruction
12892 - Alter the value in the condition code field if necessary.
12893 - Make sure that the arch supports Neon instructions.
12895 Which of these operations take place depends on bits from enum
12896 vfp_or_neon_is_neon_bits.
12898 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
12899 current instruction's condition is COND_ALWAYS, the condition field is
12900 changed to inst.uncond_value. This is necessary because instructions shared
12901 between VFP and Neon may be conditional for the VFP variants only, and the
12902 unconditional Neon version must have, e.g., 0xF in the condition field. */
12905 vfp_or_neon_is_neon (unsigned check
)
12907 /* Conditions are always legal in Thumb mode (IT blocks). */
12908 if (!thumb_mode
&& (check
& NEON_CHECK_CC
))
12910 if (inst
.cond
!= COND_ALWAYS
)
12912 first_error (_(BAD_COND
));
12915 if (inst
.uncond_value
!= -1)
12916 inst
.instruction
|= inst
.uncond_value
<< 28;
12919 if ((check
& NEON_CHECK_ARCH
)
12920 && !ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
))
12922 first_error (_(BAD_FPU
));
12930 do_neon_addsub_if_i (void)
12932 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub
) == SUCCESS
)
12935 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
12938 /* The "untyped" case can't happen. Do this to stop the "U" bit being
12939 affected if we specify unsigned args. */
12940 neon_dyadic_misc (NT_untyped
, N_IF_32
| N_I64
, 0);
12943 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
12945 V<op> A,B (A is operand 0, B is operand 2)
12950 so handle that case specially. */
12953 neon_exchange_operands (void)
12955 void *scratch
= alloca (sizeof (inst
.operands
[0]));
12956 if (inst
.operands
[1].present
)
12958 /* Swap operands[1] and operands[2]. */
12959 memcpy (scratch
, &inst
.operands
[1], sizeof (inst
.operands
[0]));
12960 inst
.operands
[1] = inst
.operands
[2];
12961 memcpy (&inst
.operands
[2], scratch
, sizeof (inst
.operands
[0]));
12965 inst
.operands
[1] = inst
.operands
[2];
12966 inst
.operands
[2] = inst
.operands
[0];
12971 neon_compare (unsigned regtypes
, unsigned immtypes
, int invert
)
12973 if (inst
.operands
[2].isreg
)
12976 neon_exchange_operands ();
12977 neon_dyadic_misc (NT_unsigned
, regtypes
, N_SIZ
);
12981 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
12982 struct neon_type_el et
= neon_check_type (2, rs
,
12983 N_EQK
| N_SIZ
, immtypes
| N_KEY
);
12985 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
12986 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12987 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12988 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
12989 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
12990 inst
.instruction
|= neon_quad (rs
) << 6;
12991 inst
.instruction
|= (et
.type
== NT_float
) << 10;
12992 inst
.instruction
|= neon_logbits (et
.size
) << 18;
12994 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
13001 neon_compare (N_SUF_32
, N_S8
| N_S16
| N_S32
| N_F32
, FALSE
);
13005 do_neon_cmp_inv (void)
13007 neon_compare (N_SUF_32
, N_S8
| N_S16
| N_S32
| N_F32
, TRUE
);
13013 neon_compare (N_IF_32
, N_IF_32
, FALSE
);
13016 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
13017 scalars, which are encoded in 5 bits, M : Rm.
13018 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
13019 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
13023 neon_scalar_for_mul (unsigned scalar
, unsigned elsize
)
13025 unsigned regno
= NEON_SCALAR_REG (scalar
);
13026 unsigned elno
= NEON_SCALAR_INDEX (scalar
);
13031 if (regno
> 7 || elno
> 3)
13033 return regno
| (elno
<< 3);
13036 if (regno
> 15 || elno
> 1)
13038 return regno
| (elno
<< 4);
13042 first_error (_("scalar out of range for multiply instruction"));
13048 /* Encode multiply / multiply-accumulate scalar instructions. */
13051 neon_mul_mac (struct neon_type_el et
, int ubit
)
13055 /* Give a more helpful error message if we have an invalid type. */
13056 if (et
.type
== NT_invtype
)
13059 scalar
= neon_scalar_for_mul (inst
.operands
[2].reg
, et
.size
);
13060 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
13061 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
13062 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
13063 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
13064 inst
.instruction
|= LOW4 (scalar
);
13065 inst
.instruction
|= HI1 (scalar
) << 5;
13066 inst
.instruction
|= (et
.type
== NT_float
) << 8;
13067 inst
.instruction
|= neon_logbits (et
.size
) << 20;
13068 inst
.instruction
|= (ubit
!= 0) << 24;
13070 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
13074 do_neon_mac_maybe_scalar (void)
13076 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls
) == SUCCESS
)
13079 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
13082 if (inst
.operands
[2].isscalar
)
13084 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
13085 struct neon_type_el et
= neon_check_type (3, rs
,
13086 N_EQK
, N_EQK
, N_I16
| N_I32
| N_F32
| N_KEY
);
13087 inst
.instruction
= NEON_ENC_SCALAR (inst
.instruction
);
13088 neon_mul_mac (et
, neon_quad (rs
));
13092 /* The "untyped" case can't happen. Do this to stop the "U" bit being
13093 affected if we specify unsigned args. */
13094 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
13101 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
13102 struct neon_type_el et
= neon_check_type (3, rs
,
13103 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
13104 neon_three_same (neon_quad (rs
), 0, et
.size
);
13107 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
13108 same types as the MAC equivalents. The polynomial type for this instruction
13109 is encoded the same as the integer type. */
13114 if (try_vfp_nsyn (3, do_vfp_nsyn_mul
) == SUCCESS
)
13117 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
13120 if (inst
.operands
[2].isscalar
)
13121 do_neon_mac_maybe_scalar ();
13123 neon_dyadic_misc (NT_poly
, N_I8
| N_I16
| N_I32
| N_F32
| N_P8
, 0);
13127 do_neon_qdmulh (void)
13129 if (inst
.operands
[2].isscalar
)
13131 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
13132 struct neon_type_el et
= neon_check_type (3, rs
,
13133 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
13134 inst
.instruction
= NEON_ENC_SCALAR (inst
.instruction
);
13135 neon_mul_mac (et
, neon_quad (rs
));
13139 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
13140 struct neon_type_el et
= neon_check_type (3, rs
,
13141 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
13142 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
13143 /* The U bit (rounding) comes from bit mask. */
13144 neon_three_same (neon_quad (rs
), 0, et
.size
);
13149 do_neon_fcmp_absolute (void)
13151 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
13152 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_F32
| N_KEY
);
13153 /* Size field comes from bit mask. */
13154 neon_three_same (neon_quad (rs
), 1, -1);
13158 do_neon_fcmp_absolute_inv (void)
13160 neon_exchange_operands ();
13161 do_neon_fcmp_absolute ();
13165 do_neon_step (void)
13167 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
13168 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_F32
| N_KEY
);
13169 neon_three_same (neon_quad (rs
), 0, -1);
13173 do_neon_abs_neg (void)
13175 enum neon_shape rs
;
13176 struct neon_type_el et
;
13178 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg
) == SUCCESS
)
13181 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
13184 rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
13185 et
= neon_check_type (2, rs
, N_EQK
, N_S8
| N_S16
| N_S32
| N_F32
| N_KEY
);
13187 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
13188 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
13189 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
13190 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
13191 inst
.instruction
|= neon_quad (rs
) << 6;
13192 inst
.instruction
|= (et
.type
== NT_float
) << 10;
13193 inst
.instruction
|= neon_logbits (et
.size
) << 18;
13195 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
13201 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
13202 struct neon_type_el et
= neon_check_type (2, rs
,
13203 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
13204 int imm
= inst
.operands
[2].imm
;
13205 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
13206 _("immediate out of range for insert"));
13207 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
13213 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
13214 struct neon_type_el et
= neon_check_type (2, rs
,
13215 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
13216 int imm
= inst
.operands
[2].imm
;
13217 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
13218 _("immediate out of range for insert"));
13219 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, et
.size
- imm
);
13223 do_neon_qshlu_imm (void)
13225 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
13226 struct neon_type_el et
= neon_check_type (2, rs
,
13227 N_EQK
| N_UNS
, N_S8
| N_S16
| N_S32
| N_S64
| N_KEY
);
13228 int imm
= inst
.operands
[2].imm
;
13229 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
13230 _("immediate out of range for shift"));
13231 /* Only encodes the 'U present' variant of the instruction.
13232 In this case, signed types have OP (bit 8) set to 0.
13233 Unsigned types have OP set to 1. */
13234 inst
.instruction
|= (et
.type
== NT_unsigned
) << 8;
13235 /* The rest of the bits are the same as other immediate shifts. */
13236 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
13240 do_neon_qmovn (void)
13242 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
13243 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
13244 /* Saturating move where operands can be signed or unsigned, and the
13245 destination has the same signedness. */
13246 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
13247 if (et
.type
== NT_unsigned
)
13248 inst
.instruction
|= 0xc0;
13250 inst
.instruction
|= 0x80;
13251 neon_two_same (0, 1, et
.size
/ 2);
13255 do_neon_qmovun (void)
13257 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
13258 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
13259 /* Saturating move with unsigned results. Operands must be signed. */
13260 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
13261 neon_two_same (0, 1, et
.size
/ 2);
13265 do_neon_rshift_sat_narrow (void)
13267 /* FIXME: Types for narrowing. If operands are signed, results can be signed
13268 or unsigned. If operands are unsigned, results must also be unsigned. */
13269 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
13270 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
13271 int imm
= inst
.operands
[2].imm
;
13272 /* This gets the bounds check, size encoding and immediate bits calculation
13276 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
13277 VQMOVN.I<size> <Dd>, <Qm>. */
13280 inst
.operands
[2].present
= 0;
13281 inst
.instruction
= N_MNEM_vqmovn
;
13286 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
13287 _("immediate out of range"));
13288 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, et
.size
- imm
);
13292 do_neon_rshift_sat_narrow_u (void)
13294 /* FIXME: Types for narrowing. If operands are signed, results can be signed
13295 or unsigned. If operands are unsigned, results must also be unsigned. */
13296 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
13297 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
13298 int imm
= inst
.operands
[2].imm
;
13299 /* This gets the bounds check, size encoding and immediate bits calculation
13303 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
13304 VQMOVUN.I<size> <Dd>, <Qm>. */
13307 inst
.operands
[2].present
= 0;
13308 inst
.instruction
= N_MNEM_vqmovun
;
13313 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
13314 _("immediate out of range"));
13315 /* FIXME: The manual is kind of unclear about what value U should have in
13316 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
13318 neon_imm_shift (TRUE
, 1, 0, et
, et
.size
- imm
);
13322 do_neon_movn (void)
13324 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
13325 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
13326 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
13327 neon_two_same (0, 1, et
.size
/ 2);
13331 do_neon_rshift_narrow (void)
13333 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
13334 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
13335 int imm
= inst
.operands
[2].imm
;
13336 /* This gets the bounds check, size encoding and immediate bits calculation
13340 /* If immediate is zero then we are a pseudo-instruction for
13341 VMOVN.I<size> <Dd>, <Qm> */
13344 inst
.operands
[2].present
= 0;
13345 inst
.instruction
= N_MNEM_vmovn
;
13350 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
13351 _("immediate out of range for narrowing operation"));
13352 neon_imm_shift (FALSE
, 0, 0, et
, et
.size
- imm
);
13356 do_neon_shll (void)
13358 /* FIXME: Type checking when lengthening. */
13359 struct neon_type_el et
= neon_check_type (2, NS_QDI
,
13360 N_EQK
| N_DBL
, N_I8
| N_I16
| N_I32
| N_KEY
);
13361 unsigned imm
= inst
.operands
[2].imm
;
13363 if (imm
== et
.size
)
13365 /* Maximum shift variant. */
13366 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
13367 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
13368 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
13369 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
13370 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
13371 inst
.instruction
|= neon_logbits (et
.size
) << 18;
13373 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
13377 /* A more-specific type check for non-max versions. */
13378 et
= neon_check_type (2, NS_QDI
,
13379 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
13380 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
13381 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, imm
);
13385 /* Check the various types for the VCVT instruction, and return which version
13386 the current instruction is. */
13389 neon_cvt_flavour (enum neon_shape rs
)
13391 #define CVT_VAR(C,X,Y) \
13392 et = neon_check_type (2, rs, whole_reg | (X), whole_reg | (Y)); \
13393 if (et.type != NT_invtype) \
13395 inst.error = NULL; \
13398 struct neon_type_el et
;
13399 unsigned whole_reg
= (rs
== NS_FFI
|| rs
== NS_FD
|| rs
== NS_DF
13400 || rs
== NS_FF
) ? N_VFP
: 0;
13401 /* The instruction versions which take an immediate take one register
13402 argument, which is extended to the width of the full register. Thus the
13403 "source" and "destination" registers must have the same width. Hack that
13404 here by making the size equal to the key (wider, in this case) operand. */
13405 unsigned key
= (rs
== NS_QQI
|| rs
== NS_DDI
|| rs
== NS_FFI
) ? N_KEY
: 0;
13407 CVT_VAR (0, N_S32
, N_F32
);
13408 CVT_VAR (1, N_U32
, N_F32
);
13409 CVT_VAR (2, N_F32
, N_S32
);
13410 CVT_VAR (3, N_F32
, N_U32
);
13411 /* Half-precision conversions. */
13412 CVT_VAR (4, N_F32
, N_F16
);
13413 CVT_VAR (5, N_F16
, N_F32
);
13417 /* VFP instructions. */
13418 CVT_VAR (6, N_F32
, N_F64
);
13419 CVT_VAR (7, N_F64
, N_F32
);
13420 CVT_VAR (8, N_S32
, N_F64
| key
);
13421 CVT_VAR (9, N_U32
, N_F64
| key
);
13422 CVT_VAR (10, N_F64
| key
, N_S32
);
13423 CVT_VAR (11, N_F64
| key
, N_U32
);
13424 /* VFP instructions with bitshift. */
13425 CVT_VAR (12, N_F32
| key
, N_S16
);
13426 CVT_VAR (13, N_F32
| key
, N_U16
);
13427 CVT_VAR (14, N_F64
| key
, N_S16
);
13428 CVT_VAR (15, N_F64
| key
, N_U16
);
13429 CVT_VAR (16, N_S16
, N_F32
| key
);
13430 CVT_VAR (17, N_U16
, N_F32
| key
);
13431 CVT_VAR (18, N_S16
, N_F64
| key
);
13432 CVT_VAR (19, N_U16
, N_F64
| key
);
13438 /* Neon-syntax VFP conversions. */
13441 do_vfp_nsyn_cvt (enum neon_shape rs
, int flavour
)
13443 const char *opname
= 0;
13445 if (rs
== NS_DDI
|| rs
== NS_QQI
|| rs
== NS_FFI
)
13447 /* Conversions with immediate bitshift. */
13448 const char *enc
[] =
13472 if (flavour
>= 0 && flavour
< (int) ARRAY_SIZE (enc
))
13474 opname
= enc
[flavour
];
13475 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
13476 _("operands 0 and 1 must be the same register"));
13477 inst
.operands
[1] = inst
.operands
[2];
13478 memset (&inst
.operands
[2], '\0', sizeof (inst
.operands
[2]));
13483 /* Conversions without bitshift. */
13484 const char *enc
[] =
13500 if (flavour
>= 0 && flavour
< (int) ARRAY_SIZE (enc
))
13501 opname
= enc
[flavour
];
13505 do_vfp_nsyn_opcode (opname
);
13509 do_vfp_nsyn_cvtz (void)
13511 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_FD
, NS_NULL
);
13512 int flavour
= neon_cvt_flavour (rs
);
13513 const char *enc
[] =
13527 if (flavour
>= 0 && flavour
< (int) ARRAY_SIZE (enc
) && enc
[flavour
])
13528 do_vfp_nsyn_opcode (enc
[flavour
]);
13534 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_FFI
, NS_DD
, NS_QQ
,
13535 NS_FD
, NS_DF
, NS_FF
, NS_QD
, NS_DQ
, NS_NULL
);
13536 int flavour
= neon_cvt_flavour (rs
);
13538 /* VFP rather than Neon conversions. */
13541 do_vfp_nsyn_cvt (rs
, flavour
);
13551 unsigned enctab
[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
13553 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
13556 /* Fixed-point conversion with #0 immediate is encoded as an
13557 integer conversion. */
13558 if (inst
.operands
[2].present
&& inst
.operands
[2].imm
== 0)
13560 immbits
= 32 - inst
.operands
[2].imm
;
13561 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
13563 inst
.instruction
|= enctab
[flavour
];
13564 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
13565 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
13566 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
13567 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
13568 inst
.instruction
|= neon_quad (rs
) << 6;
13569 inst
.instruction
|= 1 << 21;
13570 inst
.instruction
|= immbits
<< 16;
13572 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
13580 unsigned enctab
[] = { 0x100, 0x180, 0x0, 0x080 };
13582 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
13584 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
13588 inst
.instruction
|= enctab
[flavour
];
13590 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
13591 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
13592 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
13593 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
13594 inst
.instruction
|= neon_quad (rs
) << 6;
13595 inst
.instruction
|= 2 << 18;
13597 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
13601 /* Half-precision conversions for Advanced SIMD -- neon. */
13606 && (inst
.vectype
.el
[0].size
!= 16 || inst
.vectype
.el
[1].size
!= 32))
13608 as_bad (_("operand size must match register width"));
13613 && ((inst
.vectype
.el
[0].size
!= 32 || inst
.vectype
.el
[1].size
!= 16)))
13615 as_bad (_("operand size must match register width"));
13620 inst
.instruction
= 0x3b60600;
13622 inst
.instruction
= 0x3b60700;
13624 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
13625 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
13626 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
13627 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
13628 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
13632 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
13633 do_vfp_nsyn_cvt (rs
, flavour
);
13638 do_neon_cvtb (void)
13640 inst
.instruction
= 0xeb20a40;
13642 /* The sizes are attached to the mnemonic. */
13643 if (inst
.vectype
.el
[0].type
!= NT_invtype
13644 && inst
.vectype
.el
[0].size
== 16)
13645 inst
.instruction
|= 0x00010000;
13647 /* Programmer's syntax: the sizes are attached to the operands. */
13648 else if (inst
.operands
[0].vectype
.type
!= NT_invtype
13649 && inst
.operands
[0].vectype
.size
== 16)
13650 inst
.instruction
|= 0x00010000;
13652 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
13653 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
13654 do_vfp_cond_or_thumb ();
13659 do_neon_cvtt (void)
13662 inst
.instruction
|= 0x80;
13666 neon_move_immediate (void)
13668 enum neon_shape rs
= neon_select_shape (NS_DI
, NS_QI
, NS_NULL
);
13669 struct neon_type_el et
= neon_check_type (2, rs
,
13670 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
13671 unsigned immlo
, immhi
= 0, immbits
;
13672 int op
, cmode
, float_p
;
13674 constraint (et
.type
== NT_invtype
,
13675 _("operand size must be specified for immediate VMOV"));
13677 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
13678 op
= (inst
.instruction
& (1 << 5)) != 0;
13680 immlo
= inst
.operands
[1].imm
;
13681 if (inst
.operands
[1].regisimm
)
13682 immhi
= inst
.operands
[1].reg
;
13684 constraint (et
.size
< 32 && (immlo
& ~((1 << et
.size
) - 1)) != 0,
13685 _("immediate has bits set outside the operand size"));
13687 float_p
= inst
.operands
[1].immisfloat
;
13689 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
, &op
,
13690 et
.size
, et
.type
)) == FAIL
)
13692 /* Invert relevant bits only. */
13693 neon_invert_size (&immlo
, &immhi
, et
.size
);
13694 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
13695 with one or the other; those cases are caught by
13696 neon_cmode_for_move_imm. */
13698 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
,
13699 &op
, et
.size
, et
.type
)) == FAIL
)
13701 first_error (_("immediate out of range"));
13706 inst
.instruction
&= ~(1 << 5);
13707 inst
.instruction
|= op
<< 5;
13709 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
13710 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
13711 inst
.instruction
|= neon_quad (rs
) << 6;
13712 inst
.instruction
|= cmode
<< 8;
13714 neon_write_immbits (immbits
);
13720 if (inst
.operands
[1].isreg
)
13722 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
13724 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
13725 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
13726 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
13727 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
13728 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
13729 inst
.instruction
|= neon_quad (rs
) << 6;
13733 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
13734 neon_move_immediate ();
13737 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
13740 /* Encode instructions of form:
13742 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
13743 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
13746 neon_mixed_length (struct neon_type_el et
, unsigned size
)
13748 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
13749 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
13750 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
13751 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
13752 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
13753 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
13754 inst
.instruction
|= (et
.type
== NT_unsigned
) << 24;
13755 inst
.instruction
|= neon_logbits (size
) << 20;
13757 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
13761 do_neon_dyadic_long (void)
13763 /* FIXME: Type checking for lengthening op. */
13764 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
13765 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
13766 neon_mixed_length (et
, et
.size
);
13770 do_neon_abal (void)
13772 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
13773 N_EQK
| N_INT
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
13774 neon_mixed_length (et
, et
.size
);
13778 neon_mac_reg_scalar_long (unsigned regtypes
, unsigned scalartypes
)
13780 if (inst
.operands
[2].isscalar
)
13782 struct neon_type_el et
= neon_check_type (3, NS_QDS
,
13783 N_EQK
| N_DBL
, N_EQK
, regtypes
| N_KEY
);
13784 inst
.instruction
= NEON_ENC_SCALAR (inst
.instruction
);
13785 neon_mul_mac (et
, et
.type
== NT_unsigned
);
13789 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
13790 N_EQK
| N_DBL
, N_EQK
, scalartypes
| N_KEY
);
13791 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
13792 neon_mixed_length (et
, et
.size
);
13797 do_neon_mac_maybe_scalar_long (void)
13799 neon_mac_reg_scalar_long (N_S16
| N_S32
| N_U16
| N_U32
, N_SU_32
);
13803 do_neon_dyadic_wide (void)
13805 struct neon_type_el et
= neon_check_type (3, NS_QQD
,
13806 N_EQK
| N_DBL
, N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
13807 neon_mixed_length (et
, et
.size
);
13811 do_neon_dyadic_narrow (void)
13813 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
13814 N_EQK
| N_DBL
, N_EQK
, N_I16
| N_I32
| N_I64
| N_KEY
);
13815 /* Operand sign is unimportant, and the U bit is part of the opcode,
13816 so force the operand type to integer. */
13817 et
.type
= NT_integer
;
13818 neon_mixed_length (et
, et
.size
/ 2);
13822 do_neon_mul_sat_scalar_long (void)
13824 neon_mac_reg_scalar_long (N_S16
| N_S32
, N_S16
| N_S32
);
13828 do_neon_vmull (void)
13830 if (inst
.operands
[2].isscalar
)
13831 do_neon_mac_maybe_scalar_long ();
13834 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
13835 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_P8
| N_KEY
);
13836 if (et
.type
== NT_poly
)
13837 inst
.instruction
= NEON_ENC_POLY (inst
.instruction
);
13839 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
13840 /* For polynomial encoding, size field must be 0b00 and the U bit must be
13841 zero. Should be OK as-is. */
13842 neon_mixed_length (et
, et
.size
);
13849 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
13850 struct neon_type_el et
= neon_check_type (3, rs
,
13851 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
13852 unsigned imm
= (inst
.operands
[3].imm
* et
.size
) / 8;
13854 constraint (imm
>= (unsigned) (neon_quad (rs
) ? 16 : 8),
13855 _("shift out of range"));
13856 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
13857 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
13858 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
13859 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
13860 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
13861 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
13862 inst
.instruction
|= neon_quad (rs
) << 6;
13863 inst
.instruction
|= imm
<< 8;
13865 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
13871 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
13872 struct neon_type_el et
= neon_check_type (2, rs
,
13873 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
13874 unsigned op
= (inst
.instruction
>> 7) & 3;
13875 /* N (width of reversed regions) is encoded as part of the bitmask. We
13876 extract it here to check the elements to be reversed are smaller.
13877 Otherwise we'd get a reserved instruction. */
13878 unsigned elsize
= (op
== 2) ? 16 : (op
== 1) ? 32 : (op
== 0) ? 64 : 0;
13879 gas_assert (elsize
!= 0);
13880 constraint (et
.size
>= elsize
,
13881 _("elements must be smaller than reversal region"));
13882 neon_two_same (neon_quad (rs
), 1, et
.size
);
13888 if (inst
.operands
[1].isscalar
)
13890 enum neon_shape rs
= neon_select_shape (NS_DS
, NS_QS
, NS_NULL
);
13891 struct neon_type_el et
= neon_check_type (2, rs
,
13892 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
13893 unsigned sizebits
= et
.size
>> 3;
13894 unsigned dm
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
13895 int logsize
= neon_logbits (et
.size
);
13896 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
) << logsize
;
13898 if (vfp_or_neon_is_neon (NEON_CHECK_CC
) == FAIL
)
13901 inst
.instruction
= NEON_ENC_SCALAR (inst
.instruction
);
13902 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
13903 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
13904 inst
.instruction
|= LOW4 (dm
);
13905 inst
.instruction
|= HI1 (dm
) << 5;
13906 inst
.instruction
|= neon_quad (rs
) << 6;
13907 inst
.instruction
|= x
<< 17;
13908 inst
.instruction
|= sizebits
<< 16;
13910 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
13914 enum neon_shape rs
= neon_select_shape (NS_DR
, NS_QR
, NS_NULL
);
13915 struct neon_type_el et
= neon_check_type (2, rs
,
13916 N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
13917 /* Duplicate ARM register to lanes of vector. */
13918 inst
.instruction
= NEON_ENC_ARMREG (inst
.instruction
);
13921 case 8: inst
.instruction
|= 0x400000; break;
13922 case 16: inst
.instruction
|= 0x000020; break;
13923 case 32: inst
.instruction
|= 0x000000; break;
13926 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
13927 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 16;
13928 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 7;
13929 inst
.instruction
|= neon_quad (rs
) << 21;
13930 /* The encoding for this instruction is identical for the ARM and Thumb
13931 variants, except for the condition field. */
13932 do_vfp_cond_or_thumb ();
13936 /* VMOV has particularly many variations. It can be one of:
13937 0. VMOV<c><q> <Qd>, <Qm>
13938 1. VMOV<c><q> <Dd>, <Dm>
13939 (Register operations, which are VORR with Rm = Rn.)
13940 2. VMOV<c><q>.<dt> <Qd>, #<imm>
13941 3. VMOV<c><q>.<dt> <Dd>, #<imm>
13943 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
13944 (ARM register to scalar.)
13945 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
13946 (Two ARM registers to vector.)
13947 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
13948 (Scalar to ARM register.)
13949 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
13950 (Vector to two ARM registers.)
13951 8. VMOV.F32 <Sd>, <Sm>
13952 9. VMOV.F64 <Dd>, <Dm>
13953 (VFP register moves.)
13954 10. VMOV.F32 <Sd>, #imm
13955 11. VMOV.F64 <Dd>, #imm
13956 (VFP float immediate load.)
13957 12. VMOV <Rd>, <Sm>
13958 (VFP single to ARM reg.)
13959 13. VMOV <Sd>, <Rm>
13960 (ARM reg to VFP single.)
13961 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
13962 (Two ARM regs to two VFP singles.)
13963 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
13964 (Two VFP singles to two ARM regs.)
13966 These cases can be disambiguated using neon_select_shape, except cases 1/9
13967 and 3/11 which depend on the operand type too.
13969 All the encoded bits are hardcoded by this function.
13971 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
13972 Cases 5, 7 may be used with VFPv2 and above.
13974 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
13975 can specify a type where it doesn't make sense to, and is ignored). */
13980 enum neon_shape rs
= neon_select_shape (NS_RRFF
, NS_FFRR
, NS_DRR
, NS_RRD
,
13981 NS_QQ
, NS_DD
, NS_QI
, NS_DI
, NS_SR
, NS_RS
, NS_FF
, NS_FI
, NS_RF
, NS_FR
,
13983 struct neon_type_el et
;
13984 const char *ldconst
= 0;
13988 case NS_DD
: /* case 1/9. */
13989 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
13990 /* It is not an error here if no type is given. */
13992 if (et
.type
== NT_float
&& et
.size
== 64)
13994 do_vfp_nsyn_opcode ("fcpyd");
13997 /* fall through. */
13999 case NS_QQ
: /* case 0/1. */
14001 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
14003 /* The architecture manual I have doesn't explicitly state which
14004 value the U bit should have for register->register moves, but
14005 the equivalent VORR instruction has U = 0, so do that. */
14006 inst
.instruction
= 0x0200110;
14007 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14008 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14009 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14010 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14011 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
14012 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
14013 inst
.instruction
|= neon_quad (rs
) << 6;
14015 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
14019 case NS_DI
: /* case 3/11. */
14020 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
14022 if (et
.type
== NT_float
&& et
.size
== 64)
14024 /* case 11 (fconstd). */
14025 ldconst
= "fconstd";
14026 goto encode_fconstd
;
14028 /* fall through. */
14030 case NS_QI
: /* case 2/3. */
14031 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
14033 inst
.instruction
= 0x0800010;
14034 neon_move_immediate ();
14035 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
14038 case NS_SR
: /* case 4. */
14040 unsigned bcdebits
= 0;
14041 struct neon_type_el et
= neon_check_type (2, NS_NULL
,
14042 N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
14043 int logsize
= neon_logbits (et
.size
);
14044 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[0].reg
);
14045 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[0].reg
);
14047 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
14049 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
14050 && et
.size
!= 32, _(BAD_FPU
));
14051 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
14052 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
14056 case 8: bcdebits
= 0x8; break;
14057 case 16: bcdebits
= 0x1; break;
14058 case 32: bcdebits
= 0x0; break;
14062 bcdebits
|= x
<< logsize
;
14064 inst
.instruction
= 0xe000b10;
14065 do_vfp_cond_or_thumb ();
14066 inst
.instruction
|= LOW4 (dn
) << 16;
14067 inst
.instruction
|= HI1 (dn
) << 7;
14068 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
14069 inst
.instruction
|= (bcdebits
& 3) << 5;
14070 inst
.instruction
|= (bcdebits
>> 2) << 21;
14074 case NS_DRR
: /* case 5 (fmdrr). */
14075 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
14078 inst
.instruction
= 0xc400b10;
14079 do_vfp_cond_or_thumb ();
14080 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
);
14081 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 5;
14082 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
14083 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
14086 case NS_RS
: /* case 6. */
14088 struct neon_type_el et
= neon_check_type (2, NS_NULL
,
14089 N_EQK
, N_S8
| N_S16
| N_U8
| N_U16
| N_32
| N_KEY
);
14090 unsigned logsize
= neon_logbits (et
.size
);
14091 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
14092 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
);
14093 unsigned abcdebits
= 0;
14095 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
14097 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
14098 && et
.size
!= 32, _(BAD_FPU
));
14099 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
14100 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
14104 case 8: abcdebits
= (et
.type
== NT_signed
) ? 0x08 : 0x18; break;
14105 case 16: abcdebits
= (et
.type
== NT_signed
) ? 0x01 : 0x11; break;
14106 case 32: abcdebits
= 0x00; break;
14110 abcdebits
|= x
<< logsize
;
14111 inst
.instruction
= 0xe100b10;
14112 do_vfp_cond_or_thumb ();
14113 inst
.instruction
|= LOW4 (dn
) << 16;
14114 inst
.instruction
|= HI1 (dn
) << 7;
14115 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
14116 inst
.instruction
|= (abcdebits
& 3) << 5;
14117 inst
.instruction
|= (abcdebits
>> 2) << 21;
14121 case NS_RRD
: /* case 7 (fmrrd). */
14122 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
14125 inst
.instruction
= 0xc500b10;
14126 do_vfp_cond_or_thumb ();
14127 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
14128 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
14129 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
14130 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
14133 case NS_FF
: /* case 8 (fcpys). */
14134 do_vfp_nsyn_opcode ("fcpys");
14137 case NS_FI
: /* case 10 (fconsts). */
14138 ldconst
= "fconsts";
14140 if (is_quarter_float (inst
.operands
[1].imm
))
14142 inst
.operands
[1].imm
= neon_qfloat_bits (inst
.operands
[1].imm
);
14143 do_vfp_nsyn_opcode (ldconst
);
14146 first_error (_("immediate out of range"));
14149 case NS_RF
: /* case 12 (fmrs). */
14150 do_vfp_nsyn_opcode ("fmrs");
14153 case NS_FR
: /* case 13 (fmsr). */
14154 do_vfp_nsyn_opcode ("fmsr");
14157 /* The encoders for the fmrrs and fmsrr instructions expect three operands
14158 (one of which is a list), but we have parsed four. Do some fiddling to
14159 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
14161 case NS_RRFF
: /* case 14 (fmrrs). */
14162 constraint (inst
.operands
[3].reg
!= inst
.operands
[2].reg
+ 1,
14163 _("VFP registers must be adjacent"));
14164 inst
.operands
[2].imm
= 2;
14165 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
14166 do_vfp_nsyn_opcode ("fmrrs");
14169 case NS_FFRR
: /* case 15 (fmsrr). */
14170 constraint (inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
14171 _("VFP registers must be adjacent"));
14172 inst
.operands
[1] = inst
.operands
[2];
14173 inst
.operands
[2] = inst
.operands
[3];
14174 inst
.operands
[0].imm
= 2;
14175 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
14176 do_vfp_nsyn_opcode ("fmsrr");
14185 do_neon_rshift_round_imm (void)
14187 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14188 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
14189 int imm
= inst
.operands
[2].imm
;
14191 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
14194 inst
.operands
[2].present
= 0;
14199 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
14200 _("immediate out of range for shift"));
14201 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
,
14206 do_neon_movl (void)
14208 struct neon_type_el et
= neon_check_type (2, NS_QD
,
14209 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
14210 unsigned sizebits
= et
.size
>> 3;
14211 inst
.instruction
|= sizebits
<< 19;
14212 neon_two_same (0, et
.type
== NT_unsigned
, -1);
14218 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
14219 struct neon_type_el et
= neon_check_type (2, rs
,
14220 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
14221 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
14222 neon_two_same (neon_quad (rs
), 1, et
.size
);
14226 do_neon_zip_uzp (void)
14228 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
14229 struct neon_type_el et
= neon_check_type (2, rs
,
14230 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
14231 if (rs
== NS_DD
&& et
.size
== 32)
14233 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
14234 inst
.instruction
= N_MNEM_vtrn
;
14238 neon_two_same (neon_quad (rs
), 1, et
.size
);
14242 do_neon_sat_abs_neg (void)
14244 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
14245 struct neon_type_el et
= neon_check_type (2, rs
,
14246 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
14247 neon_two_same (neon_quad (rs
), 1, et
.size
);
14251 do_neon_pair_long (void)
14253 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
14254 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_32
| N_KEY
);
14255 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
14256 inst
.instruction
|= (et
.type
== NT_unsigned
) << 7;
14257 neon_two_same (neon_quad (rs
), 1, et
.size
);
14261 do_neon_recip_est (void)
14263 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
14264 struct neon_type_el et
= neon_check_type (2, rs
,
14265 N_EQK
| N_FLT
, N_F32
| N_U32
| N_KEY
);
14266 inst
.instruction
|= (et
.type
== NT_float
) << 8;
14267 neon_two_same (neon_quad (rs
), 1, et
.size
);
14273 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
14274 struct neon_type_el et
= neon_check_type (2, rs
,
14275 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
14276 neon_two_same (neon_quad (rs
), 1, et
.size
);
14282 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
14283 struct neon_type_el et
= neon_check_type (2, rs
,
14284 N_EQK
, N_I8
| N_I16
| N_I32
| N_KEY
);
14285 neon_two_same (neon_quad (rs
), 1, et
.size
);
14291 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
14292 struct neon_type_el et
= neon_check_type (2, rs
,
14293 N_EQK
| N_INT
, N_8
| N_KEY
);
14294 neon_two_same (neon_quad (rs
), 1, et
.size
);
14300 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
14301 neon_two_same (neon_quad (rs
), 1, -1);
14305 do_neon_tbl_tbx (void)
14307 unsigned listlenbits
;
14308 neon_check_type (3, NS_DLD
, N_EQK
, N_EQK
, N_8
| N_KEY
);
14310 if (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 4)
14312 first_error (_("bad list length for table lookup"));
14316 listlenbits
= inst
.operands
[1].imm
- 1;
14317 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14318 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14319 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
14320 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
14321 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
14322 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
14323 inst
.instruction
|= listlenbits
<< 8;
14325 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
14329 do_neon_ldm_stm (void)
14331 /* P, U and L bits are part of bitmask. */
14332 int is_dbmode
= (inst
.instruction
& (1 << 24)) != 0;
14333 unsigned offsetbits
= inst
.operands
[1].imm
* 2;
14335 if (inst
.operands
[1].issingle
)
14337 do_vfp_nsyn_ldm_stm (is_dbmode
);
14341 constraint (is_dbmode
&& !inst
.operands
[0].writeback
,
14342 _("writeback (!) must be used for VLDMDB and VSTMDB"));
14344 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
14345 _("register list must contain at least 1 and at most 16 "
14348 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
14349 inst
.instruction
|= inst
.operands
[0].writeback
<< 21;
14350 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
14351 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 22;
14353 inst
.instruction
|= offsetbits
;
14355 do_vfp_cond_or_thumb ();
14359 do_neon_ldr_str (void)
14361 int is_ldr
= (inst
.instruction
& (1 << 20)) != 0;
14363 if (inst
.operands
[0].issingle
)
14366 do_vfp_nsyn_opcode ("flds");
14368 do_vfp_nsyn_opcode ("fsts");
14373 do_vfp_nsyn_opcode ("fldd");
14375 do_vfp_nsyn_opcode ("fstd");
14379 /* "interleave" version also handles non-interleaving register VLD1/VST1
14383 do_neon_ld_st_interleave (void)
14385 struct neon_type_el et
= neon_check_type (1, NS_NULL
,
14386 N_8
| N_16
| N_32
| N_64
);
14387 unsigned alignbits
= 0;
14389 /* The bits in this table go:
14390 0: register stride of one (0) or two (1)
14391 1,2: register list length, minus one (1, 2, 3, 4).
14392 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
14393 We use -1 for invalid entries. */
14394 const int typetable
[] =
14396 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
14397 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
14398 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
14399 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
14403 if (et
.type
== NT_invtype
)
14406 if (inst
.operands
[1].immisalign
)
14407 switch (inst
.operands
[1].imm
>> 8)
14409 case 64: alignbits
= 1; break;
14411 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) == 3)
14412 goto bad_alignment
;
14416 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) == 3)
14417 goto bad_alignment
;
14422 first_error (_("bad alignment"));
14426 inst
.instruction
|= alignbits
<< 4;
14427 inst
.instruction
|= neon_logbits (et
.size
) << 6;
14429 /* Bits [4:6] of the immediate in a list specifier encode register stride
14430 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
14431 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
14432 up the right value for "type" in a table based on this value and the given
14433 list style, then stick it back. */
14434 idx
= ((inst
.operands
[0].imm
>> 4) & 7)
14435 | (((inst
.instruction
>> 8) & 3) << 3);
14437 typebits
= typetable
[idx
];
14439 constraint (typebits
== -1, _("bad list type for instruction"));
14441 inst
.instruction
&= ~0xf00;
14442 inst
.instruction
|= typebits
<< 8;
14445 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
14446 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
14447 otherwise. The variable arguments are a list of pairs of legal (size, align)
14448 values, terminated with -1. */
14451 neon_alignment_bit (int size
, int align
, int *do_align
, ...)
14454 int result
= FAIL
, thissize
, thisalign
;
14456 if (!inst
.operands
[1].immisalign
)
14462 va_start (ap
, do_align
);
14466 thissize
= va_arg (ap
, int);
14467 if (thissize
== -1)
14469 thisalign
= va_arg (ap
, int);
14471 if (size
== thissize
&& align
== thisalign
)
14474 while (result
!= SUCCESS
);
14478 if (result
== SUCCESS
)
14481 first_error (_("unsupported alignment for instruction"));

static void
do_neon_ld_st_lane (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_align = 0;
  int logsize = neon_logbits (et.size);
  int align = inst.operands[1].imm >> 8;
  int n = (inst.instruction >> 8) & 3;
  int max_el = 64 / et.size;

  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
              _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
              _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
              && et.size == 8,
              _("stride of 2 unavailable when element size is 8"));

  switch (n)
    {
    case 0:  /* VLD1 / VST1.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
                                       /* ... */ -1);
      if (align_good == FAIL)
        return;
      if (do_align)
        {
          unsigned alignbits = 0;
          switch (et.size)
            {
            case 16: alignbits = 0x1; break;
            case 32: alignbits = 0x3; break;
            }
          inst.instruction |= alignbits << 4;
        }
      break;

    case 1:  /* VLD2 / VST2.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
                                       /* ... */ -1);
      if (align_good == FAIL)
        return;
      if (do_align)
        inst.instruction |= 1 << 4;
      break;

    case 2:  /* VLD3 / VST3.  */
      constraint (inst.operands[1].immisalign,
                  _("can't use alignment with this instruction"));
      break;

    case 3:  /* VLD4 / VST4.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
                                       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
        return;
      if (do_align)
        {
          unsigned alignbits = 0;
          switch (et.size)
            {
            case 8:  alignbits = 0x1; break;
            case 16: alignbits = 0x1; break;
            case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
            }
          inst.instruction |= alignbits << 4;
        }
      break;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}

/* Encode single n-element structure to all lanes VLD<n> instructions.  */

static void
do_neon_ld_dup (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_align = 0;

  if (et.type == NT_invtype)
    return;

  switch ((inst.instruction >> 8) & 3)
    {
    case 0:  /* VLD1.  */
      gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
                                       &do_align, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
        return;
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
        {
        /* ... */
        case 2: inst.instruction |= 1 << 5; break;
        default: first_error (_("bad list length")); return;
        }
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1:  /* VLD2.  */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
                                       &do_align, 8, 16, 16, 32, 32, 64, -1);
      if (align_good == FAIL)
        return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
                  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
        inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2:  /* VLD3.  */
      constraint (inst.operands[1].immisalign,
                  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
                  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
        inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3:  /* VLD4.  */
      {
        int align = inst.operands[1].imm >> 8;
        align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
                                         16, 64, 32, 64, 32, 128, -1);
        if (align_good == FAIL)
          return;
        constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
                    _("bad list length"));
        if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
          inst.instruction |= 1 << 5;
        if (et.size == 32 && align == 128)
          inst.instruction |= 0x3 << 6;
        else
          inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;
    }

  inst.instruction |= do_align << 4;
}

/* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
   apart from bits [11:4]).  */

static void
do_neon_ldx_stx (void)
{
  switch (NEON_LANE (inst.operands[0].imm))
    {
    case NEON_INTERLEAVE_LANES:
      inst.instruction = NEON_ENC_INTERLV (inst.instruction);
      do_neon_ld_st_interleave ();
      break;

    case NEON_ALL_LANES:
      inst.instruction = NEON_ENC_DUP (inst.instruction);
      do_neon_ld_dup ();
      break;

    default:
      inst.instruction = NEON_ENC_LANE (inst.instruction);
      do_neon_ld_st_lane ();
    }

  /* L bit comes from bit mask.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].reg << 16;

  if (inst.operands[1].postind)
    {
      int postreg = inst.operands[1].imm & 0xf;
      constraint (!inst.operands[1].immisreg,
                  _("post-index must be a register"));
      constraint (postreg == 0xd || postreg == 0xf,
                  _("bad register for post-index"));
      inst.instruction |= postreg;
    }
  else if (inst.operands[1].writeback)
    {
      inst.instruction |= 0xd;
    }
  else
    inst.instruction |= 0xf;

  if (thumb_mode)
    inst.instruction |= 0xf9000000;
  else
    inst.instruction |= 0xf4000000;
}

/* Overall per-instruction processing.  */

/* We need to be able to fix up arbitrary expressions in some statements.
   This is so that we can handle symbols that are an arbitrary distance from
   the pc.  The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
   which returns part of an address in a form which will be valid for
   a data instruction.  We do this by pushing the expression into a symbol
   in the expr_section, and creating a fix for that.  */

static void
fix_new_arm (fragS *       frag,
             int           where,
             short int     size,
             expressionS * exp,
             int           pc_rel,
             int           reloc)
{
  fixS * new_fix;

  switch (exp->X_op)
    {
    case O_constant:
    case O_symbol:
    case O_add:
    case O_subtract:
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
      break;

    default:
      new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
                         pc_rel, reloc);
      break;
    }

  /* Mark whether the fix is to a THUMB instruction, or an ARM
     instruction.  */
  new_fix->tc_fix_data = thumb_mode;
}

/* Create a frag for an instruction requiring relaxation.  */

static void
output_relax_insn (void)
{
  char * to;
  symbolS * sym;
  int offset;

  /* The size of the instruction is unknown, so tie the debug info to the
     start of the instruction.  */
  dwarf2_emit_insn (0);

  switch (inst.reloc.exp.X_op)
    {
    case O_symbol:
      sym = inst.reloc.exp.X_add_symbol;
      offset = inst.reloc.exp.X_add_number;
      break;
    case O_constant:
      sym = NULL;
      offset = inst.reloc.exp.X_add_number;
      break;
    default:
      sym = make_expr_symbol (&inst.reloc.exp);
      offset = 0;
      break;
    }

  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
                 inst.relax, sym, offset, NULL /*offset, opcode*/);
  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
}

/* Write a 32-bit Thumb instruction to buf.  */

static void
put_thumb32_insn (char * buf, unsigned long insn)
{
  md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
}
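
/* Illustrative sketch (not part of the assembler): a 32-bit Thumb opcode is
   emitted most-significant halfword first, each halfword in target byte
   order.  The BL skeleton 0xf000f800, for instance, is written as the
   halfword 0xf000 followed by the halfword 0xf800.  */
#if 0
  char buf[4];
  put_thumb32_insn (buf, 0xf000f800);
  /* Little-endian output: 00 f0 00 f8;  big-endian output: f0 00 f8 00.  */
#endif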

static void
output_inst (const char * str)
{
  char * to = NULL;

  if (inst.error)
    {
      as_bad ("%s -- `%s'", inst.error, str);
      return;
    }
  if (inst.relax)
    {
      output_relax_insn ();
      return;
    }
  if (inst.size == 0)
    return;

  to = frag_more (inst.size);
  /* PR 9814: Record the thumb mode into the current frag so that we know
     what type of NOP padding to use, if necessary.  We override any previous
     setting so that if the mode has changed then the NOPS that we use will
     match the encoding of the last instruction in the frag.  */
  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  if (thumb_mode && (inst.size > THUMB_SIZE))
    {
      gas_assert (inst.size == (2 * THUMB_SIZE));
      put_thumb32_insn (to, inst.instruction);
    }
  else if (inst.size > INSN_SIZE)
    {
      gas_assert (inst.size == (2 * INSN_SIZE));
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
    }
  else
    md_number_to_chars (to, inst.instruction, inst.size);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    fix_new_arm (frag_now, to - frag_now->fr_literal,
                 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
                 inst.reloc.type);

  dwarf2_emit_insn (inst.size);
}

static char *
output_it_inst (int cond, int mask, char * to)
{
  unsigned long instruction = 0xbf00;

  mask &= 0xf;
  instruction |= mask;
  instruction |= cond << 4;

  if (to == NULL)
    {
      to = frag_more (2);
      dwarf2_emit_insn (2);
    }

  md_number_to_chars (to, instruction, 2);

  return to;
}
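
/* Illustrative sketch (not part of the assembler): "IT EQ" -- a block of one
   instruction whose first condition is EQ (0x0) -- encodes as
   0xbf00 | (0x0 << 4) | 0x8, i.e. 0xbf08.  */
#if 0
  char *it_loc = output_it_inst (0x0 /* EQ */, 0x8 /* one-insn block */, NULL);
#endif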

/* Tag values used in struct asm_opcode's tag field.  */
enum opcode_tag
{
  OT_unconditional,      /* Instruction cannot be conditionalized.
                            The ARM condition field is still 0xE.  */
  OT_unconditionalF,     /* Instruction cannot be conditionalized
                            and carries 0xF in its ARM condition field.  */
  OT_csuffix,            /* Instruction takes a conditional suffix.  */
  OT_csuffixF,           /* Some forms of the instruction take a conditional
                            suffix, others place 0xF where the condition field
                            would be.  */
  OT_cinfix3,            /* Instruction takes a conditional infix,
                            beginning at character index 3.  (In
                            unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
                            tsts, cmps, cmns, and teqs.  */
  OT_cinfix3_legacy,     /* Legacy instruction takes a conditional infix at
                            character index 3, even in unified mode.  Used for
                            legacy instructions where suffix and infix forms
                            may be ambiguous.  */
  OT_csuf_or_in3,        /* Instruction takes either a conditional
                            suffix or an infix at character index 3.  */
  OT_odd_infix_unc,      /* This is the unconditional variant of an
                            instruction that takes a conditional infix
                            at an unusual position.  In unified mode,
                            this variant will accept a suffix.  */
  OT_odd_infix_0         /* Values greater than or equal to OT_odd_infix_0
                            are the conditional variants of instructions that
                            take conditional infixes in unusual positions.
                            The infix appears at character index
                            (tag - OT_odd_infix_0).  These are not accepted
                            in unified mode.  */
};

/* Subroutine of md_assemble, responsible for looking up the primary
   opcode from the mnemonic the user wrote.  STR points to the
   beginning of the mnemonic.

   This is not simply a hash table lookup, because of conditional
   variants.  Most instructions have conditional variants, which are
   expressed with a _conditional affix_ to the mnemonic.  If we were
   to encode each conditional variant as a literal string in the opcode
   table, it would have approximately 20,000 entries.

   Most mnemonics take this affix as a suffix, and in unified syntax,
   'most' is upgraded to 'all'.  However, in the divided syntax, some
   instructions take the affix as an infix, notably the s-variants of
   the arithmetic instructions.  Of those instructions, all but six
   have the infix appear after the third character of the mnemonic.

   Accordingly, the algorithm for looking up primary opcodes given
   an identifier is:

   1. Look up the identifier in the opcode table.
      If we find a match, go to step U.

   2. Look up the last two characters of the identifier in the
      conditions table.  If we find a match, look up the first N-2
      characters of the identifier in the opcode table.  If we
      find a match, go to step CE.

   3. Look up the fourth and fifth characters of the identifier in
      the conditions table.  If we find a match, extract those
      characters from the identifier, and look up the remaining
      characters in the opcode table.  If we find a match, go
      to step CM.

   U. Examine the tag field of the opcode structure, in case this is
      one of the six instructions with its conditional infix in an
      unusual place.  If it is, the tag tells us where to find the
      infix; look it up in the conditions table and set inst.cond
      accordingly.  Otherwise, this is an unconditional instruction.
      Again set inst.cond accordingly.  Return the opcode structure.

   CE. Examine the tag field to make sure this is an instruction that
      should receive a conditional suffix.  If it is not, fail.
      Otherwise, set inst.cond from the suffix we already looked up,
      and return the opcode structure.

   CM. Examine the tag field to make sure this is an instruction that
      should receive a conditional infix after the third character.
      If it is not, fail.  Otherwise, undo the edits to the current
      line of input and proceed as for case CE.  */
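
/* Illustrative sketch (not part of the assembler): how the steps above fall
   out for two divided-syntax spellings.  */
#if 0
  /* "addeq":  step 1 fails; step 2 finds the condition "eq" in the last two
     characters and the opcode "add" in the remainder -> case CE (suffix).  */
  /* "addeqs": steps 1 and 2 fail ("qs" is not a condition); step 3 finds the
     condition "eq" at character indexes 3-4, and the remaining characters
     "adds" are in the opcode table                   -> case CM (infix).   */
  const char *mnem = "addeqs";
  char stem[8];
  memcpy (stem, mnem, 3);          /* "add"           */
  strcpy (stem + 3, mnem + 5);     /* + "s" -> "adds" */
#endif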

static const struct asm_opcode *
opcode_lookup (char **str)
{
  char *end, *base;
  char *affix;
  const struct asm_opcode *opcode;
  const struct asm_cond *cond;
  char save[2];
  bfd_boolean neon_supported;

  neon_supported = ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1);

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.' (in unified mode, or for Neon instructions), or end of string.  */
  for (base = end = *str; *end != '\0'; end++)
    if (*end == ' ' || ((unified_syntax || neon_supported) && *end == '.'))
      break;

  if (end == base)
    return NULL;

  /* Handle a possible width suffix and/or Neon type suffix.  */
  if (end[0] == '.')
    {
      int offset = 2;

      /* The .w and .n suffixes are only valid if the unified syntax is in
         use.  */
      if (unified_syntax && end[1] == 'w')
        inst.size_req = 4;
      else if (unified_syntax && end[1] == 'n')
        inst.size_req = 2;
      else
        offset = 0;

      inst.vectype.elems = 0;

      *str = end + offset;

      if (end[offset] == '.')
        {
          /* See if we have a Neon type suffix (possible in either unified or
             non-unified ARM syntax mode).  */
          if (parse_neon_type (&inst.vectype, str) == FAIL)
            return NULL;
        }
      else if (end[offset] != '\0' && end[offset] != ' ')
        return NULL;
    }
  else
    *str = end;

  /* Look for unaffixed or special-case affixed mnemonic.  */
  opcode = hash_find_n (arm_ops_hsh, base, end - base);
  if (opcode)
    {
      if (opcode->tag < OT_odd_infix_0)
        {
          inst.cond = COND_ALWAYS;
          return opcode;
        }

      if (warn_on_deprecated && unified_syntax)
        as_warn (_("conditional infixes are deprecated in unified syntax"));
      affix = base + (opcode->tag - OT_odd_infix_0);
      cond = hash_find_n (arm_cond_hsh, affix, 2);

      inst.cond = cond->value;
      return opcode;
    }

  /* Cannot have a conditional suffix on a mnemonic of less than two
     characters.  */
  if (end - base < 3)
    return NULL;

  /* Look for suffixed mnemonic.  */
  affix = end - 2;
  cond = hash_find_n (arm_cond_hsh, affix, 2);
  opcode = hash_find_n (arm_ops_hsh, base, affix - base);
  if (opcode && cond)
    {
      switch (opcode->tag)
        {
        case OT_cinfix3_legacy:
          /* Ignore conditional suffixes matched on infix only mnemonics.  */
          break;

        case OT_cinfix3:
        case OT_cinfix3_deprecated:
        case OT_odd_infix_unc:
          if (!unified_syntax)
            return NULL;
          /* else fall through */

        case OT_csuffix:
        case OT_csuffixF:
        case OT_csuf_or_in3:
          inst.cond = cond->value;
          return opcode;

        case OT_unconditional:
        case OT_unconditionalF:
          if (thumb_mode)
            inst.cond = cond->value;
          else
            {
              /* Delayed diagnostic.  */
              inst.error = BAD_COND;
              inst.cond = COND_ALWAYS;
            }
          return opcode;

        default:
          return NULL;
        }
    }

  /* Cannot have a usual-position infix on a mnemonic of less than
     six characters (five would be a suffix).  */
  if (end - base < 6)
    return NULL;

  /* Look for infixed mnemonic in the usual position.  */
  affix = base + 3;
  cond = hash_find_n (arm_cond_hsh, affix, 2);
  if (!cond)
    return NULL;

  memcpy (save, affix, 2);
  memmove (affix, affix + 2, (end - affix) - 2);
  opcode = hash_find_n (arm_ops_hsh, base, (end - base) - 2);
  memmove (affix + 2, affix, (end - affix) - 2);
  memcpy (affix, save, 2);

  if (opcode
      && (opcode->tag == OT_cinfix3
          || opcode->tag == OT_cinfix3_deprecated
          || opcode->tag == OT_csuf_or_in3
          || opcode->tag == OT_cinfix3_legacy))
    {
      if (warn_on_deprecated && unified_syntax
          && (opcode->tag == OT_cinfix3
              || opcode->tag == OT_cinfix3_deprecated))
        as_warn (_("conditional infixes are deprecated in unified syntax"));

      inst.cond = cond->value;
      return opcode;
    }

  return NULL;
}

/* This function generates an initial IT instruction, leaving its block
   virtually open for the new instructions.  Eventually, the mask will be
   updated by now_it_add_mask () each time a new instruction needs to be
   included in the IT block.  Finally, the block is closed with
   close_automatic_it_block ().  The block closure can be requested either
   from md_assemble (), a tencode (), or due to a label hook.  */

static void
new_automatic_it_block (int cond)
{
  now_it.state = AUTOMATIC_IT_BLOCK;
  now_it.mask = 0x18;
  now_it.cc = cond;
  now_it.block_length = 1;
  mapping_state (MAP_THUMB);
  now_it.insn = output_it_inst (cond, now_it.mask, NULL);
}

/* Close an automatic IT block.
   See comments in new_automatic_it_block ().  */

static void
close_automatic_it_block (void)
{
  now_it.mask = 0x10;
  now_it.block_length = 0;
}

/* Update the mask of the current automatically-generated IT
   instruction.  See comments in new_automatic_it_block ().  */

static void
now_it_add_mask (int cond)
{
#define CLEAR_BIT(value, nbit)  ((value) & ~(1 << (nbit)))
#define SET_BIT_VALUE(value, bitvalue, nbit)  (CLEAR_BIT (value, nbit) \
                                               | ((bitvalue) << (nbit)))
  const int resulting_bit = (cond & 1);

  now_it.mask &= 0xf;
  now_it.mask = SET_BIT_VALUE (now_it.mask,
                               resulting_bit,
                               (5 - now_it.block_length));
  now_it.mask = SET_BIT_VALUE (now_it.mask,
                               1,
                               ((5 - now_it.block_length) - 1) );
  output_it_inst (now_it.cc, now_it.mask, now_it.insn);

#undef CLEAR_BIT
#undef SET_BIT_VALUE
}
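
/* Illustrative sketch (not part of the assembler): SET_BIT_VALUE forces a
   single bit.  Starting from 0x8 (the mask of a one-instruction block),
   clearing bit 3 and then setting bit 2 gives 0x4, the mask of an ITT EQ
   block; the IT opcode already emitted at now_it.insn is rewritten
   accordingly (0xbf04).  */
#if 0
  unsigned m = 0x8;
  m = (m & ~(1 << 3)) | (0 << 3);   /* the macros written out by hand: 0x0 */
  m = (m & ~(1 << 2)) | (1 << 2);   /* -> 0x4                             */
#endif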

/* The IT blocks handling machinery is accessed through these functions:

     it_fsm_pre_encode ()               from md_assemble ()
     set_it_insn_type ()                optional, from the tencode functions
     set_it_insn_type_last ()           ditto
     in_it_block ()                     ditto
     it_fsm_post_encode ()              from md_assemble ()
     force_automatic_it_block_close ()  from label handling functions

   1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
      initializing the IT insn type with a generic initial value depending
      on the inst.condition.
   2) During the tencode function, two things may happen:
      a) The tencode function overrides the IT insn type by
         calling either set_it_insn_type (type) or set_it_insn_type_last ().
      b) The tencode function queries the IT block state by
         calling in_it_block () (i.e. to determine narrow/not narrow mode).

      Both set_it_insn_type and in_it_block run the internal FSM state
      handling function (handle_it_state), because: a) setting the IT insn
      type may lead to an invalid state (exiting the function),
      and b) querying the state requires the FSM to be updated.
      Specifically we want to avoid creating an IT block for conditional
      branches, so it_fsm_pre_encode is actually a guess and we can't
      determine whether an IT block is required until the tencode () routine
      has decided what type of instruction this actually is.
      Because of this, if set_it_insn_type and in_it_block have to be used,
      set_it_insn_type has to be called first.

      set_it_insn_type_last () is a wrapper of set_it_insn_type (type) that
      determines the insn IT type depending on the inst.cond code.
      When a tencode () routine encodes an instruction that can be
      either outside an IT block, or, in the case of being inside, has to be
      the last one, set_it_insn_type_last () will determine the proper
      IT instruction type based on the inst.cond code.  Otherwise,
      set_it_insn_type can be called for overriding that logic or
      for covering other cases.

      Calling handle_it_state () may not transition the IT block state to
      OUTSIDE_IT_BLOCK immediately, since the (current) state could be
      still queried.  Instead, if the FSM determines that the state should
      be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
      after the tencode () function: that's what it_fsm_post_encode () does.

      Since in_it_block () calls the state handling function to get an
      updated state, an error may occur (due to an invalid combination of
      insns).  In that case, inst.error is set.
      Therefore, inst.error has to be checked after the execution of
      the tencode () routine.

   3) Back in md_assemble (), it_fsm_post_encode () is called to commit
      any pending state change (if any) that didn't take place in
      handle_it_state () as explained above.  */
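
/* Illustrative sketch (not part of the assembler): the order of calls made
   for a single instruction, following the description above.  */
#if 0
  it_fsm_pre_encode ();      /* guess the IT insn type from inst.cond        */
  opcode->tencode ();        /* may call set_it_insn_type ()/in_it_block ()  */
  if (inst.error)            /* bad combinations are reported via inst.error */
    return;
  it_fsm_post_encode ();     /* commit any pending OUTSIDE_IT transition     */
#endif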

static void
it_fsm_pre_encode (void)
{
  if (inst.cond != COND_ALWAYS)
    inst.it_insn_type = INSIDE_IT_INSN;
  else
    inst.it_insn_type = OUTSIDE_IT_INSN;

  now_it.state_handled = 0;
}

/* IT state FSM handling function.  */

static int
handle_it_state (void)
{
  now_it.state_handled = 1;

  switch (now_it.state)
    {
    case OUTSIDE_IT_BLOCK:
      switch (inst.it_insn_type)
        {
        case OUTSIDE_IT_INSN:
          break;

        case INSIDE_IT_INSN:
        case INSIDE_IT_LAST_INSN:
          if (thumb_mode == 0)
            {
              if (unified_syntax
                  && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
                as_tsktsk (_("Warning: conditional outside an IT block"\
                             " for Thumb."));
            }
          else
            {
              if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
                  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2))
                {
                  /* Automatically generate the IT instruction.  */
                  new_automatic_it_block (inst.cond);
                  if (inst.it_insn_type == INSIDE_IT_LAST_INSN)
                    close_automatic_it_block ();
                }
              else
                {
                  inst.error = BAD_OUT_IT;
                  return FAIL;
                }
            }
          break;

        case IF_INSIDE_IT_LAST_INSN:
        case NEUTRAL_IT_INSN:
          break;

        case IT_INSN:
          now_it.state = MANUAL_IT_BLOCK;
          now_it.block_length = 0;
          break;
        }
      break;

    case AUTOMATIC_IT_BLOCK:
      /* Three things may happen now:
         a) We should increment current it block size;
         b) We should close current it block (closing insn or 4 insns);
         c) We should close current it block and start a new one (due
            to incompatible conditions or
            4 insns-length block reached).  */

      switch (inst.it_insn_type)
        {
        case OUTSIDE_IT_INSN:
          /* The closure of the block shall happen immediately,
             so any in_it_block () call reports the block as closed.  */
          force_automatic_it_block_close ();
          break;

        case INSIDE_IT_INSN:
        case INSIDE_IT_LAST_INSN:
        case IF_INSIDE_IT_LAST_INSN:
          now_it.block_length++;

          if (now_it.block_length > 4
              || !now_it_compatible (inst.cond))
            {
              force_automatic_it_block_close ();
              if (inst.it_insn_type != IF_INSIDE_IT_LAST_INSN)
                new_automatic_it_block (inst.cond);
            }
          else
            now_it_add_mask (inst.cond);

          if (now_it.state == AUTOMATIC_IT_BLOCK
              && (inst.it_insn_type == INSIDE_IT_LAST_INSN
                  || inst.it_insn_type == IF_INSIDE_IT_LAST_INSN))
            close_automatic_it_block ();
          break;

        case NEUTRAL_IT_INSN:
          now_it.block_length++;

          if (now_it.block_length > 4)
            force_automatic_it_block_close ();
          else
            now_it_add_mask (now_it.cc & 1);
          break;

        case IT_INSN:
          close_automatic_it_block ();
          now_it.state = MANUAL_IT_BLOCK;
          break;
        }
      break;

    case MANUAL_IT_BLOCK:
      {
        /* Check conditional suffixes.  */
        const int cond = now_it.cc ^ ((now_it.mask >> 4) & 1) ^ 1;
        int is_last;
        now_it.mask <<= 1;
        now_it.mask &= 0x1f;
        is_last = (now_it.mask == 0x10);

        switch (inst.it_insn_type)
          {
          case OUTSIDE_IT_INSN:
            inst.error = BAD_NOT_IT;
            return FAIL;

          case INSIDE_IT_INSN:
            if (cond != inst.cond)
              {
                inst.error = BAD_IT_COND;
                return FAIL;
              }
            break;

          case INSIDE_IT_LAST_INSN:
          case IF_INSIDE_IT_LAST_INSN:
            if (cond != inst.cond)
              {
                inst.error = BAD_IT_COND;
                return FAIL;
              }
            if (!is_last)
              {
                inst.error = BAD_BRANCH;
                return FAIL;
              }
            break;

          case NEUTRAL_IT_INSN:
            /* The BKPT instruction is unconditional even in an IT block.  */
            break;

          case IT_INSN:
            inst.error = BAD_IT_IT;
            return FAIL;
          }
      }
      break;
    }

  return SUCCESS;
}

static void
it_fsm_post_encode (void)
{
  int is_last;

  if (!now_it.state_handled)
    handle_it_state ();

  is_last = (now_it.mask == 0x10);
  if (is_last)
    {
      now_it.state = OUTSIDE_IT_BLOCK;
      now_it.mask = 0;
    }
}

static void
force_automatic_it_block_close (void)
{
  if (now_it.state == AUTOMATIC_IT_BLOCK)
    {
      close_automatic_it_block ();
      now_it.state = OUTSIDE_IT_BLOCK;
    }
}

static int
in_it_block (void)
{
  if (!now_it.state_handled)
    handle_it_state ();

  return now_it.state != OUTSIDE_IT_BLOCK;
}

void
md_assemble (char *str)
{
  char *p = str;
  const struct asm_opcode * opcode;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  memset (&inst, '\0', sizeof (inst));
  inst.reloc.type = BFD_RELOC_UNUSED;

  opcode = opcode_lookup (&p);
  if (!opcode)
    {
      /* It wasn't an instruction, but it might be a register alias of
         the form alias .req reg, or a Neon .dn/.qn directive.  */
      if (! create_register_alias (str, p)
          && ! create_neon_reg_alias (str, p))
        as_bad (_("bad instruction `%s'"), str);

      return;
    }

  if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
    as_warn (_("s suffix on comparison instruction is deprecated"));

  /* The value which unconditional instructions should have in place of the
     condition field.  */
  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;

  if (thumb_mode)
    {
      arm_feature_set variant;

      variant = cpu_variant;
      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
        ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
      /* Check that this instruction is supported for this CPU.  */
      if (!opcode->tvariant
          || (thumb_mode == 1
              && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
        {
          as_bad (_("selected processor does not support `%s'"), str);
          return;
        }
      if (inst.cond != COND_ALWAYS && !unified_syntax
          && opcode->tencode != do_t_branch)
        {
          as_bad (_("Thumb does not support conditional execution"));
          return;
        }

      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2))
        {
          if (opcode->tencode != do_t_blx && opcode->tencode != do_t_branch23
              && !(ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_msr)
                   || ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_barrier)))
            {
              /* Two things are addressed here.
                 1) Implicit require narrow instructions on Thumb-1.
                    This avoids relaxation accidentally introducing Thumb-2
                    instructions.
                 2) Reject wide instructions in non Thumb-2 cores.  */
              if (inst.size_req == 0)
                inst.size_req = 2;
              else if (inst.size_req == 4)
                {
                  as_bad (_("selected processor does not support `%s'"), str);
                  return;
                }
            }
        }

      inst.instruction = opcode->tvalue;

      if (!parse_operands (p, opcode->operands))
        {
          /* Prepare the it_insn_type for those encodings that don't set
             it.  */
          it_fsm_pre_encode ();

          opcode->tencode ();

          it_fsm_post_encode ();
        }

      if (!(inst.error || inst.relax))
        {
          gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
          inst.size = (inst.instruction > 0xffff ? 4 : 2);
          if (inst.size_req && inst.size_req != inst.size)
            {
              as_bad (_("cannot honor width suffix -- `%s'"), str);
              return;
            }
        }

      /* Something has gone badly wrong if we try to relax a fixed size
         instruction.  */
      gas_assert (inst.size_req == 0 || !inst.relax);

      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
                              *opcode->tvariant);
      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
         set those bits when Thumb-2 32-bit instructions are seen.  ie.
         anything other than bl/blx and v6-M instructions.
         This is overly pessimistic for relaxable instructions.  */
      if (((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
           || inst.relax)
          && !(ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
               || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier)))
        ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
                                arm_ext_v6t2);

      mapping_state (MAP_THUMB);
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    {
      bfd_boolean is_bx;

      /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
      is_bx = (opcode->aencode == do_bx);

      /* Check that this instruction is supported for this CPU.  */
      if (!(is_bx && fix_v4bx)
          && !(opcode->avariant &&
               ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
        {
          as_bad (_("selected processor does not support `%s'"), str);
          return;
        }
      if (inst.size_req)
        {
          as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
          return;
        }

      inst.instruction = opcode->avalue;
      if (opcode->tag == OT_unconditionalF)
        inst.instruction |= 0xF << 28;
      else
        inst.instruction |= inst.cond << 28;
      inst.size = INSN_SIZE;
      if (!parse_operands (p, opcode->operands))
        {
          it_fsm_pre_encode ();
          opcode->aencode ();
          it_fsm_post_encode ();
        }
      /* Arm mode bx is marked as both v4T and v5 because it's still required
         on a hypothetical non-thumb v5 core.  */
      if (is_bx)
        ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
      else
        ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
                                *opcode->avariant);

      mapping_state (MAP_ARM);
    }
  else
    {
      as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
                "-- `%s'"), str);
      return;
    }

  output_inst (str);
}

static void
check_it_blocks_finished (void)
{
#ifdef OBJ_ELF
  asection *sect;

  for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
    if (seg_info (sect)->tc_segment_info_data.current_it.state
        == MANUAL_IT_BLOCK)
      {
        as_warn (_("section '%s' finished with an open IT block."),
                 sect->name);
      }
#else
  if (now_it.state == MANUAL_IT_BLOCK)
    as_warn (_("file finished with an open IT block."));
#endif
}

/* Various frobbings of labels and their addresses.  */

void
arm_start_line_hook (void)
{
  last_label_seen = NULL;
}

void
arm_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  force_automatic_it_block_close ();

  /* Note - do not allow local symbols (.Lxxx) to be labelled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.  Also, these labels can be used
     as part of a computed goto or switch statement.  eg gcc
     can generate code that looks like this:

                ldr  r2, [pc, .Laaa]
                ...

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
         bit of that address should be set.  This will allow
         interworking between Arm and Thumb functions to work
         correctly.  */

      THUMB_SET_FUNC (sym, 1);

      label_is_thumb_function_name = FALSE;
    }

  dwarf2_emit_label (sym);
}

int
arm_data_in_code (void)
{
  if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
    {
      *input_line_pointer = '/';
      input_line_pointer += 5;
      *input_line_pointer = 0;
      return 1;
    }

  return 0;
}

char *
arm_canonicalize_symbol_name (char * name)
{
  int len;

  if (thumb_mode && (len = strlen (name)) > 5
      && streq (name + len - 5, "/data"))
    *(name + len - 5) = 0;

  return name;
}

/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.  Some registers
   also have mixed-case names.  */

#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
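
/* Illustrative sketch (not part of the assembler): REGNUM(r,13,RN) goes
   through REGDEF(r13, 13, RN) and yields the initializer below; REGSET(r,RN)
   simply emits the sixteen entries r0..r15 the same way.  */
#if 0
  { "r13", 13, REG_TYPE_RN, TRUE, 0 },
#endif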

static const struct reg_entry reg_names[] =
{
  /* ARM integer registers.  */
  REGSET(r, RN), REGSET(R, RN),

  /* ATPCS synonyms.  */
  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),

  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),

  /* Well-known aliases.  */
  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),

  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),

  /* Coprocessor numbers.  */
  REGSET(p, CP), REGSET(P, CP),

  /* Coprocessor register numbers.  The "cr" variants are for backward
     compatibility.  */
  REGSET(c,  CN), REGSET(C, CN),
  REGSET(cr, CN), REGSET(CR, CN),

  /* FPA registers.  */
  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),

  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),

  /* VFP SP registers.  */
  REGSET(s,VFS),  REGSET(S,VFS),
  REGSETH(s,VFS), REGSETH(S,VFS),

  /* VFP DP Registers.  */
  REGSET(d,VFD),  REGSET(D,VFD),
  /* Extra Neon DP registers.  */
  REGSETH(d,VFD), REGSETH(D,VFD),

  /* Neon QP registers.  */
  REGSET2(q,NQ),  REGSET2(Q,NQ),

  /* VFP control registers.  */
  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
  REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
  REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
  REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
  REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),

  /* Maverick DSP coprocessor registers.  */
  REGSET(mvf,MVF),  REGSET(mvd,MVD),  REGSET(mvfx,MVFX),  REGSET(mvdx,MVDX),
  REGSET(MVF,MVF),  REGSET(MVD,MVD),  REGSET(MVFX,MVFX),  REGSET(MVDX,MVDX),

  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
  REGDEF(dspsc,0,DSPSC),

  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
  REGDEF(DSPSC,0,DSPSC),

  /* iWMMXt data registers - p0, c0-15.  */
  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),

  /* iWMMXt control registers - p1, c0-3.  */
  REGDEF(wcid,  0,MMXWC),  REGDEF(wCID,  0,MMXWC),  REGDEF(WCID,  0,MMXWC),
  REGDEF(wcon,  1,MMXWC),  REGDEF(wCon,  1,MMXWC),  REGDEF(WCON,  1,MMXWC),
  REGDEF(wcssf, 2,MMXWC),  REGDEF(wCSSF, 2,MMXWC),  REGDEF(WCSSF, 2,MMXWC),
  REGDEF(wcasf, 3,MMXWC),  REGDEF(wCASF, 3,MMXWC),  REGDEF(WCASF, 3,MMXWC),

  /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
  REGDEF(wcgr0, 8,MMXWCG),  REGDEF(wCGR0, 8,MMXWCG),  REGDEF(WCGR0, 8,MMXWCG),
  REGDEF(wcgr1, 9,MMXWCG),  REGDEF(wCGR1, 9,MMXWCG),  REGDEF(WCGR1, 9,MMXWCG),
  REGDEF(wcgr2,10,MMXWCG),  REGDEF(wCGR2,10,MMXWCG),  REGDEF(WCGR2,10,MMXWCG),
  REGDEF(wcgr3,11,MMXWCG),  REGDEF(wCGR3,11,MMXWCG),  REGDEF(WCGR3,11,MMXWCG),

  /* XScale accumulator registers.  */
  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
};

/* Table of all PSR suffixes.  Bare "CPSR" and "SPSR" are handled
   within psr_required_here.  */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all",  PSR_c | PSR_f},
  /* ... */

  /* Individual flags.  */
  /* ... */

  /* Combinations of flags.  */
  {"fs",   PSR_f | PSR_s},          {"fx",   PSR_f | PSR_x},
  {"fc",   PSR_f | PSR_c},          {"sf",   PSR_s | PSR_f},
  {"sx",   PSR_s | PSR_x},          {"sc",   PSR_s | PSR_c},
  {"xf",   PSR_x | PSR_f},          {"xs",   PSR_x | PSR_s},
  {"xc",   PSR_x | PSR_c},          {"cf",   PSR_c | PSR_f},
  {"cs",   PSR_c | PSR_s},          {"cx",   PSR_c | PSR_x},
  {"fsx",  PSR_f | PSR_s | PSR_x},  {"fsc",  PSR_f | PSR_s | PSR_c},
  {"fxs",  PSR_f | PSR_x | PSR_s},  {"fxc",  PSR_f | PSR_x | PSR_c},
  {"fcs",  PSR_f | PSR_c | PSR_s},  {"fcx",  PSR_f | PSR_c | PSR_x},
  {"sfx",  PSR_s | PSR_f | PSR_x},  {"sfc",  PSR_s | PSR_f | PSR_c},
  {"sxf",  PSR_s | PSR_x | PSR_f},  {"sxc",  PSR_s | PSR_x | PSR_c},
  {"scf",  PSR_s | PSR_c | PSR_f},  {"scx",  PSR_s | PSR_c | PSR_x},
  {"xfs",  PSR_x | PSR_f | PSR_s},  {"xfc",  PSR_x | PSR_f | PSR_c},
  {"xsf",  PSR_x | PSR_s | PSR_f},  {"xsc",  PSR_x | PSR_s | PSR_c},
  {"xcf",  PSR_x | PSR_c | PSR_f},  {"xcs",  PSR_x | PSR_c | PSR_s},
  {"cfs",  PSR_c | PSR_f | PSR_s},  {"cfx",  PSR_c | PSR_f | PSR_x},
  {"csf",  PSR_c | PSR_s | PSR_f},  {"csx",  PSR_c | PSR_s | PSR_x},
  {"cxf",  PSR_c | PSR_x | PSR_f},  {"cxs",  PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f}
};

/* Table of V7M psr names.  */
static const struct asm_psr v7m_psrs[] =
{
  {"apsr",        0 }, {"APSR",        0 },
  {"iapsr",       1 }, {"IAPSR",       1 },
  {"eapsr",       2 }, {"EAPSR",       2 },
  {"psr",         3 }, {"PSR",         3 },
  {"xpsr",        3 }, {"XPSR",        3 }, {"xPSR", 3 },
  {"ipsr",        5 }, {"IPSR",        5 },
  {"epsr",        6 }, {"EPSR",        6 },
  {"iepsr",       7 }, {"IEPSR",       7 },
  {"msp",         8 }, {"MSP",         8 },
  {"psp",         9 }, {"PSP",         9 },
  {"primask",     16}, {"PRIMASK",     16},
  {"basepri",     17}, {"BASEPRI",     17},
  {"basepri_max", 18}, {"BASEPRI_MAX", 18},
  {"faultmask",   19}, {"FAULTMASK",   19},
  {"control",     20}, {"CONTROL",     20}
};

/* Table of all shift-in-operand names.  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL },  { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL },  { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR },  { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR },  { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR },  { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX },  { "RRX", SHIFT_RRX }
};

/* Table of all explicit relocation names.  */
static struct reloc_entry reloc_names[] =
{
  { "got",     BFD_RELOC_ARM_GOT32   },  { "GOT",     BFD_RELOC_ARM_GOT32   },
  { "gotoff",  BFD_RELOC_ARM_GOTOFF  },  { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
  { "plt",     BFD_RELOC_ARM_PLT32   },  { "PLT",     BFD_RELOC_ARM_PLT32   },
  { "target1", BFD_RELOC_ARM_TARGET1 },  { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 },  { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel",   BFD_RELOC_ARM_SBREL32 },  { "SBREL",   BFD_RELOC_ARM_SBREL32 },
  { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},  { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
  { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32 }, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32 },
  { "tpoff",   BFD_RELOC_ARM_TLS_LE32 }, { "TPOFF",   BFD_RELOC_ARM_TLS_LE32 }
};

/* Table of all conditional affixes.  0xF is not defined as a condition code.  */
static const struct asm_cond conds[] =
{
  /* ... */
  {"cs", 0x2}, {"hs", 0x2},
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
  /* ... */
};

static struct asm_barrier_opt barrier_opt_names[] =
{
  /* ... */
};

/* Table of ARM-format instructions.  */

/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.  */
#define OPS0()            { OP_stop, }
#define OPS1(a)           { OP_##a, }
#define OPS2(a,b)         { OP_##a,OP_##b, }
#define OPS3(a,b,c)       { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)     { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)   { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
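
/* Illustrative sketch (not part of the assembler): OPS2(RR, SH) expands to
   { OP_RR, OP_SH, }; the operand array it initializes is longer than two
   entries, so the rest default to zero, i.e. OP_stop, which is the
   terminator mentioned above.  */
#if 0
  OPS2 (RR, SH)   /* ==  { OP_RR, OP_SH, }  */
#endif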

/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional
   suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, T_MNEM_##top, nops, ops, ae, te)
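
/* Illustrative sketch (not part of the assembler): with the ARM_VARIANT /
   THUMB_VARIANT definitions in force at the top of the table below
   (&arm_ext_v1 and &arm_ext_v4t), an entry such as
       tCE(and, 0000000, and, 3, (RR, oRR, SH), arit, t_arit3c)
   expands through TxCE to the struct asm_opcode initializer  */
#if 0
  { "and", { OP_RR, OP_oRR, OP_SH, }, OT_csuffix, 0x0000000, T_MNEM_and,
    &arm_ext_v1, &arm_ext_v4t, do_arit, do_t_arit3c },
#endif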

/* Second most common sort of mnemonic: has a Thumb variant, takes a
   conditional infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { #mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, T_MNEM_##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, T_MNEM_##top, nops, ops, ae, te)

/* Mnemonic with a conditional infix in an unusual place.  Each and every
   variant has to appear in the condition table.  */
#define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te) \
  { #m1 #m2 #m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (#m1) - 1, \
    0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }

#define TxCM(m1, m2, op, top, nops, ops, ae, te) \
  TxCM_ (m1,   , m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, eq, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, ne, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, cs, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, hs, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, cc, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, ul, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, lo, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, mi, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, pl, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, vs, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, vc, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, hi, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, ls, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, ge, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, lt, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, gt, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, le, m2, op, top, nops, ops, ae, te), \
  TxCM_ (m1, al, m2, op, top, nops, ops, ae, te)

#define TCM(m1,m2, aop, top, nops, ops, ae, te) \
  TxCM (m1,m2, aop, 0x##top, nops, ops, ae, te)
#define tCM(m1,m2, aop, top, nops, ops, ae, te) \
  TxCM (m1,m2, aop, T_MNEM_##top, nops, ops, ae, te)

/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te) \
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* ARM-only variants of all the above.  */
#define CE(mnem,  op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define C3(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  */
#define cCE(mnem,  op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

#define xCM_(m1, m2, m3, op, nops, ops, ae) \
  { #m1 #m2 #m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (#m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define CM(m1, m2, op, nops, ops, ae) \
  xCM_ (m1,   , m2, op, nops, ops, ae), \
  xCM_ (m1, eq, m2, op, nops, ops, ae), \
  xCM_ (m1, ne, m2, op, nops, ops, ae), \
  xCM_ (m1, cs, m2, op, nops, ops, ae), \
  xCM_ (m1, hs, m2, op, nops, ops, ae), \
  xCM_ (m1, cc, m2, op, nops, ops, ae), \
  xCM_ (m1, ul, m2, op, nops, ops, ae), \
  xCM_ (m1, lo, m2, op, nops, ops, ae), \
  xCM_ (m1, mi, m2, op, nops, ops, ae), \
  xCM_ (m1, pl, m2, op, nops, ops, ae), \
  xCM_ (m1, vs, m2, op, nops, ops, ae), \
  xCM_ (m1, vc, m2, op, nops, ops, ae), \
  xCM_ (m1, hi, m2, op, nops, ops, ae), \
  xCM_ (m1, ls, m2, op, nops, ops, ae), \
  xCM_ (m1, ge, m2, op, nops, ops, ae), \
  xCM_ (m1, lt, m2, op, nops, ops, ae), \
  xCM_ (m1, gt, m2, op, nops, ops, ae), \
  xCM_ (m1, le, m2, op, nops, ops, ae), \
  xCM_ (m1, al, m2, op, nops, ops, ae)

#define UE(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

#define UF(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM_##op, N_MNEM_##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   version.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag) \
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
    THUMB_VARIANT, do_##enc, do_##enc }

#define NCE(mnem, op, nops, ops, enc) \
  NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define NCEF(mnem, op, nops, ops, enc) \
  NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag) \
  { #mnem, OPS##nops ops, tag, N_MNEM_##op, N_MNEM_##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define nCE(mnem, op, nops, ops, enc) \
  nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define nCEF(mnem, op, nops, ops, enc) \
  nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Thumb-only, unconditional.  */
#define UT(mnem, op, nops, ops, te) TUE (mnem, 0, op, nops, ops, 0, te)
16150 static const struct asm_opcode insns
[] =
16152 #define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions. */
16153 #define THUMB_VARIANT &arm_ext_v4t
16154 tCE(and, 0000000, and, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
16155 tC3(ands
, 0100000, ands
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
16156 tCE(eor
, 0200000, eor
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
16157 tC3(eors
, 0300000, eors
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
16158 tCE(sub
, 0400000, sub
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
16159 tC3(subs
, 0500000, subs
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
16160 tCE(add
, 0800000, add
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
16161 tC3(adds
, 0900000, adds
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
16162 tCE(adc
, 0a00000
, adc
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
16163 tC3(adcs
, 0b00000, adcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
16164 tCE(sbc
, 0c00000
, sbc
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
16165 tC3(sbcs
, 0d00000
, sbcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
16166 tCE(orr
, 1800000, orr
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
16167 tC3(orrs
, 1900000, orrs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
16168 tCE(bic
, 1c00000
, bic
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
16169 tC3(bics
, 1d00000
, bics
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
16171 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
16172 for setting PSR flag bits. They are obsolete in V6 and do not
16173 have Thumb equivalents. */
16174 tCE(tst
, 1100000, tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
16175 tC3w(tsts
, 1100000, tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
16176 CL(tstp
, 110f000
, 2, (RR
, SH
), cmp
),
16177 tCE(cmp
, 1500000, cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
16178 tC3w(cmps
, 1500000, cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
16179 CL(cmpp
, 150f000
, 2, (RR
, SH
), cmp
),
16180 tCE(cmn
, 1700000, cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
16181 tC3w(cmns
, 1700000, cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
16182 CL(cmnp
, 170f000
, 2, (RR
, SH
), cmp
),
16184 tCE(mov
, 1a00000
, mov
, 2, (RR
, SH
), mov
, t_mov_cmp
),
16185 tC3(movs
, 1b00000
, movs
, 2, (RR
, SH
), mov
, t_mov_cmp
),
16186 tCE(mvn
, 1e00000
, mvn
, 2, (RR
, SH
), mov
, t_mvn_tst
),
16187 tC3(mvns
, 1f00000
, mvns
, 2, (RR
, SH
), mov
, t_mvn_tst
),
16189 tCE(ldr
, 4100000, ldr
, 2, (RR
, ADDRGLDR
),ldst
, t_ldst
),
16190 tC3(ldrb
, 4500000, ldrb
, 2, (RR
, ADDRGLDR
),ldst
, t_ldst
),
16191 tCE(str
, 4000000, str
, 2, (RR
, ADDRGLDR
),ldst
, t_ldst
),
16192 tC3(strb
, 4400000, strb
, 2, (RR
, ADDRGLDR
),ldst
, t_ldst
),
16194 tCE(stm
, 8800000, stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
16195 tC3(stmia
, 8800000, stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
16196 tC3(stmea
, 8800000, stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
16197 tCE(ldm
, 8900000, ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
16198 tC3(ldmia
, 8900000, ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
16199 tC3(ldmfd
, 8900000, ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
16201 TCE(swi
, f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
16202 TCE(svc
, f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
16203 tCE(b
, a000000
, b
, 1, (EXPr
), branch
, t_branch
),
16204 TCE(bl
, b000000
, f000f800
, 1, (EXPr
), bl
, t_branch23
),
16207 tCE(adr
, 28f0000
, adr
, 2, (RR
, EXP
), adr
, t_adr
),
16208 C3(adrl
, 28f0000
, 2, (RR
, EXP
), adrl
),
16209 tCE(nop
, 1a00000
, nop
, 1, (oI255c
), nop
, t_nop
),
16211 /* Thumb-compatibility pseudo ops. */
16212 tCE(lsl
, 1a00000
, lsl
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
16213 tC3(lsls
, 1b00000
, lsls
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
16214 tCE(lsr
, 1a00020
, lsr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
16215 tC3(lsrs
, 1b00020
, lsrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
16216 tCE(asr
, 1a00040
, asr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
16217 tC3(asrs
, 1b00040
, asrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
16218 tCE(ror
, 1a00060
, ror
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
16219 tC3(rors
, 1b00060
, rors
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
16220 tCE(neg
, 2600000, neg
, 2, (RR
, RR
), rd_rn
, t_neg
),
16221 tC3(negs
, 2700000, negs
, 2, (RR
, RR
), rd_rn
, t_neg
),
16222 tCE(push
, 92d0000
, push
, 1, (REGLST
), push_pop
, t_push_pop
),
16223 tCE(pop
, 8bd0000
, pop
, 1, (REGLST
), push_pop
, t_push_pop
),
16225 /* These may simplify to neg. */
16226 TCE(rsb
, 0600000, ebc00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
16227 TC3(rsbs
, 0700000, ebd00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
16229 #undef THUMB_VARIANT
16230 #define THUMB_VARIANT & arm_ext_v6
16232 TCE(cpy
, 1a00000
, 4600, 2, (RR
, RR
), rd_rm
, t_cpy
),
16234 /* V1 instructions with no Thumb analogue prior to V6T2. */
16235 #undef THUMB_VARIANT
16236 #define THUMB_VARIANT & arm_ext_v6t2
16238 TCE(teq
, 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
16239 TC3w(teqs
, 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
16240 CL(teqp
, 130f000
, 2, (RR
, SH
), cmp
),
16242 TC3(ldrt
, 4300000, f8500e00
, 2, (RR
, ADDR
), ldstt
, t_ldstt
),
16243 TC3(ldrbt
, 4700000, f8100e00
, 2, (RR
, ADDR
), ldstt
, t_ldstt
),
16244 TC3(strt
, 4200000, f8400e00
, 2, (RR
, ADDR
), ldstt
, t_ldstt
),
16245 TC3(strbt
, 4600000, f8000e00
, 2, (RR
, ADDR
), ldstt
, t_ldstt
),
16247 TC3(stmdb
, 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
16248 TC3(stmfd
, 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
16250 TC3(ldmdb
, 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
16251 TC3(ldmea
, 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
16253 /* V1 instructions with no Thumb analogue at all. */
16254 CE(rsc
, 0e00000
, 3, (RR
, oRR
, SH
), arit
),
16255 C3(rscs
, 0f00000
, 3, (RR
, oRR
, SH
), arit
),
16257 C3(stmib
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
16258 C3(stmfa
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
16259 C3(stmda
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
16260 C3(stmed
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
16261 C3(ldmib
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
16262 C3(ldmed
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
16263 C3(ldmda
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
16264 C3(ldmfa
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
#define ARM_VARIANT & arm_ext_v2	/* ARM 2 - multiplies.  */
#undef THUMB_VARIANT
#define THUMB_VARIANT & arm_ext_v4t

 tCE(mul,   0000090, mul,   3, (RRnpc, RRnpc, oRR), mul, t_mul),
 tC3(muls,  0100090, muls,  3, (RRnpc, RRnpc, oRR), mul, t_mul),

#undef THUMB_VARIANT
#define THUMB_VARIANT & arm_ext_v6t2

 TCE(mla,   0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
 C3(mlas,   0300090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),

 /* Generic coprocessor instructions.  */
 TCE(cdp,   e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
 TCE(ldc,   c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
 TC3(ldcl,  c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
 TCE(stc,   c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
 TC3(stcl,  c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
 TCE(mcr,   e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
 TCE(mrc,   e100010, ee100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
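 /* Editorial note (illustrative only): the coprocessor entries above
    carry the fixed bits of the generic coprocessor encodings, and the
    operand list (RCP, I7b, RR, RCN, RCN, oI7b) mirrors the
    "p<n>, #opc1, Rt, CRn, CRm, #opc2" syntax.  A hedged sketch of how
    an ARM-mode MCR word would be built from those operands (the
    example_* helper is invented for illustration):

      static unsigned int
      example_arm_mcr (unsigned int cp, unsigned int opc1, unsigned int rt,
                       unsigned int crn, unsigned int crm, unsigned int opc2)
      {
        unsigned int insn = 0x0e000010;   // base value from the mcr entry
        insn |= 0xe0000000;               // condition AL
        insn |= (opc1 & 0x7) << 21;
        insn |= (crn & 0xf) << 16;
        insn |= (rt & 0xf) << 12;
        insn |= (cp & 0xf) << 8;
        insn |= (opc2 & 0x7) << 5;
        insn |= crm & 0xf;
        return insn;   // mcr p15, 0, r0, c1, c0, 0 -> 0xee010f10
      }

    The co_reg encoder named in the entries additionally validates the
    register and coprocessor-number constraints.  */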
#define ARM_VARIANT & arm_ext_v2s	/* ARM 3 - swp instructions.  */

 CE(swp,    1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
 C3(swpb,   1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),

#define ARM_VARIANT & arm_ext_v3	/* ARM 6 Status register instructions.  */
#undef THUMB_VARIANT
#define THUMB_VARIANT & arm_ext_msr

 TCE(mrs,   10f0000, f3ef8000, 2, (APSR_RR, RVC_PSR), mrs, t_mrs),
 TCE(msr,   120f000, f3808000, 2, (RVC_PSR, RR_EXi),  msr, t_msr),

#define ARM_VARIANT & arm_ext_v3m	/* ARM 7M long multiplies.  */
#undef THUMB_VARIANT
#define THUMB_VARIANT & arm_ext_v6t2

 TCE(smull, 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
 CM(smull,s, 0d00090,          4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
 TCE(umull, 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
 CM(umull,s, 0900090,          4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
 TCE(smlal, 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
 CM(smlal,s, 0f00090,          4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
 TCE(umlal, 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
 CM(umlal,s, 0b00090,          4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),

#define ARM_VARIANT & arm_ext_v4	/* ARM Architecture 4.  */
#undef THUMB_VARIANT
#define THUMB_VARIANT & arm_ext_v4t

 tC3(ldrh,  01000b0, ldrh,  2, (RR, ADDRGLDRS), ldstv4, t_ldst),
 tC3(strh,  00000b0, strh,  2, (RR, ADDRGLDRS), ldstv4, t_ldst),
 tC3(ldrsh, 01000f0, ldrsh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
 tC3(ldrsb, 01000d0, ldrsb, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
 tCM(ld,sh, 01000f0, ldrsh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
 tCM(ld,sb, 01000d0, ldrsb, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),

#define ARM_VARIANT & arm_ext_v4t_5

 /* ARM Architecture 4T.  */
 /* Note: bx (and blx) are required on V5, even if the processor does
    not support Thumb.  */
 TCE(bx,    12fff10, 4700, 1, (RR), bx, t_bx),
#define ARM_VARIANT & arm_ext_v5	/* ARM Architecture 5T.  */
#undef THUMB_VARIANT
#define THUMB_VARIANT & arm_ext_v5t

 /* Note: blx has 2 variants; the .value coded here is for
    BLX(2).  Only this variant has conditional execution.  */
 TCE(blx,   12fff30, 4780, 1, (RR_EXr), blx, t_blx),
 TUE(bkpt,  1200070, be00, 1, (oIffffb), bkpt, t_bkpt),

#undef THUMB_VARIANT
#define THUMB_VARIANT & arm_ext_v6t2

 TCE(clz,   16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
 TUF(ldc2,  c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
 TUF(ldc2l, c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
 TUF(stc2,  c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
 TUF(stc2l, c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
 TUF(cdp2,  e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
 TUF(mcr2,  e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
 TUF(mrc2,  e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
#define ARM_VARIANT & arm_ext_v5exp	/* ARM Architecture 5TExP.  */

 TCE(smlabb, 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(smlatb, 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(smlabt, 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(smlatt, 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),

 TCE(smlawb, 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(smlawt, 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),

 TCE(smlalbb, 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
 TCE(smlaltb, 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
 TCE(smlalbt, 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
 TCE(smlaltt, 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),

 TCE(smulbb, 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE(smultb, 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE(smulbt, 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE(smultt, 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),

 TCE(smulwb, 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE(smulwt, 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),

 TCE(qadd,  1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd),
 TCE(qdadd, 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd),
 TCE(qsub,  1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd),
 TCE(qdsub, 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd),
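 /* Editorial note (illustrative only): qadd/qdadd/qsub/qdsub above are
    the ARMv5TE saturating arithmetic instructions.  As a hedged sketch
    of the semantics (not code used by the assembler), QADD is a 32-bit
    signed addition that saturates instead of wrapping:

      #include <stdint.h>

      static int32_t
      example_saturating_add (int32_t a, int32_t b)
      {
        int64_t sum = (int64_t) a + (int64_t) b;
        if (sum > INT32_MAX)
          return INT32_MAX;
        if (sum < INT32_MIN)
          return INT32_MIN;
        return (int32_t) sum;
      }

    QDADD/QDSUB saturate twice: the second operand is first doubled with
    the same saturation, then added to or subtracted from the first.  */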
#define ARM_VARIANT & arm_ext_v5e	/* ARM Architecture 5TE.  */

 TUF(pld,   450f000, f810f000, 1, (ADDR), pld, t_pld),
 TC3(ldrd,  00000d0, e8500000, 3, (RRnpc, oRRnpc, ADDRGLDRS), ldrd, t_ldstd),
 TC3(strd,  00000f0, e8400000, 3, (RRnpc, oRRnpc, ADDRGLDRS), ldrd, t_ldstd),

 TCE(mcrr,  c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
 TCE(mrrc,  c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),

#define ARM_VARIANT & arm_ext_v5j	/* ARM Architecture 5TEJ.  */

 TCE(bxj,   12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
#define ARM_VARIANT & arm_ext_v6	/* ARM V6.  */
#undef THUMB_VARIANT
#define THUMB_VARIANT & arm_ext_v6

 TUF(cpsie, 1080000, b660,  2, (CPSF, oI31b), cpsi, t_cpsi),
 TUF(cpsid, 10c0000, b670,  2, (CPSF, oI31b), cpsi, t_cpsi),
 tCE(rev,   6bf0f30, rev,   2, (RRnpc, RRnpc), rd_rm, t_rev),
 tCE(rev16, 6bf0fb0, rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
 tCE(revsh, 6ff0fb0, revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
 tCE(sxth,  6bf0070, sxth,  3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
 tCE(uxth,  6ff0070, uxth,  3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
 tCE(sxtb,  6af0070, sxtb,  3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
 tCE(uxtb,  6ef0070, uxtb,  3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
 TUF(setend, 1010000, b650, 1, (ENDI), setend, t_setend),

#undef THUMB_VARIANT
#define THUMB_VARIANT & arm_ext_v6t2

 TCE(ldrex, 1900f9f, e8500f00, 2, (RRnpc, ADDR), ldrex, t_ldrex),
 TCE(strex, 1800f90, e8400000, 3, (RRnpc, RRnpc, ADDR), strex, t_strex),
 TUF(mcrr2, c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
 TUF(mrrc2, c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),

 TCE(ssat,  6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar), ssat, t_ssat),
 TCE(usat,  6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar), usat, t_usat),
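 /* Editorial note (illustrative only): ssat/usat saturate a (possibly
    shifted) register value to a signed or unsigned field of the given
    width; the I32 / I31 operand codes above constrain the immediate
    field.  A hedged sketch of the SSAT result for a width of 1..32
    bits, ignoring the optional shift of the source operand:

      #include <stdint.h>

      static int32_t
      example_ssat (int32_t value, unsigned int width)
      {
        int32_t max = (width >= 32) ? INT32_MAX : (1 << (width - 1)) - 1;
        int32_t min = (width >= 32) ? INT32_MIN : -(1 << (width - 1));
        if (value > max)
          return max;
        if (value < min)
          return min;
        return value;
      }

    The assembler only encodes the width; the saturation itself (and the
    Q flag update) happens in the CPU.  */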
 /* ARM V6 not included in V7M (eg. integer SIMD).  */
#undef THUMB_VARIANT
#define THUMB_VARIANT & arm_ext_v6_notm

 TUF(cps,   1020000, f3af8100, 1, (I31b), imm0, t_cps),
 TCE(pkhbt, 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
 TCE(pkhtb, 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
 TCE(qadd16, 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(qadd8, 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(qasx,  6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for QASX.  */
 TCE(qaddsubx, 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(qsax,  6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for QSAX.  */
 TCE(qsubaddx, 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(qsub16, 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(qsub8, 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(sadd16, 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(sadd8, 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(sasx,  6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for SASX.  */
 TCE(saddsubx, 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(shadd16, 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(shadd8, 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(shasx, 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for SHASX.  */
 TCE(shaddsubx, 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(shsax, 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for SHSAX.  */
 TCE(shsubaddx, 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(shsub16, 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(shsub8, 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(ssax,  6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for SSAX.  */
 TCE(ssubaddx, 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(ssub16, 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(ssub8, 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uadd16, 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uadd8, 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uasx,  6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for UASX.  */
 TCE(uaddsubx, 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uhadd16, 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uhadd8, 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uhasx, 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for UHASX.  */
 TCE(uhaddsubx, 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uhsax, 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for UHSAX.  */
 TCE(uhsubaddx, 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uhsub16, 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uhsub8, 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uqadd16, 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uqadd8, 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uqasx, 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for UQASX.  */
 TCE(uqaddsubx, 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uqsax, 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for UQSAX.  */
 TCE(uqsubaddx, 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uqsub16, 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uqsub8, 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(usub16, 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(usax,  6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for USAX.  */
 TCE(usubaddx, 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(usub8, 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
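 /* Editorial note (illustrative only): the block above is the ARMv6
    parallel add/subtract family; the "16"/"8" suffixes select halfword
    or byte lanes, "s"/"u" signed or unsigned, "q" saturating and "h"
    halving.  A hedged sketch of plain UADD8 (modulo arithmetic per byte
    lane); the example_* helper is invented for illustration:

      #include <stdint.h>

      static uint32_t
      example_uadd8 (uint32_t a, uint32_t b)
      {
        uint32_t result = 0;
        int lane;
        for (lane = 0; lane < 4; lane++)
          {
            uint32_t x = (a >> (lane * 8)) & 0xff;
            uint32_t y = (b >> (lane * 8)) & 0xff;
            result |= ((x + y) & 0xff) << (lane * 8);
          }
        return result;   // the CPU additionally sets the APSR.GE flags
      }  */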
 TUF(rfeia, 8900a00, e990c000, 1, (RRw), rfe, rfe),
 UF(rfeib,  9900a00,           1, (RRw), rfe),
 UF(rfeda,  8100a00,           1, (RRw), rfe),
 TUF(rfedb, 9100a00, e810c000, 1, (RRw), rfe, rfe),
 TUF(rfefd, 8900a00, e990c000, 1, (RRw), rfe, rfe),
 UF(rfefa,  9900a00,           1, (RRw), rfe),
 UF(rfeea,  8100a00,           1, (RRw), rfe),
 TUF(rfeed, 9100a00, e810c000, 1, (RRw), rfe, rfe),
 TCE(sxtah, 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
 TCE(sxtab16, 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
 TCE(sxtab, 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
 TCE(sxtb16, 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
 TCE(uxtah, 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
 TCE(uxtab16, 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
 TCE(uxtab, 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
 TCE(uxtb16, 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
 TCE(sel,   6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(smlad, 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(smladx, 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(smlald, 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
 TCE(smlaldx, 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
 TCE(smlsd, 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(smlsdx, 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(smlsld, 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
 TCE(smlsldx, 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
 TCE(smmla, 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(smmlar, 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(smmls, 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(smmlsr, 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(smmul, 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE(smmulr, 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE(smuad, 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE(smuadx, 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE(smusd, 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE(smusdx, 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TUF(srsia, 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
 UF(srsib,  9c00500,           2, (oRRw, I31w), srs),
 UF(srsda,  8400500,           2, (oRRw, I31w), srs),
 TUF(srsdb, 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
 TCE(ssat16, 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
 TCE(umaal, 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
 TCE(usad8, 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE(usada8, 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(usat16, 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
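 /* Editorial note (illustrative only): usad8/usada8 above compute the
    sum of absolute differences of the four byte lanes of two registers
    (usada8 adds an accumulator).  A hedged sketch of the arithmetic,
    with an invented example_* helper:

      #include <stdint.h>

      static uint32_t
      example_usad8 (uint32_t a, uint32_t b)
      {
        uint32_t sum = 0;
        int lane;
        for (lane = 0; lane < 4; lane++)
          {
            uint32_t x = (a >> (lane * 8)) & 0xff;
            uint32_t y = (b >> (lane * 8)) & 0xff;
            sum += (x > y) ? x - y : y - x;
          }
        return sum;
      }  */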
#define ARM_VARIANT & arm_ext_v6k
#undef THUMB_VARIANT
#define THUMB_VARIANT & arm_ext_v6k

 tCE(yield, 320f001, yield, 0, (), noargs, t_hint),
 tCE(wfe,   320f002, wfe,   0, (), noargs, t_hint),
 tCE(wfi,   320f003, wfi,   0, (), noargs, t_hint),
 tCE(sev,   320f004, sev,   0, (), noargs, t_hint),

#undef THUMB_VARIANT
#define THUMB_VARIANT & arm_ext_v6_notm

 TCE(ldrexd, 1b00f9f, e8d0007f, 3, (RRnpc, oRRnpc, RRnpcb), ldrexd, t_ldrexd),
 TCE(strexd, 1a00f90, e8c00070, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb), strexd, t_strexd),

#undef THUMB_VARIANT
#define THUMB_VARIANT & arm_ext_v6t2

 TCE(ldrexb, 1d00f9f, e8d00f4f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
 TCE(ldrexh, 1f00f9f, e8d00f5f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
 TCE(strexb, 1c00f90, e8c00f40, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
 TCE(strexh, 1e00f90, e8c00f50, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
 TUF(clrex, 57ff01f, f3bf8f2f, 0, (), noargs, noargs),

#define ARM_VARIANT & arm_ext_v6z

 TCE(smc,   1600070, f7f08000, 1, (EXPi), smc, t_smc),

#define ARM_VARIANT & arm_ext_v6t2

 TCE(bfc,   7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
 TCE(bfi,   7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
 TCE(sbfx,  7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
 TCE(ubfx,  7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),

 TCE(mls,   0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
 TCE(movw,  3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
 TCE(movt,  3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
 TCE(rbit,  6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),

 TC3(ldrht, 03000b0, f8300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
 TC3(ldrsht, 03000f0, f9300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
 TC3(ldrsbt, 03000d0, f9100e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
 TC3(strht, 02000b0, f8200e00, 2, (RR, ADDR), ldsttv4, t_ldstt),

 UT(cbnz,   b900, 2, (RR, EXP), t_cbz),
 UT(cbz,    b100, 2, (RR, EXP), t_cbz),
 /* ARM does not really have an IT instruction, so always allow it.
    The opcode is copied from Thumb in order to allow warnings in
    -mimplicit-it=[never | arm] modes.  */
#define ARM_VARIANT & arm_ext_v1

 TUE(it,    bf08, bf08, 1, (COND), it, t_it),
 TUE(itt,   bf0c, bf0c, 1, (COND), it, t_it),
 TUE(ite,   bf04, bf04, 1, (COND), it, t_it),
 TUE(ittt,  bf0e, bf0e, 1, (COND), it, t_it),
 TUE(itet,  bf06, bf06, 1, (COND), it, t_it),
 TUE(itte,  bf0a, bf0a, 1, (COND), it, t_it),
 TUE(itee,  bf02, bf02, 1, (COND), it, t_it),
 TUE(itttt, bf0f, bf0f, 1, (COND), it, t_it),
 TUE(itett, bf07, bf07, 1, (COND), it, t_it),
 TUE(ittet, bf0b, bf0b, 1, (COND), it, t_it),
 TUE(iteet, bf03, bf03, 1, (COND), it, t_it),
 TUE(ittte, bf0d, bf0d, 1, (COND), it, t_it),
 TUE(itete, bf05, bf05, 1, (COND), it, t_it),
 TUE(ittee, bf09, bf09, 1, (COND), it, t_it),
 TUE(iteee, bf01, bf01, 1, (COND), it, t_it),
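 /* Editorial note (illustrative only): each IT form above fixes the
    "mask" nibble of the Thumb IT instruction
    (0xbf00 | firstcond << 4 | mask).  As a hedged sketch of how that
    mask relates to the then/else pattern: after the leading "it", each
    't' copies bit 0 of the first condition, each 'e' copies its
    complement, and a single trailing 1 bit terminates the pattern.
    The example_* helper is invented for illustration:

      static unsigned int
      example_it_mask (unsigned int firstcond, const char *mnemonic)
      {
        unsigned int mask = 0x8;                  // "it" alone
        const char *p;
        for (p = mnemonic + 2; *p != '\0'; p++)   // skip the leading "it"
          {
            unsigned int bit = (*p == 't') ? (firstcond & 1)
                                           : !(firstcond & 1);
            mask = (mask >> 1) | (bit << 3);
          }
        return mask;
      }

    The base values above (bf08, bf0c, bf04, ...) appear to correspond to
    a first condition with bit 0 set; the t_it encoder adjusts the mask
    for the condition actually written by the user.  */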
 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent.  */
 TC3(rrx,   01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
 TC3(rrxs,  01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
 /* Thumb2 only instructions.  */
#define ARM_VARIANT NULL

 TCE(addw,  0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
 TCE(subw,  0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
 TCE(orn,   0, ea600000, 3, (RR, oRR, SH),  0, t_orn),
 TCE(orns,  0, ea700000, 3, (RR, oRR, SH),  0, t_orn),
 TCE(tbb,   0, e8d0f000, 1, (TB), 0, t_tb),
 TCE(tbh,   0, e8d0f010, 1, (TB), 0, t_tb),

 /* Thumb-2 hardware division instructions (R and M profiles only).  */
#undef THUMB_VARIANT
#define THUMB_VARIANT & arm_ext_div

 TCE(sdiv,  0, fb90f0f0, 3, (RR, oRR, RR), 0, t_div),
 TCE(udiv,  0, fbb0f0f0, 3, (RR, oRR, RR), 0, t_div),

 /* ARM V6M/V7 instructions.  */
#define ARM_VARIANT & arm_ext_barrier
#undef THUMB_VARIANT
#define THUMB_VARIANT & arm_ext_barrier

 TUF(dmb,   57ff050, f3bf8f50, 1, (oBARRIER), barrier, t_barrier),
 TUF(dsb,   57ff040, f3bf8f40, 1, (oBARRIER), barrier, t_barrier),
 TUF(isb,   57ff060, f3bf8f60, 1, (oBARRIER), barrier, t_barrier),

 /* ARM V7 instructions.  */
#define ARM_VARIANT & arm_ext_v7
#undef THUMB_VARIANT
#define THUMB_VARIANT & arm_ext_v7

 TUF(pli,   450f000, f910f000, 1, (ADDR), pli, t_pld),
 TCE(dbg,   320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
#define ARM_VARIANT & fpu_fpa_ext_v1	/* Core FPA instruction set (V1).  */

 cCE(wfs,   e200110, 1, (RR), rd),
 cCE(rfs,   e300110, 1, (RR), rd),
 cCE(wfc,   e400110, 1, (RR), rd),
 cCE(rfc,   e500110, 1, (RR), rd),

 cCL(ldfs,  c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
 cCL(ldfd,  c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
 cCL(ldfe,  c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
 cCL(ldfp,  c508100, 2, (RF, ADDRGLDC), rd_cpaddr),

 cCL(stfs,  c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
 cCL(stfd,  c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
 cCL(stfe,  c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
 cCL(stfp,  c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
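 /* Editorial note (illustrative only): the FPA mnemonics that follow
    are a base operation plus a precision suffix (s/d/e, and p for
    packed in ldf/stf) and an optional rounding suffix (p/m/z).  As a
    hedged reading of the opcode values in this table, the load/store
    forms put the precision in bits 15 and 22, while the arithmetic
    forms (mvf, adf, ...) use bits 7 and 19 with rounding in bits 5-6.
    The example_* helper below is invented to show the pattern:

      static unsigned int
      example_fpa_arith_opcode (unsigned int base, char precision,
                                char rounding)
      {
        unsigned int insn = base;        // e.g. 0x0e008100 for mvf
        if (precision == 'd')
          insn |= 0x00000080;
        else if (precision == 'e')
          insn |= 0x00080000;
        if (rounding == 'p')             // round to plus infinity
          insn |= 0x00000020;
        else if (rounding == 'm')        // round to minus infinity
          insn |= 0x00000040;
        else if (rounding == 'z')        // round towards zero
          insn |= 0x00000060;
        return insn;                     // mvfdz -> 0x0e0081e0
      }  */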
16673 cCL(mvfs
, e008100
, 2, (RF
, RF_IF
), rd_rm
),
16674 cCL(mvfsp
, e008120
, 2, (RF
, RF_IF
), rd_rm
),
16675 cCL(mvfsm
, e008140
, 2, (RF
, RF_IF
), rd_rm
),
16676 cCL(mvfsz
, e008160
, 2, (RF
, RF_IF
), rd_rm
),
16677 cCL(mvfd
, e008180
, 2, (RF
, RF_IF
), rd_rm
),
16678 cCL(mvfdp
, e0081a0
, 2, (RF
, RF_IF
), rd_rm
),
16679 cCL(mvfdm
, e0081c0
, 2, (RF
, RF_IF
), rd_rm
),
16680 cCL(mvfdz
, e0081e0
, 2, (RF
, RF_IF
), rd_rm
),
16681 cCL(mvfe
, e088100
, 2, (RF
, RF_IF
), rd_rm
),
16682 cCL(mvfep
, e088120
, 2, (RF
, RF_IF
), rd_rm
),
16683 cCL(mvfem
, e088140
, 2, (RF
, RF_IF
), rd_rm
),
16684 cCL(mvfez
, e088160
, 2, (RF
, RF_IF
), rd_rm
),
16686 cCL(mnfs
, e108100
, 2, (RF
, RF_IF
), rd_rm
),
16687 cCL(mnfsp
, e108120
, 2, (RF
, RF_IF
), rd_rm
),
16688 cCL(mnfsm
, e108140
, 2, (RF
, RF_IF
), rd_rm
),
16689 cCL(mnfsz
, e108160
, 2, (RF
, RF_IF
), rd_rm
),
16690 cCL(mnfd
, e108180
, 2, (RF
, RF_IF
), rd_rm
),
16691 cCL(mnfdp
, e1081a0
, 2, (RF
, RF_IF
), rd_rm
),
16692 cCL(mnfdm
, e1081c0
, 2, (RF
, RF_IF
), rd_rm
),
16693 cCL(mnfdz
, e1081e0
, 2, (RF
, RF_IF
), rd_rm
),
16694 cCL(mnfe
, e188100
, 2, (RF
, RF_IF
), rd_rm
),
16695 cCL(mnfep
, e188120
, 2, (RF
, RF_IF
), rd_rm
),
16696 cCL(mnfem
, e188140
, 2, (RF
, RF_IF
), rd_rm
),
16697 cCL(mnfez
, e188160
, 2, (RF
, RF_IF
), rd_rm
),
16699 cCL(abss
, e208100
, 2, (RF
, RF_IF
), rd_rm
),
16700 cCL(abssp
, e208120
, 2, (RF
, RF_IF
), rd_rm
),
16701 cCL(abssm
, e208140
, 2, (RF
, RF_IF
), rd_rm
),
16702 cCL(abssz
, e208160
, 2, (RF
, RF_IF
), rd_rm
),
16703 cCL(absd
, e208180
, 2, (RF
, RF_IF
), rd_rm
),
16704 cCL(absdp
, e2081a0
, 2, (RF
, RF_IF
), rd_rm
),
16705 cCL(absdm
, e2081c0
, 2, (RF
, RF_IF
), rd_rm
),
16706 cCL(absdz
, e2081e0
, 2, (RF
, RF_IF
), rd_rm
),
16707 cCL(abse
, e288100
, 2, (RF
, RF_IF
), rd_rm
),
16708 cCL(absep
, e288120
, 2, (RF
, RF_IF
), rd_rm
),
16709 cCL(absem
, e288140
, 2, (RF
, RF_IF
), rd_rm
),
16710 cCL(absez
, e288160
, 2, (RF
, RF_IF
), rd_rm
),
16712 cCL(rnds
, e308100
, 2, (RF
, RF_IF
), rd_rm
),
16713 cCL(rndsp
, e308120
, 2, (RF
, RF_IF
), rd_rm
),
16714 cCL(rndsm
, e308140
, 2, (RF
, RF_IF
), rd_rm
),
16715 cCL(rndsz
, e308160
, 2, (RF
, RF_IF
), rd_rm
),
16716 cCL(rndd
, e308180
, 2, (RF
, RF_IF
), rd_rm
),
16717 cCL(rnddp
, e3081a0
, 2, (RF
, RF_IF
), rd_rm
),
16718 cCL(rnddm
, e3081c0
, 2, (RF
, RF_IF
), rd_rm
),
16719 cCL(rnddz
, e3081e0
, 2, (RF
, RF_IF
), rd_rm
),
16720 cCL(rnde
, e388100
, 2, (RF
, RF_IF
), rd_rm
),
16721 cCL(rndep
, e388120
, 2, (RF
, RF_IF
), rd_rm
),
16722 cCL(rndem
, e388140
, 2, (RF
, RF_IF
), rd_rm
),
16723 cCL(rndez
, e388160
, 2, (RF
, RF_IF
), rd_rm
),
16725 cCL(sqts
, e408100
, 2, (RF
, RF_IF
), rd_rm
),
16726 cCL(sqtsp
, e408120
, 2, (RF
, RF_IF
), rd_rm
),
16727 cCL(sqtsm
, e408140
, 2, (RF
, RF_IF
), rd_rm
),
16728 cCL(sqtsz
, e408160
, 2, (RF
, RF_IF
), rd_rm
),
16729 cCL(sqtd
, e408180
, 2, (RF
, RF_IF
), rd_rm
),
16730 cCL(sqtdp
, e4081a0
, 2, (RF
, RF_IF
), rd_rm
),
16731 cCL(sqtdm
, e4081c0
, 2, (RF
, RF_IF
), rd_rm
),
16732 cCL(sqtdz
, e4081e0
, 2, (RF
, RF_IF
), rd_rm
),
16733 cCL(sqte
, e488100
, 2, (RF
, RF_IF
), rd_rm
),
16734 cCL(sqtep
, e488120
, 2, (RF
, RF_IF
), rd_rm
),
16735 cCL(sqtem
, e488140
, 2, (RF
, RF_IF
), rd_rm
),
16736 cCL(sqtez
, e488160
, 2, (RF
, RF_IF
), rd_rm
),
16738 cCL(logs
, e508100
, 2, (RF
, RF_IF
), rd_rm
),
16739 cCL(logsp
, e508120
, 2, (RF
, RF_IF
), rd_rm
),
16740 cCL(logsm
, e508140
, 2, (RF
, RF_IF
), rd_rm
),
16741 cCL(logsz
, e508160
, 2, (RF
, RF_IF
), rd_rm
),
16742 cCL(logd
, e508180
, 2, (RF
, RF_IF
), rd_rm
),
16743 cCL(logdp
, e5081a0
, 2, (RF
, RF_IF
), rd_rm
),
16744 cCL(logdm
, e5081c0
, 2, (RF
, RF_IF
), rd_rm
),
16745 cCL(logdz
, e5081e0
, 2, (RF
, RF_IF
), rd_rm
),
16746 cCL(loge
, e588100
, 2, (RF
, RF_IF
), rd_rm
),
16747 cCL(logep
, e588120
, 2, (RF
, RF_IF
), rd_rm
),
16748 cCL(logem
, e588140
, 2, (RF
, RF_IF
), rd_rm
),
16749 cCL(logez
, e588160
, 2, (RF
, RF_IF
), rd_rm
),
16751 cCL(lgns
, e608100
, 2, (RF
, RF_IF
), rd_rm
),
16752 cCL(lgnsp
, e608120
, 2, (RF
, RF_IF
), rd_rm
),
16753 cCL(lgnsm
, e608140
, 2, (RF
, RF_IF
), rd_rm
),
16754 cCL(lgnsz
, e608160
, 2, (RF
, RF_IF
), rd_rm
),
16755 cCL(lgnd
, e608180
, 2, (RF
, RF_IF
), rd_rm
),
16756 cCL(lgndp
, e6081a0
, 2, (RF
, RF_IF
), rd_rm
),
16757 cCL(lgndm
, e6081c0
, 2, (RF
, RF_IF
), rd_rm
),
16758 cCL(lgndz
, e6081e0
, 2, (RF
, RF_IF
), rd_rm
),
16759 cCL(lgne
, e688100
, 2, (RF
, RF_IF
), rd_rm
),
16760 cCL(lgnep
, e688120
, 2, (RF
, RF_IF
), rd_rm
),
16761 cCL(lgnem
, e688140
, 2, (RF
, RF_IF
), rd_rm
),
16762 cCL(lgnez
, e688160
, 2, (RF
, RF_IF
), rd_rm
),
16764 cCL(exps
, e708100
, 2, (RF
, RF_IF
), rd_rm
),
16765 cCL(expsp
, e708120
, 2, (RF
, RF_IF
), rd_rm
),
16766 cCL(expsm
, e708140
, 2, (RF
, RF_IF
), rd_rm
),
16767 cCL(expsz
, e708160
, 2, (RF
, RF_IF
), rd_rm
),
16768 cCL(expd
, e708180
, 2, (RF
, RF_IF
), rd_rm
),
16769 cCL(expdp
, e7081a0
, 2, (RF
, RF_IF
), rd_rm
),
16770 cCL(expdm
, e7081c0
, 2, (RF
, RF_IF
), rd_rm
),
16771 cCL(expdz
, e7081e0
, 2, (RF
, RF_IF
), rd_rm
),
16772 cCL(expe
, e788100
, 2, (RF
, RF_IF
), rd_rm
),
16773 cCL(expep
, e788120
, 2, (RF
, RF_IF
), rd_rm
),
16774 cCL(expem
, e788140
, 2, (RF
, RF_IF
), rd_rm
),
16775 cCL(expdz
, e788160
, 2, (RF
, RF_IF
), rd_rm
),
16777 cCL(sins
, e808100
, 2, (RF
, RF_IF
), rd_rm
),
16778 cCL(sinsp
, e808120
, 2, (RF
, RF_IF
), rd_rm
),
16779 cCL(sinsm
, e808140
, 2, (RF
, RF_IF
), rd_rm
),
16780 cCL(sinsz
, e808160
, 2, (RF
, RF_IF
), rd_rm
),
16781 cCL(sind
, e808180
, 2, (RF
, RF_IF
), rd_rm
),
16782 cCL(sindp
, e8081a0
, 2, (RF
, RF_IF
), rd_rm
),
16783 cCL(sindm
, e8081c0
, 2, (RF
, RF_IF
), rd_rm
),
16784 cCL(sindz
, e8081e0
, 2, (RF
, RF_IF
), rd_rm
),
16785 cCL(sine
, e888100
, 2, (RF
, RF_IF
), rd_rm
),
16786 cCL(sinep
, e888120
, 2, (RF
, RF_IF
), rd_rm
),
16787 cCL(sinem
, e888140
, 2, (RF
, RF_IF
), rd_rm
),
16788 cCL(sinez
, e888160
, 2, (RF
, RF_IF
), rd_rm
),
16790 cCL(coss
, e908100
, 2, (RF
, RF_IF
), rd_rm
),
16791 cCL(cossp
, e908120
, 2, (RF
, RF_IF
), rd_rm
),
16792 cCL(cossm
, e908140
, 2, (RF
, RF_IF
), rd_rm
),
16793 cCL(cossz
, e908160
, 2, (RF
, RF_IF
), rd_rm
),
16794 cCL(cosd
, e908180
, 2, (RF
, RF_IF
), rd_rm
),
16795 cCL(cosdp
, e9081a0
, 2, (RF
, RF_IF
), rd_rm
),
16796 cCL(cosdm
, e9081c0
, 2, (RF
, RF_IF
), rd_rm
),
16797 cCL(cosdz
, e9081e0
, 2, (RF
, RF_IF
), rd_rm
),
16798 cCL(cose
, e988100
, 2, (RF
, RF_IF
), rd_rm
),
16799 cCL(cosep
, e988120
, 2, (RF
, RF_IF
), rd_rm
),
16800 cCL(cosem
, e988140
, 2, (RF
, RF_IF
), rd_rm
),
16801 cCL(cosez
, e988160
, 2, (RF
, RF_IF
), rd_rm
),
16803 cCL(tans
, ea08100
, 2, (RF
, RF_IF
), rd_rm
),
16804 cCL(tansp
, ea08120
, 2, (RF
, RF_IF
), rd_rm
),
16805 cCL(tansm
, ea08140
, 2, (RF
, RF_IF
), rd_rm
),
16806 cCL(tansz
, ea08160
, 2, (RF
, RF_IF
), rd_rm
),
16807 cCL(tand
, ea08180
, 2, (RF
, RF_IF
), rd_rm
),
16808 cCL(tandp
, ea081a0
, 2, (RF
, RF_IF
), rd_rm
),
16809 cCL(tandm
, ea081c0
, 2, (RF
, RF_IF
), rd_rm
),
16810 cCL(tandz
, ea081e0
, 2, (RF
, RF_IF
), rd_rm
),
16811 cCL(tane
, ea88100
, 2, (RF
, RF_IF
), rd_rm
),
16812 cCL(tanep
, ea88120
, 2, (RF
, RF_IF
), rd_rm
),
16813 cCL(tanem
, ea88140
, 2, (RF
, RF_IF
), rd_rm
),
16814 cCL(tanez
, ea88160
, 2, (RF
, RF_IF
), rd_rm
),
16816 cCL(asns
, eb08100
, 2, (RF
, RF_IF
), rd_rm
),
16817 cCL(asnsp
, eb08120
, 2, (RF
, RF_IF
), rd_rm
),
16818 cCL(asnsm
, eb08140
, 2, (RF
, RF_IF
), rd_rm
),
16819 cCL(asnsz
, eb08160
, 2, (RF
, RF_IF
), rd_rm
),
16820 cCL(asnd
, eb08180
, 2, (RF
, RF_IF
), rd_rm
),
16821 cCL(asndp
, eb081a0
, 2, (RF
, RF_IF
), rd_rm
),
16822 cCL(asndm
, eb081c0
, 2, (RF
, RF_IF
), rd_rm
),
16823 cCL(asndz
, eb081e0
, 2, (RF
, RF_IF
), rd_rm
),
16824 cCL(asne
, eb88100
, 2, (RF
, RF_IF
), rd_rm
),
16825 cCL(asnep
, eb88120
, 2, (RF
, RF_IF
), rd_rm
),
16826 cCL(asnem
, eb88140
, 2, (RF
, RF_IF
), rd_rm
),
16827 cCL(asnez
, eb88160
, 2, (RF
, RF_IF
), rd_rm
),
16829 cCL(acss
, ec08100
, 2, (RF
, RF_IF
), rd_rm
),
16830 cCL(acssp
, ec08120
, 2, (RF
, RF_IF
), rd_rm
),
16831 cCL(acssm
, ec08140
, 2, (RF
, RF_IF
), rd_rm
),
16832 cCL(acssz
, ec08160
, 2, (RF
, RF_IF
), rd_rm
),
16833 cCL(acsd
, ec08180
, 2, (RF
, RF_IF
), rd_rm
),
16834 cCL(acsdp
, ec081a0
, 2, (RF
, RF_IF
), rd_rm
),
16835 cCL(acsdm
, ec081c0
, 2, (RF
, RF_IF
), rd_rm
),
16836 cCL(acsdz
, ec081e0
, 2, (RF
, RF_IF
), rd_rm
),
16837 cCL(acse
, ec88100
, 2, (RF
, RF_IF
), rd_rm
),
16838 cCL(acsep
, ec88120
, 2, (RF
, RF_IF
), rd_rm
),
16839 cCL(acsem
, ec88140
, 2, (RF
, RF_IF
), rd_rm
),
16840 cCL(acsez
, ec88160
, 2, (RF
, RF_IF
), rd_rm
),
16842 cCL(atns
, ed08100
, 2, (RF
, RF_IF
), rd_rm
),
16843 cCL(atnsp
, ed08120
, 2, (RF
, RF_IF
), rd_rm
),
16844 cCL(atnsm
, ed08140
, 2, (RF
, RF_IF
), rd_rm
),
16845 cCL(atnsz
, ed08160
, 2, (RF
, RF_IF
), rd_rm
),
16846 cCL(atnd
, ed08180
, 2, (RF
, RF_IF
), rd_rm
),
16847 cCL(atndp
, ed081a0
, 2, (RF
, RF_IF
), rd_rm
),
16848 cCL(atndm
, ed081c0
, 2, (RF
, RF_IF
), rd_rm
),
16849 cCL(atndz
, ed081e0
, 2, (RF
, RF_IF
), rd_rm
),
16850 cCL(atne
, ed88100
, 2, (RF
, RF_IF
), rd_rm
),
16851 cCL(atnep
, ed88120
, 2, (RF
, RF_IF
), rd_rm
),
16852 cCL(atnem
, ed88140
, 2, (RF
, RF_IF
), rd_rm
),
16853 cCL(atnez
, ed88160
, 2, (RF
, RF_IF
), rd_rm
),
16855 cCL(urds
, ee08100
, 2, (RF
, RF_IF
), rd_rm
),
16856 cCL(urdsp
, ee08120
, 2, (RF
, RF_IF
), rd_rm
),
16857 cCL(urdsm
, ee08140
, 2, (RF
, RF_IF
), rd_rm
),
16858 cCL(urdsz
, ee08160
, 2, (RF
, RF_IF
), rd_rm
),
16859 cCL(urdd
, ee08180
, 2, (RF
, RF_IF
), rd_rm
),
16860 cCL(urddp
, ee081a0
, 2, (RF
, RF_IF
), rd_rm
),
16861 cCL(urddm
, ee081c0
, 2, (RF
, RF_IF
), rd_rm
),
16862 cCL(urddz
, ee081e0
, 2, (RF
, RF_IF
), rd_rm
),
16863 cCL(urde
, ee88100
, 2, (RF
, RF_IF
), rd_rm
),
16864 cCL(urdep
, ee88120
, 2, (RF
, RF_IF
), rd_rm
),
16865 cCL(urdem
, ee88140
, 2, (RF
, RF_IF
), rd_rm
),
16866 cCL(urdez
, ee88160
, 2, (RF
, RF_IF
), rd_rm
),
16868 cCL(nrms
, ef08100
, 2, (RF
, RF_IF
), rd_rm
),
16869 cCL(nrmsp
, ef08120
, 2, (RF
, RF_IF
), rd_rm
),
16870 cCL(nrmsm
, ef08140
, 2, (RF
, RF_IF
), rd_rm
),
16871 cCL(nrmsz
, ef08160
, 2, (RF
, RF_IF
), rd_rm
),
16872 cCL(nrmd
, ef08180
, 2, (RF
, RF_IF
), rd_rm
),
16873 cCL(nrmdp
, ef081a0
, 2, (RF
, RF_IF
), rd_rm
),
16874 cCL(nrmdm
, ef081c0
, 2, (RF
, RF_IF
), rd_rm
),
16875 cCL(nrmdz
, ef081e0
, 2, (RF
, RF_IF
), rd_rm
),
16876 cCL(nrme
, ef88100
, 2, (RF
, RF_IF
), rd_rm
),
16877 cCL(nrmep
, ef88120
, 2, (RF
, RF_IF
), rd_rm
),
16878 cCL(nrmem
, ef88140
, 2, (RF
, RF_IF
), rd_rm
),
16879 cCL(nrmez
, ef88160
, 2, (RF
, RF_IF
), rd_rm
),
16881 cCL(adfs
, e000100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16882 cCL(adfsp
, e000120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16883 cCL(adfsm
, e000140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16884 cCL(adfsz
, e000160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16885 cCL(adfd
, e000180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16886 cCL(adfdp
, e0001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16887 cCL(adfdm
, e0001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16888 cCL(adfdz
, e0001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16889 cCL(adfe
, e080100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16890 cCL(adfep
, e080120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16891 cCL(adfem
, e080140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16892 cCL(adfez
, e080160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16894 cCL(sufs
, e200100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16895 cCL(sufsp
, e200120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16896 cCL(sufsm
, e200140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16897 cCL(sufsz
, e200160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16898 cCL(sufd
, e200180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16899 cCL(sufdp
, e2001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16900 cCL(sufdm
, e2001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16901 cCL(sufdz
, e2001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16902 cCL(sufe
, e280100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16903 cCL(sufep
, e280120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16904 cCL(sufem
, e280140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16905 cCL(sufez
, e280160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16907 cCL(rsfs
, e300100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16908 cCL(rsfsp
, e300120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16909 cCL(rsfsm
, e300140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16910 cCL(rsfsz
, e300160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16911 cCL(rsfd
, e300180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16912 cCL(rsfdp
, e3001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16913 cCL(rsfdm
, e3001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16914 cCL(rsfdz
, e3001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16915 cCL(rsfe
, e380100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16916 cCL(rsfep
, e380120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16917 cCL(rsfem
, e380140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16918 cCL(rsfez
, e380160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16920 cCL(mufs
, e100100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16921 cCL(mufsp
, e100120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16922 cCL(mufsm
, e100140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16923 cCL(mufsz
, e100160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16924 cCL(mufd
, e100180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16925 cCL(mufdp
, e1001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16926 cCL(mufdm
, e1001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16927 cCL(mufdz
, e1001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16928 cCL(mufe
, e180100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16929 cCL(mufep
, e180120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16930 cCL(mufem
, e180140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16931 cCL(mufez
, e180160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16933 cCL(dvfs
, e400100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16934 cCL(dvfsp
, e400120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16935 cCL(dvfsm
, e400140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16936 cCL(dvfsz
, e400160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16937 cCL(dvfd
, e400180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16938 cCL(dvfdp
, e4001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16939 cCL(dvfdm
, e4001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16940 cCL(dvfdz
, e4001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16941 cCL(dvfe
, e480100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16942 cCL(dvfep
, e480120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16943 cCL(dvfem
, e480140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16944 cCL(dvfez
, e480160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16946 cCL(rdfs
, e500100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16947 cCL(rdfsp
, e500120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16948 cCL(rdfsm
, e500140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16949 cCL(rdfsz
, e500160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16950 cCL(rdfd
, e500180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16951 cCL(rdfdp
, e5001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16952 cCL(rdfdm
, e5001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16953 cCL(rdfdz
, e5001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16954 cCL(rdfe
, e580100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16955 cCL(rdfep
, e580120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16956 cCL(rdfem
, e580140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16957 cCL(rdfez
, e580160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16959 cCL(pows
, e600100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16960 cCL(powsp
, e600120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16961 cCL(powsm
, e600140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16962 cCL(powsz
, e600160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16963 cCL(powd
, e600180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16964 cCL(powdp
, e6001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16965 cCL(powdm
, e6001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16966 cCL(powdz
, e6001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16967 cCL(powe
, e680100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16968 cCL(powep
, e680120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16969 cCL(powem
, e680140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16970 cCL(powez
, e680160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16972 cCL(rpws
, e700100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16973 cCL(rpwsp
, e700120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16974 cCL(rpwsm
, e700140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16975 cCL(rpwsz
, e700160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16976 cCL(rpwd
, e700180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16977 cCL(rpwdp
, e7001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16978 cCL(rpwdm
, e7001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16979 cCL(rpwdz
, e7001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16980 cCL(rpwe
, e780100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16981 cCL(rpwep
, e780120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16982 cCL(rpwem
, e780140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16983 cCL(rpwez
, e780160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16985 cCL(rmfs
, e800100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16986 cCL(rmfsp
, e800120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16987 cCL(rmfsm
, e800140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16988 cCL(rmfsz
, e800160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16989 cCL(rmfd
, e800180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16990 cCL(rmfdp
, e8001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16991 cCL(rmfdm
, e8001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16992 cCL(rmfdz
, e8001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16993 cCL(rmfe
, e880100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16994 cCL(rmfep
, e880120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16995 cCL(rmfem
, e880140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16996 cCL(rmfez
, e880160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16998 cCL(fmls
, e900100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
16999 cCL(fmlsp
, e900120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17000 cCL(fmlsm
, e900140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17001 cCL(fmlsz
, e900160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17002 cCL(fmld
, e900180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17003 cCL(fmldp
, e9001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17004 cCL(fmldm
, e9001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17005 cCL(fmldz
, e9001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17006 cCL(fmle
, e980100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17007 cCL(fmlep
, e980120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17008 cCL(fmlem
, e980140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17009 cCL(fmlez
, e980160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17011 cCL(fdvs
, ea00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17012 cCL(fdvsp
, ea00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17013 cCL(fdvsm
, ea00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17014 cCL(fdvsz
, ea00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17015 cCL(fdvd
, ea00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17016 cCL(fdvdp
, ea001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17017 cCL(fdvdm
, ea001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17018 cCL(fdvdz
, ea001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17019 cCL(fdve
, ea80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17020 cCL(fdvep
, ea80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17021 cCL(fdvem
, ea80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17022 cCL(fdvez
, ea80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17024 cCL(frds
, eb00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17025 cCL(frdsp
, eb00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17026 cCL(frdsm
, eb00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17027 cCL(frdsz
, eb00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17028 cCL(frdd
, eb00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17029 cCL(frddp
, eb001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17030 cCL(frddm
, eb001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17031 cCL(frddz
, eb001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17032 cCL(frde
, eb80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17033 cCL(frdep
, eb80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17034 cCL(frdem
, eb80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17035 cCL(frdez
, eb80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17037 cCL(pols
, ec00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17038 cCL(polsp
, ec00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17039 cCL(polsm
, ec00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17040 cCL(polsz
, ec00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17041 cCL(pold
, ec00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17042 cCL(poldp
, ec001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17043 cCL(poldm
, ec001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17044 cCL(poldz
, ec001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17045 cCL(pole
, ec80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17046 cCL(polep
, ec80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17047 cCL(polem
, ec80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17048 cCL(polez
, ec80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
 cCE(cmf,   e90f110, 2, (RF, RF_IF), fpa_cmp),
 C3E(cmfe,  ed0f110, 2, (RF, RF_IF), fpa_cmp),
 cCE(cnf,   eb0f110, 2, (RF, RF_IF), fpa_cmp),
 C3E(cnfe,  ef0f110, 2, (RF, RF_IF), fpa_cmp),

 cCL(flts,  e000110, 2, (RF, RR), rn_rd),
 cCL(fltsp, e000130, 2, (RF, RR), rn_rd),
 cCL(fltsm, e000150, 2, (RF, RR), rn_rd),
 cCL(fltsz, e000170, 2, (RF, RR), rn_rd),
 cCL(fltd,  e000190, 2, (RF, RR), rn_rd),
 cCL(fltdp, e0001b0, 2, (RF, RR), rn_rd),
 cCL(fltdm, e0001d0, 2, (RF, RR), rn_rd),
 cCL(fltdz, e0001f0, 2, (RF, RR), rn_rd),
 cCL(flte,  e080110, 2, (RF, RR), rn_rd),
 cCL(fltep, e080130, 2, (RF, RR), rn_rd),
 cCL(fltem, e080150, 2, (RF, RR), rn_rd),
 cCL(fltez, e080170, 2, (RF, RR), rn_rd),

 /* The implementation of the FIX instruction is broken on some
    assemblers, in that it accepts a precision specifier as well as a
    rounding specifier, despite the fact that this is meaningless.
    To be more compatible, we accept it as well, though of course it
    does not set any bits.  */
 cCE(fix,   e100110, 2, (RR, RF), rd_rm),
 cCL(fixp,  e100130, 2, (RR, RF), rd_rm),
 cCL(fixm,  e100150, 2, (RR, RF), rd_rm),
 cCL(fixz,  e100170, 2, (RR, RF), rd_rm),
 cCL(fixsp, e100130, 2, (RR, RF), rd_rm),
 cCL(fixsm, e100150, 2, (RR, RF), rd_rm),
 cCL(fixsz, e100170, 2, (RR, RF), rd_rm),
 cCL(fixdp, e100130, 2, (RR, RF), rd_rm),
 cCL(fixdm, e100150, 2, (RR, RF), rd_rm),
 cCL(fixdz, e100170, 2, (RR, RF), rd_rm),
 cCL(fixep, e100130, 2, (RR, RF), rd_rm),
 cCL(fixem, e100150, 2, (RR, RF), rd_rm),
 cCL(fixez, e100170, 2, (RR, RF), rd_rm),
 /* Instructions that were new with the real FPA, call them V2.  */
#define ARM_VARIANT & fpu_fpa_ext_v2

 cCE(lfm,   c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
 cCL(lfmfd, c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
 cCL(lfmea, d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
 cCE(sfm,   c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
 cCL(sfmfd, d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
 cCL(sfmea, c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
#define ARM_VARIANT & fpu_vfp_ext_v1xd	/* VFP V1xD (single precision).  */

 /* Moves and type conversions.  */
 cCE(fcpys,  eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(fmrs,   e100a10, 2, (RR, RVS),  vfp_reg_from_sp),
 cCE(fmsr,   e000a10, 2, (RVS, RR),  vfp_sp_from_reg),
 cCE(fmstat, ef1fa10, 0, (),         noargs),
 cCE(fsitos, eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(fuitos, eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(ftosis, ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(ftosizs, ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(ftouis, ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(ftouizs, ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(fmrx,   ef00a10, 2, (RR, RVC),  rd_rn),
 cCE(fmxr,   ee00a10, 2, (RVC, RR),  rn_rd),
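 /* Editorial note (illustrative only): the conversion mnemonics above
    follow the VFP "f<to><from>" naming, and the trailing "z" selects
    round-towards-zero regardless of the FPSCR rounding mode.  A hedged
    sketch of that difference for the float-to-signed conversions
    (invented example_* helpers, saturation of out-of-range values
    omitted for brevity):

      #include <math.h>
      #include <stdint.h>

      static int32_t
      example_ftosizs (float value)
      {
        // ftosizs always truncates towards zero.
        return (int32_t) truncf (value);
      }

      static int32_t
      example_ftosis (float value)
      {
        // ftosis uses the current rounding mode (FPSCR / fenv).
        return (int32_t) rintf (value);
      }  */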
17115 /* Memory operations. */
17116 cCE(flds
, d100a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
17117 cCE(fsts
, d000a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
17118 cCE(fldmias
, c900a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmia
),
17119 cCE(fldmfds
, c900a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmia
),
17120 cCE(fldmdbs
, d300a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmdb
),
17121 cCE(fldmeas
, d300a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmdb
),
17122 cCE(fldmiax
, c900b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmia
),
17123 cCE(fldmfdx
, c900b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmia
),
17124 cCE(fldmdbx
, d300b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmdb
),
17125 cCE(fldmeax
, d300b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmdb
),
17126 cCE(fstmias
, c800a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmia
),
17127 cCE(fstmeas
, c800a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmia
),
17128 cCE(fstmdbs
, d200a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmdb
),
17129 cCE(fstmfds
, d200a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmdb
),
17130 cCE(fstmiax
, c800b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmia
),
17131 cCE(fstmeax
, c800b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmia
),
17132 cCE(fstmdbx
, d200b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmdb
),
17133 cCE(fstmfdx
, d200b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmdb
),
17135 /* Monadic operations. */
17136 cCE(fabss
, eb00ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
17137 cCE(fnegs
, eb10a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
17138 cCE(fsqrts
, eb10ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
17140 /* Dyadic operations. */
17141 cCE(fadds
, e300a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
17142 cCE(fsubs
, e300a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
17143 cCE(fmuls
, e200a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
17144 cCE(fdivs
, e800a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
17145 cCE(fmacs
, e000a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
17146 cCE(fmscs
, e100a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
17147 cCE(fnmuls
, e200a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
17148 cCE(fnmacs
, e000a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
17149 cCE(fnmscs
, e100a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
17152 cCE(fcmps
, eb40a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
17153 cCE(fcmpzs
, eb50a40
, 1, (RVS
), vfp_sp_compare_z
),
17154 cCE(fcmpes
, eb40ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
17155 cCE(fcmpezs
, eb50ac0
, 1, (RVS
), vfp_sp_compare_z
),
17158 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
17160 /* Moves and type conversions. */
17161 cCE(fcpyd
, eb00b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
17162 cCE(fcvtds
, eb70ac0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
17163 cCE(fcvtsd
, eb70bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
17164 cCE(fmdhr
, e200b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
17165 cCE(fmdlr
, e000b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
17166 cCE(fmrdh
, e300b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
17167 cCE(fmrdl
, e100b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
17168 cCE(fsitod
, eb80bc0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
17169 cCE(fuitod
, eb80b40
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
17170 cCE(ftosid
, ebd0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
17171 cCE(ftosizd
, ebd0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
17172 cCE(ftouid
, ebc0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
17173 cCE(ftouizd
, ebc0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
17175 /* Memory operations. */
17176 cCE(fldd
, d100b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
17177 cCE(fstd
, d000b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
17178 cCE(fldmiad
, c900b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmia
),
17179 cCE(fldmfdd
, c900b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmia
),
17180 cCE(fldmdbd
, d300b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmdb
),
17181 cCE(fldmead
, d300b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmdb
),
17182 cCE(fstmiad
, c800b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmia
),
17183 cCE(fstmead
, c800b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmia
),
17184 cCE(fstmdbd
, d200b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmdb
),
17185 cCE(fstmfdd
, d200b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmdb
),
17187 /* Monadic operations. */
17188 cCE(fabsd
, eb00bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
17189 cCE(fnegd
, eb10b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
17190 cCE(fsqrtd
, eb10bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
17192 /* Dyadic operations. */
17193 cCE(faddd
, e300b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
17194 cCE(fsubd
, e300b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
17195 cCE(fmuld
, e200b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
17196 cCE(fdivd
, e800b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
17197 cCE(fmacd
, e000b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
17198 cCE(fmscd
, e100b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
17199 cCE(fnmuld
, e200b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
17200 cCE(fnmacd
, e000b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
17201 cCE(fnmscd
, e100b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
17204 cCE(fcmpd
, eb40b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
17205 cCE(fcmpzd
, eb50b40
, 1, (RVD
), vfp_dp_rd
),
17206 cCE(fcmped
, eb40bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
17207 cCE(fcmpezd
, eb50bc0
, 1, (RVD
), vfp_dp_rd
),
#define ARM_VARIANT & fpu_vfp_ext_v2

 cCE(fmsrr, c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
 cCE(fmrrs, c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
 cCE(fmdrr, c400b10, 3, (RVD, RR, RR),    vfp_dp_rm_rd_rn),
 cCE(fmrrd, c500b10, 3, (RR, RR, RVD),    vfp_dp_rd_rn_rm),

 /* Instructions which may belong to either the Neon or VFP instruction sets.
    Individual encoder functions perform additional architecture checks.  */
#define ARM_VARIANT & fpu_vfp_ext_v1xd
#undef THUMB_VARIANT
#define THUMB_VARIANT & fpu_vfp_ext_v1xd

 /* These mnemonics are unique to VFP.  */
 NCE(vsqrt, 0, 2, (RVSD, RVSD),       vfp_nsyn_sqrt),
 NCE(vdiv,  0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
 nCE(vnmul, vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
 nCE(vnmla, vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
 nCE(vnmls, vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
 nCE(vcmp,  vcmp,  2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
 nCE(vcmpe, vcmpe, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
 NCE(vpop,  0, 1, (VRSDLST), vfp_nsyn_pop),
 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),

 /* Mnemonics shared by Neon and VFP.  */
 nCEF(vmul, vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
 nCEF(vmla, vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
 nCEF(vmls, vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),

 nCEF(vadd, vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
 nCEF(vsub, vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),

 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),

 NCE(vldm,   c900b00, 2, (RRw, VRSDLST), neon_ldm_stm),
 NCE(vldmia, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm),
 NCE(vldmdb, d100b00, 2, (RRw, VRSDLST), neon_ldm_stm),
 NCE(vstm,   c800b00, 2, (RRw, VRSDLST), neon_ldm_stm),
 NCE(vstmia, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm),
 NCE(vstmdb, d000b00, 2, (RRw, VRSDLST), neon_ldm_stm),
 NCE(vldr,   d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
 NCE(vstr,   d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),

 nCEF(vcvt,  vcvt, 3, (RNSDQ, RNSDQ, oI32b), neon_cvt),
 nCEF(vcvtb, vcvt, 2, (RVS, RVS), neon_cvtb),
 nCEF(vcvtt, vcvt, 2, (RVS, RVS), neon_cvtt),

 /* NOTE: All VMOV encoding is special-cased!  */
 NCE(vmov,  0, 1, (VMOV), neon_mov),
 NCE(vmovq, 0, 1, (VMOV), neon_mov),
#undef THUMB_VARIANT
#define THUMB_VARIANT & fpu_neon_ext_v1
#define ARM_VARIANT & fpu_neon_ext_v1

 /* Data processing with three registers of the same length.  */
 /* integer ops, valid types S8 S16 S32 U8 U16 U32.  */
 NUF(vaba,    0000710, 3, (RNDQ, RNDQ, RNDQ),  neon_dyadic_i_su),
 NUF(vabaq,   0000710, 3, (RNQ, RNQ, RNQ),     neon_dyadic_i_su),
 NUF(vhadd,   0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
 NUF(vhaddq,  0000000, 3, (RNQ, oRNQ, RNQ),    neon_dyadic_i_su),
 NUF(vrhadd,  0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ),    neon_dyadic_i_su),
 NUF(vhsub,   0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
 NUF(vhsubq,  0000200, 3, (RNQ, oRNQ, RNQ),    neon_dyadic_i_su),
17281 NUF(vqadd
, 0000010, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
17282 NUF(vqaddq
, 0000010, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
17283 NUF(vqsub
, 0000210, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
17284 NUF(vqsubq
, 0000210, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
17285 NUF(vrshl
, 0000500, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
17286 NUF(vrshlq
, 0000500, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
17287 NUF(vqrshl
, 0000510, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
17288 NUF(vqrshlq
, 0000510, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
17289 /* If not immediate, fall back to neon_dyadic_i64_su.
17290 shl_imm should accept I8 I16 I32 I64,
17291 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
17292 nUF(vshl
, vshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_shl_imm
),
17293 nUF(vshlq
, vshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_shl_imm
),
17294 nUF(vqshl
, vqshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_qshl_imm
),
17295 nUF(vqshlq
, vqshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_qshl_imm
),
 /* Logic ops, types optional & ignored.  */
 nUF(vand, vand, 2, (RNDQ, NILO), neon_logic),
 nUF(vandq, vand, 2, (RNQ, NILO), neon_logic),
 nUF(vbic, vbic, 2, (RNDQ, NILO), neon_logic),
 nUF(vbicq, vbic, 2, (RNQ, NILO), neon_logic),
 nUF(vorr, vorr, 2, (RNDQ, NILO), neon_logic),
 nUF(vorrq, vorr, 2, (RNQ, NILO), neon_logic),
 nUF(vorn, vorn, 2, (RNDQ, NILO), neon_logic),
 nUF(vornq, vorn, 2, (RNQ, NILO), neon_logic),
 nUF(veor, veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
 nUF(veorq, veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
 /* Bitfield ops, untyped.  */
 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32.  */
 nUF(vabd, vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
 nUF(vabdq, vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
 nUF(vmax, vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
 nUF(vmaxq, vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
 nUF(vmin, vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
 nUF(vminq, vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
    back to neon_dyadic_if_su.  */
 nUF(vcge, vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
 nUF(vcgeq, vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
 nUF(vcgt, vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
 nUF(vcgtq, vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
 nUF(vclt, vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
 nUF(vcltq, vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
 nUF(vcle, vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
 nUF(vcleq, vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
 /* Comparison. Type I8 I16 I32 F32.  */
 nUF(vceq, vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
 nUF(vceqq, vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
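 /* For example, "vceq.i32 d0, d1, d2" compares two registers, while
    "vceq.i32 d0, d1, #0" uses the immediate-zero form permitted by the
    RNDQ_I0 operand.  */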
 /* As above, D registers only.  */
 nUF(vpmax, vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
 nUF(vpmin, vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
 /* Int and float variants, signedness unimportant.  */
 nUF(vmlaq, vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
 nUF(vmlsq, vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
 nUF(vpadd, vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
 /* Add/sub take types I8 I16 I32 I64 F32.  */
 nUF(vaddq, vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
 nUF(vsubq, vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
 /* vtst takes sizes 8, 16, 32.  */
 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
 /* VMUL takes I8 I16 I32 F32 P8.  */
 nUF(vmulq, vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
 /* VQD{R}MULH takes S16 S32.  */
 nUF(vqdmulh, vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
 nUF(vqdmulhq, vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
 nUF(vqrdmulh, vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
 nUF(vqrdmulhq, vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),

 /* Two address, int/float. Types S8 S16 S32 F32.  */
 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),

 /* Data processing with two registers and a shift amount.  */
 /* Right shifts, and variants with rounding.
    Types accepted S8 S16 S32 S64 U8 U16 U32 U64.  */
 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
 /* Shift and insert. Sizes accepted 8 16 32 64.  */
 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64.  */
 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
 /* Right shift immediate, saturating & narrowing, with rounding variants.
    Types accepted S16 S32 S64 U16 U32 U64.  */
 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
 /* As above, unsigned. Types accepted S16 S32 S64.  */
 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
 /* Right shift narrowing. Types accepted I16 I32 I64.  */
 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant.  */
 nUF(vshll, vshll, 3, (RNQ, RND, I32), neon_shll),
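 /* For example, "vshll.u16 q0, d1, #4" widens each 16-bit lane while
    shifting; the maximum-shift form "vshll.u16 q0, d1, #16" is the special
    case mentioned above and uses a different encoding.  */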
 /* CVT with optional immediate for fixed-point variant.  */
 nUF(vcvtq, vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),

 nUF(vmvn, vmvn, 2, (RNDQ, RNDQ_IMVNb), neon_mvn),
 nUF(vmvnq, vmvn, 2, (RNQ, RNDQ_IMVNb), neon_mvn),

 /* Data processing, three registers of different lengths.  */
 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32.  */
 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
 /* If not scalar, fall back to neon_dyadic_long.
    Vector types as above, scalar types S16 S32 U16 U32.  */
 nUF(vmlal, vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
 nUF(vmlsl, vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32.  */
 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
 /* Dyadic, narrowing insns. Types I16 I32 I64.  */
 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
 /* Saturating doubling multiplies. Types S16 S32.  */
 nUF(vqdmlal, vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
 nUF(vqdmlsl, vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
 nUF(vqdmull, vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
    S16 S32 U16 U32.  */
 nUF(vmull, vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),

 /* Extract. Size 8.  */
 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),

 /* Two registers, miscellaneous.  */
 /* Reverse. Sizes 8 16 32 (must be < size in opcode).  */
 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
 /* Vector replicate. Sizes 8 16 32.  */
 nCE(vdup, vdup, 2, (RNDQ, RR_RNSC), neon_dup),
 nCE(vdupq, vdup, 2, (RNQ, RR_RNSC), neon_dup),
 /* VMOVL. Types S8 S16 S32 U8 U16 U32.  */
 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
 /* VMOVN. Types I16 I32 I64.  */
 nUF(vmovn, vmovn, 2, (RND, RNQ), neon_movn),
 /* VQMOVN. Types S16 S32 S64 U16 U32 U64.  */
 nUF(vqmovn, vqmovn, 2, (RND, RNQ), neon_qmovn),
 /* VQMOVUN. Types S16 S32 S64.  */
 nUF(vqmovun, vqmovun, 2, (RND, RNQ), neon_qmovun),
 /* VZIP / VUZP. Sizes 8 16 32.  */
 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
 /* VQABS / VQNEG. Types S8 S16 S32.  */
 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32.  */
 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
 /* Reciprocal estimates. Types U32 F32.  */
 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
 /* VCLS. Types S8 S16 S32.  */
 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
 /* VCLZ. Types I8 I16 I32.  */
 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
 /* VCNT. Size 8.  */
 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
 /* Two address, untyped.  */
 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
 /* VTRN. Sizes 8 16 32.  */
 nUF(vtrn, vtrn, 2, (RNDQ, RNDQ), neon_trn),
 nUF(vtrnq, vtrn, 2, (RNQ, RNQ), neon_trn),

 /* Table lookup. Size 8.  */
 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
#undef THUMB_VARIANT
#define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
#define ARM_VARIANT & fpu_vfp_v3_or_neon_ext

 /* Neon element/structure load/store.  */
 nUF(vld1, vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vst1, vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vld2, vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vst2, vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vld3, vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vst3, vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vld4, vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vst4, vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
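 /* For example, "vld2.8 {d0, d1}, [r0]!" loads and de-interleaves two
    registers from the address in r0 and then post-increments r0; the
    NSTRLST operand covers the braced register-list syntax.  */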
#undef THUMB_VARIANT
#define THUMB_VARIANT & fpu_vfp_ext_v3
#define ARM_VARIANT & fpu_vfp_ext_v3

 cCE(fconsts, eb00a00, 2, (RVS, I255), vfp_sp_const),
 cCE(fconstd, eb00b00, 2, (RVD, I255), vfp_dp_const),
 cCE(fshtos, eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
 cCE(fshtod, eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
 cCE(fsltos, eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
 cCE(fsltod, eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
 cCE(fuhtos, ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
 cCE(fuhtod, ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
 cCE(fultos, ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
 cCE(fultod, ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
 cCE(ftoshs, ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
 cCE(ftoshd, ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
 cCE(ftosls, ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
 cCE(ftosld, ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
 cCE(ftouhs, ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
 cCE(ftouhd, ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
 cCE(ftouls, ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
 cCE(ftould, ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
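 /* Illustrative note: these are the VFPv3 fixed-point conversions; an input
    such as "fshtos s0, #8" should convert the signed 16-bit fixed-point
    value in s0 with 8 fraction bits to single precision, the immediate
    being the fraction-bit count accepted by the I16z/I32 operands.  */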
#undef THUMB_VARIANT
#define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions.  */

 cCE(mia, e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
 cCE(miaph, e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
 cCE(miabb, e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
 cCE(miabt, e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
 cCE(miatb, e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
 cCE(miatt, e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
 cCE(mar, c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
 cCE(mra, c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
#define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology.  */

 cCE(tandcb, e13f130, 1, (RR), iwmmxt_tandorc),
 cCE(tandch, e53f130, 1, (RR), iwmmxt_tandorc),
 cCE(tandcw, e93f130, 1, (RR), iwmmxt_tandorc),
 cCE(tbcstb, e400010, 2, (RIWR, RR), rn_rd),
 cCE(tbcsth, e400050, 2, (RIWR, RR), rn_rd),
 cCE(tbcstw, e400090, 2, (RIWR, RR), rn_rd),
 cCE(textrcb, e130170, 2, (RR, I7), iwmmxt_textrc),
 cCE(textrch, e530170, 2, (RR, I7), iwmmxt_textrc),
 cCE(textrcw, e930170, 2, (RR, I7), iwmmxt_textrc),
 cCE(textrmub, e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
 cCE(textrmuh, e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
 cCE(textrmuw, e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
 cCE(textrmsb, e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
 cCE(textrmsh, e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
 cCE(textrmsw, e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
 cCE(tinsrb, e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
 cCE(tinsrh, e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
 cCE(tinsrw, e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
 cCE(tmcr, e000110, 2, (RIWC_RIWG, RR), rn_rd),
 cCE(tmcrr, c400000, 3, (RIWR, RR, RR), rm_rd_rn),
 cCE(tmia, e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
 cCE(tmiaph, e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
 cCE(tmiabb, e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
 cCE(tmiabt, e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
 cCE(tmiatb, e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
 cCE(tmiatt, e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
 cCE(tmovmskb, e100030, 2, (RR, RIWR), rd_rn),
 cCE(tmovmskh, e500030, 2, (RR, RIWR), rd_rn),
 cCE(tmovmskw, e900030, 2, (RR, RIWR), rd_rn),
 cCE(tmrc, e100110, 2, (RR, RIWC_RIWG), rd_rn),
 cCE(tmrrc, c500000, 3, (RR, RR, RIWR), rd_rn_rm),
 cCE(torcb, e13f150, 1, (RR), iwmmxt_tandorc),
 cCE(torch, e53f150, 1, (RR), iwmmxt_tandorc),
 cCE(torcw, e93f150, 1, (RR), iwmmxt_tandorc),
 cCE(waccb, e0001c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wacch, e4001c0, 2, (RIWR, RIWR), rd_rn),
 cCE(waccw, e8001c0, 2, (RIWR, RIWR), rd_rn),
 cCE(waddbss, e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddb, e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddbus, e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddhss, e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddh, e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddhus, e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddwss, eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddw, e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddwus, e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waligni, e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
 cCE(walignr0, e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(walignr1, e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(walignr2, ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(walignr3, eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wand, e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wandn, e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wavg2b, e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wavg2br, e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wavg2h, ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wavg2hr, ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpeqb, e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpeqh, e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpeqw, e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpgtub, e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpgtuh, e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpgtuw, e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpgtsb, e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpgtsh, e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpgtsw, eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wldrb, c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
 cCE(wldrh, c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
 cCE(wldrw, c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
 cCE(wldrd, c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
 cCE(wmacs, e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmacsz, e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmacu, e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmacuz, e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmadds, ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaddu, e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaxsb, e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaxsh, e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaxsw, ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaxub, e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaxuh, e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaxuw, e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wminsb, e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wminsh, e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wminsw, eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wminub, e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wminuh, e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wminuw, e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmov, e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
 cCE(wmulsm, e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulsl, e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulum, e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulul, e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wor, e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wpackhss, e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wpackhus, e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wpackwss, eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wpackwus, e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wpackdss, ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wpackdus, ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wrorh, e700040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wrorhg, e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wrorw, eb00040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wrorwg, eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wrord, ef00040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wrordg, ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wsadb, e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsadbz, e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsadh, e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsadhz, e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wshufh, e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
 cCE(wsllh, e500040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wsllhg, e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wsllw, e900040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wsllwg, e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wslld, ed00040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wslldg, ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wsrah, e400040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wsrahg, e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wsraw, e800040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wsrawg, e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wsrad, ec00040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wsradg, ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wsrlh, e600040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wsrlhg, e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wsrlw, ea00040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wsrlwg, ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wsrld, ee00040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wsrldg, ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wstrb, c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
 cCE(wstrh, c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
 cCE(wstrw, c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
 cCE(wstrd, c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
 cCE(wsubbss, e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsubb, e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsubbus, e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsubhss, e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsubh, e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsubhus, e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsubwss, eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsubw, e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsubwus, e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wunpckehub, e0000c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckehuh, e4000c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckehuw, e8000c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckehsb, e2000c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckehsh, e6000c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckehsw, ea000c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckihb, e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wunpckihh, e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wunpckihw, e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wunpckelub, e0000e0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckeluh, e4000e0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckeluw, e8000e0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckelsb, e2000e0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckelsh, e6000e0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckelsw, ea000e0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckilb, e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wunpckilh, e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wunpckilw, e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wxor, e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wzero, e300000, 1, (RIWR), iwmmxt_wzero),
#define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2.  */

 cCE(torvscb, e12f190, 1, (RR), iwmmxt_tandorc),
 cCE(torvsch, e52f190, 1, (RR), iwmmxt_tandorc),
 cCE(torvscw, e92f190, 1, (RR), iwmmxt_tandorc),
 cCE(wabsb, e2001c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wabsh, e6001c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wabsw, ea001c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wabsdiffb, e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wabsdiffh, e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wabsdiffw, e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddbhusl, e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddbhusm, e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddhc, e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddwc, ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddsubhx, ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wavg4, e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wavg4r, e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaddsn, ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaddsx, eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaddun, ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaddux, e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmerge, e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
 cCE(wmiabb, e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiabt, e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiatb, e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiatt, e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiabbn, e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiabtn, e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiatbn, e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiattn, e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiawbb, e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiawbt, e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiawtb, ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiawtt, eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiawbbn, ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiawbtn, ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiawtbn, ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiawttn, ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulsmr, ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulumr, ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulwumr, ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulwsmr, ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulwum, ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulwsm, ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulwl, eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmiabb, e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmiabt, e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmiatb, ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmiatt, eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmiabbn, ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmiabtn, ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmiatbn, ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmiattn, ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmulm, e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmulmr, e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmulwm, ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmulwmr, ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsubaddhx, ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
#define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions.  */

 cCE(cfldrs, c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
 cCE(cfldrd, c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
 cCE(cfldr32, c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
 cCE(cfldr64, c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
 cCE(cfstrs, c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
 cCE(cfstrd, c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
 cCE(cfstr32, c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
 cCE(cfstr64, c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
 cCE(cfmvsr, e000450, 2, (RMF, RR), rn_rd),
 cCE(cfmvrs, e100450, 2, (RR, RMF), rd_rn),
 cCE(cfmvdlr, e000410, 2, (RMD, RR), rn_rd),
 cCE(cfmvrdl, e100410, 2, (RR, RMD), rd_rn),
 cCE(cfmvdhr, e000430, 2, (RMD, RR), rn_rd),
 cCE(cfmvrdh, e100430, 2, (RR, RMD), rd_rn),
 cCE(cfmv64lr, e000510, 2, (RMDX, RR), rn_rd),
 cCE(cfmvr64l, e100510, 2, (RR, RMDX), rd_rn),
 cCE(cfmv64hr, e000530, 2, (RMDX, RR), rn_rd),
 cCE(cfmvr64h, e100530, 2, (RR, RMDX), rd_rn),
 cCE(cfmval32, e200440, 2, (RMAX, RMFX), rd_rn),
 cCE(cfmv32al, e100440, 2, (RMFX, RMAX), rd_rn),
 cCE(cfmvam32, e200460, 2, (RMAX, RMFX), rd_rn),
 cCE(cfmv32am, e100460, 2, (RMFX, RMAX), rd_rn),
 cCE(cfmvah32, e200480, 2, (RMAX, RMFX), rd_rn),
 cCE(cfmv32ah, e100480, 2, (RMFX, RMAX), rd_rn),
 cCE(cfmva32, e2004a0, 2, (RMAX, RMFX), rd_rn),
 cCE(cfmv32a, e1004a0, 2, (RMFX, RMAX), rd_rn),
 cCE(cfmva64, e2004c0, 2, (RMAX, RMDX), rd_rn),
 cCE(cfmv64a, e1004c0, 2, (RMDX, RMAX), rd_rn),
 cCE(cfmvsc32, e2004e0, 2, (RMDS, RMDX), mav_dspsc),
 cCE(cfmv32sc, e1004e0, 2, (RMDX, RMDS), rd),
 cCE(cfcpys, e000400, 2, (RMF, RMF), rd_rn),
 cCE(cfcpyd, e000420, 2, (RMD, RMD), rd_rn),
 cCE(cfcvtsd, e000460, 2, (RMD, RMF), rd_rn),
 cCE(cfcvtds, e000440, 2, (RMF, RMD), rd_rn),
 cCE(cfcvt32s, e000480, 2, (RMF, RMFX), rd_rn),
 cCE(cfcvt32d, e0004a0, 2, (RMD, RMFX), rd_rn),
 cCE(cfcvt64s, e0004c0, 2, (RMF, RMDX), rd_rn),
 cCE(cfcvt64d, e0004e0, 2, (RMD, RMDX), rd_rn),
 cCE(cfcvts32, e100580, 2, (RMFX, RMF), rd_rn),
 cCE(cfcvtd32, e1005a0, 2, (RMFX, RMD), rd_rn),
 cCE(cftruncs32, e1005c0, 2, (RMFX, RMF), rd_rn),
 cCE(cftruncd32, e1005e0, 2, (RMFX, RMD), rd_rn),
 cCE(cfrshl32, e000550, 3, (RMFX, RMFX, RR), mav_triple),
 cCE(cfrshl64, e000570, 3, (RMDX, RMDX, RR), mav_triple),
 cCE(cfsh32, e000500, 3, (RMFX, RMFX, I63s), mav_shift),
 cCE(cfsh64, e200500, 3, (RMDX, RMDX, I63s), mav_shift),
 cCE(cfcmps, e100490, 3, (RR, RMF, RMF), rd_rn_rm),
 cCE(cfcmpd, e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
 cCE(cfcmp32, e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
 cCE(cfcmp64, e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
 cCE(cfabss, e300400, 2, (RMF, RMF), rd_rn),
 cCE(cfabsd, e300420, 2, (RMD, RMD), rd_rn),
 cCE(cfnegs, e300440, 2, (RMF, RMF), rd_rn),
 cCE(cfnegd, e300460, 2, (RMD, RMD), rd_rn),
 cCE(cfadds, e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
 cCE(cfaddd, e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
 cCE(cfsubs, e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
 cCE(cfsubd, e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
 cCE(cfmuls, e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
 cCE(cfmuld, e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
 cCE(cfabs32, e300500, 2, (RMFX, RMFX), rd_rn),
 cCE(cfabs64, e300520, 2, (RMDX, RMDX), rd_rn),
 cCE(cfneg32, e300540, 2, (RMFX, RMFX), rd_rn),
 cCE(cfneg64, e300560, 2, (RMDX, RMDX), rd_rn),
 cCE(cfadd32, e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
 cCE(cfadd64, e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
 cCE(cfsub32, e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
 cCE(cfsub64, e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
 cCE(cfmul32, e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
 cCE(cfmul64, e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
 cCE(cfmac32, e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
 cCE(cfmsc32, e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
 cCE(cfmadd32, e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
 cCE(cfmsub32, e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
 cCE(cfmadda32, e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
 cCE(cfmsuba32, e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),

#undef THUMB_VARIANT
/* MD interface: bits in the object file.  */

/* Turn an integer of n bytes (in val) into a stream of bytes appropriate
   for use in the a.out file, and stores them in the array pointed to by buf.
   This knows about the endian-ness of the target machine and does
   THE RIGHT THING, whatever it is.  Possible values for n are 1 (byte)
   2 (short) and 4 (long).  Floating numbers are put out as a series of
   LITTLENUMS (shorts, here at least).  */

void
md_number_to_chars (char * buf, valueT val, int n)
{
  if (target_big_endian)
    number_to_chars_bigendian (buf, val, n);
  else
    number_to_chars_littleendian (buf, val, n);
}
static valueT
md_chars_to_number (char * buf, int n)
{
  valueT result = 0;
  unsigned char * where = (unsigned char *) buf;

  if (target_big_endian)
    {
      while (n--)
        {
          result <<= 8;
          result |= (*where++ & 255);
        }
    }
  else
    {
      while (n--)
        {
          result <<= 8;
          result |= (where[n] & 255);
        }
    }

  return result;
}
/* MD interface: Sections.  */

/* Estimate the size of a frag before relaxing.  Assume everything fits in
   2 bytes.  */

md_estimate_size_before_relax (fragS * fragp,
                               segT segtype ATTRIBUTE_UNUSED)
/* Convert a machine dependent frag.  */

md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
  unsigned long insn;
  unsigned long old_op;

  buf = fragp->fr_literal + fragp->fr_fix;

  old_op = bfd_get_16(abfd, buf);
  if (fragp->fr_symbol)
      exp.X_op = O_symbol;
      exp.X_add_symbol = fragp->fr_symbol;
      exp.X_op = O_constant;
  exp.X_add_number = fragp->fr_offset;
  opcode = fragp->fr_subtype;
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_pc2:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
      if (fragp->fr_var == 4)
          insn = THUMB_OP32 (opcode);
          if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
              insn |= (old_op & 0x700) << 4;
              insn |= (old_op & 7) << 12;
              insn |= (old_op & 0x38) << 13;
          insn |= 0x00000c00;
          put_thumb32_insn (buf, insn);
          reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
        reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
      pc_rel = (opcode == T_MNEM_ldr_pc2);
      if (fragp->fr_var == 4)
          insn = THUMB_OP32 (opcode);
          insn |= (old_op & 0xf0) << 4;
          put_thumb32_insn (buf, insn);
          reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
        reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      exp.X_add_number -= 4;
      if (fragp->fr_var == 4)
          int r0off = (opcode == T_MNEM_mov
                       || opcode == T_MNEM_movs) ? 0 : 8;
          insn = THUMB_OP32 (opcode);
          insn = (insn & 0xe1ffffff) | 0x10000000;
          insn |= (old_op & 0x700) << r0off;
          put_thumb32_insn (buf, insn);
          reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
        reloc_type = BFD_RELOC_ARM_THUMB_IMM;
      if (fragp->fr_var == 4)
          insn = THUMB_OP32(opcode);
          put_thumb32_insn (buf, insn);
          reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
        reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      if (fragp->fr_var == 4)
          insn = THUMB_OP32(opcode);
          insn |= (old_op & 0xf00) << 14;
          put_thumb32_insn (buf, insn);
          reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
        reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      if (fragp->fr_var == 4)
          /* ??? Choose between add and addw.  */
          insn = THUMB_OP32 (opcode);
          insn |= (old_op & 0xf0) << 4;
          put_thumb32_insn (buf, insn);
          if (opcode == T_MNEM_add_pc)
            reloc_type = BFD_RELOC_ARM_T32_IMM12;
            reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
        reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      if (fragp->fr_var == 4)
          insn = THUMB_OP32 (opcode);
          insn |= (old_op & 0xf0) << 4;
          insn |= (old_op & 0xf) << 16;
          put_thumb32_insn (buf, insn);
          if (insn & (1 << 20))
            reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
            reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
        reloc_type = BFD_RELOC_ARM_THUMB_ADD;

  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
                      reloc_type);
  fixp->fx_file = fragp->fr_file;
  fixp->fx_line = fragp->fr_line;
  fragp->fr_fix += fragp->fr_var;
/* Return the size of a relaxable immediate operand instruction.
   SHIFT and SIZE specify the form of the allowable immediate.  */

relax_immediate (fragS *fragp, int size, int shift)
  /* ??? Should be able to do better than this.  */
  if (fragp->fr_symbol)

  low = (1 << shift) - 1;
  mask = (1 << (shift + size)) - (1 << shift);
  offset = fragp->fr_offset;
  /* Force misaligned offsets to 32-bit variant.  */
  if (offset & ~mask)
/* Get the address of a symbol during relaxation.  */

relaxed_symbol_addr (fragS *fragp, long stretch)
  sym = fragp->fr_symbol;
  sym_frag = symbol_get_frag (sym);
  know (S_GET_SEGMENT (sym) != absolute_section
        || sym_frag == &zero_address_frag);
  addr = S_GET_VALUE (sym) + fragp->fr_offset;

  /* If frag has yet to be reached on this pass, assume it will
     move by STRETCH just as we did.  If this is not so, it will
     be because some frag between grows, and that will force
     another pass.  */

      && sym_frag->relax_marker != fragp->relax_marker)

      /* Adjust stretch for any alignment frag.  Note that if have
         been expanding the earlier code, the symbol may be
         defined in what appears to be an earlier frag.  FIXME:
         This doesn't handle the fr_subtype field, which specifies
         a maximum number of bytes to skip when doing an
         alignment.  */
      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
          if (f->fr_type == rs_align || f->fr_type == rs_align_code)
                stretch = - ((- stretch)
                             & ~ ((1 << (int) f->fr_offset) - 1));
                stretch &= ~ ((1 << (int) f->fr_offset) - 1);
/* Return the size of a relaxable adr pseudo-instruction or PC-relative
   load.  */

relax_adr (fragS *fragp, asection *sec, long stretch)
  /* Assume worst case for symbols not known to be in the same section.  */
  if (!S_IS_DEFINED (fragp->fr_symbol)
      || sec != S_GET_SEGMENT (fragp->fr_symbol))

  val = relaxed_symbol_addr (fragp, stretch);
  addr = fragp->fr_address + fragp->fr_fix;
  addr = (addr + 4) & ~3;
  /* Force misaligned targets to 32-bit variant.  */
  if (val < 0 || val > 1020)
/* Return the size of a relaxable add/sub immediate instruction.  */

relax_addsub (fragS *fragp, asection *sec)
  buf = fragp->fr_literal + fragp->fr_fix;
  op = bfd_get_16(sec->owner, buf);
  if ((op & 0xf) == ((op >> 4) & 0xf))
    return relax_immediate (fragp, 8, 0);
  return relax_immediate (fragp, 3, 0);
/* Return the size of a relaxable branch instruction.  BITS is the
   size of the offset field in the narrow instruction.  */

relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
  /* Assume worst case for symbols not known to be in the same section.  */
  if (!S_IS_DEFINED (fragp->fr_symbol)
      || sec != S_GET_SEGMENT (fragp->fr_symbol))

  if (S_IS_DEFINED (fragp->fr_symbol)
      && ARM_IS_FUNC (fragp->fr_symbol))

  val = relaxed_symbol_addr (fragp, stretch);
  addr = fragp->fr_address + fragp->fr_fix + 4;

  /* Offset is a signed value *2.  */
  if (val >= limit || val < -limit)
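/* For instance, with BITS == 11 (an unconditional Thumb branch) the narrow
   encoding reaches roughly +/-2 KiB from the branch, while a conditional
   branch uses BITS == 8 and reaches only about +/-256 bytes; anything
   further away has to be relaxed to the 32-bit form.  */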
/* Relax a machine dependent frag.  This returns the amount by which
   the current size of the frag should change.  */

arm_relax_frag (asection *sec, fragS *fragp, long stretch)
  oldsize = fragp->fr_var;
  switch (fragp->fr_subtype)
    case T_MNEM_ldr_pc2:
      newsize = relax_adr (fragp, sec, stretch);
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
      newsize = relax_immediate (fragp, 8, 2);
      newsize = relax_immediate (fragp, 5, 2);
      newsize = relax_immediate (fragp, 5, 1);
      newsize = relax_immediate (fragp, 5, 0);
      newsize = relax_adr (fragp, sec, stretch);
      newsize = relax_immediate (fragp, 8, 0);
      newsize = relax_branch (fragp, sec, 11, stretch);
      newsize = relax_branch (fragp, sec, 8, stretch);
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
      newsize = relax_immediate (fragp, 8, 2);
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      newsize = relax_immediate (fragp, 7, 2);
      newsize = relax_addsub (fragp, sec);

  fragp->fr_var = newsize;
  /* Freeze wide instructions that are at or before the same location as
     in the previous pass.  This avoids infinite loops.
     Don't freeze them unconditionally because targets may be artificially
     misaligned by the expansion of preceding frags.  */
  if (stretch <= 0 && newsize > 2)
      md_convert_frag (sec->owner, sec, fragp);

  return newsize - oldsize;
/* Round up a section size to the appropriate boundary.  */

md_section_align (segT segment ATTRIBUTE_UNUSED,
#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
  if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
      /* For a.out, force the section size to be aligned.  If we don't do
         this, BFD will align it for us, but it will not write out the
         final bytes of the section.  This may be a bug in BFD, but it is
         easier to fix it here since that is how the other a.out targets
         work.  */
      align = bfd_get_section_alignment (stdoutput, segment);
      size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment.  */

arm_handle_align (fragS * fragP)
  static char const arm_noop[2][2][4] =
      {0x00, 0x00, 0xa0, 0xe1},  /* LE */
      {0xe1, 0xa0, 0x00, 0x00},  /* BE */
      {0x00, 0xf0, 0x20, 0xe3},  /* LE */
      {0xe3, 0x20, 0xf0, 0x00},  /* BE */
  static char const thumb_noop[2][2][2] =
      {0xc0, 0x46},  /* LE */
      {0x46, 0xc0},  /* BE */
      {0x00, 0xbf},  /* LE */
      {0xbf, 0x00}   /* BE */
  static char const wide_thumb_noop[2][4] =
    {  /* Wide Thumb-2 */
      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
      {0xf3, 0xaf, 0x80, 0x00},  /* BE */

  unsigned bytes, fix, noop_size;
  const char *narrow_noop = NULL;

  if (fragP->fr_type != rs_align_code)

  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;

  gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);

  if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
          narrow_noop = thumb_noop[1][target_big_endian];
          noop = wide_thumb_noop[target_big_endian];
        noop = thumb_noop[0][target_big_endian];
      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k) != 0]
                     [target_big_endian];

  fragP->fr_var = noop_size;

  if (bytes & (noop_size - 1))
      fix = bytes & (noop_size - 1);
      insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
      memset (p, 0, fix);

  if (bytes & noop_size)
      /* Insert a narrow noop.  */
      memcpy (p, narrow_noop, noop_size);
      bytes -= noop_size;

  /* Use wide noops for the remainder.  */
  while (bytes >= noop_size)
      memcpy (p, noop, noop_size);
      bytes -= noop_size;

  fragP->fr_fix += fix;
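/* As an illustration: when 6 bytes of padding are needed in Thumb-2 code,
   the logic above emits one narrow NOP (2 bytes) followed by one wide NOP
   (4 bytes); in ARM code the same gap would be filled with zero bytes up to
   a 4-byte boundary and then whole ARM NOPs.  */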
/* Called from md_do_align.  Used to create an alignment
   frag in a code section.  */

arm_frag_align_code (int n, int max)
  /* We assume that there will never be a requirement
     to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes.  */
  if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
        _("alignments greater than %d bytes not supported in .text sections."),
        MAX_MEM_FOR_RS_ALIGN_CODE + 1);
      as_fatal ("%s", err_msg);

  p = frag_var (rs_align_code,
                MAX_MEM_FOR_RS_ALIGN_CODE,
                (relax_substateT) max,
/* Perform target specific initialisation of a frag.
   Note - despite the name this initialisation is not done when the frag
   is created, but only when its type is assigned.  A frag can be created
   and used a long time before its type is set, so beware of assuming that
   this initialisation is performed first.  */

arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
  /* Record whether this frag is in an ARM or a THUMB area.  */
  fragP->tc_frag_data.thumb_mode = thumb_mode;

#else  /* OBJ_ELF is defined.  */

arm_init_frag (fragS * fragP, int max_chars)
  /* If the current ARM vs THUMB mode has not already
     been recorded into this frag then do so now.  */
  if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
      fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

      /* Record a mapping symbol for alignment frags.  We will delete this
         later if the alignment ends up empty.  */
      switch (fragP->fr_type)
        case rs_align_test:
          mapping_state_2 (MAP_DATA, max_chars);
        case rs_align_code:
          mapping_state_2 (thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
/* When we change sections we need to issue a new mapping symbol.  */

arm_elf_change_section (void)
  segment_info_type *seginfo;

  /* Link an unlinked unwind index table section to the .text section.  */
  if (elf_section_type (now_seg) == SHT_ARM_EXIDX
      && elf_linked_to_section (now_seg) == NULL)
    elf_linked_to_section (now_seg) = text_section;

  if (!SEG_NORMAL (now_seg))

  seginfo = seg_info (now_seg);
  marked_pr_dependency = seginfo->tc_segment_info_data.marked_pr_dependency;
  mapstate = seginfo->tc_segment_info_data.mapstate;

arm_elf_section_type (const char * str, size_t len)
  if (len == 5 && strncmp (str, "exidx", 5) == 0)
    return SHT_ARM_EXIDX;
/* Code to deal with unwinding tables.  */

static void add_unwind_adjustsp (offsetT);

/* Generate any deferred unwind frame offset.  */

flush_pending_unwind (void)
  offset = unwind.pending_offset;
  unwind.pending_offset = 0;

  add_unwind_adjustsp (offset);
/* Add an opcode to this list for this function.  Two-byte opcodes should
   be passed as op[0] << 8 | op[1].  The list of opcodes is built in reverse
   order.  */

add_unwind_opcode (valueT op, int length)
  /* Add any deferred stack adjustment.  */
  if (unwind.pending_offset)
    flush_pending_unwind ();

  unwind.sp_restored = 0;

  if (unwind.opcode_count + length > unwind.opcode_alloc)
      unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
      if (unwind.opcodes)
        unwind.opcodes = xrealloc (unwind.opcodes,
                                   unwind.opcode_alloc);
        unwind.opcodes = xmalloc (unwind.opcode_alloc);

  unwind.opcodes[unwind.opcode_count] = op & 0xff;
  unwind.opcode_count++;
/* Add unwind opcodes to adjust the stack pointer.  */

add_unwind_adjustsp (offsetT offset)
  if (offset > 0x200)
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */

      /* Long form: 0xb2, uleb128.  */
      /* This might not fit in a word so add the individual bytes,
         remembering the list is built in reverse order.  */
      o = (valueT) ((offset - 0x204) >> 2);
        add_unwind_opcode (0, 1);

      /* Calculate the uleb128 encoding of the offset.  */
          bytes[n] = o & 0x7f;

      /* Add the insn.  */
        add_unwind_opcode (bytes[n - 1], 1);
      add_unwind_opcode (0xb2, 1);
  else if (offset > 0x100)
      /* Two short opcodes.  */
      add_unwind_opcode (0x3f, 1);
      op = (offset - 0x104) >> 2;
      add_unwind_opcode (op, 1);
  else if (offset > 0)
      /* Short opcode.  */
      op = (offset - 4) >> 2;
      add_unwind_opcode (op, 1);
  else if (offset < 0)
      while (offset > 0x100)
          add_unwind_opcode (0x7f, 1);
      op = ((offset - 4) >> 2) | 0x40;
      add_unwind_opcode (op, 1);
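/* Worked example: an adjustment of 0x208 bytes takes the long form.  The
   scaled operand is (0x208 - 0x204) >> 2 = 1, whose uleb128 encoding is the
   single byte 0x01, so the opcodes emitted (in execution order) are
   0xb2 0x01, meaning "vsp = vsp + 0x204 + (1 << 2)".  */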
/* Finish the list of unwind opcodes for this function.  */

finish_unwind_opcodes (void)
  if (unwind.fp_used)
      /* Adjust sp as necessary.  */
      unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
      flush_pending_unwind ();

      /* After restoring sp from the frame pointer.  */
      op = 0x90 | unwind.fp_reg;
      add_unwind_opcode (op, 1);

    flush_pending_unwind ();
/* Start an exception table entry.  If idx is nonzero this is an index table
   entry.  */

start_unwind_section (const segT text_seg, int idx)
  const char * text_name;
  const char * prefix;
  const char * prefix_once;
  const char * group_name;
  size_t sec_name_len;

      prefix = ELF_STRING_ARM_unwind;
      prefix_once = ELF_STRING_ARM_unwind_once;
      type = SHT_ARM_EXIDX;

      prefix = ELF_STRING_ARM_unwind_info;
      prefix_once = ELF_STRING_ARM_unwind_info_once;
      type = SHT_PROGBITS;

  text_name = segment_name (text_seg);
  if (streq (text_name, ".text"))

  if (strncmp (text_name, ".gnu.linkonce.t.",
               strlen (".gnu.linkonce.t.")) == 0)
      prefix = prefix_once;
      text_name += strlen (".gnu.linkonce.t.");

  prefix_len = strlen (prefix);
  text_len = strlen (text_name);
  sec_name_len = prefix_len + text_len;
  sec_name = xmalloc (sec_name_len + 1);
  memcpy (sec_name, prefix, prefix_len);
  memcpy (sec_name + prefix_len, text_name, text_len);
  sec_name[prefix_len + text_len] = '\0';

  /* Handle COMDAT group.  */
  if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
      group_name = elf_group_name (text_seg);
      if (group_name == NULL)
          as_bad (_("Group section `%s' has no group signature"),
                  segment_name (text_seg));
          ignore_rest_of_line ();
      flags |= SHF_GROUP;

  obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);

  /* Set the section link for index tables.  */
    elf_linked_to_section (now_seg) = text_seg;
/* Start an unwind table entry.  HAVE_DATA is nonzero if we have additional
   personality routine data.  Returns zero, or the index table value for
   an inline entry.  */

create_unwind_entry (int have_data)
  /* The current word of data.  */
  /* The number of bytes left in this word.  */

  finish_unwind_opcodes ();

  /* Remember the current text section.  */
  unwind.saved_seg = now_seg;
  unwind.saved_subseg = now_subseg;

  start_unwind_section (now_seg, 0);

  if (unwind.personality_routine == NULL)
      if (unwind.personality_index == -2)
          as_bad (_("handlerdata in cantunwind frame"));
          return 1; /* EXIDX_CANTUNWIND.  */

      /* Use a default personality routine if none is specified.  */
      if (unwind.personality_index == -1)
          if (unwind.opcode_count > 3)
            unwind.personality_index = 1;
            unwind.personality_index = 0;

      /* Space for the personality routine entry.  */
      if (unwind.personality_index == 0)
          if (unwind.opcode_count > 3)
            as_bad (_("too many unwind opcodes for personality routine 0"));

              /* All the data is inline in the index table.  */
              while (unwind.opcode_count > 0)
                  unwind.opcode_count--;
                  data = (data << 8) | unwind.opcodes[unwind.opcode_count];

              /* Pad with "finish" opcodes.  */
                data = (data << 8) | 0xb0;

          /* We get two opcodes "free" in the first word.  */
          size = unwind.opcode_count - 2;
      /* An extra byte is required for the opcode count.  */
      size = unwind.opcode_count + 1;

  size = (size + 3) >> 2;
    as_bad (_("too many unwind opcodes"));

  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);
  unwind.table_entry = expr_build_dot ();

  /* Allocate the table entry.  */
  ptr = frag_more ((size << 2) + 4);
  where = frag_now_fix () - ((size << 2) + 4);

  switch (unwind.personality_index)
      /* ??? Should this be a PLT generating relocation?  */
      /* Custom personality routine.  */
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
               BFD_RELOC_ARM_PREL31);

      /* Set the first byte to the number of additional words.  */

      /* ABI defined personality routines.  */
      /* Three opcodes bytes are packed into the first word.  */

      /* The size and first two opcode bytes go in the first word.  */
      data = ((0x80 + unwind.personality_index) << 8) | size;

      /* Should never happen.  */

  /* Pack the opcodes into words (MSB first), reversing the list at the same
     time.  */
  while (unwind.opcode_count > 0)
          md_number_to_chars (ptr, data, 4);

      unwind.opcode_count--;
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];

  /* Finish off the last word.  */
      /* Pad with "finish" opcodes.  */
        data = (data << 8) | 0xb0;

      md_number_to_chars (ptr, data, 4);
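/* Illustration of the packing above: with personality routine 0 and the
   opcode list 0xa1, 0xb0, 0xb0, the single table word written out is
   0x80a1b0b0 (the leading 0x80 byte selects the compact model, and the
   three opcode bytes follow MSB first).  */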
      /* Add an empty descriptor if there is no user-specified data.  */
      ptr = frag_more (4);
      md_number_to_chars (ptr, 0, 4);
/* Initialize the DWARF-2 unwind information for this procedure.  */

tc_arm_frame_initial_instructions (void)
  cfi_add_CFA_def_cfa (REG_SP, 0);
#endif /* OBJ_ELF */

/* Convert REGNAME to a DWARF-2 register number.  */

tc_arm_regname_to_dw2regnum (char *regname)
  int reg = arm_reg_parse (&regname, REG_TYPE_RN);

tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
  expr.X_op = O_secrel;
  expr.X_add_symbol = symbol;
  expr.X_add_number = 0;
  emit_expr (&expr, size);
/* MD interface: Symbol and relocation handling.  */

/* Return the address within the segment that a PC-relative fixup is
   relative to.  For ARM, PC-relative fixups applied to instructions
   are generally relative to the location of the fixup plus 8 bytes.
   Thumb branches are offset by 4, and Thumb loads relative to PC
   require special handling.  */

long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     For WinCE we skip the bias for externals as well, since this
     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
          || (arm_force_relocation (fixP)
#ifdef TE_WINCE
              && !S_IS_EXTERNAL (fixP->fx_addsy)
#endif
              )))
    base = 0;

  switch (fixP->fx_r_type)
    {
      /* PC relative addressing on the Thumb is slightly odd as the
         bottom two bits of the PC are forced to zero for the
         calculation.  This happens *after* application of the
         pipeline offset.  However, Thumb adrl already adjusts for
         this, so we need not do it again.  */
    case BFD_RELOC_ARM_THUMB_ADD:
      return base & ~3;

    case BFD_RELOC_ARM_THUMB_OFFSET:
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
    case BFD_RELOC_ARM_T32_ADD_PC12:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      return (base + 4) & ~3;

      /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
      return base + 4;

    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      if (fixP->fx_addsy
          && ARM_IS_FUNC (fixP->fx_addsy)
          && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
        base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 4;

      /* BLX is like branches above, but forces the low two bits of PC to
         zero.  */
    case BFD_RELOC_THUMB_PCREL_BLX:
      if (fixP->fx_addsy
          && THUMB_IS_FUNC (fixP->fx_addsy)
          && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
        base = fixP->fx_where + fixP->fx_frag->fr_address;
      return (base + 4) & ~3;

      /* ARM mode branches are offset by +8.  However, the Windows CE
         loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BLX:
      if (fixP->fx_addsy
          && ARM_IS_FUNC (fixP->fx_addsy)
          && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
        base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_CALL:
      if (fixP->fx_addsy
          && THUMB_IS_FUNC (fixP->fx_addsy)
          && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
        base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PLT32:
#ifdef TE_WINCE
      /* When handling fixups immediately, because we have already
         discovered the value of a symbol, or the address of the frag involved
         we must account for the offset by +8, as the OS loader will never see the reloc.
         see fixup_segment() in write.c
         The S_IS_EXTERNAL test handles the case of global symbols.
         Those need the calculated base, not just the pipe compensation the linker will need.  */
      if (fixP->fx_pcrel
          && fixP->fx_addsy != NULL
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
          && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
        return base + 8;
      return base;
#else
      return base + 8;
#endif

      /* ARM mode loads relative to PC are also offset by +8.  Unlike
         branches, the Windows CE loader *does* expect the relocation
         to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      return base + 8;

      /* Other PC-relative relocations are un-offset.  */
    default:
      return base;
    }
}
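/* md_pcrel_from_section above only supplies the pipeline bias: +8 for
   ARM-state instructions, +4 for Thumb, with the low bits of the PC masked
   off for loads and BLX.  The stand-alone sketch below (kept out of the
   build) shows how that bias feeds into the 24-bit word offset of an
   ARM-state B/BL; encode_arm_branch_offset is an illustrative name, not a
   GAS function, and range checking is omitted.  */
#if 0
static unsigned long
encode_arm_branch_offset (unsigned long insn_addr, unsigned long target)
{
  /* The ARM pipeline makes the PC read as the instruction address plus 8,
     so the stored displacement is relative to insn_addr + 8 and is kept
     as a word (4-byte) count in the low 24 bits.  */
  long rel = (long) target - (long) (insn_addr + 8);

  return ((unsigned long) rel >> 2) & 0x00ffffff;
}
#endif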
/* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
   Otherwise we have no need to default values of symbols.  */

symbolS *
md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
{
#ifdef OBJ_ELF
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
    {
      if (!GOT_symbol)
        {
          if (symbol_find (name))
            as_bad (_("GOT already in the symbol table"));

          GOT_symbol = symbol_new (name, undefined_section,
                                   (valueT) 0, & zero_address_frag);
        }

      return GOT_symbol;
    }
#endif

  return NULL;
}
/* Subroutine of md_apply_fix.  Check to see if an immediate can be
   computed as two separate immediate values, added together.  We
   already know that this value cannot be computed by just one ARM
   instruction.  */

static unsigned int
validate_immediate_twopart (unsigned int   val,
                            unsigned int * highpart)
{
  unsigned int a;
  unsigned int i;

  for (i = 0; i < 32; i += 2)
    if (((a = rotate_left (val, i)) & 0xff) != 0)
      {
        if (a & 0xff00)
          {
            if (a & ~0xffff)
              continue;
            * highpart = (a >> 8) | ((i + 24) << 7);
          }
        else if (a & 0xff0000)
          {
            if (a & 0xff000000)
              continue;
            * highpart = (a >> 16) | ((i + 16) << 7);
          }
        else
          {
            gas_assert (a & 0xff000000);
            * highpart = (a >> 24) | ((i + 8) << 7);
          }

        return (a & 0xff) | (i << 7);
      }

  return FAIL;
}
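/* validate_immediate_twopart above leans on the usual ARM rule that a
   data-processing immediate is an 8-bit constant rotated right by an even
   amount.  A minimal stand-alone check in that spirit is sketched below
   (kept out of the build, and assuming 32-bit unsigned int);
   arm_immediate_ok is an illustrative name -- the assembler itself uses
   encode_arm_immediate.  */
#if 0
static int
arm_immediate_ok (unsigned int val)
{
  unsigned int rot;

  for (rot = 0; rot < 32; rot += 2)
    {
      /* Rotate VAL left by ROT bits; if the result fits in 8 bits then
         VAL is encodable with rotation field ROT / 2.  */
      unsigned int a = (val << rot) | (val >> ((32 - rot) & 31));

      if ((a & ~0xffu) == 0)
        return 1;
    }

  return 0;
}
#endif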
static int
validate_offset_imm (unsigned int val, int hwse)
{
  if ((hwse && val > 255) || val > 4095)
    return FAIL;
  return val;
}
/* Subroutine of md_apply_fix.  Do those data_ops which can take a
   negative immediate constant by altering the instruction.  A bit of
   a hack really.
        MOV <-> MVN
        AND <-> BIC
        ADC <-> SBC
        by inverting the second operand, and
        ADD <-> SUB
        CMP <-> CMN
        by negating the second operand.  */

static int
negate_data_op (unsigned long * instruction,
                unsigned long   value)
{
  int op, new_inst;
  unsigned long negated, inverted;

  negated = encode_arm_immediate (-value);
  inverted = encode_arm_immediate (~value);

  op = (*instruction >> DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
      /* First negates.  */
    case OPCODE_SUB:             /* ADD <-> SUB  */
      new_inst = OPCODE_ADD;
      value = negated;
      break;

    case OPCODE_ADD:
      new_inst = OPCODE_SUB;
      value = negated;
      break;

    case OPCODE_CMP:             /* CMP <-> CMN  */
      new_inst = OPCODE_CMN;
      value = negated;
      break;

    case OPCODE_CMN:
      new_inst = OPCODE_CMP;
      value = negated;
      break;

      /* Now Inverted ops.  */
    case OPCODE_MOV:             /* MOV <-> MVN  */
      new_inst = OPCODE_MVN;
      value = inverted;
      break;

    case OPCODE_MVN:
      new_inst = OPCODE_MOV;
      value = inverted;
      break;

    case OPCODE_AND:             /* AND <-> BIC  */
      new_inst = OPCODE_BIC;
      value = inverted;
      break;

    case OPCODE_BIC:
      new_inst = OPCODE_AND;
      value = inverted;
      break;

    case OPCODE_ADC:             /* ADC <-> SBC  */
      new_inst = OPCODE_SBC;
      value = inverted;
      break;

    case OPCODE_SBC:
      new_inst = OPCODE_ADC;
      value = inverted;
      break;

      /* We cannot do anything.  */
    default:
      return FAIL;
    }

  if (value == (unsigned) FAIL)
    return FAIL;

  *instruction &= OPCODE_MASK;
  *instruction |= new_inst << DATA_OP_SHIFT;
  return value;
}
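/* negate_data_op above swaps an opcode for its dual so that an otherwise
   unencodable constant becomes encodable: ADD/SUB and CMP/CMN use the
   negated value, MOV/MVN, AND/BIC and ADC/SBC use the inverted value.
   The stand-alone sketch below (kept out of the build) checks the ADD/SUB
   identity on plain integers; negate_add_imm is an illustrative name
   only.  */
#if 0
/* "add rd, rn, #imm" with an unencodable IMM can instead be assembled as
   "sub rd, rn, #-imm": in two's complement, rn + imm == rn - (-imm).  */
static unsigned int
negate_add_imm (unsigned int rn, int imm)
{
  unsigned int as_add = rn + (unsigned int) imm;
  unsigned int as_sub = rn - (unsigned int) (-imm);

  /* Both forms compute the same result.  */
  return as_add == as_sub ? as_add : 0;
}
#endif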
/* Like negate_data_op, but for Thumb-2.  */

static unsigned int
thumb32_negate_data_op (offsetT *instruction, unsigned int value)
{
  int op, new_inst;
  int rd;
  unsigned int negated, inverted;

  negated = encode_thumb32_immediate (-value);
  inverted = encode_thumb32_immediate (~value);

  rd = (*instruction >> 8) & 0xf;
  op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
      /* ADD <-> SUB.  Includes CMP <-> CMN.  */
    case T2_OPCODE_SUB:
      new_inst = T2_OPCODE_ADD;
      value = negated;
      break;

    case T2_OPCODE_ADD:
      new_inst = T2_OPCODE_SUB;
      value = negated;
      break;

      /* ORR <-> ORN.  Includes MOV <-> MVN.  */
    case T2_OPCODE_ORR:
      new_inst = T2_OPCODE_ORN;
      value = inverted;
      break;

    case T2_OPCODE_ORN:
      new_inst = T2_OPCODE_ORR;
      value = inverted;
      break;

      /* AND <-> BIC.  TST has no inverted equivalent.  */
    case T2_OPCODE_AND:
      new_inst = T2_OPCODE_BIC;
      if (rd == 15)
        value = FAIL;
      else
        value = inverted;
      break;

    case T2_OPCODE_BIC:
      new_inst = T2_OPCODE_AND;
      value = inverted;
      break;

      /* ADC <-> SBC.  */
    case T2_OPCODE_ADC:
      new_inst = T2_OPCODE_SBC;
      value = inverted;
      break;

    case T2_OPCODE_SBC:
      new_inst = T2_OPCODE_ADC;
      value = inverted;
      break;

      /* We cannot do anything.  */
    default:
      return FAIL;
    }

  if (value == (unsigned int) FAIL)
    return FAIL;

  *instruction &= T2_OPCODE_MASK;
  *instruction |= new_inst << T2_DATA_OP_SHIFT;
  return value;
}
/* Read a 32-bit thumb instruction from buf.  */

static unsigned long
get_thumb32_insn (char * buf)
{
  unsigned long insn;
  insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
  insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);

  return insn;
}
/* We usually want to set the low bit on the address of thumb function
   symbols.  In particular .word foo - . should have the low bit set.
   Generic code tries to fold the difference of two symbols to
   a constant.  Prevent this and force a relocation when the first symbol
   is a Thumb function.  */

int
arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
{
  if (op == O_subtract
      && l->X_op == O_symbol
      && r->X_op == O_symbol
      && THUMB_IS_FUNC (l->X_add_symbol))
    {
      l->X_op = O_subtract;
      l->X_op_symbol = r->X_add_symbol;
      l->X_add_number -= r->X_add_number;
      return TRUE;
    }

  /* Process as normal.  */
  return FALSE;
}
19378 md_apply_fix (fixS
* fixP
,
19382 offsetT value
= * valP
;
19384 unsigned int newimm
;
19385 unsigned long temp
;
19387 char * buf
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
19389 gas_assert (fixP
->fx_r_type
<= BFD_RELOC_UNUSED
);
19391 /* Note whether this will delete the relocation. */
19393 if (fixP
->fx_addsy
== 0 && !fixP
->fx_pcrel
)
19396 /* On a 64-bit host, silently truncate 'value' to 32 bits for
19397 consistency with the behaviour on 32-bit hosts. Remember value
19399 value
&= 0xffffffff;
19400 value
^= 0x80000000;
19401 value
-= 0x80000000;
19404 fixP
->fx_addnumber
= value
;
19406 /* Same treatment for fixP->fx_offset. */
19407 fixP
->fx_offset
&= 0xffffffff;
19408 fixP
->fx_offset
^= 0x80000000;
19409 fixP
->fx_offset
-= 0x80000000;
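  /* The three statements above truncate a possibly 64-bit host value to
     32 bits and then sign-extend it: the mask keeps the low 32 bits, and
     the XOR/subtract pair re-propagates bit 31 into the upper bits.  A
     stand-alone version of the idiom is sketched below (kept out of the
     build); sign_extend_32 is an illustrative name only.  */
#if 0
static long long
sign_extend_32 (long long v)
{
  v &= 0xffffffff;      /* Keep only the low 32 bits.  */
  v ^= 0x80000000;      /* Toggle bit 31...  */
  v -= 0x80000000;      /* ...and subtract so it becomes the sign.  */
  return v;             /* e.g. 0xfffffffe -> -2.  */
}
#endif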
19411 switch (fixP
->fx_r_type
)
19413 case BFD_RELOC_NONE
:
19414 /* This will need to go in the object file. */
19418 case BFD_RELOC_ARM_IMMEDIATE
:
19419 /* We claim that this fixup has been processed here,
19420 even if in fact we generate an error because we do
19421 not have a reloc for it, so tc_gen_reloc will reject it. */
19425 && ! S_IS_DEFINED (fixP
->fx_addsy
))
19427 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19428 _("undefined symbol %s used as an immediate value"),
19429 S_GET_NAME (fixP
->fx_addsy
));
19434 && S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
19436 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19437 _("symbol %s is in a different section"),
19438 S_GET_NAME (fixP
->fx_addsy
));
19442 newimm
= encode_arm_immediate (value
);
19443 temp
= md_chars_to_number (buf
, INSN_SIZE
);
19445 /* If the instruction will fail, see if we can fix things up by
19446 changing the opcode. */
19447 if (newimm
== (unsigned int) FAIL
19448 && (newimm
= negate_data_op (&temp
, value
)) == (unsigned int) FAIL
)
19450 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19451 _("invalid constant (%lx) after fixup"),
19452 (unsigned long) value
);
19456 newimm
|= (temp
& 0xfffff000);
19457 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
19460 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
19462 unsigned int highpart
= 0;
19463 unsigned int newinsn
= 0xe1a00000; /* nop. */
19466 && ! S_IS_DEFINED (fixP
->fx_addsy
))
19468 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19469 _("undefined symbol %s used as an immediate value"),
19470 S_GET_NAME (fixP
->fx_addsy
));
19475 && S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
19477 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19478 _("symbol %s is in a different section"),
19479 S_GET_NAME (fixP
->fx_addsy
));
19483 newimm
= encode_arm_immediate (value
);
19484 temp
= md_chars_to_number (buf
, INSN_SIZE
);
19486 /* If the instruction will fail, see if we can fix things up by
19487 changing the opcode. */
19488 if (newimm
== (unsigned int) FAIL
19489 && (newimm
= negate_data_op (& temp
, value
)) == (unsigned int) FAIL
)
19491 /* No ? OK - try using two ADD instructions to generate
19493 newimm
= validate_immediate_twopart (value
, & highpart
);
19495 /* Yes - then make sure that the second instruction is
19497 if (newimm
!= (unsigned int) FAIL
)
19499 /* Still No ? Try using a negated value. */
19500 else if ((newimm
= validate_immediate_twopart (- value
, & highpart
)) != (unsigned int) FAIL
)
19501 temp
= newinsn
= (temp
& OPCODE_MASK
) | OPCODE_SUB
<< DATA_OP_SHIFT
;
19502 /* Otherwise - give up. */
19505 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19506 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
19511 /* Replace the first operand in the 2nd instruction (which
19512 is the PC) with the destination register. We have
19513 already added in the PC in the first instruction and we
19514 do not want to do it again. */
19515 newinsn
&= ~ 0xf0000;
19516 newinsn
|= ((newinsn
& 0x0f000) << 4);
19519 newimm
|= (temp
& 0xfffff000);
19520 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
19522 highpart
|= (newinsn
& 0xfffff000);
19523 md_number_to_chars (buf
+ INSN_SIZE
, (valueT
) highpart
, INSN_SIZE
);
19527 case BFD_RELOC_ARM_OFFSET_IMM
:
19528 if (!fixP
->fx_done
&& seg
->use_rela_p
)
19531 case BFD_RELOC_ARM_LITERAL
:
19537 if (validate_offset_imm (value
, 0) == FAIL
)
19539 if (fixP
->fx_r_type
== BFD_RELOC_ARM_LITERAL
)
19540 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19541 _("invalid literal constant: pool needs to be closer"));
19543 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19544 _("bad immediate value for offset (%ld)"),
19549 newval
= md_chars_to_number (buf
, INSN_SIZE
);
19550 newval
&= 0xff7ff000;
19551 newval
|= value
| (sign
? INDEX_UP
: 0);
19552 md_number_to_chars (buf
, newval
, INSN_SIZE
);
19555 case BFD_RELOC_ARM_OFFSET_IMM8
:
19556 case BFD_RELOC_ARM_HWLITERAL
:
19562 if (validate_offset_imm (value
, 1) == FAIL
)
19564 if (fixP
->fx_r_type
== BFD_RELOC_ARM_HWLITERAL
)
19565 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19566 _("invalid literal constant: pool needs to be closer"));
19568 as_bad (_("bad immediate value for 8-bit offset (%ld)"),
19573 newval
= md_chars_to_number (buf
, INSN_SIZE
);
19574 newval
&= 0xff7ff0f0;
19575 newval
|= ((value
>> 4) << 8) | (value
& 0xf) | (sign
? INDEX_UP
: 0);
19576 md_number_to_chars (buf
, newval
, INSN_SIZE
);
19579 case BFD_RELOC_ARM_T32_OFFSET_U8
:
19580 if (value
< 0 || value
> 1020 || value
% 4 != 0)
19581 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19582 _("bad immediate value for offset (%ld)"), (long) value
);
19585 newval
= md_chars_to_number (buf
+2, THUMB_SIZE
);
19587 md_number_to_chars (buf
+2, newval
, THUMB_SIZE
);
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
      /* This is a complicated relocation used for all varieties of Thumb32
         load/store instruction with immediate offset:

         1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
                                                   *4, optional writeback(W)
                                                   (doubleword load/store)

         1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
         1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
         1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
         1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
         1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit

         Uppercase letters indicate bits that are already encoded at
         this point.  Lowercase letters are our problem.  For the
         second block of instructions, the secondary opcode nybble
         (bits 8..11) is present, and bit 23 is zero, even if this is
         a PC-relative operation.  */
19609 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
19611 newval
|= md_chars_to_number (buf
+THUMB_SIZE
, THUMB_SIZE
);
19613 if ((newval
& 0xf0000000) == 0xe0000000)
19615 /* Doubleword load/store: 8-bit offset, scaled by 4. */
19617 newval
|= (1 << 23);
19620 if (value
% 4 != 0)
19622 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19623 _("offset not a multiple of 4"));
19629 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19630 _("offset out of range"));
19635 else if ((newval
& 0x000f0000) == 0x000f0000)
19637 /* PC-relative, 12-bit offset. */
19639 newval
|= (1 << 23);
19644 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19645 _("offset out of range"));
19650 else if ((newval
& 0x00000100) == 0x00000100)
19652 /* Writeback: 8-bit, +/- offset. */
19654 newval
|= (1 << 9);
19659 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19660 _("offset out of range"));
19665 else if ((newval
& 0x00000f00) == 0x00000e00)
19667 /* T-instruction: positive 8-bit offset. */
19668 if (value
< 0 || value
> 0xff)
19670 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19671 _("offset out of range"));
19679 /* Positive 12-bit or negative 8-bit offset. */
19683 newval
|= (1 << 23);
19693 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19694 _("offset out of range"));
19701 md_number_to_chars (buf
, (newval
>> 16) & 0xffff, THUMB_SIZE
);
19702 md_number_to_chars (buf
+ THUMB_SIZE
, newval
& 0xffff, THUMB_SIZE
);
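      /* The bit tests in this case pick between the Thumb-2 load/store
         encodings listed in the comment above: doubleword (top nybble 0xe),
         PC-relative 12-bit (Rn == PC), writeback 8-bit (bit 8 set), and the
         remaining T / positive 12-bit / negative 8-bit forms.  A compact
         stand-alone classifier in the same spirit is sketched below (kept
         out of the build); classify_t32_ldst and the enum names are
         illustrative only.  */
#if 0
enum t32_ldst_form
{
  T32_LDST_DOUBLEWORD,          /* 8-bit offset, scaled by 4.  */
  T32_LDST_PC_12BIT,            /* PC-relative, 12-bit offset.  */
  T32_LDST_WRITEBACK,           /* 8-bit offset, pre/post indexed.  */
  T32_LDST_OTHER                /* T form, positive 12-bit, negative 8-bit.  */
};

static enum t32_ldst_form
classify_t32_ldst (unsigned long insn)  /* Both halfwords, high half first.  */
{
  if ((insn & 0xf0000000) == 0xe0000000)
    return T32_LDST_DOUBLEWORD;
  if ((insn & 0x000f0000) == 0x000f0000)
    return T32_LDST_PC_12BIT;
  if ((insn & 0x00000100) == 0x00000100)
    return T32_LDST_WRITEBACK;
  return T32_LDST_OTHER;
}
#endif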
19705 case BFD_RELOC_ARM_SHIFT_IMM
:
19706 newval
= md_chars_to_number (buf
, INSN_SIZE
);
19707 if (((unsigned long) value
) > 32
19709 && (((newval
& 0x60) == 0) || (newval
& 0x60) == 0x60)))
19711 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19712 _("shift expression is too large"));
19717 /* Shifts of zero must be done as lsl. */
19719 else if (value
== 32)
19721 newval
&= 0xfffff07f;
19722 newval
|= (value
& 0x1f) << 7;
19723 md_number_to_chars (buf
, newval
, INSN_SIZE
);
19726 case BFD_RELOC_ARM_T32_IMMEDIATE
:
19727 case BFD_RELOC_ARM_T32_ADD_IMM
:
19728 case BFD_RELOC_ARM_T32_IMM12
:
19729 case BFD_RELOC_ARM_T32_ADD_PC12
:
19730 /* We claim that this fixup has been processed here,
19731 even if in fact we generate an error because we do
19732 not have a reloc for it, so tc_gen_reloc will reject it. */
19736 && ! S_IS_DEFINED (fixP
->fx_addsy
))
19738 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19739 _("undefined symbol %s used as an immediate value"),
19740 S_GET_NAME (fixP
->fx_addsy
));
19744 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
19746 newval
|= md_chars_to_number (buf
+2, THUMB_SIZE
);
19749 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
19750 || fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
19752 newimm
= encode_thumb32_immediate (value
);
19753 if (newimm
== (unsigned int) FAIL
)
19754 newimm
= thumb32_negate_data_op (&newval
, value
);
19756 if (fixP
->fx_r_type
!= BFD_RELOC_ARM_T32_IMMEDIATE
19757 && newimm
== (unsigned int) FAIL
)
19759 /* Turn add/sum into addw/subw. */
19760 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
19761 newval
= (newval
& 0xfeffffff) | 0x02000000;
19763 /* 12 bit immediate for addw/subw. */
19767 newval
^= 0x00a00000;
19770 newimm
= (unsigned int) FAIL
;
19775 if (newimm
== (unsigned int)FAIL
)
19777 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19778 _("invalid constant (%lx) after fixup"),
19779 (unsigned long) value
);
19783 newval
|= (newimm
& 0x800) << 15;
19784 newval
|= (newimm
& 0x700) << 4;
19785 newval
|= (newimm
& 0x0ff);
19787 md_number_to_chars (buf
, (valueT
) ((newval
>> 16) & 0xffff), THUMB_SIZE
);
19788 md_number_to_chars (buf
+2, (valueT
) (newval
& 0xffff), THUMB_SIZE
);
19791 case BFD_RELOC_ARM_SMC
:
19792 if (((unsigned long) value
) > 0xffff)
19793 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19794 _("invalid smc expression"));
19795 newval
= md_chars_to_number (buf
, INSN_SIZE
);
19796 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
19797 md_number_to_chars (buf
, newval
, INSN_SIZE
);
19800 case BFD_RELOC_ARM_SWI
:
19801 if (fixP
->tc_fix_data
!= 0)
19803 if (((unsigned long) value
) > 0xff)
19804 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19805 _("invalid swi expression"));
19806 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
19808 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
19812 if (((unsigned long) value
) > 0x00ffffff)
19813 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19814 _("invalid swi expression"));
19815 newval
= md_chars_to_number (buf
, INSN_SIZE
);
19817 md_number_to_chars (buf
, newval
, INSN_SIZE
);
19821 case BFD_RELOC_ARM_MULTI
:
19822 if (((unsigned long) value
) > 0xffff)
19823 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19824 _("invalid expression in load/store multiple"));
19825 newval
= value
| md_chars_to_number (buf
, INSN_SIZE
);
19826 md_number_to_chars (buf
, newval
, INSN_SIZE
);
19830 case BFD_RELOC_ARM_PCREL_CALL
:
19832 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
19834 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
19835 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
19836 && THUMB_IS_FUNC (fixP
->fx_addsy
))
19837 /* Flip the bl to blx. This is a simple flip
19838 bit here because we generate PCREL_CALL for
19839 unconditional bls. */
19841 newval
= md_chars_to_number (buf
, INSN_SIZE
);
19842 newval
= newval
| 0x10000000;
19843 md_number_to_chars (buf
, newval
, INSN_SIZE
);
19849 goto arm_branch_common
;
19851 case BFD_RELOC_ARM_PCREL_JUMP
:
19852 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
19854 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
19855 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
19856 && THUMB_IS_FUNC (fixP
->fx_addsy
))
19858 /* This would map to a bl<cond>, b<cond>,
19859 b<always> to a Thumb function. We
19860 need to force a relocation for this particular
19862 newval
= md_chars_to_number (buf
, INSN_SIZE
);
19866 case BFD_RELOC_ARM_PLT32
:
19868 case BFD_RELOC_ARM_PCREL_BRANCH
:
19870 goto arm_branch_common
;
19872 case BFD_RELOC_ARM_PCREL_BLX
:
19875 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
19877 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
19878 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
19879 && ARM_IS_FUNC (fixP
->fx_addsy
))
19881 /* Flip the blx to a bl and warn. */
19882 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
19883 newval
= 0xeb000000;
19884 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
19885 _("blx to '%s' an ARM ISA state function changed to bl"),
19887 md_number_to_chars (buf
, newval
, INSN_SIZE
);
19893 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
19894 fixP
->fx_r_type
= BFD_RELOC_ARM_PCREL_CALL
;
      /* We are going to store value (shifted right by two) in the
         instruction, in a 24 bit, signed field.  Bits 26 through 32 must be
         either all clear or all set, and bit 0 must be clear.  For B/BL,
         bit 1 must also be clear.  */
19903 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19904 _("misaligned branch destination"));
19905 if ((value
& (offsetT
)0xfe000000) != (offsetT
)0
19906 && (value
& (offsetT
)0xfe000000) != (offsetT
)0xfe000000)
19907 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19908 _("branch out of range"));
19910 if (fixP
->fx_done
|| !seg
->use_rela_p
)
19912 newval
= md_chars_to_number (buf
, INSN_SIZE
);
19913 newval
|= (value
>> 2) & 0x00ffffff;
19914 /* Set the H bit on BLX instructions. */
19918 newval
|= 0x01000000;
19920 newval
&= ~0x01000000;
19922 md_number_to_chars (buf
, newval
, INSN_SIZE
);
19926 case BFD_RELOC_THUMB_PCREL_BRANCH7
: /* CBZ */
19927 /* CBZ can only branch forward. */
19929 /* Attempts to use CBZ to branch to the next instruction
19930 (which, strictly speaking, are prohibited) will be turned into
19933 FIXME: It may be better to remove the instruction completely and
19934 perform relaxation. */
19937 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
19938 newval
= 0xbf00; /* NOP encoding T1 */
19939 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
19944 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19945 _("branch out of range"));
19947 if (fixP
->fx_done
|| !seg
->use_rela_p
)
19949 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
19950 newval
|= ((value
& 0x3e) << 2) | ((value
& 0x40) << 3);
19951 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
19956 case BFD_RELOC_THUMB_PCREL_BRANCH9
: /* Conditional branch. */
19957 if ((value
& ~0xff) && ((value
& ~0xff) != ~0xff))
19958 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19959 _("branch out of range"));
19961 if (fixP
->fx_done
|| !seg
->use_rela_p
)
19963 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
19964 newval
|= (value
& 0x1ff) >> 1;
19965 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
19969 case BFD_RELOC_THUMB_PCREL_BRANCH12
: /* Unconditional branch. */
19970 if ((value
& ~0x7ff) && ((value
& ~0x7ff) != ~0x7ff))
19971 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19972 _("branch out of range"));
19974 if (fixP
->fx_done
|| !seg
->use_rela_p
)
19976 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
19977 newval
|= (value
& 0xfff) >> 1;
19978 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
19982 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
19984 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
19985 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
19986 && S_IS_DEFINED (fixP
->fx_addsy
)
19987 && ARM_IS_FUNC (fixP
->fx_addsy
)
19988 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
19990 /* Force a relocation for a branch 20 bits wide. */
19993 if ((value
& ~0x1fffff) && ((value
& ~0x1fffff) != ~0x1fffff))
19994 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19995 _("conditional branch out of range"));
19997 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20000 addressT S
, J1
, J2
, lo
, hi
;
20002 S
= (value
& 0x00100000) >> 20;
20003 J2
= (value
& 0x00080000) >> 19;
20004 J1
= (value
& 0x00040000) >> 18;
20005 hi
= (value
& 0x0003f000) >> 12;
20006 lo
= (value
& 0x00000ffe) >> 1;
20008 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
20009 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
20010 newval
|= (S
<< 10) | hi
;
20011 newval2
|= (J1
<< 13) | (J2
<< 11) | lo
;
20012 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
20013 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
20017 case BFD_RELOC_THUMB_PCREL_BLX
:
20019 /* If there is a blx from a thumb state function to
20020 another thumb function flip this to a bl and warn
20024 && S_IS_DEFINED (fixP
->fx_addsy
)
20025 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
20026 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
20027 && THUMB_IS_FUNC (fixP
->fx_addsy
))
20029 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
20030 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
20031 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
20033 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
20034 newval
= newval
| 0x1000;
20035 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
20036 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
20041 goto thumb_bl_common
;
20043 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
20045 /* A bl from Thumb state ISA to an internal ARM state function
20046 is converted to a blx. */
20048 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
20049 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
20050 && S_IS_DEFINED (fixP
->fx_addsy
)
20051 && ARM_IS_FUNC (fixP
->fx_addsy
)
20052 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
20054 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
20055 newval
= newval
& ~0x1000;
20056 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
20057 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BLX
;
20064 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
&&
20065 fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
20066 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
20069 if ((value
& ~0x3fffff) && ((value
& ~0x3fffff) != ~0x3fffff))
20070 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20071 _("branch out of range"));
20073 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
20074 /* For a BLX instruction, make sure that the relocation is rounded up
20075 to a word boundary. This follows the semantics of the instruction
20076 which specifies that bit 1 of the target address will come from bit
20077 1 of the base address. */
20078 value
= (value
+ 1) & ~ 1;
20080 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20084 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
20085 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
20086 newval
|= (value
& 0x7fffff) >> 12;
20087 newval2
|= (value
& 0xfff) >> 1;
20088 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
20089 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
20093 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
20094 if ((value
& ~0x1ffffff) && ((value
& ~0x1ffffff) != ~0x1ffffff))
20095 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20096 _("branch out of range"));
20098 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20101 addressT S
, I1
, I2
, lo
, hi
;
20103 S
= (value
& 0x01000000) >> 24;
20104 I1
= (value
& 0x00800000) >> 23;
20105 I2
= (value
& 0x00400000) >> 22;
20106 hi
= (value
& 0x003ff000) >> 12;
20107 lo
= (value
& 0x00000ffe) >> 1;
20112 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
20113 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
20114 newval
|= (S
<< 10) | hi
;
20115 newval2
|= (I1
<< 13) | (I2
<< 11) | lo
;
20116 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
20117 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
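          /* The code above splits the 25-bit Thumb branch displacement into
             the S, I1, I2, hi and lo fields that land in the two halfwords
             of the instruction.  The stand-alone sketch below (kept out of
             the build) performs the same field extraction on a plain value;
             split_t2_branch25 and the struct name are illustrative only.  */
#if 0
struct t2_branch25_fields
{
  unsigned int s, i1, i2, hi, lo;
};

static struct t2_branch25_fields
split_t2_branch25 (unsigned long value)
{
  struct t2_branch25_fields f;

  f.s  = (value & 0x01000000) >> 24;    /* Sign bit of the displacement.  */
  f.i1 = (value & 0x00800000) >> 23;
  f.i2 = (value & 0x00400000) >> 22;
  f.hi = (value & 0x003ff000) >> 12;    /* imm10, first halfword.  */
  f.lo = (value & 0x00000ffe) >> 1;     /* imm11, second halfword.  */
  return f;
}
#endif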
20122 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20123 md_number_to_chars (buf
, value
, 1);
20127 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20128 md_number_to_chars (buf
, value
, 2);
20132 case BFD_RELOC_ARM_TLS_GD32
:
20133 case BFD_RELOC_ARM_TLS_LE32
:
20134 case BFD_RELOC_ARM_TLS_IE32
:
20135 case BFD_RELOC_ARM_TLS_LDM32
:
20136 case BFD_RELOC_ARM_TLS_LDO32
:
20137 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
20140 case BFD_RELOC_ARM_GOT32
:
20141 case BFD_RELOC_ARM_GOTOFF
:
20142 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20143 md_number_to_chars (buf
, 0, 4);
20146 case BFD_RELOC_ARM_TARGET2
:
20147 /* TARGET2 is not partial-inplace, so we need to write the
20148 addend here for REL targets, because it won't be written out
20149 during reloc processing later. */
20150 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20151 md_number_to_chars (buf
, fixP
->fx_offset
, 4);
20155 case BFD_RELOC_RVA
:
20157 case BFD_RELOC_ARM_TARGET1
:
20158 case BFD_RELOC_ARM_ROSEGREL32
:
20159 case BFD_RELOC_ARM_SBREL32
:
20160 case BFD_RELOC_32_PCREL
:
20162 case BFD_RELOC_32_SECREL
:
20164 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20166 /* For WinCE we only do this for pcrel fixups. */
20167 if (fixP
->fx_done
|| fixP
->fx_pcrel
)
20169 md_number_to_chars (buf
, value
, 4);
20173 case BFD_RELOC_ARM_PREL31
:
20174 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20176 newval
= md_chars_to_number (buf
, 4) & 0x80000000;
20177 if ((value
^ (value
>> 1)) & 0x40000000)
20179 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20180 _("rel31 relocation overflow"));
20182 newval
|= value
& 0x7fffffff;
20183 md_number_to_chars (buf
, newval
, 4);
20188 case BFD_RELOC_ARM_CP_OFF_IMM
:
20189 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
20190 if (value
< -1023 || value
> 1023 || (value
& 3))
20191 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20192 _("co-processor offset out of range"));
20197 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
20198 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
20199 newval
= md_chars_to_number (buf
, INSN_SIZE
);
20201 newval
= get_thumb32_insn (buf
);
20202 newval
&= 0xff7fff00;
20203 newval
|= (value
>> 2) | (sign
? INDEX_UP
: 0);
20204 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
20205 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
20206 md_number_to_chars (buf
, newval
, INSN_SIZE
);
20208 put_thumb32_insn (buf
, newval
);
20211 case BFD_RELOC_ARM_CP_OFF_IMM_S2
:
20212 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
:
20213 if (value
< -255 || value
> 255)
20214 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20215 _("co-processor offset out of range"));
20217 goto cp_off_common
;
20219 case BFD_RELOC_ARM_THUMB_OFFSET
:
20220 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
20221 /* Exactly what ranges, and where the offset is inserted depends
20222 on the type of instruction, we can establish this from the
20224 switch (newval
>> 12)
20226 case 4: /* PC load. */
20227 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
20228 forced to zero for these loads; md_pcrel_from has already
20229 compensated for this. */
20231 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20232 _("invalid offset, target not word aligned (0x%08lX)"),
20233 (((unsigned long) fixP
->fx_frag
->fr_address
20234 + (unsigned long) fixP
->fx_where
) & ~3)
20235 + (unsigned long) value
);
20237 if (value
& ~0x3fc)
20238 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20239 _("invalid offset, value too big (0x%08lX)"),
20242 newval
|= value
>> 2;
20245 case 9: /* SP load/store. */
20246 if (value
& ~0x3fc)
20247 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20248 _("invalid offset, value too big (0x%08lX)"),
20250 newval
|= value
>> 2;
20253 case 6: /* Word load/store. */
20255 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20256 _("invalid offset, value too big (0x%08lX)"),
20258 newval
|= value
<< 4; /* 6 - 2. */
20261 case 7: /* Byte load/store. */
20263 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20264 _("invalid offset, value too big (0x%08lX)"),
20266 newval
|= value
<< 6;
20269 case 8: /* Halfword load/store. */
20271 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20272 _("invalid offset, value too big (0x%08lX)"),
20274 newval
|= value
<< 5; /* 6 - 1. */
20278 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20279 "Unable to process relocation for thumb opcode: %lx",
20280 (unsigned long) newval
);
20283 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
20286 case BFD_RELOC_ARM_THUMB_ADD
:
20287 /* This is a complicated relocation, since we use it for all of
20288 the following immediate relocations:
20292 9bit ADD/SUB SP word-aligned
20293 10bit ADD PC/SP word-aligned
20295 The type of instruction being processed is encoded in the
20302 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
20304 int rd
= (newval
>> 4) & 0xf;
20305 int rs
= newval
& 0xf;
20306 int subtract
= !!(newval
& 0x8000);
20308 /* Check for HI regs, only very restricted cases allowed:
20309 Adjusting SP, and using PC or SP to get an address. */
20310 if ((rd
> 7 && (rd
!= REG_SP
|| rs
!= REG_SP
))
20311 || (rs
> 7 && rs
!= REG_SP
&& rs
!= REG_PC
))
20312 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20313 _("invalid Hi register with immediate"));
20315 /* If value is negative, choose the opposite instruction. */
20319 subtract
= !subtract
;
20321 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20322 _("immediate value out of range"));
20327 if (value
& ~0x1fc)
20328 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20329 _("invalid immediate for stack address calculation"));
20330 newval
= subtract
? T_OPCODE_SUB_ST
: T_OPCODE_ADD_ST
;
20331 newval
|= value
>> 2;
20333 else if (rs
== REG_PC
|| rs
== REG_SP
)
20335 if (subtract
|| value
& ~0x3fc)
20336 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20337 _("invalid immediate for address calculation (value = 0x%08lX)"),
20338 (unsigned long) value
);
20339 newval
= (rs
== REG_PC
? T_OPCODE_ADD_PC
: T_OPCODE_ADD_SP
);
20341 newval
|= value
>> 2;
20346 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20347 _("immediate value out of range"));
20348 newval
= subtract
? T_OPCODE_SUB_I8
: T_OPCODE_ADD_I8
;
20349 newval
|= (rd
<< 8) | value
;
20354 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20355 _("immediate value out of range"));
20356 newval
= subtract
? T_OPCODE_SUB_I3
: T_OPCODE_ADD_I3
;
20357 newval
|= rd
| (rs
<< 3) | (value
<< 6);
20360 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
20363 case BFD_RELOC_ARM_THUMB_IMM
:
20364 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
20365 if (value
< 0 || value
> 255)
20366 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20367 _("invalid immediate: %ld is out of range"),
20370 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
20373 case BFD_RELOC_ARM_THUMB_SHIFT
:
20374 /* 5bit shift value (0..32). LSL cannot take 32. */
20375 newval
= md_chars_to_number (buf
, THUMB_SIZE
) & 0xf83f;
20376 temp
= newval
& 0xf800;
20377 if (value
< 0 || value
> 32 || (value
== 32 && temp
== T_OPCODE_LSL_I
))
20378 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20379 _("invalid shift value: %ld"), (long) value
);
20380 /* Shifts of zero must be encoded as LSL. */
20382 newval
= (newval
& 0x003f) | T_OPCODE_LSL_I
;
20383 /* Shifts of 32 are encoded as zero. */
20384 else if (value
== 32)
20386 newval
|= value
<< 6;
20387 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
20390 case BFD_RELOC_VTABLE_INHERIT
:
20391 case BFD_RELOC_VTABLE_ENTRY
:
20395 case BFD_RELOC_ARM_MOVW
:
20396 case BFD_RELOC_ARM_MOVT
:
20397 case BFD_RELOC_ARM_THUMB_MOVW
:
20398 case BFD_RELOC_ARM_THUMB_MOVT
:
20399 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20401 /* REL format relocations are limited to a 16-bit addend. */
20402 if (!fixP
->fx_done
)
20404 if (value
< -0x8000 || value
> 0x7fff)
20405 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20406 _("offset out of range"));
20408 else if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
20409 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
20414 if (fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
20415 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
20417 newval
= get_thumb32_insn (buf
);
20418 newval
&= 0xfbf08f00;
20419 newval
|= (value
& 0xf000) << 4;
20420 newval
|= (value
& 0x0800) << 15;
20421 newval
|= (value
& 0x0700) << 4;
20422 newval
|= (value
& 0x00ff);
20423 put_thumb32_insn (buf
, newval
);
20427 newval
= md_chars_to_number (buf
, 4);
20428 newval
&= 0xfff0f000;
20429 newval
|= value
& 0x0fff;
20430 newval
|= (value
& 0xf000) << 4;
20431 md_number_to_chars (buf
, newval
, 4);
20436 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
20437 case BFD_RELOC_ARM_ALU_PC_G0
:
20438 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
20439 case BFD_RELOC_ARM_ALU_PC_G1
:
20440 case BFD_RELOC_ARM_ALU_PC_G2
:
20441 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
20442 case BFD_RELOC_ARM_ALU_SB_G0
:
20443 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
20444 case BFD_RELOC_ARM_ALU_SB_G1
:
20445 case BFD_RELOC_ARM_ALU_SB_G2
:
20446 gas_assert (!fixP
->fx_done
);
20447 if (!seg
->use_rela_p
)
20450 bfd_vma encoded_addend
;
20451 bfd_vma addend_abs
= abs (value
);
20453 /* Check that the absolute value of the addend can be
20454 expressed as an 8-bit constant plus a rotation. */
20455 encoded_addend
= encode_arm_immediate (addend_abs
);
20456 if (encoded_addend
== (unsigned int) FAIL
)
20457 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20458 _("the offset 0x%08lX is not representable"),
20459 (unsigned long) addend_abs
);
20461 /* Extract the instruction. */
20462 insn
= md_chars_to_number (buf
, INSN_SIZE
);
20464 /* If the addend is positive, use an ADD instruction.
20465 Otherwise use a SUB. Take care not to destroy the S bit. */
20466 insn
&= 0xff1fffff;
20472 /* Place the encoded addend into the first 12 bits of the
20474 insn
&= 0xfffff000;
20475 insn
|= encoded_addend
;
20477 /* Update the instruction. */
20478 md_number_to_chars (buf
, insn
, INSN_SIZE
);
20482 case BFD_RELOC_ARM_LDR_PC_G0
:
20483 case BFD_RELOC_ARM_LDR_PC_G1
:
20484 case BFD_RELOC_ARM_LDR_PC_G2
:
20485 case BFD_RELOC_ARM_LDR_SB_G0
:
20486 case BFD_RELOC_ARM_LDR_SB_G1
:
20487 case BFD_RELOC_ARM_LDR_SB_G2
:
20488 gas_assert (!fixP
->fx_done
);
20489 if (!seg
->use_rela_p
)
20492 bfd_vma addend_abs
= abs (value
);
20494 /* Check that the absolute value of the addend can be
20495 encoded in 12 bits. */
20496 if (addend_abs
>= 0x1000)
20497 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20498 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
20499 (unsigned long) addend_abs
);
20501 /* Extract the instruction. */
20502 insn
= md_chars_to_number (buf
, INSN_SIZE
);
20504 /* If the addend is negative, clear bit 23 of the instruction.
20505 Otherwise set it. */
20507 insn
&= ~(1 << 23);
20511 /* Place the absolute value of the addend into the first 12 bits
20512 of the instruction. */
20513 insn
&= 0xfffff000;
20514 insn
|= addend_abs
;
20516 /* Update the instruction. */
20517 md_number_to_chars (buf
, insn
, INSN_SIZE
);
20521 case BFD_RELOC_ARM_LDRS_PC_G0
:
20522 case BFD_RELOC_ARM_LDRS_PC_G1
:
20523 case BFD_RELOC_ARM_LDRS_PC_G2
:
20524 case BFD_RELOC_ARM_LDRS_SB_G0
:
20525 case BFD_RELOC_ARM_LDRS_SB_G1
:
20526 case BFD_RELOC_ARM_LDRS_SB_G2
:
20527 gas_assert (!fixP
->fx_done
);
20528 if (!seg
->use_rela_p
)
20531 bfd_vma addend_abs
= abs (value
);
20533 /* Check that the absolute value of the addend can be
20534 encoded in 8 bits. */
20535 if (addend_abs
>= 0x100)
20536 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20537 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
20538 (unsigned long) addend_abs
);
20540 /* Extract the instruction. */
20541 insn
= md_chars_to_number (buf
, INSN_SIZE
);
20543 /* If the addend is negative, clear bit 23 of the instruction.
20544 Otherwise set it. */
20546 insn
&= ~(1 << 23);
20550 /* Place the first four bits of the absolute value of the addend
20551 into the first 4 bits of the instruction, and the remaining
20552 four into bits 8 .. 11. */
20553 insn
&= 0xfffff0f0;
20554 insn
|= (addend_abs
& 0xf) | ((addend_abs
& 0xf0) << 4);
20556 /* Update the instruction. */
20557 md_number_to_chars (buf
, insn
, INSN_SIZE
);
20561 case BFD_RELOC_ARM_LDC_PC_G0
:
20562 case BFD_RELOC_ARM_LDC_PC_G1
:
20563 case BFD_RELOC_ARM_LDC_PC_G2
:
20564 case BFD_RELOC_ARM_LDC_SB_G0
:
20565 case BFD_RELOC_ARM_LDC_SB_G1
:
20566 case BFD_RELOC_ARM_LDC_SB_G2
:
20567 gas_assert (!fixP
->fx_done
);
20568 if (!seg
->use_rela_p
)
20571 bfd_vma addend_abs
= abs (value
);
20573 /* Check that the absolute value of the addend is a multiple of
20574 four and, when divided by four, fits in 8 bits. */
20575 if (addend_abs
& 0x3)
20576 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20577 _("bad offset 0x%08lX (must be word-aligned)"),
20578 (unsigned long) addend_abs
);
20580 if ((addend_abs
>> 2) > 0xff)
20581 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20582 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
20583 (unsigned long) addend_abs
);
20585 /* Extract the instruction. */
20586 insn
= md_chars_to_number (buf
, INSN_SIZE
);
20588 /* If the addend is negative, clear bit 23 of the instruction.
20589 Otherwise set it. */
20591 insn
&= ~(1 << 23);
20595 /* Place the addend (divided by four) into the first eight
20596 bits of the instruction. */
20597 insn
&= 0xfffffff0;
20598 insn
|= addend_abs
>> 2;
20600 /* Update the instruction. */
20601 md_number_to_chars (buf
, insn
, INSN_SIZE
);
20605 case BFD_RELOC_ARM_V4BX
:
20606 /* This will need to go in the object file. */
20610 case BFD_RELOC_UNUSED
:
20612 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20613 _("bad relocation fixup type (%d)"), fixP
->fx_r_type
);
/* Translate internal representation of relocation info to BFD target
   format.  */

arelent *
tc_gen_reloc (asection *section, fixS *fixp)
{
  arelent * reloc;
  bfd_reloc_code_real_type code;
20626 reloc
= xmalloc (sizeof (arelent
));
20628 reloc
->sym_ptr_ptr
= xmalloc (sizeof (asymbol
*));
20629 *reloc
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
20630 reloc
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
20632 if (fixp
->fx_pcrel
)
20634 if (section
->use_rela_p
)
20635 fixp
->fx_offset
-= md_pcrel_from_section (fixp
, section
);
20637 fixp
->fx_offset
= reloc
->address
;
20639 reloc
->addend
= fixp
->fx_offset
;
20641 switch (fixp
->fx_r_type
)
20644 if (fixp
->fx_pcrel
)
20646 code
= BFD_RELOC_8_PCREL
;
20651 if (fixp
->fx_pcrel
)
20653 code
= BFD_RELOC_16_PCREL
;
20658 if (fixp
->fx_pcrel
)
20660 code
= BFD_RELOC_32_PCREL
;
20664 case BFD_RELOC_ARM_MOVW
:
20665 if (fixp
->fx_pcrel
)
20667 code
= BFD_RELOC_ARM_MOVW_PCREL
;
20671 case BFD_RELOC_ARM_MOVT
:
20672 if (fixp
->fx_pcrel
)
20674 code
= BFD_RELOC_ARM_MOVT_PCREL
;
20678 case BFD_RELOC_ARM_THUMB_MOVW
:
20679 if (fixp
->fx_pcrel
)
20681 code
= BFD_RELOC_ARM_THUMB_MOVW_PCREL
;
20685 case BFD_RELOC_ARM_THUMB_MOVT
:
20686 if (fixp
->fx_pcrel
)
20688 code
= BFD_RELOC_ARM_THUMB_MOVT_PCREL
;
20692 case BFD_RELOC_NONE
:
20693 case BFD_RELOC_ARM_PCREL_BRANCH
:
20694 case BFD_RELOC_ARM_PCREL_BLX
:
20695 case BFD_RELOC_RVA
:
20696 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
20697 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
20698 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
20699 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
20700 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
20701 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
20702 case BFD_RELOC_VTABLE_ENTRY
:
20703 case BFD_RELOC_VTABLE_INHERIT
:
20705 case BFD_RELOC_32_SECREL
:
20707 code
= fixp
->fx_r_type
;
20710 case BFD_RELOC_THUMB_PCREL_BLX
:
20712 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
20713 code
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
20716 code
= BFD_RELOC_THUMB_PCREL_BLX
;
20719 case BFD_RELOC_ARM_LITERAL
:
20720 case BFD_RELOC_ARM_HWLITERAL
:
      /* If this is called then a literal has been referenced across
         a section boundary.  */
20723 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
20724 _("literal referenced across section boundary"));
20728 case BFD_RELOC_ARM_GOT32
:
20729 case BFD_RELOC_ARM_GOTOFF
:
20730 case BFD_RELOC_ARM_PLT32
:
20731 case BFD_RELOC_ARM_TARGET1
:
20732 case BFD_RELOC_ARM_ROSEGREL32
:
20733 case BFD_RELOC_ARM_SBREL32
:
20734 case BFD_RELOC_ARM_PREL31
:
20735 case BFD_RELOC_ARM_TARGET2
:
20736 case BFD_RELOC_ARM_TLS_LE32
:
20737 case BFD_RELOC_ARM_TLS_LDO32
:
20738 case BFD_RELOC_ARM_PCREL_CALL
:
20739 case BFD_RELOC_ARM_PCREL_JUMP
:
20740 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
20741 case BFD_RELOC_ARM_ALU_PC_G0
:
20742 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
20743 case BFD_RELOC_ARM_ALU_PC_G1
:
20744 case BFD_RELOC_ARM_ALU_PC_G2
:
20745 case BFD_RELOC_ARM_LDR_PC_G0
:
20746 case BFD_RELOC_ARM_LDR_PC_G1
:
20747 case BFD_RELOC_ARM_LDR_PC_G2
:
20748 case BFD_RELOC_ARM_LDRS_PC_G0
:
20749 case BFD_RELOC_ARM_LDRS_PC_G1
:
20750 case BFD_RELOC_ARM_LDRS_PC_G2
:
20751 case BFD_RELOC_ARM_LDC_PC_G0
:
20752 case BFD_RELOC_ARM_LDC_PC_G1
:
20753 case BFD_RELOC_ARM_LDC_PC_G2
:
20754 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
20755 case BFD_RELOC_ARM_ALU_SB_G0
:
20756 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
20757 case BFD_RELOC_ARM_ALU_SB_G1
:
20758 case BFD_RELOC_ARM_ALU_SB_G2
:
20759 case BFD_RELOC_ARM_LDR_SB_G0
:
20760 case BFD_RELOC_ARM_LDR_SB_G1
:
20761 case BFD_RELOC_ARM_LDR_SB_G2
:
20762 case BFD_RELOC_ARM_LDRS_SB_G0
:
20763 case BFD_RELOC_ARM_LDRS_SB_G1
:
20764 case BFD_RELOC_ARM_LDRS_SB_G2
:
20765 case BFD_RELOC_ARM_LDC_SB_G0
:
20766 case BFD_RELOC_ARM_LDC_SB_G1
:
20767 case BFD_RELOC_ARM_LDC_SB_G2
:
20768 case BFD_RELOC_ARM_V4BX
:
20769 code
= fixp
->fx_r_type
;
20772 case BFD_RELOC_ARM_TLS_GD32
:
20773 case BFD_RELOC_ARM_TLS_IE32
:
20774 case BFD_RELOC_ARM_TLS_LDM32
:
20775 /* BFD will include the symbol's address in the addend.
20776 But we don't want that, so subtract it out again here. */
20777 if (!S_IS_COMMON (fixp
->fx_addsy
))
20778 reloc
->addend
-= (*reloc
->sym_ptr_ptr
)->value
;
20779 code
= fixp
->fx_r_type
;
20783 case BFD_RELOC_ARM_IMMEDIATE
:
20784 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
20785 _("internal relocation (type: IMMEDIATE) not fixed up"));
20788 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
20789 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
20790 _("ADRL used for a symbol not defined in the same file"));
20793 case BFD_RELOC_ARM_OFFSET_IMM
:
20794 if (section
->use_rela_p
)
20796 code
= fixp
->fx_r_type
;
20800 if (fixp
->fx_addsy
!= NULL
20801 && !S_IS_DEFINED (fixp
->fx_addsy
)
20802 && S_IS_LOCAL (fixp
->fx_addsy
))
20804 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
20805 _("undefined local label `%s'"),
20806 S_GET_NAME (fixp
->fx_addsy
));
20810 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
20811 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
20818 switch (fixp
->fx_r_type
)
20820 case BFD_RELOC_NONE
: type
= "NONE"; break;
20821 case BFD_RELOC_ARM_OFFSET_IMM8
: type
= "OFFSET_IMM8"; break;
20822 case BFD_RELOC_ARM_SHIFT_IMM
: type
= "SHIFT_IMM"; break;
20823 case BFD_RELOC_ARM_SMC
: type
= "SMC"; break;
20824 case BFD_RELOC_ARM_SWI
: type
= "SWI"; break;
20825 case BFD_RELOC_ARM_MULTI
: type
= "MULTI"; break;
20826 case BFD_RELOC_ARM_CP_OFF_IMM
: type
= "CP_OFF_IMM"; break;
20827 case BFD_RELOC_ARM_T32_CP_OFF_IMM
: type
= "T32_CP_OFF_IMM"; break;
20828 case BFD_RELOC_ARM_THUMB_ADD
: type
= "THUMB_ADD"; break;
20829 case BFD_RELOC_ARM_THUMB_SHIFT
: type
= "THUMB_SHIFT"; break;
20830 case BFD_RELOC_ARM_THUMB_IMM
: type
= "THUMB_IMM"; break;
20831 case BFD_RELOC_ARM_THUMB_OFFSET
: type
= "THUMB_OFFSET"; break;
20832 default: type
= _("<unknown>"); break;
20834 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
20835 _("cannot represent %s relocation in this object file format"),
20842 if ((code
== BFD_RELOC_32_PCREL
|| code
== BFD_RELOC_32
)
20844 && fixp
->fx_addsy
== GOT_symbol
)
20846 code
= BFD_RELOC_ARM_GOTPC
;
20847 reloc
->addend
= fixp
->fx_offset
= reloc
->address
;
20851 reloc
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
20853 if (reloc
->howto
== NULL
)
20855 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
20856 _("cannot represent %s relocation in this object file format"),
20857 bfd_get_reloc_code_name (code
));
20861 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
20862 vtable entry to be used in the relocation's section offset. */
20863 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
20864 reloc
->address
= fixp
->fx_offset
;
20869 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
20872 cons_fix_new_arm (fragS
* frag
,
20877 bfd_reloc_code_real_type type
;
20881 FIXME: @@ Should look at CPU word size. */
20885 type
= BFD_RELOC_8
;
20888 type
= BFD_RELOC_16
;
20892 type
= BFD_RELOC_32
;
20895 type
= BFD_RELOC_64
;
20900 if (exp
->X_op
== O_secrel
)
20902 exp
->X_op
= O_symbol
;
20903 type
= BFD_RELOC_32_SECREL
;
20907 fix_new_exp (frag
, where
, (int) size
, exp
, pcrel
, type
);
20910 #if defined (OBJ_COFF)
20912 arm_validate_fix (fixS
* fixP
)
20914 /* If the destination of the branch is a defined symbol which does not have
20915 the THUMB_FUNC attribute, then we must be calling a function which has
20916 the (interfacearm) attribute. We look for the Thumb entry point to that
20917 function and change the branch to refer to that function instead. */
20918 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BRANCH23
20919 && fixP
->fx_addsy
!= NULL
20920 && S_IS_DEFINED (fixP
->fx_addsy
)
20921 && ! THUMB_IS_FUNC (fixP
->fx_addsy
))
20923 fixP
->fx_addsy
= find_real_start (fixP
->fx_addsy
);
20930 arm_force_relocation (struct fix
* fixp
)
20932 #if defined (OBJ_COFF) && defined (TE_PE)
20933 if (fixp
->fx_r_type
== BFD_RELOC_RVA
)
  /* In case we have a call or a branch to a function in ARM ISA mode from
     a Thumb function or vice-versa, force the relocation.  These relocations
     are cleared off for some cores that might have blx and simple
     transformations possible.  */
20943 switch (fixp
->fx_r_type
)
20945 case BFD_RELOC_ARM_PCREL_JUMP
:
20946 case BFD_RELOC_ARM_PCREL_CALL
:
20947 case BFD_RELOC_THUMB_PCREL_BLX
:
20948 if (THUMB_IS_FUNC (fixp
->fx_addsy
))
20952 case BFD_RELOC_ARM_PCREL_BLX
:
20953 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
20954 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
20955 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
20956 if (ARM_IS_FUNC (fixp
->fx_addsy
))
20965 /* Resolve these relocations even if the symbol is extern or weak. */
20966 if (fixp
->fx_r_type
== BFD_RELOC_ARM_IMMEDIATE
20967 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM
20968 || fixp
->fx_r_type
== BFD_RELOC_ARM_ADRL_IMMEDIATE
20969 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
20970 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
20971 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMM12
20972 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_PC12
)
20975 /* Always leave these relocations for the linker. */
20976 if ((fixp
->fx_r_type
>= BFD_RELOC_ARM_ALU_PC_G0_NC
20977 && fixp
->fx_r_type
<= BFD_RELOC_ARM_LDC_SB_G2
)
20978 || fixp
->fx_r_type
== BFD_RELOC_ARM_LDR_PC_G0
)
20981 /* Always generate relocations against function symbols. */
20982 if (fixp
->fx_r_type
== BFD_RELOC_32
20984 && (symbol_get_bfdsym (fixp
->fx_addsy
)->flags
& BSF_FUNCTION
))
20987 return generic_force_reloc (fixp
);
20990 #if defined (OBJ_ELF) || defined (OBJ_COFF)
20991 /* Relocations against function names must be left unadjusted,
20992 so that the linker can use this information to generate interworking
20993 stubs. The MIPS version of this function
20994 also prevents relocations that are mips-16 specific, but I do not
20995 know why it does this.
20998 There is one other problem that ought to be addressed here, but
20999 which currently is not: Taking the address of a label (rather
21000 than a function) and then later jumping to that address. Such
21001 addresses also ought to have their bottom bit set (assuming that
21002 they reside in Thumb code), but at the moment they will not. */
21005 arm_fix_adjustable (fixS
* fixP
)
21007 if (fixP
->fx_addsy
== NULL
)
21010 /* Preserve relocations against symbols with function type. */
21011 if (symbol_get_bfdsym (fixP
->fx_addsy
)->flags
& BSF_FUNCTION
)
21014 if (THUMB_IS_FUNC (fixP
->fx_addsy
)
21015 && fixP
->fx_subsy
== NULL
)
21018 /* We need the symbol name for the VTABLE entries. */
21019 if ( fixP
->fx_r_type
== BFD_RELOC_VTABLE_INHERIT
21020 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
21023 /* Don't allow symbols to be discarded on GOT related relocs. */
21024 if (fixP
->fx_r_type
== BFD_RELOC_ARM_PLT32
21025 || fixP
->fx_r_type
== BFD_RELOC_ARM_GOT32
21026 || fixP
->fx_r_type
== BFD_RELOC_ARM_GOTOFF
21027 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_GD32
21028 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LE32
21029 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_IE32
21030 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDM32
21031 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDO32
21032 || fixP
->fx_r_type
== BFD_RELOC_ARM_TARGET2
)
21035 /* Similarly for group relocations. */
21036 if ((fixP
->fx_r_type
>= BFD_RELOC_ARM_ALU_PC_G0_NC
21037 && fixP
->fx_r_type
<= BFD_RELOC_ARM_LDC_SB_G2
)
21038 || fixP
->fx_r_type
== BFD_RELOC_ARM_LDR_PC_G0
)
21041 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
21042 if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVW
21043 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
21044 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVW_PCREL
21045 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT_PCREL
21046 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
21047 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
21048 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW_PCREL
21049 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT_PCREL
)
21054 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
const char *
elf32_arm_target_format (void)
{
#ifdef TE_SYMBIAN
  return (target_big_endian
          ? "elf32-bigarm-symbian"
          : "elf32-littlearm-symbian");
#elif defined (TE_VXWORKS)
  return (target_big_endian
          ? "elf32-bigarm-vxworks"
          : "elf32-littlearm-vxworks");
#else
  if (target_big_endian)
    return "elf32-bigarm";
  else
    return "elf32-littlearm";
#endif
}
21078 armelf_frob_symbol (symbolS
* symp
,
21081 elf_frob_symbol (symp
, puntp
);
21085 /* MD interface: Finalization. */
21090 literal_pool
* pool
;
21092 /* Ensure that all the IT blocks are properly closed. */
21093 check_it_blocks_finished ();
21095 for (pool
= list_of_pools
; pool
; pool
= pool
->next
)
21097 /* Put it at the end of the relevant section. */
21098 subseg_set (pool
->section
, pool
->sub_section
);
21100 arm_elf_change_section ();
21107 /* Remove any excess mapping symbols generated for alignment frags in
21108 SEC. We may have created a mapping symbol before a zero byte
21109 alignment; remove it if there's a mapping symbol after the
21112 check_mapping_symbols (bfd
*abfd ATTRIBUTE_UNUSED
, asection
*sec
,
21113 void *dummy ATTRIBUTE_UNUSED
)
21115 segment_info_type
*seginfo
= seg_info (sec
);
21118 if (seginfo
== NULL
|| seginfo
->frchainP
== NULL
)
21121 for (fragp
= seginfo
->frchainP
->frch_root
;
21123 fragp
= fragp
->fr_next
)
21125 symbolS
*sym
= fragp
->tc_frag_data
.last_map
;
21126 fragS
*next
= fragp
->fr_next
;
21128 /* Variable-sized frags have been converted to fixed size by
21129 this point. But if this was variable-sized to start with,
21130 there will be a fixed-size frag after it. So don't handle
21132 if (sym
== NULL
|| next
== NULL
)
21135 if (S_GET_VALUE (sym
) < next
->fr_address
)
21136 /* Not at the end of this frag. */
21138 know (S_GET_VALUE (sym
) == next
->fr_address
);
21142 if (next
->tc_frag_data
.first_map
!= NULL
)
21144 /* Next frag starts with a mapping symbol. Discard this
21146 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
21150 if (next
->fr_next
== NULL
)
21152 /* This mapping symbol is at the end of the section. Discard
21154 know (next
->fr_fix
== 0 && next
->fr_var
== 0);
21155 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
21159 /* As long as we have empty frags without any mapping symbols,
21161 /* If the next frag is non-empty and does not start with a
21162 mapping symbol, then this mapping symbol is required. */
21163 if (next
->fr_address
!= next
->fr_next
->fr_address
)
21166 next
= next
->fr_next
;
21168 while (next
!= NULL
);
21173 /* Adjust the symbol table. This marks Thumb symbols as distinct from
21177 arm_adjust_symtab (void)
21182 for (sym
= symbol_rootP
; sym
!= NULL
; sym
= symbol_next (sym
))
21184 if (ARM_IS_THUMB (sym
))
21186 if (THUMB_IS_FUNC (sym
))
21188 /* Mark the symbol as a Thumb function. */
21189 if ( S_GET_STORAGE_CLASS (sym
) == C_STAT
21190 || S_GET_STORAGE_CLASS (sym
) == C_LABEL
) /* This can happen! */
21191 S_SET_STORAGE_CLASS (sym
, C_THUMBSTATFUNC
);
21193 else if (S_GET_STORAGE_CLASS (sym
) == C_EXT
)
21194 S_SET_STORAGE_CLASS (sym
, C_THUMBEXTFUNC
);
21196 as_bad (_("%s: unexpected function type: %d"),
21197 S_GET_NAME (sym
), S_GET_STORAGE_CLASS (sym
));
21199 else switch (S_GET_STORAGE_CLASS (sym
))
21202 S_SET_STORAGE_CLASS (sym
, C_THUMBEXT
);
21205 S_SET_STORAGE_CLASS (sym
, C_THUMBSTAT
);
21208 S_SET_STORAGE_CLASS (sym
, C_THUMBLABEL
);
21216 if (ARM_IS_INTERWORK (sym
))
21217 coffsymbol (symbol_get_bfdsym (sym
))->native
->u
.syment
.n_flags
= 0xFF;
21224 for (sym
= symbol_rootP
; sym
!= NULL
; sym
= symbol_next (sym
))
21226 if (ARM_IS_THUMB (sym
))
21228 elf_symbol_type
* elf_sym
;
21230 elf_sym
= elf_symbol (symbol_get_bfdsym (sym
));
21231 bind
= ELF_ST_BIND (elf_sym
->internal_elf_sym
.st_info
);
21233 if (! bfd_is_arm_special_symbol_name (elf_sym
->symbol
.name
,
21234 BFD_ARM_SPECIAL_SYM_TYPE_ANY
))
21236 /* If it's a .thumb_func, declare it as so,
21237 otherwise tag label as .code 16. */
21238 if (THUMB_IS_FUNC (sym
))
21239 elf_sym
->internal_elf_sym
.st_info
=
21240 ELF_ST_INFO (bind
, STT_ARM_TFUNC
);
21241 else if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
21242 elf_sym
->internal_elf_sym
.st_info
=
21243 ELF_ST_INFO (bind
, STT_ARM_16BIT
);
21248 /* Remove any overlapping mapping symbols generated by alignment frags. */
21249 bfd_map_over_sections (stdoutput
, check_mapping_symbols
, (char *) 0);
21253 /* MD interface: Initialization. */
21256 set_constant_flonums (void)
21260 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
21261 if (atof_ieee ((char *) fp_const
[i
], 'x', fp_values
[i
]) == NULL
)
21265 /* Auto-select Thumb mode if it's the only available instruction set for the
21266 given architecture. */
21269 autoselect_thumb_from_cpu_variant (void)
21271 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
21272 opcode_select (16);
void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  if (   (arm_ops_hsh = hash_new ()) == NULL
      || (arm_cond_hsh = hash_new ()) == NULL
      || (arm_shift_hsh = hash_new ()) == NULL
      || (arm_psr_hsh = hash_new ()) == NULL
      || (arm_v7m_psr_hsh = hash_new ()) == NULL
      || (arm_reg_hsh = hash_new ()) == NULL
      || (arm_reloc_hsh = hash_new ()) == NULL
      || (arm_barrier_opt_hsh = hash_new ()) == NULL)
    as_fatal (_("virtual memory exhausted"));

  for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
    hash_insert (arm_ops_hsh, insns[i].template, (void *) (insns + i));
  for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
    hash_insert (arm_cond_hsh, conds[i].template, (void *) (conds + i));
  for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
    hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
  for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_psr_hsh, psrs[i].template, (void *) (psrs + i));
  for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template, (void *) (v7m_psrs + i));
  for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
    hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
  for (i = 0;
       i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
       i++)
    hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template,
		 (void *) (barrier_opt_names + i));
  for (i = 0; i < sizeof (reloc_names) / sizeof (struct reloc_entry); i++)
    hash_insert (arm_reloc_hsh, reloc_names[i].name, (void *) (reloc_names + i));

  set_constant_flonums ();
  /* Set the cpu variant based on the command-line options.  We prefer
     -mcpu= over -march= if both are set (as for GCC); and we prefer
     -mfpu= over any other way of setting the floating point unit.
     Use of legacy options with new options is faulted.  */
  if (legacy_cpu)
    {
      if (mcpu_cpu_opt || march_cpu_opt)
	as_bad (_("use of old and new-style options to set CPU type"));

      mcpu_cpu_opt = legacy_cpu;
    }
  else if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (legacy_fpu)
    {
      if (mfpu_opt)
	as_bad (_("use of old and new-style options to set FPU type"));

      mfpu_opt = legacy_fpu;
    }
  else if (!mfpu_opt)
    {
#if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
	|| defined (TE_NetBSD) || defined (TE_VXWORKS))
      /* Some environments specify a default FPU.  If they don't, infer it
	 from the processor.  */
      if (mcpu_fpu_opt)
	mfpu_opt = mcpu_fpu_opt;
      else
	mfpu_opt = march_fpu_opt;
#else
      mfpu_opt = &fpu_default;
#endif
    }

  if (!mfpu_opt)
    {
      if (mcpu_cpu_opt != NULL)
	mfpu_opt = &fpu_default;
      else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
	mfpu_opt = &fpu_arch_vfp_v2;
      else
	mfpu_opt = &fpu_arch_fpa;
    }

#ifdef CPU_DEFAULT
  if (!mcpu_cpu_opt)
    {
      mcpu_cpu_opt = &cpu_default;
      selected_cpu = cpu_default;
    }
#else
  if (mcpu_cpu_opt)
    selected_cpu = *mcpu_cpu_opt;
  else
    mcpu_cpu_opt = &arm_arch_any;
#endif

  ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);

  autoselect_thumb_from_cpu_variant ();

  arm_arch_used = thumb_arch_used = arm_arch_none;
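
  /* Illustrative note (an example, not taken from the original sources): with
     the precedence above, an invocation such as "as -mcpu=arm7tdmi -mfpu=vfp"
     selects the ARM7TDMI feature set together with VFP, whereas mixing a
     legacy spelling such as -m7tdmi with -mcpu=... is diagnosed as an error
     by the "old and new-style options" checks.  */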
#if defined OBJ_COFF || defined OBJ_ELF
  {
    unsigned int flags = 0;

#if defined OBJ_ELF
    flags = meabi_flags;

    switch (meabi_flags)
      {
      case EF_ARM_EABI_UNKNOWN:
#endif
	/* Set the flags in the private structure.  */
	if (uses_apcs_26)      flags |= F_APCS26;
	if (support_interwork) flags |= F_INTERWORK;
	if (uses_apcs_float)   flags |= F_APCS_FLOAT;
	if (pic_code)	       flags |= F_PIC;
	if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
	  flags |= F_SOFT_FLOAT;

	switch (mfloat_abi_opt)
	  {
	  case ARM_FLOAT_ABI_SOFT:
	  case ARM_FLOAT_ABI_SOFTFP:
	    flags |= F_SOFT_FLOAT;
	    break;

	  case ARM_FLOAT_ABI_HARD:
	    if (flags & F_SOFT_FLOAT)
	      as_bad (_("hard-float conflicts with specified fpu"));
	    break;
	  }

	/* Using pure-endian doubles (even if soft-float).  */
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	  flags |= F_VFP_FLOAT;

#if defined OBJ_ELF
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
	  flags |= EF_ARM_MAVERICK_FLOAT;
	break;

      case EF_ARM_EABI_VER4:
      case EF_ARM_EABI_VER5:
	/* No additional flags to set.  */
	break;
      }
#endif
    bfd_set_private_flags (stdoutput, flags);

    /* We have run out of flags in the COFF header to encode the
       status of ATPCS support, so instead we create a dummy,
       empty, debug section called .arm.atpcs.  */
    if (atpcs)
      {
	asection * sec;

	sec = bfd_make_section (stdoutput, ".arm.atpcs");

	if (sec != NULL)
	  {
	    bfd_set_section_flags
	      (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
	    bfd_set_section_size (stdoutput, sec, 0);
	    bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
	  }
      }
  }
#endif

  /* Record the CPU type as well.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
    mach = bfd_mach_arm_iWMMXt2;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
    mach = bfd_mach_arm_iWMMXt;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
    mach = bfd_mach_arm_XScale;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
    mach = bfd_mach_arm_ep9312;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
    mach = bfd_mach_arm_5TE;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_5T;
      else
	mach = bfd_mach_arm_5;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_4T;
      else
	mach = bfd_mach_arm_4;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
    mach = bfd_mach_arm_3M;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
    mach = bfd_mach_arm_3;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
    mach = bfd_mach_arm_2a;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
    mach = bfd_mach_arm_2;
  else
    mach = bfd_mach_arm_unknown;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
/* Command line processing.  */

/* md_parse_option
      Invocation line includes a switch not recognized by the base assembler.
      See if it's a processor-specific option.

      This routine is somewhat complicated by the need for backwards
      compatibility (since older releases of gcc can't be changed).
      The new options try to make the interface as compatible as
      possible with GCC.

      New options (supported) are:

	      -mcpu=<cpu name>		 Assemble for selected processor
	      -march=<architecture name> Assemble for selected architecture
	      -mfpu=<fpu architecture>	 Assemble for selected FPU.
	      -EB/-mbig-endian		 Big-endian
	      -EL/-mlittle-endian	 Little-endian
	      -k			 Generate PIC code
	      -mthumb			 Start in Thumb mode
	      -mthumb-interwork		 Code supports ARM/Thumb interworking

	      -m[no-]warn-deprecated	 Warn about deprecated features

      For now we will also provide support for:

	      -mapcs-32			 32-bit Program counter
	      -mapcs-26			 26-bit Program counter
	      -mapcs-float		 Floats passed in FP registers
	      -mapcs-reentrant		 Reentrant code

      (sometime these will probably be replaced with -mapcs=<list of options>
      and -matpcs=<list of options>)

      The remaining options are only supported for backwards compatibility.
      Cpu variants, the arm part is optional:
	      -m[arm]1		      Currently not supported.
	      -m[arm]2, -m[arm]250    Arm 2 and Arm 250 processor
	      -m[arm]3		      Arm 3 processor
	      -m[arm]6[xx],	      Arm 6 processors
	      -m[arm]7[xx][t][[d]m]   Arm 7 processors
	      -m[arm]8[10]	      Arm 8 processors
	      -m[arm]9[20][tdmi]      Arm 9 processors
	      -mstrongarm[110[0]]     StrongARM processors
	      -mxscale		      XScale processors
	      -m[arm]v[2345[t[e]]]    Arm architectures
	      -mall		      All (except the ARM1)
      FP variants:
	      -mfpa10, -mfpa11	      FPA10 and 11 co-processor instructions
	      -mfpe-old		      (No float load/store multiples)
	      -mvfpxd		      VFP Single precision
	      -mno-fpu		      Disable all floating point instructions

      The following CPU names are recognized:
	      arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
	      arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
	      arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
	      arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
	      arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
	      arm10t arm10e, arm1020t, arm1020e, arm10200e,
	      strongarm, strongarm110, strongarm1100, strongarm1110, xscale.  */
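
/* Illustrative example invocations of the new-style options described above
   (spellings are taken from the option tables that follow; this is not an
   exhaustive list):

       as -mcpu=arm920t -mfpu=fpa -mthumb-interwork foo.s
       as -march=armv5te -mfpu=vfp -mthumb foo.s  */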
const char * md_shortopts = "m:k";

#ifdef ARM_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif
#define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)

struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
struct arm_option_table
{
  char *option;		/* Option name to match.  */
  char *help;		/* Help information.  */
  int  *var;		/* Variable to change.  */
  int	value;		/* What to change it to.  */
  char *deprecated;	/* If non-null, print this message.  */
};

struct arm_option_table arm_opts[] =
{
  {"k",	     N_("generate PIC code"),	   &pic_code,	 1, NULL},
  {"mthumb", N_("assemble Thumb code"),	   &thumb_mode,	 1, NULL},
  {"mthumb-interwork", N_("support ARM/Thumb interworking"),
   &support_interwork, 1, NULL},
  {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
  {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
  {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
   1, NULL},
  {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
  {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},

  /* These are recognized by the assembler, but have no effect on code.  */
  {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
  {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},

  {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
  {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
   &warn_on_deprecated, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
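
/* Each arm_opts entry simply stores VALUE into *VAR when the option is seen
   (see md_parse_option below); for example "-mthumb" sets thumb_mode to 1 and
   "-mlittle-endian" clears target_big_endian.  */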
struct arm_legacy_option_table
{
  char *option;				/* Option name to match.  */
  const arm_feature_set **var;		/* Variable to change.  */
  const arm_feature_set	value;		/* What to change it to.  */
  char *deprecated;			/* If non-null, print this message.  */
};

const struct arm_legacy_option_table arm_legacy_opts[] =
{
  /* DON'T add any new processors to this list -- we want the whole list
     to go away...  Add them to the processors table instead.  */
  {"marm1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"m1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"marm2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"m2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"marm250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"m250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"marm3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"m3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"marm6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"m6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"marm600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"m600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"marm610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"m610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"marm620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"m620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"marm7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"m7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"marm70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"m70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"marm700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"m700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"marm700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"m700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"marm710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"m710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"marm710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"m710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"marm720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"m720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"marm7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"m7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"marm7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"m7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"marm7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"m7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"marm7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"m7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"marm7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"m7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"marm7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"m7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"marm7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"m7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"m7500fe",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"marm7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"m710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"marm720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"m720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"marm740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"m740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"marm8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"m8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"marm810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"m810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"marm9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"m9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"marm9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"m9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"marm920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"m920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"marm940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"m940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=strongarm")},
  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm110")},
  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1100")},
  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1110")},
  {"mxscale",	 &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
  {"miwmmxt",	 &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
  {"mall",	 &legacy_cpu, ARM_ANY,	       N_("use -mcpu=all")},

  /* Architecture variants -- don't add any more to this list either.	*/
  {"mv2",	 &legacy_cpu, ARM_ARCH_V2,   N_("use -march=armv2")},
  {"marmv2",	 &legacy_cpu, ARM_ARCH_V2,   N_("use -march=armv2")},
  {"mv2a",	 &legacy_cpu, ARM_ARCH_V2S,  N_("use -march=armv2a")},
  {"marmv2a",	 &legacy_cpu, ARM_ARCH_V2S,  N_("use -march=armv2a")},
  {"mv3",	 &legacy_cpu, ARM_ARCH_V3,   N_("use -march=armv3")},
  {"marmv3",	 &legacy_cpu, ARM_ARCH_V3,   N_("use -march=armv3")},
  {"mv3m",	 &legacy_cpu, ARM_ARCH_V3M,  N_("use -march=armv3m")},
  {"marmv3m",	 &legacy_cpu, ARM_ARCH_V3M,  N_("use -march=armv3m")},
  {"mv4",	 &legacy_cpu, ARM_ARCH_V4,   N_("use -march=armv4")},
  {"marmv4",	 &legacy_cpu, ARM_ARCH_V4,   N_("use -march=armv4")},
  {"mv4t",	 &legacy_cpu, ARM_ARCH_V4T,  N_("use -march=armv4t")},
  {"marmv4t",	 &legacy_cpu, ARM_ARCH_V4T,  N_("use -march=armv4t")},
  {"mv5",	 &legacy_cpu, ARM_ARCH_V5,   N_("use -march=armv5")},
  {"marmv5",	 &legacy_cpu, ARM_ARCH_V5,   N_("use -march=armv5")},
  {"mv5t",	 &legacy_cpu, ARM_ARCH_V5T,  N_("use -march=armv5t")},
  {"marmv5t",	 &legacy_cpu, ARM_ARCH_V5T,  N_("use -march=armv5t")},
  {"mv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
  {"marmv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},

  /* Floating point variants -- don't add any more to this list either.  */
  {"mfpe-old",	 &legacy_fpu, FPU_ARCH_FPE,  N_("use -mfpu=fpe")},
  {"mfpa10",	 &legacy_fpu, FPU_ARCH_FPA,  N_("use -mfpu=fpa10")},
  {"mfpa11",	 &legacy_fpu, FPU_ARCH_FPA,  N_("use -mfpu=fpa11")},
  {"mno-fpu",	 &legacy_fpu, ARM_ARCH_NONE,
   N_("use either -mfpu=softfpa or -mfpu=softvfp")},

  {NULL, NULL, ARM_ARCH_NONE, NULL}
};
struct arm_cpu_option_table
{
  char *name;
  const arm_feature_set	value;
  /* For some CPUs we assume an FPU unless the user explicitly sets
     -mfpu=...  */
  const arm_feature_set	default_fpu;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};

/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
static const struct arm_cpu_option_table arm_cpus[] =
{
  {"all",		ARM_ANY,	 FPU_ARCH_FPA,	  NULL},
  {"arm1",		ARM_ARCH_V1,	 FPU_ARCH_FPA,	  NULL},
  {"arm2",		ARM_ARCH_V2,	 FPU_ARCH_FPA,	  NULL},
  {"arm250",		ARM_ARCH_V2S,	 FPU_ARCH_FPA,	  NULL},
  {"arm3",		ARM_ARCH_V2S,	 FPU_ARCH_FPA,	  NULL},
  {"arm6",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm60",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm600",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm610",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm620",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7m",		ARM_ARCH_V3M,	 FPU_ARCH_FPA,	  NULL},
  {"arm7d",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7dm",		ARM_ARCH_V3M,	 FPU_ARCH_FPA,	  NULL},
  {"arm7di",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7dmi",		ARM_ARCH_V3M,	 FPU_ARCH_FPA,	  NULL},
  {"arm70",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm700",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm700i",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm710",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm710t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm720",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm720t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm740t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm710c",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7100",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7500",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7500fe",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm7tdmi",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm7tdmi-s",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm8",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"arm810",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"strongarm",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"strongarm1",	ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"strongarm110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"strongarm1100",	ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"strongarm1110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"arm9",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm920",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  "ARM920T"},
  {"arm920t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm922t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm940t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm9tdmi",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"fa526",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"fa626",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  /* For V5 or later processors we default to using VFP; but the user
     should really set the FPU type explicitly.	 */
  {"arm9e-r0",		ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
  {"arm9e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm926ej",		ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"},
  {"arm926ejs",		ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"},
  {"arm926ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL},
  {"arm946e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
  {"arm946e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM946E-S"},
  {"arm946e-s",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm966e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
  {"arm966e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM966E-S"},
  {"arm966e-s",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm968e-s",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm10t",		ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL},
  {"arm10tdmi",		ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL},
  {"arm10e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm1020",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM1020E"},
  {"arm1020t",		ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL},
  {"arm1020e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm1022e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm1026ejs",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM1026EJ-S"},
  {"arm1026ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL},
  {"fa626te",		ARM_ARCH_V5TE,	 FPU_NONE,	  NULL},
  {"fa726te",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm1136js",		ARM_ARCH_V6,	 FPU_NONE,	  "ARM1136J-S"},
  {"arm1136j-s",	ARM_ARCH_V6,	 FPU_NONE,	  NULL},
  {"arm1136jfs",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2, "ARM1136JF-S"},
  {"arm1136jf-s",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2, NULL},
  {"mpcore",		ARM_ARCH_V6K,	 FPU_ARCH_VFP_V2, NULL},
  {"mpcorenovfp",	ARM_ARCH_V6K,	 FPU_NONE,	  NULL},
  {"arm1156t2-s",	ARM_ARCH_V6T2,	 FPU_NONE,	  NULL},
  {"arm1156t2f-s",	ARM_ARCH_V6T2,	 FPU_ARCH_VFP_V2, NULL},
  {"arm1176jz-s",	ARM_ARCH_V6ZK,	 FPU_NONE,	  NULL},
  {"arm1176jzf-s",	ARM_ARCH_V6ZK,	 FPU_ARCH_VFP_V2, NULL},
  {"cortex-a8",		ARM_ARCH_V7A,	 ARM_FEATURE (0, FPU_VFP_V3
						      | FPU_NEON_EXT_V1),
							  NULL},
  {"cortex-a9",		ARM_ARCH_V7A,	 ARM_FEATURE (0, FPU_VFP_V3
						      | FPU_NEON_EXT_V1),
							  NULL},
  {"cortex-r4",		ARM_ARCH_V7R,	 FPU_NONE,	  NULL},
  {"cortex-m3",		ARM_ARCH_V7M,	 FPU_NONE,	  NULL},
  {"cortex-m1",		ARM_ARCH_V6M,	 FPU_NONE,	  NULL},
  {"cortex-m0",		ARM_ARCH_V6M,	 FPU_NONE,	  NULL},
  /* ??? XSCALE is really an architecture.  */
  {"xscale",		ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
  /* ??? iwmmxt is not a processor.  */
  {"iwmmxt",		ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL},
  {"iwmmxt2",		ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL},
  {"i80200",		ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
  {"ep9312",	ARM_FEATURE (ARM_AEXT_V4T, ARM_CEXT_MAVERICK), FPU_ARCH_MAVERICK, "ARM920T"},
  {NULL,		ARM_ARCH_NONE,	 ARM_ARCH_NONE,	  NULL}
};
struct arm_arch_option_table
{
  char *name;
  const arm_feature_set	value;
  const arm_feature_set	default_fpu;
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
static const struct arm_arch_option_table arm_archs[] =
{
  {"all",		ARM_ANY,	 FPU_ARCH_FPA},
  {"armv1",		ARM_ARCH_V1,	 FPU_ARCH_FPA},
  {"armv2",		ARM_ARCH_V2,	 FPU_ARCH_FPA},
  {"armv2a",		ARM_ARCH_V2S,	 FPU_ARCH_FPA},
  {"armv2s",		ARM_ARCH_V2S,	 FPU_ARCH_FPA},
  {"armv3",		ARM_ARCH_V3,	 FPU_ARCH_FPA},
  {"armv3m",		ARM_ARCH_V3M,	 FPU_ARCH_FPA},
  {"armv4",		ARM_ARCH_V4,	 FPU_ARCH_FPA},
  {"armv4xm",		ARM_ARCH_V4xM,	 FPU_ARCH_FPA},
  {"armv4t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA},
  {"armv4txm",		ARM_ARCH_V4TxM,	 FPU_ARCH_FPA},
  {"armv5",		ARM_ARCH_V5,	 FPU_ARCH_VFP},
  {"armv5t",		ARM_ARCH_V5T,	 FPU_ARCH_VFP},
  {"armv5txm",		ARM_ARCH_V5TxM,	 FPU_ARCH_VFP},
  {"armv5te",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP},
  {"armv5texp",		ARM_ARCH_V5TExP, FPU_ARCH_VFP},
  {"armv5tej",		ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP},
  {"armv6",		ARM_ARCH_V6,	 FPU_ARCH_VFP},
  {"armv6j",		ARM_ARCH_V6,	 FPU_ARCH_VFP},
  {"armv6k",		ARM_ARCH_V6K,	 FPU_ARCH_VFP},
  {"armv6z",		ARM_ARCH_V6Z,	 FPU_ARCH_VFP},
  {"armv6zk",		ARM_ARCH_V6ZK,	 FPU_ARCH_VFP},
  {"armv6t2",		ARM_ARCH_V6T2,	 FPU_ARCH_VFP},
  {"armv6kt2",		ARM_ARCH_V6KT2,	 FPU_ARCH_VFP},
  {"armv6zt2",		ARM_ARCH_V6ZT2,	 FPU_ARCH_VFP},
  {"armv6zkt2",		ARM_ARCH_V6ZKT2, FPU_ARCH_VFP},
  {"armv6-m",		ARM_ARCH_V6M,	 FPU_ARCH_VFP},
  {"armv7",		ARM_ARCH_V7,	 FPU_ARCH_VFP},
  /* The official spelling of the ARMv7 profile variants is the dashed form.
     Accept the non-dashed form for compatibility with old toolchains.  */
  {"armv7a",		ARM_ARCH_V7A,	 FPU_ARCH_VFP},
  {"armv7r",		ARM_ARCH_V7R,	 FPU_ARCH_VFP},
  {"armv7m",		ARM_ARCH_V7M,	 FPU_ARCH_VFP},
  {"armv7-a",		ARM_ARCH_V7A,	 FPU_ARCH_VFP},
  {"armv7-r",		ARM_ARCH_V7R,	 FPU_ARCH_VFP},
  {"armv7-m",		ARM_ARCH_V7M,	 FPU_ARCH_VFP},
  {"xscale",		ARM_ARCH_XSCALE, FPU_ARCH_VFP},
  {"iwmmxt",		ARM_ARCH_IWMMXT, FPU_ARCH_VFP},
  {"iwmmxt2",		ARM_ARCH_IWMMXT2,FPU_ARCH_VFP},
  {NULL,		ARM_ARCH_NONE,	 ARM_ARCH_NONE}
};
/* ISA extensions in the co-processor space.  */
struct arm_option_cpu_value_table
{
  char *name;
  const arm_feature_set value;
};

static const struct arm_option_cpu_value_table arm_extensions[] =
{
  {"maverick",		ARM_FEATURE (0, ARM_CEXT_MAVERICK)},
  {"xscale",		ARM_FEATURE (0, ARM_CEXT_XSCALE)},
  {"iwmmxt",		ARM_FEATURE (0, ARM_CEXT_IWMMXT)},
  {"iwmmxt2",		ARM_FEATURE (0, ARM_CEXT_IWMMXT2)},
  {NULL,		ARM_ARCH_NONE}
};
/* This list should, at a minimum, contain all the fpu names
   recognized by GCC.  */
static const struct arm_option_cpu_value_table arm_fpus[] =
{
  {"softfpa",		FPU_NONE},
  {"fpe",		FPU_ARCH_FPE},
  {"fpe2",		FPU_ARCH_FPE},
  {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
  {"fpa",		FPU_ARCH_FPA},
  {"fpa10",		FPU_ARCH_FPA},
  {"fpa11",		FPU_ARCH_FPA},
  {"arm7500fe",		FPU_ARCH_FPA},
  {"softvfp",		FPU_ARCH_VFP},
  {"softvfp+vfp",	FPU_ARCH_VFP_V2},
  {"vfp",		FPU_ARCH_VFP_V2},
  {"vfp9",		FPU_ARCH_VFP_V2},
  {"vfp3",		FPU_ARCH_VFP_V3},  /* For backwards compatibility.  */
  {"vfp10",		FPU_ARCH_VFP_V2},
  {"vfp10-r0",		FPU_ARCH_VFP_V1},
  {"vfpxd",		FPU_ARCH_VFP_V1xD},
  {"vfpv2",		FPU_ARCH_VFP_V2},
  {"vfpv3",		FPU_ARCH_VFP_V3},
  {"vfpv3-d16",		FPU_ARCH_VFP_V3D16},
  {"arm1020t",		FPU_ARCH_VFP_V1},
  {"arm1020e",		FPU_ARCH_VFP_V2},
  {"arm1136jfs",	FPU_ARCH_VFP_V2},
  {"arm1136jf-s",	FPU_ARCH_VFP_V2},
  {"maverick",		FPU_ARCH_MAVERICK},
  {"neon",		FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-fp16",		FPU_ARCH_NEON_FP16},
  {NULL,		ARM_ARCH_NONE}
};
struct arm_option_value_table
{
  char *name;
  long value;
};

static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard",	ARM_FLOAT_ABI_HARD},
  {"softfp",	ARM_FLOAT_ABI_SOFTFP},
  {"soft",	ARM_FLOAT_ABI_SOFT},
  {NULL,	0}
};

#ifdef OBJ_ELF
/* We only know how to output GNU and ver 4/5 (AAELF) formats.  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu",	EF_ARM_EABI_UNKNOWN},
  {"4",		EF_ARM_EABI_VER4},
  {"5",		EF_ARM_EABI_VER5},
  {NULL,	0}
};
#endif
struct arm_long_option_table
{
  char * option;		/* Substring to match.	*/
  char * help;			/* Help information.  */
  int (* func) (char * subopt);	/* Function to decode sub-option.  */
  char * deprecated;		/* If non-null, print this message.  */
};
static bfd_boolean
arm_parse_extension (char * str, const arm_feature_set **opt_p)
{
  arm_feature_set *ext_set = xmalloc (sizeof (arm_feature_set));

  /* Copy the feature set, so that we can modify it.  */
  *ext_set = **opt_p;
  *opt_p = ext_set;

  while (str != NULL && *str != 0)
    {
      const struct arm_option_cpu_value_table * opt;
      char * ext;
      int optlen;

      if (*str != '+')
	{
	  as_bad (_("invalid architectural extension"));
	  return FALSE;
	}

      str++;
      ext = strchr (str, '+');

      if (ext != NULL)
	optlen = ext - str;
      else
	optlen = strlen (str);

      if (optlen == 0)
	{
	  as_bad (_("missing architectural extension"));
	  return FALSE;
	}

      for (opt = arm_extensions; opt->name != NULL; opt++)
	if (strncmp (opt->name, str, optlen) == 0)
	  {
	    ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
	    break;
	  }

      if (opt->name == NULL)
	{
	  as_bad (_("unknown architectural extension `%s'"), str);
	  return FALSE;
	}

      str = ext;
    }

  return TRUE;
}
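
/* Illustrative example (not part of the original comments): for
   "-mcpu=xscale+iwmmxt", arm_parse_cpu below matches the "xscale" prefix and
   then hands the "+iwmmxt" suffix to arm_parse_extension, which merges the
   ARM_CEXT_IWMMXT feature bits from arm_extensions[] into a writable copy of
   the CPU's feature set.  */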
static bfd_boolean
arm_parse_cpu (char * str)
{
  const struct arm_cpu_option_table * opt;
  char * ext = strchr (str, '+');
  int optlen;

  if (ext != NULL)
    optlen = ext - str;
  else
    optlen = strlen (str);

  if (optlen == 0)
    {
      as_bad (_("missing cpu name `%s'"), str);
      return FALSE;
    }

  for (opt = arm_cpus; opt->name != NULL; opt++)
    if (strncmp (opt->name, str, optlen) == 0)
      {
	mcpu_cpu_opt = &opt->value;
	mcpu_fpu_opt = &opt->default_fpu;
	if (opt->canonical_name)
	  strcpy (selected_cpu_name, opt->canonical_name);
	else
	  {
	    int i;

	    for (i = 0; i < optlen; i++)
	      selected_cpu_name[i] = TOUPPER (opt->name[i]);
	    selected_cpu_name[i] = 0;
	  }

	if (ext != NULL)
	  return arm_parse_extension (ext, &mcpu_cpu_opt);

	return TRUE;
      }

  as_bad (_("unknown cpu `%s'"), str);
  return FALSE;
}
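
/* For example, "-mcpu=arm920" matches the arm_cpus[] entry whose canonical
   name is "ARM920T", so selected_cpu_name is reported in that canonical form;
   an entry with a NULL canonical name, such as "arm9e", is simply
   upper-cased.  (Illustrative, based on the tables above.)  */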
static bfd_boolean
arm_parse_arch (char * str)
{
  const struct arm_arch_option_table *opt;
  char *ext = strchr (str, '+');
  int optlen;

  if (ext != NULL)
    optlen = ext - str;
  else
    optlen = strlen (str);

  if (optlen == 0)
    {
      as_bad (_("missing architecture name `%s'"), str);
      return FALSE;
    }

  for (opt = arm_archs; opt->name != NULL; opt++)
    if (streq (opt->name, str))
      {
	march_cpu_opt = &opt->value;
	march_fpu_opt = &opt->default_fpu;
	strcpy (selected_cpu_name, opt->name);

	if (ext != NULL)
	  return arm_parse_extension (ext, &march_cpu_opt);

	return TRUE;
      }

  as_bad (_("unknown architecture `%s'\n"), str);
  return FALSE;
}
static bfd_boolean
arm_parse_fpu (char * str)
{
  const struct arm_option_cpu_value_table * opt;

  for (opt = arm_fpus; opt->name != NULL; opt++)
    if (streq (opt->name, str))
      {
	mfpu_opt = &opt->value;
	return TRUE;
      }

  as_bad (_("unknown floating point format `%s'\n"), str);
  return FALSE;
}
static bfd_boolean
arm_parse_float_abi (char * str)
{
  const struct arm_option_value_table * opt;

  for (opt = arm_float_abis; opt->name != NULL; opt++)
    if (streq (opt->name, str))
      {
	mfloat_abi_opt = opt->value;
	return TRUE;
      }

  as_bad (_("unknown floating point abi `%s'\n"), str);
  return FALSE;
}
#ifdef OBJ_ELF
static bfd_boolean
arm_parse_eabi (char * str)
{
  const struct arm_option_value_table *opt;

  for (opt = arm_eabis; opt->name != NULL; opt++)
    if (streq (opt->name, str))
      {
	meabi_flags = opt->value;
	return TRUE;
      }

  as_bad (_("unknown EABI `%s'\n"), str);
  return FALSE;
}
#endif
static bfd_boolean
arm_parse_it_mode (char * str)
{
  bfd_boolean ret = TRUE;

  if (streq ("arm", str))
    implicit_it_mode = IMPLICIT_IT_MODE_ARM;
  else if (streq ("thumb", str))
    implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
  else if (streq ("always", str))
    implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
  else if (streq ("never", str))
    implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
  else
    {
      as_bad (_("unknown implicit IT mode `%s', should be "\
		"arm, thumb, always, or never."), str);
      ret = FALSE;
    }

  return ret;
}
struct arm_long_option_table arm_long_opts[] =
{
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   arm_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   arm_parse_arch, NULL},
  {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
   arm_parse_fpu, NULL},
  {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
   arm_parse_float_abi, NULL},
  {"meabi=", N_("<ver>\t\t  assemble for eabi version <ver>"),
   arm_parse_eabi, NULL},
  {"mimplicit-it=", N_("<mode>\t  controls implicit insertion of IT instructions"),
   arm_parse_it_mode, NULL},
  {NULL, NULL, 0, NULL}
};
int
md_parse_option (int c, char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case OPTION_FIX_V4BX:
      fix_v4bx = TRUE;
      break;

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.	*/
      return 0;

    default:
      for (opt = arm_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
	{
	  if (c == fopt->option[0]
	      && ((arg == NULL && fopt->option[1] == 0)
		  || streq (arg, fopt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && fopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(fopt->deprecated));

	      if (fopt->var != NULL)
		*fopt->var = &fopt->value;

	      return 1;
	    }
	}

      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
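
/* Dispatch example (illustrative): for "-mfloat-abi=hard", getopt passes
   c == 'm' and arg == "float-abi=hard"; neither the arm_opts nor the legacy
   tables match, so the arm_long_opts[] loop above matches the "mfloat-abi="
   prefix and calls arm_parse_float_abi ("hard"), which sets mfloat_abi_opt
   to ARM_FLOAT_ABI_HARD.  */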
void
md_show_usage (FILE * fp)
{
  struct arm_option_table *opt;
  struct arm_long_option_table *lopt;

  fprintf (fp, _(" ARM-specific assembler options:\n"));

  for (opt = arm_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));

  for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
  -EB                     assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
  -EL                     assemble code for a little-endian cpu\n"));
#endif

  fprintf (fp, _("\
  --fix-v4bx              Allow BX in ARMv4 code\n"));
}
#ifdef OBJ_ELF
typedef struct
{
  int val;
  arm_feature_set flags;
} cpu_arch_ver_table;

/* Mapping from CPU features to EABI CPU arch values.  Table must be sorted
   least features first.  */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
    {4, ARM_ARCH_V5TE},
    {5, ARM_ARCH_V5TEJ},
    {11, ARM_ARCH_V6M},
    {8, ARM_ARCH_V6T2},
    {10, ARM_ARCH_V7A},
    {10, ARM_ARCH_V7R},
    {10, ARM_ARCH_V7M},
    {0, ARM_ARCH_NONE}
};
/* Set an attribute if it has not already been set by the user.  */
static void
aeabi_set_attribute_int (int tag, int value)
{
  if (tag < 1
      || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
      || !attributes_set_explicitly[tag])
    bfd_elf_add_proc_attr_int (stdoutput, tag, value);
}

static void
aeabi_set_attribute_string (int tag, const char *value)
{
  if (tag < 1
      || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
      || !attributes_set_explicitly[tag])
    bfd_elf_add_proc_attr_string (stdoutput, tag, value);
}
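
/* Note: these helpers skip any attribute the user has already set explicitly
   (recorded in attributes_set_explicitly[], e.g. through a directive such as
   .eabi_attribute), so hand-written attributes take precedence over the
   defaults computed by aeabi_set_public_attributes below.  */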
/* Set the public EABI object attributes.  */
static void
aeabi_set_public_attributes (void)
{
  int arch;
  arm_feature_set flags;
  arm_feature_set tmp;
  const cpu_arch_ver_table *p;

  /* Choose the architecture based on the capabilities of the requested cpu
     (if any) and/or the instructions actually used.  */
  ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
  ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
  ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
  /* Allow the user to override the reported architecture.  */
  if (object_arch)
    {
      ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
      ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
    }

  tmp = flags;
  arch = 0;
  for (p = cpu_arch_ver; p->val; p++)
    {
      if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
	{
	  arch = p->val;
	  ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
	}
    }

  /* Tag_CPU_name.  */
  if (selected_cpu_name[0])
    {
      char *p;

      p = selected_cpu_name;
      if (strncmp (p, "armv", 4) == 0)
	{
	  int i;

	  p += 4;
	  for (i = 0; p[i]; i++)
	    p[i] = TOUPPER (p[i]);
	}
      aeabi_set_attribute_string (Tag_CPU_name, p);
    }
  /* Tag_CPU_arch.  */
  aeabi_set_attribute_int (Tag_CPU_arch, arch);
  /* Tag_CPU_arch_profile.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
    aeabi_set_attribute_int (Tag_CPU_arch_profile, 'A');
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
    aeabi_set_attribute_int (Tag_CPU_arch_profile, 'R');
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_m))
    aeabi_set_attribute_int (Tag_CPU_arch_profile, 'M');
  /* Tag_ARM_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
      || arch == 0)
    aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);
  /* Tag_THUMB_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
      || arch == 0)
    aeabi_set_attribute_int (Tag_THUMB_ISA_use,
			     ARM_CPU_HAS_FEATURE (flags, arm_arch_t2) ? 2 : 1);
  /* Tag_VFP_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
    aeabi_set_attribute_int (Tag_VFP_arch, 3);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3))
    aeabi_set_attribute_int (Tag_VFP_arch, 4);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
    aeabi_set_attribute_int (Tag_VFP_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
    aeabi_set_attribute_int (Tag_VFP_arch, 1);
  /* Tag_WMMX_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
    aeabi_set_attribute_int (Tag_WMMX_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
    aeabi_set_attribute_int (Tag_WMMX_arch, 1);
  /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
  /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_fp16))
    aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);
}
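
/* Worked example (illustrative, based on the tables and checks above):
   assembling with -mcpu=cortex-a8 merges the ARMv7-A feature set, so this
   function emits Tag_CPU_arch_profile = 'A', Tag_ARM_ISA_use = 1 and
   Tag_THUMB_ISA_use = 2 (Thumb-2), plus the VFP/NEON tags implied by that
   CPU's default FPU.  */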
/* Add the default contents for the .ARM.attributes section.  */
void
arm_md_end (void)
{
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
    return;

  aeabi_set_public_attributes ();
}
#endif /* OBJ_ELF */
/* Parse a .cpu directive.  */

static void
s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_cpu_option_table *opt;
  char *name;
  char saved_char;

  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  /* Skip the first "all" entry.  */
  for (opt = arm_cpus + 1; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	mcpu_cpu_opt = &opt->value;
	selected_cpu = opt->value;
	if (opt->canonical_name)
	  strcpy (selected_cpu_name, opt->canonical_name);
	else
	  {
	    int i;

	    for (i = 0; opt->name[i]; i++)
	      selected_cpu_name[i] = TOUPPER (opt->name[i]);
	    selected_cpu_name[i] = 0;
	  }
	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }

  as_bad (_("unknown cpu `%s'"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
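
/* Example (illustrative): a source file may contain

       .cpu arm926ej-s

   which selects that entry from arm_cpus[] exactly as -mcpu=arm926ej-s
   would on the command line.  */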
/* Parse a .arch directive.  */

static void
s_arm_arch (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_arch_option_table *opt;
  char saved_char;
  char *name;

  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  /* Skip the first "all" entry.  */
  for (opt = arm_archs + 1; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	mcpu_cpu_opt = &opt->value;
	selected_cpu = opt->value;
	strcpy (selected_cpu_name, opt->name);
	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }

  as_bad (_("unknown architecture `%s'\n"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
/* Parse a .object_arch directive.  */

static void
s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_arch_option_table *opt;
  char saved_char;
  char *name;

  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  /* Skip the first "all" entry.  */
  for (opt = arm_archs + 1; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	object_arch = &opt->value;
	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }

  as_bad (_("unknown architecture `%s'\n"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
/* Parse a .fpu directive.  */

static void
s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_option_cpu_value_table *opt;
  char saved_char;
  char *name;

  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  for (opt = arm_fpus; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	mfpu_opt = &opt->value;
	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }

  as_bad (_("unknown floating point format `%s'\n"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
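
/* Example (illustrative): ".fpu vfpv3" or ".fpu neon" switches the FPU
   feature set mid-file, using the same arm_fpus[] table as -mfpu=.  */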
/* Copy symbol information.  */

void
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
}
#ifdef OBJ_ELF
/* Given a symbolic attribute NAME, return the proper integer value.
   Returns -1 if the attribute is not known.  */

int
arm_convert_symbolic_attribute (const char *name)
{
  static const struct
  {
    const char * name;
    const int	 tag;
  }
  attribute_table[] =
    {
      /* When you modify this table you should
	 also modify the list in doc/c-arm.texi.  */
#define T(tag) {#tag, tag}
      T (Tag_CPU_raw_name),
      T (Tag_CPU_arch_profile),
      T (Tag_ARM_ISA_use),
      T (Tag_THUMB_ISA_use),
      T (Tag_Advanced_SIMD_arch),
      T (Tag_PCS_config),
      T (Tag_ABI_PCS_R9_use),
      T (Tag_ABI_PCS_RW_data),
      T (Tag_ABI_PCS_RO_data),
      T (Tag_ABI_PCS_GOT_use),
      T (Tag_ABI_PCS_wchar_t),
      T (Tag_ABI_FP_rounding),
      T (Tag_ABI_FP_denormal),
      T (Tag_ABI_FP_exceptions),
      T (Tag_ABI_FP_user_exceptions),
      T (Tag_ABI_FP_number_model),
      T (Tag_ABI_align8_needed),
      T (Tag_ABI_align8_preserved),
      T (Tag_ABI_enum_size),
      T (Tag_ABI_HardFP_use),
      T (Tag_ABI_VFP_args),
      T (Tag_ABI_WMMX_args),
      T (Tag_ABI_optimization_goals),
      T (Tag_ABI_FP_optimization_goals),
      T (Tag_compatibility),
      T (Tag_CPU_unaligned_access),
      T (Tag_VFP_HP_extension),
      T (Tag_ABI_FP_16bit_format),
      T (Tag_nodefaults),
      T (Tag_also_compatible_with),
      T (Tag_conformance),
      T (Tag_Virtualization_use),
      T (Tag_MPextension_use)
#undef T
    };
  unsigned int i;

  for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
    if (streq (name, attribute_table[i].name))
      return attribute_table[i].tag;

  return -1;
}
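
/* Example (illustrative): arm_convert_symbolic_attribute ("Tag_CPU_arch_profile")
   returns the numeric value of Tag_CPU_arch_profile, so a symbolic tag name
   can be used wherever an attribute number is expected; unknown names yield
   -1.  */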
/* Apply sym value for relocations only in the case that
   they are for local symbols and you have the respective
   architectural feature for blx and simple switches.  */
int
arm_apply_sym_value (struct fix * fixP)
{
  if (fixP->fx_addsy
      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
      && !S_IS_EXTERNAL (fixP->fx_addsy))
    {
      switch (fixP->fx_r_type)
	{
	case BFD_RELOC_ARM_PCREL_BLX:
	case BFD_RELOC_THUMB_PCREL_BRANCH23:
	  if (ARM_IS_FUNC (fixP->fx_addsy))
	    return 1;
	  break;

	case BFD_RELOC_ARM_PCREL_CALL:
	case BFD_RELOC_THUMB_PCREL_BLX:
	  if (THUMB_IS_FUNC (fixP->fx_addsy))
	    return 1;
	  break;

	default:
	  break;
	}
    }

  return 0;
}
#endif /* OBJ_ELF */