/* tc-arm.c -- Assemble for the ARM
   Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
   Free Software Foundation, Inc.
   Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
	Modified by David Taylor (dtaylor@armltd.co.uk)
	Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
	Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
	Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)

   This file is part of GAS, the GNU Assembler.

   GAS is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GAS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GAS; see the file COPYING.  If not, write to the Free
   Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
   02110-1301, USA.  */
#include "safe-ctype.h"
#include "opcode/arm.h"
#include "dw2gencfi.h"
#include "dwarf2dbg.h"

#define WARN_DEPRECATED 1

/* Must be at least the size of the largest unwind opcode (currently two).  */
#define ARM_OPCODE_CHUNK_SIZE 8
/* This structure holds the unwinding state.  */

  symbolS *       table_entry;
  symbolS *       personality_routine;
  int             personality_index;
  /* The segment containing the function.  */
  /* Opcodes generated from this function.  */
  unsigned char * opcodes;
  /* The number of bytes pushed to the stack.  */
  /* We don't add stack adjustment opcodes immediately so that we can merge
     multiple adjustments.  We can also omit the final adjustment
     when using a frame pointer.  */
  offsetT         pending_offset;
  /* These two fields are set by both unwind_movsp and unwind_setfp.  They
     hold the reg+offset to use when restoring sp from a frame pointer.  */
  /* Nonzero if an unwind_setfp directive has been seen.  */
  /* Nonzero if the last opcode restores sp from fp_reg.  */
  unsigned        sp_restored:1;

/* Bit N indicates that an R_ARM_NONE relocation has been output for
   __aeabi_unwind_cpp_prN already if set.  This enables dependencies to be
   emitted only once per section, to save unnecessary bloat.  */
static unsigned int marked_pr_dependency = 0;
/* Results from operand parsing worker functions.  */

  PARSE_OPERAND_SUCCESS,
  PARSE_OPERAND_FAIL_NO_BACKTRACK
} parse_operand_result;

  ARM_FLOAT_ABI_SOFTFP,
/* Types of processor to assemble for.  */
#if defined __XSCALE__
#define CPU_DEFAULT	ARM_ARCH_XSCALE
#if defined __thumb__
#define CPU_DEFAULT	ARM_ARCH_V5T

# define FPU_DEFAULT FPU_ARCH_FPA
# elif defined (TE_NetBSD)
#  define FPU_DEFAULT FPU_ARCH_VFP	/* Soft-float, but VFP order.  */
   /* Legacy a.out format.  */
#  define FPU_DEFAULT FPU_ARCH_FPA	/* Soft-float, but FPA order.  */
# elif defined (TE_VXWORKS)
#  define FPU_DEFAULT FPU_ARCH_VFP	/* Soft-float, VFP order.  */
   /* For backwards compatibility, default to FPA.  */
#  define FPU_DEFAULT FPU_ARCH_FPA
#endif /* ifndef FPU_DEFAULT */
#define streq(a, b)	  (strcmp (a, b) == 0)

static arm_feature_set cpu_variant;
static arm_feature_set arm_arch_used;
static arm_feature_set thumb_arch_used;

/* Flags stored in private area of BFD structure.  */
static int uses_apcs_26      = FALSE;
static int atpcs	     = FALSE;
static int support_interwork = FALSE;
static int uses_apcs_float   = FALSE;
static int pic_code	     = FALSE;
/* Variables that we set while parsing command-line options.  Once all
   options have been read we re-process these values to set the real
   assembly flags.  */
static const arm_feature_set *legacy_cpu = NULL;
static const arm_feature_set *legacy_fpu = NULL;

static const arm_feature_set *mcpu_cpu_opt = NULL;
static const arm_feature_set *mcpu_fpu_opt = NULL;
static const arm_feature_set *march_cpu_opt = NULL;
static const arm_feature_set *march_fpu_opt = NULL;
static const arm_feature_set *mfpu_opt = NULL;
static const arm_feature_set *object_arch = NULL;
/* Constants for known architecture features.  */
static const arm_feature_set fpu_default = FPU_DEFAULT;
static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;

static const arm_feature_set cpu_default = CPU_DEFAULT;
static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0);
static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V2, 0);
static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0);
static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0);
static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0);
static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0);
static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0);
static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0);
static const arm_feature_set arm_ext_v4t_5 =
  ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0);
static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0);
static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0);
static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0);
static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0);
static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0);
static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0);
static const arm_feature_set arm_ext_v6z = ARM_FEATURE (ARM_EXT_V6Z, 0);
static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0);
static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0);
static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0);
static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0);
static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0);
static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0);
static const arm_feature_set arm_ext_v7m = ARM_FEATURE (ARM_EXT_V7M, 0);

static const arm_feature_set arm_arch_any = ARM_ANY;
static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1);
static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
static const arm_feature_set arm_cext_iwmmxt2 =
  ARM_FEATURE (0, ARM_CEXT_IWMMXT2);
static const arm_feature_set arm_cext_iwmmxt =
  ARM_FEATURE (0, ARM_CEXT_IWMMXT);
static const arm_feature_set arm_cext_xscale =
  ARM_FEATURE (0, ARM_CEXT_XSCALE);
static const arm_feature_set arm_cext_maverick =
  ARM_FEATURE (0, ARM_CEXT_MAVERICK);
static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1);
static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2);
static const arm_feature_set fpu_vfp_ext_v1xd =
  ARM_FEATURE (0, FPU_VFP_EXT_V1xD);
static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1);
static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2);
static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3);
static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1);
static const arm_feature_set fpu_vfp_v3_or_neon_ext =
  ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
static int mfloat_abi_opt = -1;
/* Record user cpu selection for object attributes.  */
static arm_feature_set selected_cpu = ARM_ARCH_NONE;
/* Must be long enough to hold any of the names in arm_cpus.  */
static char selected_cpu_name[16];

#ifdef EABI_DEFAULT
static int meabi_flags = EABI_DEFAULT;
#else
static int meabi_flags = EF_ARM_EABI_UNKNOWN;
#endif

  return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);

/* Pre-defined "_GLOBAL_OFFSET_TABLE_"  */
symbolS * GOT_symbol;
/* 0: assemble for ARM,
   1: assemble for Thumb,
   2: assemble for Thumb even though target CPU does not support thumb
      instructions.  */
static int thumb_mode = 0;

/* If unified_syntax is true, we are processing the new unified
   ARM/Thumb syntax.  Important differences from the old ARM mode:

     - Immediate operands do not require a # prefix.
     - Conditional affixes always appear at the end of the
       instruction.  (For backward compatibility, those instructions
       that formerly had them in the middle, continue to accept them
       there.)
     - The IT instruction may appear, and if it does is validated
       against subsequent conditional affixes.  It does not generate
       machine code.

   Important differences from the old Thumb mode:

     - Immediate operands do not require a # prefix.
     - Most of the V6T2 instructions are only available in unified mode.
     - The .N and .W suffixes are recognized and honored (it is an error
       if they cannot be honored).
     - All instructions set the flags if and only if they have an 's' affix.
     - Conditional affixes may be used.  They are validated against
       preceding IT instructions.  Unlike ARM mode, you cannot use a
       conditional affix except in the scope of an IT instruction.  */

static bfd_boolean unified_syntax = FALSE;
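/* Illustration (not part of the original source): under ".syntax unified" the
   same spelling, e.g. "add r0, r1, #4" (the '#' being optional), assembles in
   both ARM and Thumb mode, and a conditional form such as "addeq" carries its
   condition at the end of the mnemonic; in Thumb code that conditional form
   must sit inside a preceding IT block, e.g. "it eq" followed by
   "addeq r0, r0, #1".  */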
  enum neon_el_type type;

#define NEON_MAX_TYPE_ELS 4

  struct neon_type_el el[NEON_MAX_TYPE_ELS];

  unsigned long instruction;

  /* "uncond_value" is set to the value in place of the conditional field in
     unconditional versions of the instruction, or -1 if nothing is
     appropriate.  */
  struct neon_type vectype;
  /* Set to the opcode if the instruction needs relaxation.
     Zero if the instruction is not relaxed.  */

    bfd_reloc_code_real_type type;

    struct neon_type_el vectype;
    unsigned present    : 1;  /* Operand present.  */
    unsigned isreg      : 1;  /* Operand was a register.  */
    unsigned immisreg   : 1;  /* .imm field is a second register.  */
    unsigned isscalar   : 1;  /* Operand is a (Neon) scalar.  */
    unsigned immisalign : 1;  /* Immediate is an alignment specifier.  */
    unsigned immisfloat : 1;  /* Immediate was parsed as a float.  */
    /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
       instructions.  This allows us to disambiguate ARM <-> vector insns.  */
    unsigned regisimm   : 1;  /* 64-bit immediate, reg forms high 32 bits.  */
    unsigned isvec      : 1;  /* Is a single, double or quad VFP/Neon reg.  */
    unsigned isquad     : 1;  /* Operand is Neon quad-precision register.  */
    unsigned issingle   : 1;  /* Operand is VFP single-precision register.  */
    unsigned hasreloc   : 1;  /* Operand has relocation suffix.  */
    unsigned writeback  : 1;  /* Operand has trailing !  */
    unsigned preind     : 1;  /* Preindexed address.  */
    unsigned postind    : 1;  /* Postindexed address.  */
    unsigned negative   : 1;  /* Index register was negated.  */
    unsigned shifted    : 1;  /* Shift applied to operation.  */
    unsigned shift_kind : 3;  /* Shift operation (enum shift_kind).  */

static struct arm_it inst;

#define NUM_FLOAT_VALS 8

const char * fp_const[] =
{
  "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
};

/* Number of littlenums required to hold an extended precision number.  */
#define MAX_LITTLENUMS 6

LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];

#define CP_T_X   0x00008000
#define CP_T_Y   0x00400000

#define CONDS_BIT        0x00100000
#define LOAD_BIT         0x00100000

#define DOUBLE_LOAD_FLAG 0x00000001

  const char * template;

#define COND_ALWAYS 0xE

  const char *template;

struct asm_barrier_opt
{
  const char *template;

/* The bit that distinguishes CPSR and SPSR.  */
#define SPSR_BIT   (1 << 22)

/* The individual PSR flag bits.  */
#define PSR_c	(1 << 16)
#define PSR_x	(1 << 17)
#define PSR_s	(1 << 18)
#define PSR_f	(1 << 19)
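/* Illustration (not part of the original source): in "msr CPSR_fc, r0" the
   'f' and 'c' field letters select PSR_f and PSR_c, so the mask encoded into
   the instruction is PSR_f | PSR_c; naming SPSR instead of CPSR additionally
   sets SPSR_BIT.  */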
  bfd_reloc_code_real_type reloc;

  VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
  VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn

  VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
/* Bits for DEFINED field in neon_typed_alias.  */
#define NTA_HASTYPE  1
#define NTA_HASINDEX 2

struct neon_typed_alias
{
  unsigned char defined;
  struct neon_type_el eltype;

/* ARM register categories.  This includes coprocessor numbers and various
   architecture extensions' registers.  */

/* Structure for a hash table entry for a register.
   If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
   information which states whether a vector type or index is specified (for a
   register alias created with .dn or .qn).  Otherwise NEON should be NULL.  */

  unsigned char number;
  unsigned char builtin;
  struct neon_typed_alias *neon;
/* Diagnostics used when we don't get a register of the expected type.  */
const char *const reg_expected_msgs[] =
{
  N_("ARM register expected"),
  N_("bad or missing co-processor number"),
  N_("co-processor register expected"),
  N_("FPA register expected"),
  N_("VFP single precision register expected"),
  N_("VFP/Neon double precision register expected"),
  N_("Neon quad precision register expected"),
  N_("VFP single or double precision register expected"),
  N_("Neon double or quad precision register expected"),
  N_("VFP single, double or Neon quad precision register expected"),
  N_("VFP system register expected"),
  N_("Maverick MVF register expected"),
  N_("Maverick MVD register expected"),
  N_("Maverick MVFX register expected"),
  N_("Maverick MVDX register expected"),
  N_("Maverick MVAX register expected"),
  N_("Maverick DSPSC register expected"),
  N_("iWMMXt data register expected"),
  N_("iWMMXt control register expected"),
  N_("iWMMXt scalar register expected"),
  N_("XScale accumulator register expected"),
};

/* Some well known registers that we refer to directly elsewhere.  */
/* ARM instructions take 4 bytes in the object file, Thumb instructions
   take 2.  */

  /* Basic string to match.  */
  const char *template;

  /* Parameters to instruction.  */
  unsigned char operands[8];

  /* Conditional tag - see opcode_lookup.  */
  unsigned int tag : 4;

  /* Basic instruction code.  */
  unsigned int avalue : 28;

  /* Thumb-format instruction code.  */

  /* Which architecture variant provides this instruction.  */
  const arm_feature_set *avariant;
  const arm_feature_set *tvariant;

  /* Function to call to encode instruction in ARM format.  */
  void (* aencode) (void);

  /* Function to call to encode instruction in Thumb format.  */
  void (* tencode) (void);
/* Defines for various bits that we will want to toggle.  */
#define INST_IMMEDIATE	0x02000000
#define OFFSET_REG	0x02000000
#define HWOFFSET_IMM	0x00400000
#define SHIFT_BY_REG	0x00000010
#define PRE_INDEX	0x01000000
#define INDEX_UP	0x00800000
#define WRITE_BACK	0x00200000
#define LDM_TYPE_2_OR_3	0x00400000
#define CPSI_MMOD	0x00020000

#define LITERAL_MASK	0xf000f000
#define OPCODE_MASK	0xfe1fffff
#define V4_STR_BIT	0x00000020

#define T2_SUBS_PC_LR	0xf3de8f00

#define DATA_OP_SHIFT	21

#define T2_OPCODE_MASK	0xfe1fffff
#define T2_DATA_OP_SHIFT 21

/* Codes to distinguish the arithmetic instructions.  */
#define OPCODE_CMP	10
#define OPCODE_CMN	11
#define OPCODE_ORR	12
#define OPCODE_MOV	13
#define OPCODE_BIC	14
#define OPCODE_MVN	15

#define T2_OPCODE_AND	0
#define T2_OPCODE_BIC	1
#define T2_OPCODE_ORR	2
#define T2_OPCODE_ORN	3
#define T2_OPCODE_EOR	4
#define T2_OPCODE_ADD	8
#define T2_OPCODE_ADC	10
#define T2_OPCODE_SBC	11
#define T2_OPCODE_SUB	13
#define T2_OPCODE_RSB	14

#define T_OPCODE_MUL 0x4340
#define T_OPCODE_TST 0x4200
#define T_OPCODE_CMN 0x42c0
#define T_OPCODE_NEG 0x4240
#define T_OPCODE_MVN 0x43c0

#define T_OPCODE_ADD_R3	0x1800
#define T_OPCODE_SUB_R3 0x1a00
#define T_OPCODE_ADD_HI 0x4400
#define T_OPCODE_ADD_ST 0xb000
#define T_OPCODE_SUB_ST 0xb080
#define T_OPCODE_ADD_SP 0xa800
#define T_OPCODE_ADD_PC 0xa000
#define T_OPCODE_ADD_I8 0x3000
#define T_OPCODE_SUB_I8 0x3800
#define T_OPCODE_ADD_I3 0x1c00
#define T_OPCODE_SUB_I3 0x1e00

#define T_OPCODE_ASR_R	0x4100
#define T_OPCODE_LSL_R	0x4080
#define T_OPCODE_LSR_R	0x40c0
#define T_OPCODE_ROR_R	0x41c0
#define T_OPCODE_ASR_I	0x1000
#define T_OPCODE_LSL_I	0x0000
#define T_OPCODE_LSR_I	0x0800

#define T_OPCODE_MOV_I8	0x2000
#define T_OPCODE_CMP_I8 0x2800
#define T_OPCODE_CMP_LR 0x4280
#define T_OPCODE_MOV_HR 0x4600
#define T_OPCODE_CMP_HR 0x4500

#define T_OPCODE_LDR_PC 0x4800
#define T_OPCODE_LDR_SP 0x9800
#define T_OPCODE_STR_SP 0x9000
#define T_OPCODE_LDR_IW 0x6800
#define T_OPCODE_STR_IW 0x6000
#define T_OPCODE_LDR_IH 0x8800
#define T_OPCODE_STR_IH 0x8000
#define T_OPCODE_LDR_IB 0x7800
#define T_OPCODE_STR_IB 0x7000
#define T_OPCODE_LDR_RW 0x5800
#define T_OPCODE_STR_RW 0x5000
#define T_OPCODE_LDR_RH 0x5a00
#define T_OPCODE_STR_RH 0x5200
#define T_OPCODE_LDR_RB 0x5c00
#define T_OPCODE_STR_RB 0x5400

#define T_OPCODE_PUSH	0xb400
#define T_OPCODE_POP	0xbc00

#define T_OPCODE_BRANCH 0xe000

#define THUMB_SIZE	2	/* Size of thumb instruction.  */
#define THUMB_PP_PC_LR 0x0100
#define THUMB_LOAD_BIT 0x0800
#define THUMB2_LOAD_BIT 0x00100000

#define BAD_ARGS	_("bad arguments to instruction")
#define BAD_PC		_("r15 not allowed here")
#define BAD_COND	_("instruction cannot be conditional")
#define BAD_OVERLAP	_("registers may not be the same")
#define BAD_HIREG	_("lo register required")
#define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
#define BAD_ADDR_MODE	_("instruction does not accept this addressing mode")
#define BAD_BRANCH	_("branch must be last instruction in IT block")
#define BAD_NOT_IT	_("instruction not allowed in IT block")
#define BAD_FPU		_("selected FPU does not support instruction")
static struct hash_control *arm_ops_hsh;
static struct hash_control *arm_cond_hsh;
static struct hash_control *arm_shift_hsh;
static struct hash_control *arm_psr_hsh;
static struct hash_control *arm_v7m_psr_hsh;
static struct hash_control *arm_reg_hsh;
static struct hash_control *arm_reloc_hsh;
static struct hash_control *arm_barrier_opt_hsh;
/* Stuff needed to resolve the label ambiguity.  */

symbolS *  last_label_seen;
static int label_is_thumb_function_name = FALSE;

/* Literal pool structure.  Held on a per-section
   and per-sub-section basis.  */

#define MAX_LITERAL_POOL_SIZE 1024
typedef struct literal_pool
{
  expressionS	 literals[MAX_LITERAL_POOL_SIZE];
  unsigned int	 next_free_entry;

  struct literal_pool * next;

/* Pointer to a linked list of literal pools.  */
literal_pool * list_of_pools = NULL;

/* State variables for IT block handling.  */
static int current_it_mask = 0;
static int current_cc;
/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.  */
const char comment_chars[] = "@";

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.  */
/* Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.  */
/* Also note that comments like this one will always work.  */
const char line_comment_chars[] = "#";

const char line_separator_chars[] = ";";

/* Chars that can be used to separate mant
   from exp in floating point numbers.  */
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant.  */
const char FLT_CHARS[] = "rRsSfFdDxXeEpP";

/* Prefix characters that indicate the start of an immediate
   value.  */
#define is_immediate_prefix(C) ((C) == '#' || (C) == '$')

/* Separator character handling.  */

#define skip_whitespace(str)  do { if (*(str) == ' ') ++(str); } while (0)

static int
skip_past_char (char ** str, char c)

#define skip_past_comma(str) skip_past_char (str, ',')
/* Arithmetic expressions (possibly involving symbols).  */

/* Return TRUE if anything in the expression is a bignum.  */

static int
walk_no_bignums (symbolS * sp)
{
  if (symbol_get_value_expression (sp)->X_op == O_big)
    return 1;

  if (symbol_get_value_expression (sp)->X_add_symbol)
    return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
	    || (symbol_get_value_expression (sp)->X_op_symbol
		&& walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));

  return 0;
}

static int in_my_get_expression = 0;

/* Third argument to my_get_expression.  */
#define GE_NO_PREFIX 0
#define GE_IMM_PREFIX 1
#define GE_OPT_PREFIX 2
/* This is a bit of a hack.  Use an optional prefix, and also allow big (64-bit)
   immediates, as can be used in Neon VMVN and VMOV immediate instructions.  */
#define GE_OPT_PREFIX_BIG 3
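/* Illustration (not part of the original source): an operand written "#42"
   satisfies GE_IMM_PREFIX (the '#' or '$' is required there), "42" on its own
   is accepted under GE_OPT_PREFIX, and GE_OPT_PREFIX_BIG additionally lets a
   64-bit constant through for Neon VMOV/VMVN immediates.  */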
static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  /* In unified syntax, all prefixes are optional.  */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
		  : GE_OPT_PREFIX;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
	{
	  inst.error = _("immediate expression requires a # prefix");
	  return FAIL;
	}
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
	(*str)++;
      break;
    }

  memset (ep, 0, sizeof (expressionS));

  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = 1;
  seg = expression (ep);
  in_my_get_expression = 0;

  if (ep->X_op == O_illegal)
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
	inst.error = _("bad expression");
      return 1;
    }

  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      inst.error = _("bad segment");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.  Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
	  || (ep->X_add_symbol
	      && (walk_no_bignums (ep->X_add_symbol)
		  || (ep->X_op_symbol
		      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return 0;
}
/* Turn a string in input_line_pointer into a floating point constant
   of type TYPE, and store the appropriate bytes in *LITP.  The number
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   returned, or NULL on OK.

   Note that fp constants aren't represented in the normal way on the ARM.
   In big endian mode, things are as expected.  However, in little endian
   mode fp constants are big-endian word-wise, and little-endian byte-wise
   within the words.  For example, (double) 1.1 in big endian mode is
   the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
   the byte sequence 99 99 f1 3f 9a 99 99 99.

   ??? The format of 12 byte floats is uncertain according to gcc's arm.h.  */

char *
md_atof (int type, char * litP, int * sizeP)
{
  LITTLENUM_TYPE words[MAX_LITTLENUMS];

      return _("Unrecognized or unsupported floating point constant");

  t = atof_ieee (input_line_pointer, type, words);
  input_line_pointer = t;
  *sizeP = prec * sizeof (LITTLENUM_TYPE);

  if (target_big_endian)
    {
      for (i = 0; i < prec; i++)
	{
	  md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
	  litP += sizeof (LITTLENUM_TYPE);
	}
    }
  else
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	for (i = prec - 1; i >= 0; i--)
	  {
	    md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
	    litP += sizeof (LITTLENUM_TYPE);
	  }
      else
	/* For a 4 byte float the order of elements in `words' is 1 0.
	   For an 8 byte float the order is 1 0 3 2.  */
	for (i = 0; i < prec; i += 2)
	  {
	    md_number_to_chars (litP, (valueT) words[i + 1],
				sizeof (LITTLENUM_TYPE));
	    md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
				(valueT) words[i], sizeof (LITTLENUM_TYPE));
	    litP += 2 * sizeof (LITTLENUM_TYPE);
	  }
    }
}
/* We handle all bad expressions here, so that we can report the faulty
   instruction in the error message.  */
void
md_operand (expressionS * expr)
{
  if (in_my_get_expression)
    expr->X_op = O_illegal;
}

/* Immediate values.  */

/* Generic immediate-value read function for use in directives.
   Accepts anything that 'expression' can fold to a constant.
   *val receives the number.  */

static int
immediate_for_directive (int *val)
{
  exp.X_op = O_illegal;

  if (is_immediate_prefix (*input_line_pointer))
    {
      input_line_pointer++;
    }

  if (exp.X_op != O_constant)
    {
      as_bad (_("expected #constant"));
      ignore_rest_of_line ();
    }

  *val = exp.X_add_number;
}
/* Register parsing.  */

/* Generic register parser.  CCP points to what should be the
   beginning of a register name.  If it is indeed a valid register
   name, advance CCP over it and return the reg_entry structure;
   otherwise return NULL.  Does not issue diagnostics.  */

static struct reg_entry *
arm_reg_parse_multi (char **ccp)
{
  struct reg_entry *reg;

#ifdef REGISTER_PREFIX
  if (*start != REGISTER_PREFIX)
#endif
#ifdef OPTIONAL_REGISTER_PREFIX
  if (*start == OPTIONAL_REGISTER_PREFIX)
#endif

  if (!ISALPHA (*p) || !is_name_beginner (*p))

  while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');

  reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);

static int
arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
		    enum arm_reg_type type)
{
  /* Alternative syntaxes are accepted for a few register classes.  */

      /* Generic coprocessor register names are allowed for these.  */
      if (reg && reg->type == REG_TYPE_CN)

      /* For backward compatibility, a bare number is valid here.  */
      {
	unsigned long processor = strtoul (start, ccp, 10);
	if (*ccp != start && processor <= 15)
      }

    case REG_TYPE_MMXWC:
      /* WC includes WCG.  ??? I'm not sure this is true for all
	 instructions that take WC registers.  */
      if (reg && reg->type == REG_TYPE_MMXWCG)

/* As arm_reg_parse_multi, but the register must be of type TYPE, and the
   return value is the register number or FAIL.  */

static int
arm_reg_parse (char **ccp, enum arm_reg_type type)
{
  struct reg_entry *reg = arm_reg_parse_multi (ccp);

  /* Do not allow a scalar (reg+index) to parse as a register.  */
  if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))

  if (reg && reg->type == type)

  if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)

/* Parse a Neon type specifier.  *STR should point at the leading '.'
   character.  Does no verification at this stage that the type fits the opcode
   properly.

   Can all be legally parsed by this function.

   Fills in neon_type struct pointer with parsed information, and updates STR
   to point after the parsed type specifier.  Returns SUCCESS if this was a legal
   type, FAIL if not.  */

static int
parse_neon_type (struct neon_type *type, char **str)
{
  while (type->elems < NEON_MAX_TYPE_ELS)
    {
      enum neon_el_type thistype = NT_untyped;
      unsigned thissize = -1u;

      /* Just a size without an explicit type.  */

      switch (TOLOWER (*ptr))
	{
	case 'i': thistype = NT_integer; break;
	case 'f': thistype = NT_float; break;
	case 'p': thistype = NT_poly; break;
	case 's': thistype = NT_signed; break;
	case 'u': thistype = NT_unsigned; break;
	  thistype = NT_float;
	  as_bad (_("unexpected character `%c' in type specifier"), *ptr);
	}

      /* .f is an abbreviation for .f32.  */
      if (thistype == NT_float && !ISDIGIT (*ptr))

      thissize = strtoul (ptr, &ptr, 10);

      if (thissize != 8 && thissize != 16 && thissize != 32
	as_bad (_("bad size %d in type specifier"), thissize);

      type->el[type->elems].type = thistype;
      type->el[type->elems].size = thissize;
    }

  /* Empty/missing type is not a successful parse.  */
  if (type->elems == 0)
    return FAIL;

  return SUCCESS;
}
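/* Illustration (not part of the original source): type specifiers of the form
   ".i8", ".f32" or chained as ".u16.s16" are what parse_neon_type accepts;
   ".f" on its own is read as ".f32", and a bare ".32" yields an untyped
   element of size 32.  */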
/* Errors may be set multiple times during parsing or bit encoding
   (particularly in the Neon bits), but usually the earliest error which is set
   will be the most meaningful.  Avoid overwriting it with later (cascading)
   errors by calling this function.  */

static void
first_error (const char *err)

/* Parse a single type, e.g. ".s32", leading period included.  */

static int
parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
{
  struct neon_type optype;

  if (parse_neon_type (&optype, &str) == SUCCESS)
    {
      if (optype.elems == 1)
	*vectype = optype.el[0];
      else
	first_error (_("only one type should be specified for operand"));
    }
  else
    first_error (_("vector type expected"));

/* Special meanings for indices (which have a range of 0-7), which will fit
   into a 4-bit integer.  */

#define NEON_ALL_LANES		15
#define NEON_INTERLEAVE_LANES	14

/* Parse either a register or a scalar, with an optional type.  Return the
   register number, and optionally fill in the actual type of the register
   when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
   type/index information in *TYPEINFO.  */

static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
			   enum arm_reg_type *rtype,
			   struct neon_typed_alias *typeinfo)
{
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register.  Note these are mutually
     exclusive with the Neon syntax extensions.  */
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);

  /* Undo polymorphism when a set of register types may be accepted.  */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
	      || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_MMXWC
	  && (reg->type == REG_TYPE_MMXWCG)))

  if (type != reg->type)

  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
	first_error (_("can't redefine type for operand"));

      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  if (skip_past_char (&str, '[') == SUCCESS)
    {
      if (type != REG_TYPE_VFD)
	first_error (_("only D registers may be indexed"));

      if ((atype.defined & NTA_HASINDEX) != 0)
	first_error (_("can't change index for operand"));

      atype.defined |= NTA_HASINDEX;

      if (skip_past_char (&str, ']') == SUCCESS)
	atype.index = NEON_ALL_LANES;
      else
	{
	  my_get_expression (&exp, &str, GE_NO_PREFIX);

	  if (exp.X_op != O_constant)
	    first_error (_("constant expression required"));

	  if (skip_past_char (&str, ']') == FAIL)

	  atype.index = exp.X_add_number;
	}
    }

/* Like arm_reg_parse, but allow the following extra features:
    - If RTYPE is non-zero, return the (possibly restricted) type of the
      register (e.g. Neon double or quad reg when either has been requested).
    - If this is a Neon vector type with additional type information, fill
      in the struct pointed to by VECTYPE (if non-NULL).
   This function will fault on encountering a scalar.  */

static int
arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
		     enum arm_reg_type *rtype, struct neon_type_el *vectype)
{
  struct neon_typed_alias atype;
  int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);

  /* Do not allow a scalar (reg+index) to parse as a register.  */
  if ((atype.defined & NTA_HASINDEX) != 0)
    first_error (_("register operand expected, but got scalar"));

  if (vectype)
    *vectype = atype.eltype;

#define NEON_SCALAR_REG(X)	((X) >> 4)
#define NEON_SCALAR_INDEX(X)	((X) & 15)

/* Parse a Neon scalar.  Most of the time when we're parsing a scalar, we don't
   have enough information to be able to do a good job bounds-checking.  So, we
   just do easy checks here, and do further checks later.  */

static int
parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
{
  struct neon_typed_alias atype;

  reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);

  if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)

  if (atype.index == NEON_ALL_LANES)
    first_error (_("scalar must have an index"));
  else if (atype.index >= 64 / elsize)
    first_error (_("scalar index out of range"));

  if (type)
    *type = atype.eltype;

  return reg * 16 + atype.index;
}
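/* Illustration (not part of the original source): the scalar "d3[2]" is
   returned by parse_scalar as 3 * 16 + 2 = 50, so NEON_SCALAR_REG (50) == 3
   and NEON_SCALAR_INDEX (50) == 2.  */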
/* Parse an ARM register list.  Returns the bitmask, or FAIL.  */

static long
parse_reg_list (char ** strp)
{
  char * str = * strp;

  /* We come back here if we get ranges concatenated by '+' or '|'.  */

	  if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
	    first_error (_(reg_expected_msgs[REG_TYPE_RN]));

	      first_error (_("bad range in register list"));

	      for (i = cur_reg + 1; i < reg; i++)
		{
		  if (range & (1 << i))
		    as_tsktsk
		      (_("Warning: duplicated register (r%d) in register list"),
		       i);
		}

	  if (range & (1 << reg))
	    as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
		       reg);
	  else if (reg <= cur_reg)
	    as_tsktsk (_("Warning: register range not in ascending order"));

      while (skip_past_comma (&str) != FAIL
	     || (in_range = 1, *str++ == '-'));

	first_error (_("missing `}'"));

      if (my_get_expression (&expr, &str, GE_NO_PREFIX))

      if (expr.X_op == O_constant)
	{
	  if (expr.X_add_number
	      != (expr.X_add_number & 0x0000ffff))
	    inst.error = _("invalid register mask");

	  if ((range & expr.X_add_number) != 0)
	    {
	      int regno = range & expr.X_add_number;

	      regno = (1 << regno) - 1;
	      as_tsktsk
		(_("Warning: duplicated register (r%d) in register list"),
		 regno);
	    }

	  range |= expr.X_add_number;
	}
      else
	{
	  if (inst.reloc.type != 0)
	    inst.error = _("expression too complex");

	  memcpy (&inst.reloc.exp, &expr, sizeof (expressionS));
	  inst.reloc.type = BFD_RELOC_ARM_MULTI;
	  inst.reloc.pc_rel = 0;
	}

      if (*str == '|' || *str == '+')
  while (another_range);

  *strp = str;
  return range;
}
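/* Illustration (not part of the original source): "{r0-r3, r7, lr}" parses to
   the bitmask 0x408f (bit N set for rN); "{r0-r3}+{r7}" and "{r0-r3}|{r7}"
   concatenate ranges, which is why the parser loops back on '+' and '|'.  */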
/* Types of registers in a list.  */

/* Parse a VFP register list.  If the string is invalid return FAIL.
   Otherwise return the number of registers, and set PBASE to the first
   register.  Parses registers of type ETYPE.
   If REGLIST_NEON_D is used, several syntax enhancements are enabled:
     - Q registers can be used to specify pairs of D registers
     - { } can be omitted from around a singleton register list
	 FIXME: This is not implemented, as it would require backtracking in
	 some cases.
	 This could be done (the meaning isn't really ambiguous), but doesn't
	 fit in well with the current parsing framework.
     - 32 D registers may be used (also true for VFPv3).
   FIXME: Types are ignored in these register lists, which is probably a
   bug.  */

static int
parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
{
  enum arm_reg_type regtype = 0;
  unsigned long mask = 0;

      inst.error = _("expecting {");

      regtype = REG_TYPE_VFS;

      regtype = REG_TYPE_VFD;

    case REGLIST_NEON_D:
      regtype = REG_TYPE_NDQ;

  if (etype != REGLIST_VFP_S)
    {
      /* VFPv3 allows 32 D registers.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
	{
	  ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				  fpu_vfp_ext_v3);
	  ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				  fpu_vfp_ext_v3);
	}
    }

  base_reg = max_regs;

  do
    {
      int setmask = 1, addregs = 1;

      new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);

      if (new_base == FAIL)
	first_error (_(reg_expected_msgs[regtype]));

      if (new_base >= max_regs)
	first_error (_("register out of range in list"));

      /* Note: a value of 2 * n is returned for the register Q<n>.  */
      if (regtype == REG_TYPE_NQ)

      if (new_base < base_reg)
	base_reg = new_base;

      if (mask & (setmask << new_base))
	first_error (_("invalid register list"));

      if ((mask >> new_base) != 0 && ! warned)
	as_tsktsk (_("register list not in ascending order"));

      mask |= setmask << new_base;

      if (*str == '-') /* We have the start of a range expression */
	{
	  if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
	      == FAIL)
	    inst.error = gettext (reg_expected_msgs[regtype]);

	  if (high_range >= max_regs)
	    first_error (_("register out of range in list"));

	  if (regtype == REG_TYPE_NQ)
	    high_range = high_range + 1;

	  if (high_range <= new_base)
	    inst.error = _("register range not in ascending order");

	  for (new_base += addregs; new_base <= high_range; new_base += addregs)
	    {
	      if (mask & (setmask << new_base))
		inst.error = _("invalid register list");

	      mask |= setmask << new_base;
	    }
	}
    }
  while (skip_past_comma (&str) != FAIL);

  /* Sanity check -- should have raised a parse error above.  */
  if (count == 0 || count > max_regs)

  /* Final test -- the registers must be consecutive.  */
  for (i = 0; i < count; i++)
    {
      if ((mask & (1u << i)) == 0)
	inst.error = _("non-contiguous register range");
    }
/* True if two alias types are the same.  */

static int
neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
{
  if (a->defined != b->defined)
    return 0;

  if ((a->defined & NTA_HASTYPE) != 0
      && (a->eltype.type != b->eltype.type
	  || a->eltype.size != b->eltype.size))
    return 0;

  if ((a->defined & NTA_HASINDEX) != 0
      && (a->index != b->index))
    return 0;

  return 1;
}

/* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
   The base register is put in *PBASE.
   The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
   the return value.
   The register stride (minus one) is put in bit 4 of the return value.
   Bits [6:5] encode the list length (minus one).
   The type of the list elements is put in *ELTYPE, if non-NULL.  */

#define NEON_LANE(X)		((X) & 0xf)
#define NEON_REG_STRIDE(X)	((((X) >> 4) & 1) + 1)
#define NEON_REGLIST_LENGTH(X)	((((X) >> 5) & 3) + 1)

static int
parse_neon_el_struct_list (char **str, unsigned *pbase,
			   struct neon_type_el *eltype)
{
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  const char *const incr_error = "register stride must be 1 or 2";
  const char *const type_error = "mismatched element/structure types in list";
  struct neon_typed_alias firsttype;

  if (skip_past_char (&ptr, '{') == SUCCESS)

  do
    {
      struct neon_typed_alias atype;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

	  first_error (_(reg_expected_msgs[rtype]));

      if (rtype == REG_TYPE_NQ)

      else if (reg_incr == -1)
	{
	  reg_incr = getreg - base_reg;
	  if (reg_incr < 1 || reg_incr > 2)
	    first_error (_(incr_error));
	}
      else if (getreg != base_reg + reg_incr * count)
	first_error (_(incr_error));

      if (!neon_alias_types_same (&atype, &firsttype))
	first_error (_(type_error));

      /* Handle Dn-Dm or Qn-Qm syntax.  Can only be used with non-indexed list
	 types.  */
	  struct neon_typed_alias htype;
	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;

	    lane = NEON_INTERLEAVE_LANES;
	  else if (lane != NEON_INTERLEAVE_LANES)
	    first_error (_(type_error));
	  else if (reg_incr != 1)
	    first_error (_("don't use Rn-Rm syntax with non-unit stride"));

	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);

	      first_error (_(reg_expected_msgs[rtype]));

	  if (!neon_alias_types_same (&htype, &firsttype))
	    first_error (_(type_error));

	  count += hireg + dregs - getreg;

      /* If we're using Q registers, we can't use [] or [n] syntax.  */
      if (rtype == REG_TYPE_NQ)

      if ((atype.defined & NTA_HASINDEX) != 0)

	  else if (lane != atype.index)
	    first_error (_(type_error));

      else if (lane == -1)
	lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
	first_error (_(type_error));
    }
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x].  We must be interleaving structures.  */
    lane = NEON_INTERLEAVE_LANES;

  if (lane == -1 || base_reg == -1 || count < 1 || count > 4
      || (count > 1 && reg_incr == -1))
    first_error (_("error parsing element/structure list"));

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
    first_error (_("expected }"));

  if (eltype)
    *eltype = firsttype.eltype;

  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
}
/* Parse an explicit relocation suffix on an expression.  This is
   either nothing, or a word in parentheses.  Note that if !OBJ_ELF,
   arm_reloc_hsh contains no entries, so this function can only
   succeed if there is no () after the word.  Returns -1 on error,
   BFD_RELOC_UNUSED if there wasn't any suffix.  */

static int
parse_reloc (char **str)
{
  struct reloc_entry *r;

    return BFD_RELOC_UNUSED;

  while (*q && *q != ')' && *q != ',')

  if ((r = hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
    return -1;

/* Directives: register aliases.  */

static struct reg_entry *
insert_reg_alias (char *str, int number, int type)
{
  struct reg_entry *new;

  if ((new = hash_find (arm_reg_hsh, str)) != 0)
    {
	as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);

      /* Only warn about a redefinition if it's not defined as the
	 same register.  */
      else if (new->number != number || new->type != type)
	as_warn (_("ignoring redefinition of register alias '%s'"), str);
    }

  name = xstrdup (str);
  new = xmalloc (sizeof (struct reg_entry));

  new->number = number;
  new->builtin = FALSE;

  if (hash_insert (arm_reg_hsh, name, (PTR) new))

static void
insert_neon_reg_alias (char *str, int number, int type,
		       struct neon_typed_alias *atype)
{
  struct reg_entry *reg = insert_reg_alias (str, number, type);

      first_error (_("attempt to redefine typed alias"));

      reg->neon = xmalloc (sizeof (struct neon_typed_alias));
      *reg->neon = *atype;

/* Look for the .req directive.  This is of the form:

	new_register_name .req existing_register_name

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return TRUE.  Otherwise return FALSE.  */

static bfd_boolean
create_register_alias (char * newname, char *p)
{
  struct reg_entry *old;
  char *oldname, *nbuf;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  if (strncmp (oldname, " .req ", 6) != 0)

  if (*oldname == '\0')

  old = hash_find (arm_reg_hsh, oldname);
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = alloca (nlen + 1);
  memcpy (nbuf, newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)

      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	}

      for (p = nbuf; *p; p++)

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

/* Create a Neon typed/indexed register alias using directives, e.g.:

   These typed registers can be used instead of the types specified after the
   Neon mnemonic, so long as all operands given have types.  Types can also be
   specified directly, e.g.:
     vadd d0.s32, d1.s32, d2.s32  */

static bfd_boolean
create_neon_reg_alias (char *newname, char *p)
{
  enum arm_reg_type basetype;
  struct reg_entry *basereg;
  struct reg_entry mybasereg;
  struct neon_type ntype;
  struct neon_typed_alias typeinfo;
  char *namebuf, *nameend;

  typeinfo.defined = 0;
  typeinfo.eltype.type = NT_invtype;
  typeinfo.eltype.size = -1;
  typeinfo.index = -1;

  if (strncmp (p, " .dn ", 5) == 0)
    basetype = REG_TYPE_VFD;
  else if (strncmp (p, " .qn ", 5) == 0)
    basetype = REG_TYPE_NQ;

  basereg = arm_reg_parse_multi (&p);

  if (basereg && basereg->type != basetype)
    as_bad (_("bad type for register"));

  if (basereg == NULL)
    {
      /* Try parsing as an integer.  */
      my_get_expression (&exp, &p, GE_NO_PREFIX);
      if (exp.X_op != O_constant)
	as_bad (_("expression must be constant"));

      basereg = &mybasereg;
      basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
						    : exp.X_add_number;
    }

    typeinfo = *basereg->neon;

  if (parse_neon_type (&ntype, &p) == SUCCESS)
    {
      /* We got a type.  */
      if (typeinfo.defined & NTA_HASTYPE)
	as_bad (_("can't redefine the type of a register alias"));

      typeinfo.defined |= NTA_HASTYPE;
      if (ntype.elems != 1)
	as_bad (_("you must specify a single type only"));

      typeinfo.eltype = ntype.el[0];
    }

  if (skip_past_char (&p, '[') == SUCCESS)
    {
      /* We got a scalar index.  */
      if (typeinfo.defined & NTA_HASINDEX)
	as_bad (_("can't redefine the index of a scalar alias"));

      my_get_expression (&exp, &p, GE_NO_PREFIX);

      if (exp.X_op != O_constant)
	as_bad (_("scalar index must be constant"));

      typeinfo.defined |= NTA_HASINDEX;
      typeinfo.index = exp.X_add_number;

      if (skip_past_char (&p, ']') == FAIL)
	as_bad (_("expecting ]"));
    }

  namelen = nameend - newname;
  namebuf = alloca (namelen + 1);
  strncpy (namebuf, newname, namelen);
  namebuf[namelen] = '\0';

  insert_neon_reg_alias (namebuf, basereg->number, basetype,
			 typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all uppercase.  */
  for (p = namebuf; *p; p++)

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all lowercase.  */
  for (p = namebuf; *p; p++)

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);
/* Should never be called, as .req goes between the alias and the
   register name, not at the beginning of the line.  */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}

static void
s_dn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .dn directive"));
}

static void
s_qn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .qn directive"));
}

/* The .unreq directive deletes an alias which was previously defined
   by .req.  For example:

       my_alias .req r11
       .unreq my_alias  */

static void
s_unreq (int a ATTRIBUTE_UNUSED)
{
  name = input_line_pointer;

  while (*input_line_pointer != 0
	 && *input_line_pointer != ' '
	 && *input_line_pointer != '\n')
    ++input_line_pointer;

  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

    as_bad (_("invalid syntax for .unreq directive"));

      struct reg_entry *reg = hash_find (arm_reg_hsh, name);

	as_bad (_("unknown register alias '%s'"), name);
      else if (reg->builtin)
	as_warn (_("ignoring attempt to undefine built-in register '%s'"),
		 name);

	  hash_delete (arm_reg_hsh, name);
	  free ((char *) reg->name);

	  /* Also locate the all upper case and all lower case versions.
	     Do not complain if we cannot find one or the other as it
	     was probably deleted above.  */

	  nbuf = strdup (name);
	  for (p = nbuf; *p; p++)

	  reg = hash_find (arm_reg_hsh, nbuf);
	    {
	      hash_delete (arm_reg_hsh, nbuf);
	      free ((char *) reg->name);
	    }

	  for (p = nbuf; *p; p++)

	  reg = hash_find (arm_reg_hsh, nbuf);
	    {
	      hash_delete (arm_reg_hsh, nbuf);
	      free ((char *) reg->name);
	    }

  *input_line_pointer = saved_char;
  demand_empty_rest_of_line ();
}

/* Directives: Instruction set selection.  */

/* This code is to handle mapping symbols as defined in the ARM ELF spec.
   (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
   Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
   and $d had type STT_OBJECT (BSF_OBJECT flag).  Now all three are untyped.  */

static enum mstate mapstate = MAP_UNDEFINED;

static void
mapping_state (enum mstate state)
{
  const char * symname;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

      type = BSF_NO_FLAGS;

      type = BSF_NO_FLAGS;

      type = BSF_NO_FLAGS;

  seg_info (now_seg)->tc_segment_info_data.mapstate = state;

  symbolP = symbol_new (symname, now_seg, (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (symbolP);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

      THUMB_SET_FUNC (symbolP, 0);
      ARM_SET_THUMB (symbolP, 0);
      ARM_SET_INTERWORK (symbolP, support_interwork);

      THUMB_SET_FUNC (symbolP, 1);
      ARM_SET_THUMB (symbolP, 1);
      ARM_SET_INTERWORK (symbolP, support_interwork);

#define mapping_state(x) /* nothing */
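/* Illustration (not part of the original source): a section that starts with
   ARM code gets a "$a" mapping symbol at offset 0, a later ".thumb" or
   ".code 16" region is marked with "$t", and literal-pool or ".word" data
   with "$d", as described by the ARM AAELF mapping-symbol rules quoted
   above.  */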
/* Find the real, Thumb encoded start of a Thumb function.  */

static symbolS *
find_real_start (symbolS * symbolP)
{
  const char * name = S_GET_NAME (symbolP);
  symbolS * new_target;

  /* This definition must agree with the one in gcc/config/arm/thumb.c.  */
#define STUB_NAME ".real_start_of"

  /* The compiler may generate BL instructions to local labels because
     it needs to perform a branch to a far away location.  These labels
     do not have a corresponding ".real_start_of" label.  We check
     both for S_IS_LOCAL and for a leading dot, to give a way to bypass
     the ".real_start_of" convention for nonlocal branches.  */
  if (S_IS_LOCAL (symbolP) || name[0] == '.')
    return symbolP;

  real_start = ACONCAT ((STUB_NAME, name, NULL));
  new_target = symbol_find (real_start);

  if (new_target == NULL)
    {
      as_warn (_("Failed to find real start of function: %s\n"), name);
      new_target = symbolP;
    }

  return new_target;
}

static void
opcode_select (int width)
{
      if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	as_bad (_("selected processor does not support THUMB opcodes"));

      /* No need to force the alignment, since we will have been
	 coming from ARM mode, which is word-aligned.  */
      record_alignment (now_seg, 1);

      mapping_state (MAP_THUMB);

      if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
	as_bad (_("selected processor does not support ARM opcodes"));

      frag_align (2, 0, 0);

      record_alignment (now_seg, 1);

      mapping_state (MAP_ARM);

      as_bad (_("invalid instruction size selected (%d)"), width);
}

static void
s_arm (int ignore ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
}

static void
s_thumb (int ignore ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
}

static void
s_code (int unused ATTRIBUTE_UNUSED)
{
  temp = get_absolute_expression ();

      opcode_select (temp);

      as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2515 s_force_thumb (int ignore ATTRIBUTE_UNUSED
)
2517 /* If we are not already in thumb mode go into it, EVEN if
2518 the target processor does not support thumb instructions.
2519 This is used by gcc/config/arm/lib1funcs.asm for example
2520 to compile interworking support functions even if the
2521 target processor should not support interworking. */
2525 record_alignment (now_seg
, 1);
2528 demand_empty_rest_of_line ();
2532 s_thumb_func (int ignore ATTRIBUTE_UNUSED
)
2536 /* The following label is the name/address of the start of a Thumb function.
2537 We need to know this for the interworking support. */
2538 label_is_thumb_function_name
= TRUE
;
2541 /* Perform a .set directive, but also mark the alias as
2542 being a thumb function. */
2545 s_thumb_set (int equiv
)
2547 /* XXX the following is a duplicate of the code for s_set() in read.c
2548 We cannot just call that code as we need to get at the symbol that
2555 /* Especial apologies for the random logic:
2556 This just grew, and could be parsed much more simply!
2558 name
= input_line_pointer
;
2559 delim
= get_symbol_end ();
2560 end_name
= input_line_pointer
;
2563 if (*input_line_pointer
!= ',')
2566 as_bad (_("expected comma after name \"%s\""), name
);
2568 ignore_rest_of_line ();
2572 input_line_pointer
++;
2575 if (name
[0] == '.' && name
[1] == '\0')
2577 /* XXX - this should not happen to .thumb_set. */
2581 if ((symbolP
= symbol_find (name
)) == NULL
2582 && (symbolP
= md_undefined_symbol (name
)) == NULL
)
2585 /* When doing symbol listings, play games with dummy fragments living
2586 outside the normal fragment chain to record the file and line info
2588 if (listing
& LISTING_SYMBOLS
)
2590 extern struct list_info_struct
* listing_tail
;
2591 fragS
* dummy_frag
= xmalloc (sizeof (fragS
));
2593 memset (dummy_frag
, 0, sizeof (fragS
));
2594 dummy_frag
->fr_type
= rs_fill
;
2595 dummy_frag
->line
= listing_tail
;
2596 symbolP
= symbol_new (name
, undefined_section
, 0, dummy_frag
);
2597 dummy_frag
->fr_symbol
= symbolP
;
2601 symbolP
= symbol_new (name
, undefined_section
, 0, &zero_address_frag
);
2604 /* "set" symbols are local unless otherwise specified. */
2605 SF_SET_LOCAL (symbolP
);
2606 #endif /* OBJ_COFF */
2607 } /* Make a new symbol. */
2609 symbol_table_insert (symbolP
);
2614 && S_IS_DEFINED (symbolP
)
2615 && S_GET_SEGMENT (symbolP
) != reg_section
)
2616 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP
));
2618 pseudo_set (symbolP
);
2620 demand_empty_rest_of_line ();
2622 /* XXX Now we come to the Thumb specific bit of code. */
2624 THUMB_SET_FUNC (symbolP
, 1);
2625 ARM_SET_THUMB (symbolP
, 1);
2626 #if defined OBJ_ELF || defined OBJ_COFF
2627 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2631 /* Directives: Mode selection. */
2633 /* .syntax [unified|divided] - choose the new unified syntax
2634 (same for Arm and Thumb encoding, modulo slight differences in what
2635 can be represented) or the old divergent syntax for each mode. */
2637 s_syntax (int unused ATTRIBUTE_UNUSED
)
2641 name
= input_line_pointer
;
2642 delim
= get_symbol_end ();
2644 if (!strcasecmp (name
, "unified"))
2645 unified_syntax
= TRUE
;
2646 else if (!strcasecmp (name
, "divided"))
2647 unified_syntax
= FALSE
;
2650 as_bad (_("unrecognized syntax mode \"%s\""), name
);
2653 *input_line_pointer
= delim
;
2654 demand_empty_rest_of_line ();
/* Directives: sectioning and alignment.  */

/* Same as s_align_ptwo but align 0 => align 2.  */

static void
s_align (int unused ATTRIBUTE_UNUSED)
{
  int temp;
  bfd_boolean fill_p;
  long temp_fill;
  long max_alignment = 15;

  temp = get_absolute_expression ();
  if (temp > max_alignment)
    as_bad (_("alignment too large: %d assumed"), temp = max_alignment);
  else if (temp < 0)
    {
      as_bad (_("alignment negative. 0 assumed."));
      temp = 0;
    }

  if (*input_line_pointer == ',')
    {
      input_line_pointer++;
      temp_fill = get_absolute_expression ();
      fill_p = TRUE;
    }
  else
    {
      fill_p = FALSE;
      temp_fill = 0;
    }

  if (!temp)
    temp = 2;

  /* Only make a frag if we HAVE to.  */
  if (temp && !need_pass_2)
    {
      if (!fill_p && subseg_text_p (now_seg))
        frag_align_code (temp, 0);
      else
        frag_align (temp, (int) temp_fill, 0);
    }
  demand_empty_rest_of_line ();

  record_alignment (now_seg, temp);
}

static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  mapping_state (MAP_DATA);
}

static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
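/* Example (illustrative; not from the original file): with the handler above,
   ".align 0" is treated like ".align 2", so both of these give a 4-byte
   boundary, while ".even" gives a 2-byte one.

       .align 0        @ same as .align 2 (4-byte alignment)
       .align 3        @ 8-byte alignment
       .even           @ 2-byte alignment                           */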
/* Directives: Literal pools.  */

static literal_pool *
find_literal_pool (void)
{
  literal_pool * pool;

  for (pool = list_of_pools; pool != NULL; pool = pool->next)
    {
      if (pool->section == now_seg
          && pool->sub_section == now_subseg)
        break;
    }

  return pool;
}

static literal_pool *
find_or_make_literal_pool (void)
{
  /* Next literal pool ID number.  */
  static unsigned int latest_pool_num = 1;
  literal_pool *      pool;

  pool = find_literal_pool ();

  if (pool == NULL)
    {
      /* Create a new pool.  */
      pool = xmalloc (sizeof (* pool));
      if (! pool)
        return NULL;

      pool->next_free_entry = 0;
      pool->section         = now_seg;
      pool->sub_section     = now_subseg;
      pool->next            = list_of_pools;
      pool->symbol          = NULL;

      /* Add it to the list.  */
      list_of_pools = pool;
    }

  /* New pools, and emptied pools, will have a NULL symbol.  */
  if (pool->symbol == NULL)
    {
      pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
                                    (valueT) 0, &zero_address_frag);
      pool->id = latest_pool_num ++;
    }

  return pool;
}
/* Add the literal in the global 'inst'
   structure to the relevant literal pool.  */

static int
add_to_lit_pool (void)
{
  literal_pool * pool;
  unsigned int entry;

  pool = find_or_make_literal_pool ();

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
      if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
          && (inst.reloc.exp.X_op == O_constant)
          && (pool->literals[entry].X_add_number
              == inst.reloc.exp.X_add_number)
          && (pool->literals[entry].X_unsigned
              == inst.reloc.exp.X_unsigned))
        break;

      if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
          && (inst.reloc.exp.X_op == O_symbol)
          && (pool->literals[entry].X_add_number
              == inst.reloc.exp.X_add_number)
          && (pool->literals[entry].X_add_symbol
              == inst.reloc.exp.X_add_symbol)
          && (pool->literals[entry].X_op_symbol
              == inst.reloc.exp.X_op_symbol))
        break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
        {
          inst.error = _("literal pool overflow");
          return FAIL;
        }

      pool->literals[entry] = inst.reloc.exp;
      pool->next_free_entry += 1;
    }

  inst.reloc.exp.X_op         = O_symbol;
  inst.reloc.exp.X_add_number = ((int) entry) * 4;
  inst.reloc.exp.X_add_symbol = pool->symbol;

  return SUCCESS;
}
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.  */

static void
symbol_locate (symbolS *    symbolP,
               const char * name,      /* It is copied, the caller can modify.  */
               segT         segment,   /* Segment identifier (SEG_<something>).  */
               valueT       valu,      /* Symbol value.  */
               fragS *      frag)      /* Associated fragment.  */
{
  unsigned int name_length;
  char * preserved_copy_of_name;

  name_length = strlen (name) + 1;   /* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool * pool;
  char sym_name[20];

  pool = find_literal_pool ();
  if (pool == NULL
      || pool->symbol == NULL
      || pool->next_free_entry == 0)
    return;

  mapping_state (MAP_DATA);

  /* Align pool as you have word accesses.
     Only make a frag if we have to.  */
  if (!need_pass_2)
    frag_align (2, 0, 0);

  record_alignment (now_seg, 2);

  sprintf (sym_name, "$$lit_\002%x", pool->id);

  symbol_locate (pool->symbol, sym_name, now_seg,
                 (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (pool->symbol);

  ARM_SET_THUMB (pool->symbol, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (pool->symbol, support_interwork);
#endif

  for (entry = 0; entry < pool->next_free_entry; entry ++)
    /* First output the expression in the instruction to the pool.  */
    emit_expr (&(pool->literals[entry]), 4); /* .word  */

  /* Mark the pool as empty.  */
  pool->next_free_entry = 0;
  pool->symbol = NULL;
}
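/* Example (illustrative; not from the original file): the pool filled by
   add_to_lit_pool is flushed by ".ltorg" (alias ".pool"), so the ldr pseudo
   below can reach its literal with a PC-relative load.

       ldr     r0, =0x12345678   @ constant goes into the current pool
       bx      lr
       .ltorg                    @ dump the pool here, within ldr range   */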
/* Forward declarations for functions below, in the MD interface
   section.  */
static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
static valueT create_unwind_entry (int);
static void start_unwind_section (const segT, int);
static void add_unwind_opcode (valueT, int);
static void flush_pending_unwind (void);
/* Directives: Data.  */

static void
s_arm_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      int reloc;
      char *base = input_line_pointer;

      expression (& exp);

      if (exp.X_op != O_symbol)
        emit_expr (&exp, (unsigned int) nbytes);
      else
        {
          char *before_reloc = input_line_pointer;
          reloc = parse_reloc (&input_line_pointer);
          if (reloc == -1)
            {
              as_bad (_("unrecognized relocation suffix"));
              ignore_rest_of_line ();
              return;
            }
          else if (reloc == BFD_RELOC_UNUSED)
            emit_expr (&exp, (unsigned int) nbytes);
          else
            {
              reloc_howto_type *howto = bfd_reloc_type_lookup (stdoutput, reloc);
              int size = bfd_get_reloc_size (howto);

              if (reloc == BFD_RELOC_ARM_PLT32)
                {
                  as_bad (_("(plt) is only valid on branch targets"));
                  reloc = BFD_RELOC_UNUSED;
                  size = 0;
                }

              if (size > nbytes)
                as_bad (_("%s relocations do not fit in %d bytes"),
                        howto->name, nbytes);
              else
                {
                  /* We've parsed an expression stopping at O_symbol.
                     But there may be more expression left now that we
                     have parsed the relocation marker.  Parse it again.
                     XXX Surely there is a cleaner way to do this.  */
                  char *p = input_line_pointer;
                  int offset;
                  char *save_buf = alloca (input_line_pointer - base);
                  memcpy (save_buf, base, input_line_pointer - base);
                  memmove (base + (input_line_pointer - before_reloc),
                           base, before_reloc - base);

                  input_line_pointer = base + (input_line_pointer-before_reloc);
                  expression (&exp);
                  memcpy (base, save_buf, p - base);

                  offset = nbytes - size;
                  p = frag_more ((int) nbytes);
                  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
                               size, &exp, 0, reloc);
                }
            }
        }
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
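/* Example (illustrative; the exact set of recognised suffixes is defined by
   parse_reloc elsewhere in this file): ".word" data may carry a relocation
   suffix in parentheses after the symbol, e.g.

       .word   sym(GOT)          @ GOT-relative word for sym
       .word   sym               @ plain 32-bit data relocation        */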
/* Parse a .rel31 directive.  */

static void
s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  char *p;
  valueT highbit;

  highbit = 0;
  if (*input_line_pointer == '1')
    highbit = 0x80000000;
  else if (*input_line_pointer != '0')
    as_bad (_("expected 0 or 1"));

  input_line_pointer++;
  if (*input_line_pointer != ',')
    as_bad (_("missing comma"));
  input_line_pointer++;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  mapping_state (MAP_DATA);

  expression (&exp);

  p = frag_more (4);
  md_number_to_chars (p, highbit, 4);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
               BFD_RELOC_ARM_PREL31);

  demand_empty_rest_of_line ();
}
/* Directives: AEABI stack-unwind tables.  */

/* Parse an unwind_fnstart directive.  Simply records the current location.  */

static void
s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  /* Mark the start of the function.  */
  unwind.proc_start = expr_build_dot ();

  /* Reset the rest of the unwind info.  */
  unwind.opcode_count = 0;
  unwind.table_entry = NULL;
  unwind.personality_routine = NULL;
  unwind.personality_index = -1;
  unwind.frame_size = 0;
  unwind.fp_offset = 0;
  unwind.fp_reg = REG_SP;
  unwind.fp_used = 0;
  unwind.sp_restored = 0;
}
/* Parse a handlerdata directive.  Creates the exception handling table entry
   for the function.  */

static void
s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  if (unwind.table_entry)
    as_bad (_("duplicate .handlerdata directive"));

  create_unwind_entry (1);
}
3108 /* Parse an unwind_fnend directive. Generates the index table entry. */
3111 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED
)
3117 demand_empty_rest_of_line ();
3119 /* Add eh table entry. */
3120 if (unwind
.table_entry
== NULL
)
3121 val
= create_unwind_entry (0);
3125 /* Add index table entry. This is two words. */
3126 start_unwind_section (unwind
.saved_seg
, 1);
3127 frag_align (2, 0, 0);
3128 record_alignment (now_seg
, 2);
3130 ptr
= frag_more (8);
3131 where
= frag_now_fix () - 8;
3133 /* Self relative offset of the function start. */
3134 fix_new (frag_now
, where
, 4, unwind
.proc_start
, 0, 1,
3135 BFD_RELOC_ARM_PREL31
);
3137 /* Indicate dependency on EHABI-defined personality routines to the
3138 linker, if it hasn't been done already. */
3139 if (unwind
.personality_index
>= 0 && unwind
.personality_index
< 3
3140 && !(marked_pr_dependency
& (1 << unwind
.personality_index
)))
3142 static const char *const name
[] = {
3143 "__aeabi_unwind_cpp_pr0",
3144 "__aeabi_unwind_cpp_pr1",
3145 "__aeabi_unwind_cpp_pr2"
3147 symbolS
*pr
= symbol_find_or_make (name
[unwind
.personality_index
]);
3148 fix_new (frag_now
, where
, 0, pr
, 0, 1, BFD_RELOC_NONE
);
3149 marked_pr_dependency
|= 1 << unwind
.personality_index
;
3150 seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
3151 = marked_pr_dependency
;
3155 /* Inline exception table entry. */
3156 md_number_to_chars (ptr
+ 4, val
, 4);
3158 /* Self relative offset of the table entry. */
3159 fix_new (frag_now
, where
+ 4, 4, unwind
.table_entry
, 0, 1,
3160 BFD_RELOC_ARM_PREL31
);
3162 /* Restore the original section. */
3163 subseg_set (unwind
.saved_seg
, unwind
.saved_subseg
);
/* Parse an unwind_cantunwind directive.  */

static void
s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("personality routine specified for cantunwind frame"));

  unwind.personality_index = -2;
}
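/* Example (illustrative; not from the original file): the usual directive
   sequence emitted around a function body, handled by the routines above
   and below.

       .fnstart
       .save   {r4, lr}          @ see s_arm_unwind_save below
       push    {r4, lr}
       ...
       pop     {r4, pc}
       .fnend

   A frame that must never be unwound uses ".cantunwind" between ".fnstart"
   and ".fnend" instead of personality or handler data.  */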
/* Parse a personalityindex directive.  */

static void
s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personalityindex directive"));

  expression (&exp);

  if (exp.X_op != O_constant
      || exp.X_add_number < 0 || exp.X_add_number > 15)
    {
      as_bad (_("bad personality routine number"));
      ignore_rest_of_line ();
      return;
    }

  unwind.personality_index = exp.X_add_number;

  demand_empty_rest_of_line ();
}
/* Parse a personality directive.  */

static void
s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
{
  char *name, *p, c;

  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personality directive"));

  name = input_line_pointer;
  c = get_symbol_end ();
  p = input_line_pointer;
  unwind.personality_routine = symbol_find_or_make (name);
  *p = c;
  demand_empty_rest_of_line ();
}
3225 /* Parse a directive saving core registers. */
3228 s_arm_unwind_save_core (void)
3234 range
= parse_reg_list (&input_line_pointer
);
3237 as_bad (_("expected register list"));
3238 ignore_rest_of_line ();
3242 demand_empty_rest_of_line ();
3244 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3245 into .unwind_save {..., sp...}. We aren't bothered about the value of
3246 ip because it is clobbered by calls. */
3247 if (unwind
.sp_restored
&& unwind
.fp_reg
== 12
3248 && (range
& 0x3000) == 0x1000)
3250 unwind
.opcode_count
--;
3251 unwind
.sp_restored
= 0;
3252 range
= (range
| 0x2000) & ~0x1000;
3253 unwind
.pending_offset
= 0;
3259 /* See if we can use the short opcodes. These pop a block of up to 8
3260 registers starting with r4, plus maybe r14. */
3261 for (n
= 0; n
< 8; n
++)
3263 /* Break at the first non-saved register. */
3264 if ((range
& (1 << (n
+ 4))) == 0)
3267 /* See if there are any other bits set. */
3268 if (n
== 0 || (range
& (0xfff0 << n
) & 0xbff0) != 0)
3270 /* Use the long form. */
3271 op
= 0x8000 | ((range
>> 4) & 0xfff);
3272 add_unwind_opcode (op
, 2);
3276 /* Use the short form. */
3278 op
= 0xa8; /* Pop r14. */
3280 op
= 0xa0; /* Do not pop r14. */
3282 add_unwind_opcode (op
, 1);
3289 op
= 0xb100 | (range
& 0xf);
3290 add_unwind_opcode (op
, 2);
3293 /* Record the number of bytes pushed. */
3294 for (n
= 0; n
< 16; n
++)
3296 if (range
& (1 << n
))
3297 unwind
.frame_size
+= 4;
3302 /* Parse a directive saving FPA registers. */
3305 s_arm_unwind_save_fpa (int reg
)
3311 /* Get Number of registers to transfer. */
3312 if (skip_past_comma (&input_line_pointer
) != FAIL
)
3315 exp
.X_op
= O_illegal
;
3317 if (exp
.X_op
!= O_constant
)
3319 as_bad (_("expected , <constant>"));
3320 ignore_rest_of_line ();
3324 num_regs
= exp
.X_add_number
;
3326 if (num_regs
< 1 || num_regs
> 4)
3328 as_bad (_("number of registers must be in the range [1:4]"));
3329 ignore_rest_of_line ();
3333 demand_empty_rest_of_line ();
3338 op
= 0xb4 | (num_regs
- 1);
3339 add_unwind_opcode (op
, 1);
3344 op
= 0xc800 | (reg
<< 4) | (num_regs
- 1);
3345 add_unwind_opcode (op
, 2);
3347 unwind
.frame_size
+= num_regs
* 12;
3351 /* Parse a directive saving VFP registers for ARMv6 and above. */
3354 s_arm_unwind_save_vfp_armv6 (void)
3359 int num_vfpv3_regs
= 0;
3360 int num_regs_below_16
;
3362 count
= parse_vfp_reg_list (&input_line_pointer
, &start
, REGLIST_VFP_D
);
3365 as_bad (_("expected register list"));
3366 ignore_rest_of_line ();
3370 demand_empty_rest_of_line ();
3372 /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
3373 than FSTMX/FLDMX-style ones). */
3375 /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31. */
3377 num_vfpv3_regs
= count
;
3378 else if (start
+ count
> 16)
3379 num_vfpv3_regs
= start
+ count
- 16;
3381 if (num_vfpv3_regs
> 0)
3383 int start_offset
= start
> 16 ? start
- 16 : 0;
3384 op
= 0xc800 | (start_offset
<< 4) | (num_vfpv3_regs
- 1);
3385 add_unwind_opcode (op
, 2);
3388 /* Generate opcode for registers numbered in the range 0 .. 15. */
3389 num_regs_below_16
= num_vfpv3_regs
> 0 ? 16 - (int) start
: count
;
3390 assert (num_regs_below_16
+ num_vfpv3_regs
== count
);
3391 if (num_regs_below_16
> 0)
3393 op
= 0xc900 | (start
<< 4) | (num_regs_below_16
- 1);
3394 add_unwind_opcode (op
, 2);
3397 unwind
.frame_size
+= count
* 8;
3401 /* Parse a directive saving VFP registers for pre-ARMv6. */
3404 s_arm_unwind_save_vfp (void)
  count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
3413 as_bad (_("expected register list"));
3414 ignore_rest_of_line ();
3418 demand_empty_rest_of_line ();
3423 op
= 0xb8 | (count
- 1);
3424 add_unwind_opcode (op
, 1);
3429 op
= 0xb300 | (reg
<< 4) | (count
- 1);
3430 add_unwind_opcode (op
, 2);
3432 unwind
.frame_size
+= count
* 8 + 4;
3436 /* Parse a directive saving iWMMXt data registers. */
3439 s_arm_unwind_save_mmxwr (void)
3447 if (*input_line_pointer
== '{')
3448 input_line_pointer
++;
3452 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
3456 as_bad (_(reg_expected_msgs
[REG_TYPE_MMXWR
]));
3461 as_tsktsk (_("register list not in ascending order"));
3464 if (*input_line_pointer
== '-')
3466 input_line_pointer
++;
3467 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
3470 as_bad (_(reg_expected_msgs
[REG_TYPE_MMXWR
]));
3473 else if (reg
>= hi_reg
)
3475 as_bad (_("bad register range"));
3478 for (; reg
< hi_reg
; reg
++)
3482 while (skip_past_comma (&input_line_pointer
) != FAIL
);
3484 if (*input_line_pointer
== '}')
3485 input_line_pointer
++;
3487 demand_empty_rest_of_line ();
3489 /* Generate any deferred opcodes because we're going to be looking at
3491 flush_pending_unwind ();
3493 for (i
= 0; i
< 16; i
++)
3495 if (mask
& (1 << i
))
3496 unwind
.frame_size
+= 8;
3499 /* Attempt to combine with a previous opcode. We do this because gcc
3500 likes to output separate unwind directives for a single block of
3502 if (unwind
.opcode_count
> 0)
3504 i
= unwind
.opcodes
[unwind
.opcode_count
- 1];
3505 if ((i
& 0xf8) == 0xc0)
3508 /* Only merge if the blocks are contiguous. */
3511 if ((mask
& 0xfe00) == (1 << 9))
3513 mask
|= ((1 << (i
+ 11)) - 1) & 0xfc00;
3514 unwind
.opcode_count
--;
3517 else if (i
== 6 && unwind
.opcode_count
>= 2)
3519 i
= unwind
.opcodes
[unwind
.opcode_count
- 2];
3523 op
= 0xffff << (reg
- 1);
3525 && ((mask
& op
) == (1u << (reg
- 1))))
3527 op
= (1 << (reg
+ i
+ 1)) - 1;
3528 op
&= ~((1 << reg
) - 1);
3530 unwind
.opcode_count
-= 2;
3537 /* We want to generate opcodes in the order the registers have been
3538 saved, ie. descending order. */
3539 for (reg
= 15; reg
>= -1; reg
--)
3541 /* Save registers in blocks. */
3543 || !(mask
& (1 << reg
)))
      /* We found an unsaved reg.  Generate opcodes to save the
         preceding block.  */
3552 op
= 0xc0 | (hi_reg
- 10);
3553 add_unwind_opcode (op
, 1);
3558 op
= 0xc600 | ((reg
+ 1) << 4) | ((hi_reg
- reg
) - 1);
3559 add_unwind_opcode (op
, 2);
3568 ignore_rest_of_line ();
3572 s_arm_unwind_save_mmxwcg (void)
3579 if (*input_line_pointer
== '{')
3580 input_line_pointer
++;
3584 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
3588 as_bad (_(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
3594 as_tsktsk (_("register list not in ascending order"));
3597 if (*input_line_pointer
== '-')
3599 input_line_pointer
++;
3600 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
3603 as_bad (_(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
3606 else if (reg
>= hi_reg
)
3608 as_bad (_("bad register range"));
3611 for (; reg
< hi_reg
; reg
++)
3615 while (skip_past_comma (&input_line_pointer
) != FAIL
);
3617 if (*input_line_pointer
== '}')
3618 input_line_pointer
++;
3620 demand_empty_rest_of_line ();
3622 /* Generate any deferred opcodes because we're going to be looking at
3624 flush_pending_unwind ();
3626 for (reg
= 0; reg
< 16; reg
++)
3628 if (mask
& (1 << reg
))
3629 unwind
.frame_size
+= 4;
3632 add_unwind_opcode (op
, 2);
3635 ignore_rest_of_line ();
3639 /* Parse an unwind_save directive.
3640 If the argument is non-zero, this is a .vsave directive. */
3643 s_arm_unwind_save (int arch_v6
)
3646 struct reg_entry
*reg
;
3647 bfd_boolean had_brace
= FALSE
;
3649 /* Figure out what sort of save we have. */
3650 peek
= input_line_pointer
;
3658 reg
= arm_reg_parse_multi (&peek
);
3662 as_bad (_("register expected"));
3663 ignore_rest_of_line ();
3672 as_bad (_("FPA .unwind_save does not take a register list"));
3673 ignore_rest_of_line ();
3676 s_arm_unwind_save_fpa (reg
->number
);
3679 case REG_TYPE_RN
: s_arm_unwind_save_core (); return;
3682 s_arm_unwind_save_vfp_armv6 ();
3684 s_arm_unwind_save_vfp ();
3686 case REG_TYPE_MMXWR
: s_arm_unwind_save_mmxwr (); return;
3687 case REG_TYPE_MMXWCG
: s_arm_unwind_save_mmxwcg (); return;
3690 as_bad (_(".unwind_save does not support this kind of register"));
3691 ignore_rest_of_line ();
3696 /* Parse an unwind_movsp directive. */
3699 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED
)
3705 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
3708 as_bad (_(reg_expected_msgs
[REG_TYPE_RN
]));
3709 ignore_rest_of_line ();
3713 /* Optional constant. */
3714 if (skip_past_comma (&input_line_pointer
) != FAIL
)
3716 if (immediate_for_directive (&offset
) == FAIL
)
3722 demand_empty_rest_of_line ();
3724 if (reg
== REG_SP
|| reg
== REG_PC
)
3726 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
3730 if (unwind
.fp_reg
!= REG_SP
)
3731 as_bad (_("unexpected .unwind_movsp directive"));
3733 /* Generate opcode to restore the value. */
3735 add_unwind_opcode (op
, 1);
3737 /* Record the information for later. */
3738 unwind
.fp_reg
= reg
;
3739 unwind
.fp_offset
= unwind
.frame_size
- offset
;
3740 unwind
.sp_restored
= 1;
/* Parse an unwind_pad directive.  */

static void
s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
{
  int offset;

  if (immediate_for_directive (&offset) == FAIL)
    return;

  if (offset & 3)
    {
      as_bad (_("stack increment must be multiple of 4"));
      ignore_rest_of_line ();
      return;
    }

  /* Don't generate any opcodes, just record the details for later.  */
  unwind.frame_size += offset;
  unwind.pending_offset += offset;

  demand_empty_rest_of_line ();
}
/* Parse an unwind_setfp directive.  */

static void
s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
{
  int sp_reg;
  int fp_reg;
  int offset;

  fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
  if (skip_past_comma (&input_line_pointer) == FAIL)
    sp_reg = FAIL;
  else
    sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);

  if (fp_reg == FAIL || sp_reg == FAIL)
    {
      as_bad (_("expected <reg>, <reg>"));
      ignore_rest_of_line ();
      return;
    }

  /* Optional constant.  */
  if (skip_past_comma (&input_line_pointer) != FAIL)
    {
      if (immediate_for_directive (&offset) == FAIL)
        return;
    }
  else
    offset = 0;

  demand_empty_rest_of_line ();

  if (sp_reg != 13 && sp_reg != unwind.fp_reg)
    {
      as_bad (_("register must be either sp or set by a previous "
                "unwind_movsp directive"));
      return;
    }

  /* Don't generate any opcodes, just record the information for later.  */
  unwind.fp_reg = fp_reg;
  unwind.fp_used = 1;
  if (sp_reg == 13)
    unwind.fp_offset = unwind.frame_size - offset;
  else
    unwind.fp_offset -= offset;
}
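/* Example (illustrative; not from the original file): describing a frame
   pointer to the unwinder.  The register and offset are arbitrary choices
   for the sketch.

       .fnstart
       .save   {fp, lr}
       push    {fp, lr}
       .setfp  fp, sp, #4        @ record that fp = sp + 4 at this point
       add     fp, sp, #4
       ...
       .fnend                                                         */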
3816 /* Parse an unwind_raw directive. */
3819 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED
)
3822 /* This is an arbitrary limit. */
3823 unsigned char op
[16];
3827 if (exp
.X_op
== O_constant
3828 && skip_past_comma (&input_line_pointer
) != FAIL
)
3830 unwind
.frame_size
+= exp
.X_add_number
;
3834 exp
.X_op
= O_illegal
;
3836 if (exp
.X_op
!= O_constant
)
3838 as_bad (_("expected <offset>, <opcode>"));
3839 ignore_rest_of_line ();
3845 /* Parse the opcode. */
3850 as_bad (_("unwind opcode too long"));
3851 ignore_rest_of_line ();
3853 if (exp
.X_op
!= O_constant
|| exp
.X_add_number
& ~0xff)
3855 as_bad (_("invalid unwind opcode"));
3856 ignore_rest_of_line ();
3859 op
[count
++] = exp
.X_add_number
;
3861 /* Parse the next byte. */
3862 if (skip_past_comma (&input_line_pointer
) == FAIL
)
3868 /* Add the opcode bytes in reverse order. */
3870 add_unwind_opcode (op
[count
], 1);
3872 demand_empty_rest_of_line ();
/* Parse a .eabi_attribute directive.  */

static void
s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
{
  s_vendor_attribute (OBJ_ATTR_PROC);
}
#endif /* OBJ_ELF */

static void s_arm_arch (int);
static void s_arm_object_arch (int);
static void s_arm_cpu (int);
static void s_arm_fpu (int);

#ifdef TE_PE

static void
pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
{
  expressionS exp;

  do
    {
      expression (&exp);
      if (exp.X_op == O_symbol)
        exp.X_op = O_secrel;

      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  input_line_pointer--;
  demand_empty_rest_of_line ();
}
#endif /* TE_PE */
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] =
{
  /* Never called because '.req' does not start a line.  */
  { "req",         s_req,         0 },
  /* Following two are likewise never called.  */
  { "dn",          s_dn,          0 },
  { "qn",          s_qn,          0 },
  { "unreq",       s_unreq,       0 },
  { "bss",         s_bss,         0 },
  { "align",       s_align,       0 },
  { "arm",         s_arm,         0 },
  { "thumb",       s_thumb,       0 },
  { "code",        s_code,        0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func",  s_thumb_func,  0 },
  { "thumb_set",   s_thumb_set,   0 },
  { "even",        s_even,        0 },
  { "ltorg",       s_ltorg,       0 },
  { "pool",        s_ltorg,       0 },
  { "syntax",      s_syntax,      0 },
  { "cpu",         s_arm_cpu,     0 },
  { "arch",        s_arm_arch,    0 },
  { "object_arch", s_arm_object_arch, 0 },
  { "fpu",         s_arm_fpu,     0 },
#ifdef OBJ_ELF
  { "word",             s_arm_elf_cons, 4 },
  { "long",             s_arm_elf_cons, 4 },
  { "rel31",            s_arm_rel31,    0 },
  { "fnstart",          s_arm_unwind_fnstart,   0 },
  { "fnend",            s_arm_unwind_fnend,     0 },
  { "cantunwind",       s_arm_unwind_cantunwind, 0 },
  { "personality",      s_arm_unwind_personality, 0 },
  { "personalityindex", s_arm_unwind_personalityindex, 0 },
  { "handlerdata",      s_arm_unwind_handlerdata, 0 },
  { "save",             s_arm_unwind_save,      0 },
  { "vsave",            s_arm_unwind_save,      1 },
  { "movsp",            s_arm_unwind_movsp,     0 },
  { "pad",              s_arm_unwind_pad,       0 },
  { "setfp",            s_arm_unwind_setfp,     0 },
  { "unwind_raw",       s_arm_unwind_raw,       0 },
  { "eabi_attribute",   s_arm_eabi_attribute,   0 },
#else
  { "word",        cons, 4 },

  /* These are used for dwarf.  */
  { "2byte", cons, 2 },
  { "4byte", cons, 4 },
  { "8byte", cons, 8 },
  /* These are used for dwarf2.  */
  { "file", (void (*) (int)) dwarf2_directive_file, 0 },
  { "loc",  dwarf2_directive_loc,  0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
  { "extend",   float_cons, 'x' },
  { "ldouble",  float_cons, 'x' },
  { "packed",   float_cons, 'p' },
#ifdef TE_PE
  { "secrel32", pe_directive_secrel, 0 },
#endif
  { 0, 0, 0 }
};
/* Parser functions used exclusively in instruction operands.  */

/* Generic immediate-value read function for use in insn parsing.
   STR points to the beginning of the immediate (the leading #);
   VAL receives the value; if the value is outside [MIN, MAX]
   issue an error.  PREFIX_OPT is true if the immediate prefix is
   optional.  */

static int
parse_immediate (char **str, int *val, int min, int max,
                 bfd_boolean prefix_opt)
{
  expressionS exp;
  my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
  if (exp.X_op != O_constant)
    {
      inst.error = _("constant expression required");
      return FAIL;
    }

  if (exp.X_add_number < min || exp.X_add_number > max)
    {
      inst.error = _("immediate value out of range");
      return FAIL;
    }

  *val = exp.X_add_number;
  return SUCCESS;
}
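/* Illustrative note (added; not from the original file): a typical call from
   an operand-parsing routine.  The bounds shown are hypothetical.

       int val;
       if (parse_immediate (&str, &val, 0, 31, FALSE) == FAIL)
         return FAIL;            (accepts "#7", rejects "#32")

   With PREFIX_OPT set to TRUE the leading '#' may be omitted.  */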
4010 /* Less-generic immediate-value read function with the possibility of loading a
4011 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4012 instructions. Puts the result directly in inst.operands[i]. */
4015 parse_big_immediate (char **str
, int i
)
4020 my_get_expression (&exp
, &ptr
, GE_OPT_PREFIX_BIG
);
4022 if (exp
.X_op
== O_constant
)
4024 inst
.operands
[i
].imm
= exp
.X_add_number
& 0xffffffff;
4025 /* If we're on a 64-bit host, then a 64-bit number can be returned using
4026 O_constant. We have to be careful not to break compilation for
4027 32-bit X_add_number, though. */
4028 if ((exp
.X_add_number
& ~0xffffffffl
) != 0)
4030 /* X >> 32 is illegal if sizeof (exp.X_add_number) == 4. */
4031 inst
.operands
[i
].reg
= ((exp
.X_add_number
>> 16) >> 16) & 0xffffffff;
4032 inst
.operands
[i
].regisimm
= 1;
4035 else if (exp
.X_op
== O_big
4036 && LITTLENUM_NUMBER_OF_BITS
* exp
.X_add_number
> 32
4037 && LITTLENUM_NUMBER_OF_BITS
* exp
.X_add_number
<= 64)
4039 unsigned parts
= 32 / LITTLENUM_NUMBER_OF_BITS
, j
, idx
= 0;
4040 /* Bignums have their least significant bits in
4041 generic_bignum[0]. Make sure we put 32 bits in imm and
4042 32 bits in reg, in a (hopefully) portable way. */
4043 assert (parts
!= 0);
4044 inst
.operands
[i
].imm
= 0;
4045 for (j
= 0; j
< parts
; j
++, idx
++)
4046 inst
.operands
[i
].imm
|= generic_bignum
[idx
]
4047 << (LITTLENUM_NUMBER_OF_BITS
* j
);
4048 inst
.operands
[i
].reg
= 0;
4049 for (j
= 0; j
< parts
; j
++, idx
++)
4050 inst
.operands
[i
].reg
|= generic_bignum
[idx
]
4051 << (LITTLENUM_NUMBER_OF_BITS
* j
);
4052 inst
.operands
[i
].regisimm
= 1;
4062 /* Returns the pseudo-register number of an FPA immediate constant,
4063 or FAIL if there isn't a valid constant here. */
4066 parse_fpa_immediate (char ** str
)
4068 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
4074 /* First try and match exact strings, this is to guarantee
4075 that some formats will work even for cross assembly. */
4077 for (i
= 0; fp_const
[i
]; i
++)
4079 if (strncmp (*str
, fp_const
[i
], strlen (fp_const
[i
])) == 0)
4083 *str
+= strlen (fp_const
[i
]);
4084 if (is_end_of_line
[(unsigned char) **str
])
4090 /* Just because we didn't get a match doesn't mean that the constant
4091 isn't valid, just that it is in a format that we don't
4092 automatically recognize. Try parsing it with the standard
4093 expression routines. */
4095 memset (words
, 0, MAX_LITTLENUMS
* sizeof (LITTLENUM_TYPE
));
4097 /* Look for a raw floating point number. */
4098 if ((save_in
= atof_ieee (*str
, 'x', words
)) != NULL
4099 && is_end_of_line
[(unsigned char) *save_in
])
4101 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
4103 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
4105 if (words
[j
] != fp_values
[i
][j
])
4109 if (j
== MAX_LITTLENUMS
)
4117 /* Try and parse a more complex expression, this will probably fail
4118 unless the code uses a floating point prefix (eg "0f"). */
4119 save_in
= input_line_pointer
;
4120 input_line_pointer
= *str
;
4121 if (expression (&exp
) == absolute_section
4122 && exp
.X_op
== O_big
4123 && exp
.X_add_number
< 0)
4125 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4127 if (gen_to_words (words
, 5, (long) 15) == 0)
4129 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
4131 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
4133 if (words
[j
] != fp_values
[i
][j
])
4137 if (j
== MAX_LITTLENUMS
)
4139 *str
= input_line_pointer
;
4140 input_line_pointer
= save_in
;
4147 *str
= input_line_pointer
;
4148 input_line_pointer
= save_in
;
4149 inst
.error
= _("invalid FPA immediate expression");
/* Returns 1 if a number has "quarter-precision" float format
   0baBbbbbbc defgh000 00000000 00000000.  */

static int
is_quarter_float (unsigned imm)
{
  int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
  return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
}
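/* Illustrative note (added; not from the original file): 1.0f, whose IEEE
   single-precision encoding is 0x3f800000, passes the check above (low 19
   bits clear and an exponent field of the accepted shape), so an instruction
   such as "vmov.f32 s0, #1.0" can use the 8-bit quarter-precision immediate.
   0x40490fdb (pi) fails because its low mantissa bits are nonzero.  */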
4163 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4164 0baBbbbbbc defgh000 00000000 00000000.
4165 The zero and minus-zero cases need special handling, since they can't be
4166 encoded in the "quarter-precision" float format, but can nonetheless be
4167 loaded as integer constants. */
4170 parse_qfloat_immediate (char **ccp
, int *immed
)
4174 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
4175 int found_fpchar
= 0;
4177 skip_past_char (&str
, '#');
4179 /* We must not accidentally parse an integer as a floating-point number. Make
4180 sure that the value we parse is not an integer by checking for special
4181 characters '.' or 'e'.
4182 FIXME: This is a horrible hack, but doing better is tricky because type
4183 information isn't in a very usable state at parse time. */
4185 skip_whitespace (fpnum
);
4187 if (strncmp (fpnum
, "0x", 2) == 0)
4191 for (; *fpnum
!= '\0' && *fpnum
!= ' ' && *fpnum
!= '\n'; fpnum
++)
4192 if (*fpnum
== '.' || *fpnum
== 'e' || *fpnum
== 'E')
4202 if ((str
= atof_ieee (str
, 's', words
)) != NULL
)
4204 unsigned fpword
= 0;
4207 /* Our FP word must be 32 bits (single-precision FP). */
4208 for (i
= 0; i
< 32 / LITTLENUM_NUMBER_OF_BITS
; i
++)
4210 fpword
<<= LITTLENUM_NUMBER_OF_BITS
;
4214 if (is_quarter_float (fpword
) || (fpword
& 0x7fffffff) == 0)
4227 /* Shift operands. */
4230 SHIFT_LSL
, SHIFT_LSR
, SHIFT_ASR
, SHIFT_ROR
, SHIFT_RRX
4233 struct asm_shift_name
4236 enum shift_kind kind
;
4239 /* Third argument to parse_shift. */
4240 enum parse_shift_mode
4242 NO_SHIFT_RESTRICT
, /* Any kind of shift is accepted. */
4243 SHIFT_IMMEDIATE
, /* Shift operand must be an immediate. */
4244 SHIFT_LSL_OR_ASR_IMMEDIATE
, /* Shift must be LSL or ASR immediate. */
4245 SHIFT_ASR_IMMEDIATE
, /* Shift must be ASR immediate. */
4246 SHIFT_LSL_IMMEDIATE
, /* Shift must be LSL immediate. */
4249 /* Parse a <shift> specifier on an ARM data processing instruction.
4250 This has three forms:
4252 (LSL|LSR|ASL|ASR|ROR) Rs
4253 (LSL|LSR|ASL|ASR|ROR) #imm
4256 Note that ASL is assimilated to LSL in the instruction encoding, and
4257 RRX to ROR #0 (which cannot be written as such). */
4260 parse_shift (char **str
, int i
, enum parse_shift_mode mode
)
4262 const struct asm_shift_name
*shift_name
;
4263 enum shift_kind shift
;
4268 for (p
= *str
; ISALPHA (*p
); p
++)
4273 inst
.error
= _("shift expression expected");
4277 shift_name
= hash_find_n (arm_shift_hsh
, *str
, p
- *str
);
4279 if (shift_name
== NULL
)
4281 inst
.error
= _("shift expression expected");
4285 shift
= shift_name
->kind
;
4289 case NO_SHIFT_RESTRICT
:
4290 case SHIFT_IMMEDIATE
: break;
4292 case SHIFT_LSL_OR_ASR_IMMEDIATE
:
4293 if (shift
!= SHIFT_LSL
&& shift
!= SHIFT_ASR
)
4295 inst
.error
= _("'LSL' or 'ASR' required");
4300 case SHIFT_LSL_IMMEDIATE
:
4301 if (shift
!= SHIFT_LSL
)
4303 inst
.error
= _("'LSL' required");
4308 case SHIFT_ASR_IMMEDIATE
:
4309 if (shift
!= SHIFT_ASR
)
4311 inst
.error
= _("'ASR' required");
4319 if (shift
!= SHIFT_RRX
)
4321 /* Whitespace can appear here if the next thing is a bare digit. */
4322 skip_whitespace (p
);
4324 if (mode
== NO_SHIFT_RESTRICT
4325 && (reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
4327 inst
.operands
[i
].imm
= reg
;
4328 inst
.operands
[i
].immisreg
= 1;
4330 else if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
4333 inst
.operands
[i
].shift_kind
= shift
;
4334 inst
.operands
[i
].shifted
= 1;
4339 /* Parse a <shifter_operand> for an ARM data processing instruction:
4342 #<immediate>, <rotate>
4346 where <shift> is defined by parse_shift above, and <rotate> is a
4347 multiple of 2 between 0 and 30. Validation of immediate operands
4348 is deferred to md_apply_fix. */
4351 parse_shifter_operand (char **str
, int i
)
4356 if ((value
= arm_reg_parse (str
, REG_TYPE_RN
)) != FAIL
)
4358 inst
.operands
[i
].reg
= value
;
4359 inst
.operands
[i
].isreg
= 1;
4361 /* parse_shift will override this if appropriate */
4362 inst
.reloc
.exp
.X_op
= O_constant
;
4363 inst
.reloc
.exp
.X_add_number
= 0;
4365 if (skip_past_comma (str
) == FAIL
)
4368 /* Shift operation on register. */
4369 return parse_shift (str
, i
, NO_SHIFT_RESTRICT
);
4372 if (my_get_expression (&inst
.reloc
.exp
, str
, GE_IMM_PREFIX
))
4375 if (skip_past_comma (str
) == SUCCESS
)
4377 /* #x, y -- ie explicit rotation by Y. */
4378 if (my_get_expression (&expr
, str
, GE_NO_PREFIX
))
4381 if (expr
.X_op
!= O_constant
|| inst
.reloc
.exp
.X_op
!= O_constant
)
4383 inst
.error
= _("constant expression expected");
4387 value
= expr
.X_add_number
;
4388 if (value
< 0 || value
> 30 || value
% 2 != 0)
4390 inst
.error
= _("invalid rotation");
4393 if (inst
.reloc
.exp
.X_add_number
< 0 || inst
.reloc
.exp
.X_add_number
> 255)
4395 inst
.error
= _("invalid constant");
4399 /* Convert to decoded value. md_apply_fix will put it back. */
4400 inst
.reloc
.exp
.X_add_number
4401 = (((inst
.reloc
.exp
.X_add_number
<< (32 - value
))
4402 | (inst
.reloc
.exp
.X_add_number
>> value
)) & 0xffffffff);
4405 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
4406 inst
.reloc
.pc_rel
= 0;
4410 /* Group relocation information. Each entry in the table contains the
4411 textual name of the relocation as may appear in assembler source
4412 and must end with a colon.
4413 Along with this textual name are the relocation codes to be used if
4414 the corresponding instruction is an ALU instruction (ADD or SUB only),
4415 an LDR, an LDRS, or an LDC. */
4417 struct group_reloc_table_entry
4428 /* Varieties of non-ALU group relocation. */
4435 static struct group_reloc_table_entry group_reloc_table
[] =
4436 { /* Program counter relative: */
4438 BFD_RELOC_ARM_ALU_PC_G0_NC
, /* ALU */
4443 BFD_RELOC_ARM_ALU_PC_G0
, /* ALU */
4444 BFD_RELOC_ARM_LDR_PC_G0
, /* LDR */
4445 BFD_RELOC_ARM_LDRS_PC_G0
, /* LDRS */
4446 BFD_RELOC_ARM_LDC_PC_G0
}, /* LDC */
4448 BFD_RELOC_ARM_ALU_PC_G1_NC
, /* ALU */
4453 BFD_RELOC_ARM_ALU_PC_G1
, /* ALU */
4454 BFD_RELOC_ARM_LDR_PC_G1
, /* LDR */
4455 BFD_RELOC_ARM_LDRS_PC_G1
, /* LDRS */
4456 BFD_RELOC_ARM_LDC_PC_G1
}, /* LDC */
4458 BFD_RELOC_ARM_ALU_PC_G2
, /* ALU */
4459 BFD_RELOC_ARM_LDR_PC_G2
, /* LDR */
4460 BFD_RELOC_ARM_LDRS_PC_G2
, /* LDRS */
4461 BFD_RELOC_ARM_LDC_PC_G2
}, /* LDC */
4462 /* Section base relative */
4464 BFD_RELOC_ARM_ALU_SB_G0_NC
, /* ALU */
4469 BFD_RELOC_ARM_ALU_SB_G0
, /* ALU */
4470 BFD_RELOC_ARM_LDR_SB_G0
, /* LDR */
4471 BFD_RELOC_ARM_LDRS_SB_G0
, /* LDRS */
4472 BFD_RELOC_ARM_LDC_SB_G0
}, /* LDC */
4474 BFD_RELOC_ARM_ALU_SB_G1_NC
, /* ALU */
4479 BFD_RELOC_ARM_ALU_SB_G1
, /* ALU */
4480 BFD_RELOC_ARM_LDR_SB_G1
, /* LDR */
4481 BFD_RELOC_ARM_LDRS_SB_G1
, /* LDRS */
4482 BFD_RELOC_ARM_LDC_SB_G1
}, /* LDC */
4484 BFD_RELOC_ARM_ALU_SB_G2
, /* ALU */
4485 BFD_RELOC_ARM_LDR_SB_G2
, /* LDR */
4486 BFD_RELOC_ARM_LDRS_SB_G2
, /* LDRS */
4487 BFD_RELOC_ARM_LDC_SB_G2
} }; /* LDC */
4489 /* Given the address of a pointer pointing to the textual name of a group
4490 relocation as may appear in assembler source, attempt to find its details
4491 in group_reloc_table. The pointer will be updated to the character after
4492 the trailing colon. On failure, FAIL will be returned; SUCCESS
4493 otherwise. On success, *entry will be updated to point at the relevant
4494 group_reloc_table entry. */
4497 find_group_reloc_table_entry (char **str
, struct group_reloc_table_entry
**out
)
4500 for (i
= 0; i
< ARRAY_SIZE (group_reloc_table
); i
++)
4502 int length
= strlen (group_reloc_table
[i
].name
);
4504 if (strncasecmp (group_reloc_table
[i
].name
, *str
, length
) == 0 &&
4505 (*str
)[length
] == ':')
4507 *out
= &group_reloc_table
[i
];
4508 *str
+= (length
+ 1);
4516 /* Parse a <shifter_operand> for an ARM data processing instruction
4517 (as for parse_shifter_operand) where group relocations are allowed:
4520 #<immediate>, <rotate>
4521 #:<group_reloc>:<expression>
4525 where <group_reloc> is one of the strings defined in group_reloc_table.
4526 The hashes are optional.
4528 Everything else is as for parse_shifter_operand. */
4530 static parse_operand_result
4531 parse_shifter_operand_group_reloc (char **str
, int i
)
4533 /* Determine if we have the sequence of characters #: or just :
4534 coming next. If we do, then we check for a group relocation.
4535 If we don't, punt the whole lot to parse_shifter_operand. */
4537 if (((*str
)[0] == '#' && (*str
)[1] == ':')
4538 || (*str
)[0] == ':')
4540 struct group_reloc_table_entry
*entry
;
4542 if ((*str
)[0] == '#')
4547 /* Try to parse a group relocation. Anything else is an error. */
4548 if (find_group_reloc_table_entry (str
, &entry
) == FAIL
)
4550 inst
.error
= _("unknown group relocation");
4551 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
4554 /* We now have the group relocation table entry corresponding to
4555 the name in the assembler source. Next, we parse the expression. */
4556 if (my_get_expression (&inst
.reloc
.exp
, str
, GE_NO_PREFIX
))
4557 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
4559 /* Record the relocation type (always the ALU variant here). */
4560 inst
.reloc
.type
= entry
->alu_code
;
4561 assert (inst
.reloc
.type
!= 0);
4563 return PARSE_OPERAND_SUCCESS
;
4566 return parse_shifter_operand (str
, i
) == SUCCESS
4567 ? PARSE_OPERAND_SUCCESS
: PARSE_OPERAND_FAIL
;
4569 /* Never reached. */
4572 /* Parse all forms of an ARM address expression. Information is written
4573 to inst.operands[i] and/or inst.reloc.
4575 Preindexed addressing (.preind=1):
4577 [Rn, #offset] .reg=Rn .reloc.exp=offset
4578 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4579 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4580 .shift_kind=shift .reloc.exp=shift_imm
4582 These three may have a trailing ! which causes .writeback to be set also.
4584 Postindexed addressing (.postind=1, .writeback=1):
4586 [Rn], #offset .reg=Rn .reloc.exp=offset
4587 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4588 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4589 .shift_kind=shift .reloc.exp=shift_imm
4591 Unindexed addressing (.preind=0, .postind=0):
4593 [Rn], {option} .reg=Rn .imm=option .immisreg=0
4597 [Rn]{!} shorthand for [Rn,#0]{!}
4598 =immediate .isreg=0 .reloc.exp=immediate
4599 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
4601 It is the caller's responsibility to check for addressing modes not
4602 supported by the instruction, and to set inst.reloc.type. */
4604 static parse_operand_result
4605 parse_address_main (char **str
, int i
, int group_relocations
,
4606 group_reloc_type group_type
)
4611 if (skip_past_char (&p
, '[') == FAIL
)
4613 if (skip_past_char (&p
, '=') == FAIL
)
4615 /* bare address - translate to PC-relative offset */
4616 inst
.reloc
.pc_rel
= 1;
4617 inst
.operands
[i
].reg
= REG_PC
;
4618 inst
.operands
[i
].isreg
= 1;
4619 inst
.operands
[i
].preind
= 1;
4621 /* else a load-constant pseudo op, no special treatment needed here */
4623 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
4624 return PARSE_OPERAND_FAIL
;
4627 return PARSE_OPERAND_SUCCESS
;
4630 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
4632 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
4633 return PARSE_OPERAND_FAIL
;
4635 inst
.operands
[i
].reg
= reg
;
4636 inst
.operands
[i
].isreg
= 1;
4638 if (skip_past_comma (&p
) == SUCCESS
)
4640 inst
.operands
[i
].preind
= 1;
4643 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
4645 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
4647 inst
.operands
[i
].imm
= reg
;
4648 inst
.operands
[i
].immisreg
= 1;
4650 if (skip_past_comma (&p
) == SUCCESS
)
4651 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
4652 return PARSE_OPERAND_FAIL
;
4654 else if (skip_past_char (&p
, ':') == SUCCESS
)
4656 /* FIXME: '@' should be used here, but it's filtered out by generic
4657 code before we get to see it here. This may be subject to
4660 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
4661 if (exp
.X_op
!= O_constant
)
4663 inst
.error
= _("alignment must be constant");
4664 return PARSE_OPERAND_FAIL
;
4666 inst
.operands
[i
].imm
= exp
.X_add_number
<< 8;
4667 inst
.operands
[i
].immisalign
= 1;
4668 /* Alignments are not pre-indexes. */
4669 inst
.operands
[i
].preind
= 0;
4673 if (inst
.operands
[i
].negative
)
4675 inst
.operands
[i
].negative
= 0;
4679 if (group_relocations
&&
4680 ((*p
== '#' && *(p
+ 1) == ':') || *p
== ':'))
4683 struct group_reloc_table_entry
*entry
;
4685 /* Skip over the #: or : sequence. */
4691 /* Try to parse a group relocation. Anything else is an
4693 if (find_group_reloc_table_entry (&p
, &entry
) == FAIL
)
4695 inst
.error
= _("unknown group relocation");
4696 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
4699 /* We now have the group relocation table entry corresponding to
4700 the name in the assembler source. Next, we parse the
4702 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
4703 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
4705 /* Record the relocation type. */
4709 inst
.reloc
.type
= entry
->ldr_code
;
4713 inst
.reloc
.type
= entry
->ldrs_code
;
4717 inst
.reloc
.type
= entry
->ldc_code
;
4724 if (inst
.reloc
.type
== 0)
4726 inst
.error
= _("this group relocation is not allowed on this instruction");
4727 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
4731 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
4732 return PARSE_OPERAND_FAIL
;
4736 if (skip_past_char (&p
, ']') == FAIL
)
4738 inst
.error
= _("']' expected");
4739 return PARSE_OPERAND_FAIL
;
4742 if (skip_past_char (&p
, '!') == SUCCESS
)
4743 inst
.operands
[i
].writeback
= 1;
4745 else if (skip_past_comma (&p
) == SUCCESS
)
4747 if (skip_past_char (&p
, '{') == SUCCESS
)
4749 /* [Rn], {expr} - unindexed, with option */
4750 if (parse_immediate (&p
, &inst
.operands
[i
].imm
,
4751 0, 255, TRUE
) == FAIL
)
4752 return PARSE_OPERAND_FAIL
;
4754 if (skip_past_char (&p
, '}') == FAIL
)
4756 inst
.error
= _("'}' expected at end of 'option' field");
4757 return PARSE_OPERAND_FAIL
;
4759 if (inst
.operands
[i
].preind
)
4761 inst
.error
= _("cannot combine index with option");
4762 return PARSE_OPERAND_FAIL
;
4765 return PARSE_OPERAND_SUCCESS
;
4769 inst
.operands
[i
].postind
= 1;
4770 inst
.operands
[i
].writeback
= 1;
4772 if (inst
.operands
[i
].preind
)
4774 inst
.error
= _("cannot combine pre- and post-indexing");
4775 return PARSE_OPERAND_FAIL
;
4779 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
4781 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
4783 /* We might be using the immediate for alignment already. If we
4784 are, OR the register number into the low-order bits. */
4785 if (inst
.operands
[i
].immisalign
)
4786 inst
.operands
[i
].imm
|= reg
;
4788 inst
.operands
[i
].imm
= reg
;
4789 inst
.operands
[i
].immisreg
= 1;
4791 if (skip_past_comma (&p
) == SUCCESS
)
4792 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
4793 return PARSE_OPERAND_FAIL
;
4797 if (inst
.operands
[i
].negative
)
4799 inst
.operands
[i
].negative
= 0;
4802 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
4803 return PARSE_OPERAND_FAIL
;
4808 /* If at this point neither .preind nor .postind is set, we have a
4809 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
4810 if (inst
.operands
[i
].preind
== 0 && inst
.operands
[i
].postind
== 0)
4812 inst
.operands
[i
].preind
= 1;
4813 inst
.reloc
.exp
.X_op
= O_constant
;
4814 inst
.reloc
.exp
.X_add_number
= 0;
4817 return PARSE_OPERAND_SUCCESS
;
4821 parse_address (char **str
, int i
)
4823 return parse_address_main (str
, i
, 0, 0) == PARSE_OPERAND_SUCCESS
4827 static parse_operand_result
4828 parse_address_group_reloc (char **str
, int i
, group_reloc_type type
)
4830 return parse_address_main (str
, i
, 1, type
);
4833 /* Parse an operand for a MOVW or MOVT instruction. */
4835 parse_half (char **str
)
4840 skip_past_char (&p
, '#');
4841 if (strncasecmp (p
, ":lower16:", 9) == 0)
4842 inst
.reloc
.type
= BFD_RELOC_ARM_MOVW
;
4843 else if (strncasecmp (p
, ":upper16:", 9) == 0)
4844 inst
.reloc
.type
= BFD_RELOC_ARM_MOVT
;
4846 if (inst
.reloc
.type
!= BFD_RELOC_UNUSED
)
4852 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
4855 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
4857 if (inst
.reloc
.exp
.X_op
!= O_constant
)
4859 inst
.error
= _("constant expression expected");
4862 if (inst
.reloc
.exp
.X_add_number
< 0
4863 || inst
.reloc
.exp
.X_add_number
> 0xffff)
4865 inst
.error
= _("immediate value out of range");
4873 /* Miscellaneous. */
4875 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
4876 or a bitmask suitable to be or-ed into the ARM msr instruction. */
4878 parse_psr (char **str
)
4881 unsigned long psr_field
;
4882 const struct asm_psr
*psr
;
4885 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
4886 feature for ease of use and backwards compatibility. */
4888 if (strncasecmp (p
, "SPSR", 4) == 0)
4889 psr_field
= SPSR_BIT
;
4890 else if (strncasecmp (p
, "CPSR", 4) == 0)
4897 while (ISALNUM (*p
) || *p
== '_');
4899 psr
= hash_find_n (arm_v7m_psr_hsh
, start
, p
- start
);
4910 /* A suffix follows. */
4916 while (ISALNUM (*p
) || *p
== '_');
4918 psr
= hash_find_n (arm_psr_hsh
, start
, p
- start
);
4922 psr_field
|= psr
->field
;
4927 goto error
; /* Garbage after "[CS]PSR". */
4929 psr_field
|= (PSR_c
| PSR_f
);
4935 inst
.error
= _("flag for {c}psr instruction expected");
4939 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
4940 value suitable for splatting into the AIF field of the instruction. */
4943 parse_cps_flags (char **str
)
4952 case '\0': case ',':
4955 case 'a': case 'A': saw_a_flag
= 1; val
|= 0x4; break;
4956 case 'i': case 'I': saw_a_flag
= 1; val
|= 0x2; break;
4957 case 'f': case 'F': saw_a_flag
= 1; val
|= 0x1; break;
4960 inst
.error
= _("unrecognized CPS flag");
4965 if (saw_a_flag
== 0)
4967 inst
.error
= _("missing CPS flags");
4975 /* Parse an endian specifier ("BE" or "LE", case insensitive);
4976 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
4979 parse_endian_specifier (char **str
)
4984 if (strncasecmp (s
, "BE", 2))
4986 else if (strncasecmp (s
, "LE", 2))
4990 inst
.error
= _("valid endian specifiers are be or le");
4994 if (ISALNUM (s
[2]) || s
[2] == '_')
4996 inst
.error
= _("valid endian specifiers are be or le");
5001 return little_endian
;
5004 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
5005 value suitable for poking into the rotate field of an sxt or sxta
5006 instruction, or FAIL on error. */
5009 parse_ror (char **str
)
5014 if (strncasecmp (s
, "ROR", 3) == 0)
5018 inst
.error
= _("missing rotation field after comma");
5022 if (parse_immediate (&s
, &rot
, 0, 24, FALSE
) == FAIL
)
5027 case 0: *str
= s
; return 0x0;
5028 case 8: *str
= s
; return 0x1;
5029 case 16: *str
= s
; return 0x2;
5030 case 24: *str
= s
; return 0x3;
5033 inst
.error
= _("rotation can only be 0, 8, 16, or 24");
5038 /* Parse a conditional code (from conds[] below). The value returned is in the
5039 range 0 .. 14, or FAIL. */
5041 parse_cond (char **str
)
5044 const struct asm_cond
*c
;
5047 while (ISALPHA (*q
))
5050 c
= hash_find_n (arm_cond_hsh
, p
, q
- p
);
5053 inst
.error
= _("condition required");
5061 /* Parse an option for a barrier instruction. Returns the encoding for the
5064 parse_barrier (char **str
)
5067 const struct asm_barrier_opt
*o
;
5070 while (ISALPHA (*q
))
5073 o
= hash_find_n (arm_barrier_opt_hsh
, p
, q
- p
);
5081 /* Parse the operands of a table branch instruction. Similar to a memory
5084 parse_tb (char **str
)
5089 if (skip_past_char (&p
, '[') == FAIL
)
5091 inst
.error
= _("'[' expected");
5095 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
5097 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
5100 inst
.operands
[0].reg
= reg
;
5102 if (skip_past_comma (&p
) == FAIL
)
5104 inst
.error
= _("',' expected");
5108 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
5110 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
5113 inst
.operands
[0].imm
= reg
;
5115 if (skip_past_comma (&p
) == SUCCESS
)
5117 if (parse_shift (&p
, 0, SHIFT_LSL_IMMEDIATE
) == FAIL
)
5119 if (inst
.reloc
.exp
.X_add_number
!= 1)
5121 inst
.error
= _("invalid shift");
5124 inst
.operands
[0].shifted
= 1;
5127 if (skip_past_char (&p
, ']') == FAIL
)
5129 inst
.error
= _("']' expected");
5136 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
5137 information on the types the operands can take and how they are encoded.
5138 Up to four operands may be read; this function handles setting the
5139 ".present" field for each read operand itself.
5140 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
5141 else returns FAIL. */
5144 parse_neon_mov (char **str
, int *which_operand
)
5146 int i
= *which_operand
, val
;
5147 enum arm_reg_type rtype
;
5149 struct neon_type_el optype
;
5151 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
5153 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
5154 inst
.operands
[i
].reg
= val
;
5155 inst
.operands
[i
].isscalar
= 1;
5156 inst
.operands
[i
].vectype
= optype
;
5157 inst
.operands
[i
++].present
= 1;
5159 if (skip_past_comma (&ptr
) == FAIL
)
5162 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
5165 inst
.operands
[i
].reg
= val
;
5166 inst
.operands
[i
].isreg
= 1;
5167 inst
.operands
[i
].present
= 1;
5169 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
, &optype
))
5172 /* Cases 0, 1, 2, 3, 5 (D only). */
5173 if (skip_past_comma (&ptr
) == FAIL
)
5176 inst
.operands
[i
].reg
= val
;
5177 inst
.operands
[i
].isreg
= 1;
5178 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
5179 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
5180 inst
.operands
[i
].isvec
= 1;
5181 inst
.operands
[i
].vectype
= optype
;
5182 inst
.operands
[i
++].present
= 1;
5184 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
5186 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
5187 Case 13: VMOV <Sd>, <Rm> */
5188 inst
.operands
[i
].reg
= val
;
5189 inst
.operands
[i
].isreg
= 1;
5190 inst
.operands
[i
].present
= 1;
5192 if (rtype
== REG_TYPE_NQ
)
5194 first_error (_("can't use Neon quad register here"));
5197 else if (rtype
!= REG_TYPE_VFS
)
5200 if (skip_past_comma (&ptr
) == FAIL
)
5202 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
5204 inst
.operands
[i
].reg
= val
;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
	     Case 10: VMOV.F32 <Sd>, #<imm>
	     Case 11: VMOV.F64 <Dd>, #<imm>  */
	inst.operands[i].immisfloat = 1;
      else if (parse_big_immediate (&ptr, i) == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
	;
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
					   &optype)) != FAIL)
	{
	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
	     Case 1: VMOV<c><q> <Dd>, <Dm>
	     Case 8: VMOV.F32 <Sd>, <Sm>
	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].isvec = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (skip_past_comma (&ptr) == SUCCESS)
	    {
	      /* Case 15.  */
	      i++;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;
	    }
	}
      else
	{
	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
	  return FAIL;
	}
    }
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
	{
	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 1;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
	      == FAIL)
	    {
	      first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
	      return FAIL;
	    }

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_VFS)
	    {
	      /* Case 14.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
					      &optype)) == FAIL)
		{
		  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
		  return FAIL;
		}
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].isvec = 1;
	      inst.operands[i].issingle = 1;
	      inst.operands[i].vectype = optype;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
	       != FAIL)
	{
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i++].present = 1;
	}
    }
  else
    {
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands.  Update args.  */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

 wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

 wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;
}
/* Matcher codes for parse_operands.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,	/* Neon double precision register (0..31) */
  OP_RNQ,	/* Neon quad precision register */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RNDQ,	/* Neon double or quad precision register */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,	/* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_REGLST,	/* ARM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,	/* VFP single or double-precision register list (& quad) */
  OP_NRDLST,	/* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,	/* Neon element/structure list */

  OP_NILO,	/* Neon immediate/logic operands 2 or 2+3. (VBIC, VORR...)  */
  OP_RNDQ_I0,	/* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RR_RNSC,	/* ARM reg or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC,	/* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,	/* Neon D reg, or Neon scalar.  */
  OP_VMOV,	/* Neon VMOV operands.  */
  OP_RNDQ_IMVNb,/* Neon D or Q reg, or immediate good for VMVN.  */
  OP_RNDQ_I63b,	/* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z,	/* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */

  OP_I0,	/* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,	/*		   0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I255,	/*		   0 .. 255 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS,	/* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,	/* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_PSR,	/* CPSR/SPSR mask for msr */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_RVC_PSR,	/* CPSR/SPSR mask for msr, or VFP control register.  */
  OP_APSR_RR,	/* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc suff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC,	/* iWMMXt R or C reg */
  OP_RIWC_RIWG,	/* iWMMXt wC or wCG reg */

  /* Optional operands.  */
  OP_oI7b,	/* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	/*			       0 .. 31 */
  OP_oI32b,	/*			       1 .. 32 */
  OP_oIffffb,	/*			       0 .. 65535 */
  OP_oI255c,	/* curly-brace enclosed,       0 .. 255 */

  OP_oRR,	/* ARM register */
  OP_oRRnpc,	/* ARM register, not the PC */
  OP_oRRw,	/* ARM register, not r15, optional trailing ! */
  OP_oRND,	/* Optional Neon double precision register */
  OP_oRNQ,	/* Optional Neon quad precision register */
  OP_oRNDQ,	/* Optional Neon double or quad precision register */
  OP_oRNSDQ,	/* Optional single, double or quad precision vector register */
  OP_oSHll,	/* LSL immediate */
  OP_oSHar,	/* ASR immediate */
  OP_oSHllar,	/* LSL or ASR immediate */
  OP_oROR,	/* ROR 0/8/16/24 */
  OP_oBARRIER,	/* Option argument for a barrier instruction.  */

  OP_FIRST_OPTIONAL = OP_oI7b
};
/* Generic instruction operand parser.  This does no encoding and no
   semantic validation; it merely squirrels values away in the inst
   structure.  Returns SUCCESS or FAIL depending on whether the
   specified grammar matched.  */

static int
parse_operands (char *str, const unsigned char *pattern)
{
  unsigned const char *upat = pattern;
  char *backtrack_pos = 0;
  const char *backtrack_error = 0;
  int i, val, backtrack_index = 0;
  enum arm_reg_type rtype;
  parse_operand_result result;

#define po_char_or_fail(chr) do {		\
  if (skip_past_char (&str, chr) == FAIL)	\
    goto failure;				\
} while (0)

#define po_reg_or_fail(regtype) do {				\
  val = arm_typed_reg_parse (&str, regtype, &rtype,		\
			     &inst.operands[i].vectype);	\
  if (val == FAIL)						\
    {								\
      first_error (_(reg_expected_msgs[regtype]));		\
      goto failure;						\
    }								\
  inst.operands[i].reg = val;					\
  inst.operands[i].isreg = 1;					\
  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);		\
  inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
			    || rtype == REG_TYPE_VFD		\
			    || rtype == REG_TYPE_NQ);		\
} while (0)

#define po_reg_or_goto(regtype, label) do {			\
  val = arm_typed_reg_parse (&str, regtype, &rtype,		\
			     &inst.operands[i].vectype);	\
  if (val == FAIL)						\
    goto label;							\
								\
  inst.operands[i].reg = val;					\
  inst.operands[i].isreg = 1;					\
  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);		\
  inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
			    || rtype == REG_TYPE_VFD		\
			    || rtype == REG_TYPE_NQ);		\
} while (0)

#define po_imm_or_fail(min, max, popt) do {			\
  if (parse_immediate (&str, &val, min, max, popt) == FAIL)	\
    goto failure;						\
  inst.operands[i].imm = val;					\
} while (0)

#define po_scalar_or_goto(elsz, label) do {			\
  val = parse_scalar (&str, elsz, &inst.operands[i].vectype);	\
  if (val == FAIL)						\
    goto label;							\
  inst.operands[i].reg = val;					\
  inst.operands[i].isscalar = 1;				\
} while (0)

#define po_misc_or_fail(expr) do {		\
  if (expr)					\
    goto failure;				\
} while (0)

#define po_misc_or_fail_no_backtrack(expr) do {	\
  result = expr;				\
  if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)\
    backtrack_pos = 0;				\
  if (result != PARSE_OPERAND_SUCCESS)		\
    goto failure;				\
} while (0)

  skip_whitespace (str);

  for (i = 0; upat[i] != OP_stop; i++)
    {
      if (upat[i] >= OP_FIRST_OPTIONAL)
	{
	  /* Remember where we are in case we need to backtrack.  */
	  assert (!backtrack_pos);
	  backtrack_pos = str;
	  backtrack_error = inst.error;
	  backtrack_index = i;
	}

      if (i > 0 && (i > 1 || inst.operands[0].present))
	po_char_or_fail (',');

      switch (upat[i])
	{
	  /* Registers */
	case OP_RR:    po_reg_or_fail (REG_TYPE_RN);	  break;
	case OP_RCP:   po_reg_or_fail (REG_TYPE_CP);	  break;
	case OP_RCN:   po_reg_or_fail (REG_TYPE_CN);	  break;
	case OP_RF:    po_reg_or_fail (REG_TYPE_FN);	  break;
	case OP_RVS:   po_reg_or_fail (REG_TYPE_VFS);	  break;
	case OP_RVD:   po_reg_or_fail (REG_TYPE_VFD);	  break;
	case OP_RND:   po_reg_or_fail (REG_TYPE_VFD);	  break;
	case OP_RVC:
	  po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
	  break;
	  /* Also accept generic coprocessor regs for unknown registers.  */
	  coproc_reg:
	  po_reg_or_fail (REG_TYPE_CN);
	  break;
	case OP_RMF:   po_reg_or_fail (REG_TYPE_MVF);	  break;
	case OP_RMD:   po_reg_or_fail (REG_TYPE_MVD);	  break;
	case OP_RMFX:  po_reg_or_fail (REG_TYPE_MVFX);	  break;
	case OP_RMDX:  po_reg_or_fail (REG_TYPE_MVDX);	  break;
	case OP_RMAX:  po_reg_or_fail (REG_TYPE_MVAX);	  break;
	case OP_RMDS:  po_reg_or_fail (REG_TYPE_DSPSC);	  break;
	case OP_RIWR:  po_reg_or_fail (REG_TYPE_MMXWR);	  break;
	case OP_RIWC:  po_reg_or_fail (REG_TYPE_MMXWC);	  break;
	case OP_RIWG:  po_reg_or_fail (REG_TYPE_MMXWCG);  break;
	case OP_RXA:   po_reg_or_fail (REG_TYPE_XSCALE);  break;
	case OP_RNQ:   po_reg_or_fail (REG_TYPE_NQ);	  break;
	case OP_RNDQ:  po_reg_or_fail (REG_TYPE_NDQ);	  break;
	case OP_RVSD:  po_reg_or_fail (REG_TYPE_VFSD);	  break;
	case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ);	  break;

	/* Neon scalar.  Using an element size of 8 means that some invalid
	   scalars are accepted here, so deal with those in later code.  */
	case OP_RNSC:  po_scalar_or_goto (8, failure);	  break;
5616 /* WARNING: We can expand to two operands here. This has the potential
5617 to totally confuse the backtracking mechanism! It will be OK at
5618 least as long as we don't try to use optional args as well,
5622 po_reg_or_goto (REG_TYPE_NDQ
, try_imm
);
5623 inst
.operands
[i
].present
= 1;
5625 skip_past_comma (&str
);
5626 po_reg_or_goto (REG_TYPE_NDQ
, one_reg_only
);
5629 /* Optional register operand was omitted. Unfortunately, it's in
5630 operands[i-1] and we need it to be in inst.operands[i]. Fix that
5631 here (this is a bit grotty). */
5632 inst
.operands
[i
] = inst
.operands
[i
-1];
5633 inst
.operands
[i
-1].present
= 0;
5636 /* There's a possibility of getting a 64-bit immediate here, so
5637 we need special handling. */
5638 if (parse_big_immediate (&str
, i
) == FAIL
)
5640 inst
.error
= _("immediate value is out of range");
5648 po_reg_or_goto (REG_TYPE_NDQ
, try_imm0
);
5651 po_imm_or_fail (0, 0, TRUE
);
5656 po_reg_or_goto (REG_TYPE_VFSD
, try_imm0
);
5661 po_scalar_or_goto (8, try_rr
);
5664 po_reg_or_fail (REG_TYPE_RN
);
5670 po_scalar_or_goto (8, try_nsdq
);
5673 po_reg_or_fail (REG_TYPE_NSDQ
);
5679 po_scalar_or_goto (8, try_ndq
);
5682 po_reg_or_fail (REG_TYPE_NDQ
);
5688 po_scalar_or_goto (8, try_vfd
);
5691 po_reg_or_fail (REG_TYPE_VFD
);
5696 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
5697 not careful then bad things might happen. */
5698 po_misc_or_fail (parse_neon_mov (&str
, &i
) == FAIL
);
5703 po_reg_or_goto (REG_TYPE_NDQ
, try_mvnimm
);
5706 /* There's a possibility of getting a 64-bit immediate here, so
5707 we need special handling. */
5708 if (parse_big_immediate (&str
, i
) == FAIL
)
5710 inst
.error
= _("immediate value is out of range");
5718 po_reg_or_goto (REG_TYPE_NDQ
, try_shimm
);
5721 po_imm_or_fail (0, 63, TRUE
);
5726 po_char_or_fail ('[');
5727 po_reg_or_fail (REG_TYPE_RN
);
5728 po_char_or_fail (']');
5733 po_reg_or_fail (REG_TYPE_RN
);
5734 if (skip_past_char (&str
, '!') == SUCCESS
)
5735 inst
.operands
[i
].writeback
= 1;
	case OP_I7:	 po_imm_or_fail (  0,	   7, FALSE);	break;
	case OP_I15:	 po_imm_or_fail (  0,	  15, FALSE);	break;
	case OP_I16:	 po_imm_or_fail (  1,	  16, FALSE);	break;
	case OP_I16z:	 po_imm_or_fail (  0,	  16, FALSE);	break;
	case OP_I31:	 po_imm_or_fail (  0,	  31, FALSE);	break;
	case OP_I32:	 po_imm_or_fail (  1,	  32, FALSE);	break;
	case OP_I32z:	 po_imm_or_fail (  0,	  32, FALSE);	break;
	case OP_I63s:	 po_imm_or_fail (-64,	  63, FALSE);	break;
	case OP_I63:	 po_imm_or_fail (  0,	  63, FALSE);	break;
	case OP_I64:	 po_imm_or_fail (  1,	  64, FALSE);	break;
	case OP_I64z:	 po_imm_or_fail (  0,	  64, FALSE);	break;
	case OP_I255:	 po_imm_or_fail (  0,	 255, FALSE);	break;

	case OP_I4b:	 po_imm_or_fail (  1,	   4, TRUE);	break;
	case OP_I7b:	 po_imm_or_fail (  0,	   7, TRUE);	break;
	case OP_I15b:	 po_imm_or_fail (  0,	  15, TRUE);	break;
	case OP_I31b:	 po_imm_or_fail (  0,	  31, TRUE);	break;
	case OP_oI32b:	 po_imm_or_fail (  1,	  32, TRUE);	break;
	case OP_oIffffb: po_imm_or_fail (  0, 0xffff, TRUE);	break;

	/* Immediate variants */
	case OP_oI255c:
	  po_char_or_fail ('{');
	  po_imm_or_fail (0, 255, TRUE);
	  po_char_or_fail ('}');
	  break;
5769 /* The expression parser chokes on a trailing !, so we have
5770 to find it first and zap it. */
5773 while (*s
&& *s
!= ',')
5778 inst
.operands
[i
].writeback
= 1;
5780 po_imm_or_fail (0, 31, TRUE
);
5788 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
5793 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
5798 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
5800 if (inst
.reloc
.exp
.X_op
== O_symbol
)
5802 val
= parse_reloc (&str
);
5805 inst
.error
= _("unrecognized relocation suffix");
5808 else if (val
!= BFD_RELOC_UNUSED
)
5810 inst
.operands
[i
].imm
= val
;
5811 inst
.operands
[i
].hasreloc
= 1;
5816 /* Operand for MOVW or MOVT. */
5818 po_misc_or_fail (parse_half (&str
));
5821 /* Register or expression */
5822 case OP_RR_EXr
: po_reg_or_goto (REG_TYPE_RN
, EXPr
); break;
5823 case OP_RR_EXi
: po_reg_or_goto (REG_TYPE_RN
, EXPi
); break;
5825 /* Register or immediate */
5826 case OP_RRnpc_I0
: po_reg_or_goto (REG_TYPE_RN
, I0
); break;
5827 I0
: po_imm_or_fail (0, 0, FALSE
); break;
5829 case OP_RF_IF
: po_reg_or_goto (REG_TYPE_FN
, IF
); break;
5831 if (!is_immediate_prefix (*str
))
5834 val
= parse_fpa_immediate (&str
);
5837 /* FPA immediates are encoded as registers 8-15.
5838 parse_fpa_immediate has already applied the offset. */
5839 inst
.operands
[i
].reg
= val
;
5840 inst
.operands
[i
].isreg
= 1;
5843 case OP_RIWR_I32z
: po_reg_or_goto (REG_TYPE_MMXWR
, I32z
); break;
5844 I32z
: po_imm_or_fail (0, 32, FALSE
); break;
5846 /* Two kinds of register */
5849 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
5851 || (rege
->type
!= REG_TYPE_MMXWR
5852 && rege
->type
!= REG_TYPE_MMXWC
5853 && rege
->type
!= REG_TYPE_MMXWCG
))
5855 inst
.error
= _("iWMMXt data or control register expected");
5858 inst
.operands
[i
].reg
= rege
->number
;
5859 inst
.operands
[i
].isreg
= (rege
->type
== REG_TYPE_MMXWR
);
5865 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
5867 || (rege
->type
!= REG_TYPE_MMXWC
5868 && rege
->type
!= REG_TYPE_MMXWCG
))
5870 inst
.error
= _("iWMMXt control register expected");
5873 inst
.operands
[i
].reg
= rege
->number
;
5874 inst
.operands
[i
].isreg
= 1;
5879 case OP_CPSF
: val
= parse_cps_flags (&str
); break;
5880 case OP_ENDI
: val
= parse_endian_specifier (&str
); break;
5881 case OP_oROR
: val
= parse_ror (&str
); break;
5882 case OP_PSR
: val
= parse_psr (&str
); break;
5883 case OP_COND
: val
= parse_cond (&str
); break;
5884 case OP_oBARRIER
:val
= parse_barrier (&str
); break;
5887 po_reg_or_goto (REG_TYPE_VFC
, try_psr
);
5888 inst
.operands
[i
].isvec
= 1; /* Mark VFP control reg as vector. */
5891 val
= parse_psr (&str
);
5895 po_reg_or_goto (REG_TYPE_RN
, try_apsr
);
5898 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
5900 if (strncasecmp (str
, "APSR_", 5) == 0)
5907 case 'c': found
= (found
& 1) ? 16 : found
| 1; break;
5908 case 'n': found
= (found
& 2) ? 16 : found
| 2; break;
5909 case 'z': found
= (found
& 4) ? 16 : found
| 4; break;
5910 case 'v': found
= (found
& 8) ? 16 : found
| 8; break;
5911 default: found
= 16;
5915 inst
.operands
[i
].isvec
= 1;
5922 po_misc_or_fail (parse_tb (&str
));
5925 /* Register lists */
5927 val
= parse_reg_list (&str
);
5930 inst
.operands
[1].writeback
= 1;
5936 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_S
);
5940 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_D
);
5944 /* Allow Q registers too. */
5945 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
5950 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
5952 inst
.operands
[i
].issingle
= 1;
5957 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
5962 val
= parse_neon_el_struct_list (&str
, &inst
.operands
[i
].reg
,
5963 &inst
.operands
[i
].vectype
);
5966 /* Addressing modes */
5968 po_misc_or_fail (parse_address (&str
, i
));
5972 po_misc_or_fail_no_backtrack (
5973 parse_address_group_reloc (&str
, i
, GROUP_LDR
));
5977 po_misc_or_fail_no_backtrack (
5978 parse_address_group_reloc (&str
, i
, GROUP_LDRS
));
5982 po_misc_or_fail_no_backtrack (
5983 parse_address_group_reloc (&str
, i
, GROUP_LDC
));
5987 po_misc_or_fail (parse_shifter_operand (&str
, i
));
5991 po_misc_or_fail_no_backtrack (
5992 parse_shifter_operand_group_reloc (&str
, i
));
5996 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_IMMEDIATE
));
6000 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_ASR_IMMEDIATE
));
6004 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_OR_ASR_IMMEDIATE
));
6008 as_fatal (_("unhandled operand code %d"), upat
[i
]);
      /* Various value-based sanity checks and shared operations.  We
	 do not signal immediate failures for the register constraints;
	 this allows a syntax error to take precedence.  */
      if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
	inst.error = BAD_PC;

      inst.operands[i].imm = val;

      /* If we get here, this operand was successfully parsed.  */
      inst.operands[i].present = 1;
      continue;

    bad_args:
      inst.error = BAD_ARGS;
    failure:
      if (!backtrack_pos)
	{
	  /* The parse routine should already have set inst.error, but set a
	     default here just in case.  */
	  if (!inst.error)
	    inst.error = _("syntax error");
	  return FAIL;
	}
      /* Do not backtrack over a trailing optional argument that
	 absorbed some text.  We will only fail again, with the
	 'garbage following instruction' error message, which is
	 probably less helpful than the current one.  */
      if (backtrack_index == i && backtrack_pos != str
	  && upat[i+1] == OP_stop)
	{
	  if (!inst.error)
	    inst.error = _("syntax error");
	  break;
	}

      /* Try again, skipping the optional argument at backtrack_pos.  */
      str = backtrack_pos;
      inst.error = backtrack_error;
      inst.operands[backtrack_index].present = 0;
      i = backtrack_index;
    }

  /* Check that we have parsed all the arguments.  */
  if (*str != '\0' && !inst.error)
    inst.error = _("garbage following instruction");

  return inst.error ? FAIL : SUCCESS;
}

#undef po_char_or_fail
#undef po_reg_or_fail
#undef po_reg_or_goto
#undef po_imm_or_fail
#undef po_scalar_or_fail
/* Shorthand macro for instruction encoding functions issuing errors.  */
#define constraint(expr, err) do {		\
  if (expr)					\
    {						\
      inst.error = err;				\
      return;					\
    }						\
} while (0)

/* Functions for operand encoding.  ARM, then Thumb.  */

#define rotate_left(v, n) (v << n | v >> (32 - n))

/* If VAL can be encoded in the immediate field of an ARM instruction,
   return the encoded form.  Otherwise, return FAIL.  */

static unsigned int
encode_arm_immediate (unsigned int val)
{
  unsigned int a, i;

  for (i = 0; i < 32; i += 2)
    if ((a = rotate_left (val, i)) <= 0xff)
      return a | (i << 7); /* 12-bit pack: [shift-cnt,const].  */

  return FAIL;
}
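/* For example, val = 0x0000ab00 gives rotate_left (val, 24) == 0xab, so
   the return value is 0xab | (24 << 7) == 0xcab: an 8-bit constant of
   0xab with a rotate field of 12, which the CPU undoes as 0xab ROR 24.  */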
/* If VAL can be encoded in the immediate field of a Thumb32 instruction,
   return the encoded form.  Otherwise, return FAIL.  */

static unsigned int
encode_thumb32_immediate (unsigned int val)
{
  unsigned int a, i;

  if (val <= 0xff)
    return val;

  for (i = 1; i <= 24; i++)
    {
      a = val >> i;
      if ((val & ~(0xff << i)) == 0)
	return ((val >> i) & 0x7f) | ((32 - i) << 7);
    }

  a = val & 0xff;
  if (val == ((a << 16) | a))
    return 0x100 | a;
  if (val == ((a << 24) | (a << 16) | (a << 8) | a))
    return 0x300 | a;

  a = val & 0xff00;
  if (val == ((a << 16) | a))
    return 0x200 | (a >> 8);

  return FAIL;
}
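/* For example, 0x00ab00ab is returned as 0x1ab, 0xab00ab00 as 0x2ab and
   0xabababab as 0x3ab: the top bits of the 12-bit result select the
   Thumb-2 "duplicated byte" patterns, with the byte itself in the low
   eight bits.  */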
/* Encode a VFP SP or DP register number into inst.instruction.  */

static void
encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
{
  if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
      && reg > 15)
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
	{
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_v3);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_v3);
	}
      else
	{
	  first_error (_("D register out of range for selected VFP version"));
	  return;
	}
    }

  switch (pos)
    {
    case VFP_REG_Sd:
      inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
      break;

    case VFP_REG_Sn:
      inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
      break;

    case VFP_REG_Sm:
      inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
      break;

    case VFP_REG_Dd:
      inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
      break;

    case VFP_REG_Dn:
      inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
      break;

    case VFP_REG_Dm:
      inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
      break;

    default:
      abort ();
    }
}
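/* For example, placing d18 in the Dd position sets bits 15:12 to 2
   (18 & 15) and bit 22 to 1 (18 >> 4); registers above d15 need this
   extra "D" bit, hence the VFPv3 range check above.  */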
/* Encode a <shift> in an ARM-format instruction.  The immediate,
   if any, is handled by md_apply_fix.  */

static void
encode_arm_shift (int i)
{
  if (inst.operands[i].shift_kind == SHIFT_RRX)
    inst.instruction |= SHIFT_ROR << 5;
  else
    {
      inst.instruction |= inst.operands[i].shift_kind << 5;
      if (inst.operands[i].immisreg)
	{
	  inst.instruction |= SHIFT_BY_REG;
	  inst.instruction |= inst.operands[i].imm << 8;
	}
      else
	inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
    }
}

static void
encode_arm_shifter_operand (int i)
{
  if (inst.operands[i].isreg)
    {
      inst.instruction |= inst.operands[i].reg;
      encode_arm_shift (i);
    }
  else
    inst.instruction |= INST_IMMEDIATE;
}
/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.  */

static void
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
{
  assert (inst.operands[i].isreg);
  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
    {
      if (is_t)
	{
	  inst.error = _("instruction does not accept preindexed addressing");
	  return;
	}
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;
    }
  else if (inst.operands[i].postind)
    {
      assert (inst.operands[i].writeback);
      if (is_t)
	inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
}
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 2 load or store instruction.  If is_t is true,
   reject forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */

static void
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
{
  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      inst.instruction |= INST_IMMEDIATE;  /* yes, this is backwards */
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
	{
	  if (inst.operands[i].shift_kind == SHIFT_RRX)
	    inst.instruction |= SHIFT_ROR << 5;
	  else
	    {
	      inst.instruction |= inst.operands[i].shift_kind << 5;
	      inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
	    }
	}
    }
  else /* immediate offset in inst.reloc */
    {
      if (inst.reloc.type == BFD_RELOC_UNUSED)
	inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
    }
}
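/* Mode 2 addressing covers forms such as [rn, #+/-imm12],
   [rn, +/-rm {, shift}] and post-indexed [rn], #+/-imm12; INST_IMMEDIATE
   here is the I bit (set for the register form, hence "backwards") and
   INDEX_UP is the U (add/subtract offset) bit.  */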
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 3 load or store instruction.  Reject forms that
   cannot be used with such instructions.  If is_t is true, reject
   forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */

static void
encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
{
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
    {
      inst.error = _("instruction does not accept scaled register index");
      return;
    }

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
    }
  else /* immediate offset in inst.reloc */
    {
      inst.instruction |= HWOFFSET_IMM;
      if (inst.reloc.type == BFD_RELOC_UNUSED)
	inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
    }
}
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format instruction.  Reject all forms which cannot be encoded
   into a coprocessor load/store instruction.  If wb_ok is false,
   reject use of writeback; if unind_ok is false, reject use of
   unindexed addressing.  If reloc_override is not 0, use it instead
   of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
   (in which case it is preserved).  */

static void
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  inst.instruction |= inst.operands[i].reg << 16;

  assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return;
	}
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return;
	}
      inst.instruction |= WRITE_BACK;
    }

  if (reloc_override)
    inst.reloc.type = reloc_override;
  else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
	   && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      if (thumb_mode)
	inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
    }
}
/* inst.reloc.exp describes an "=expr" load pseudo-operation.
   Determine whether it can be performed with a move instruction; if
   it can, convert inst.instruction to that move instruction and
   return 1; if it can't, convert inst.instruction to a literal-pool
   load and return 0.  If this is not a valid thing to do in the
   current context, set inst.error and return 1.

   inst.operands[i] describes the destination register.  */

static int
move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3)
{
  unsigned long tbit;

  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  if ((inst.instruction & tbit) == 0)
    {
      inst.error = _("invalid pseudo operation");
      return 1;
    }
  if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol)
    {
      inst.error = _("constant expression expected");
      return 1;
    }
  if (inst.reloc.exp.X_op == O_constant)
    {
      if (thumb_p)
	{
	  if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
	    {
	      /* This can be done with a mov(1) instruction.  */
	      inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
	      inst.instruction |= inst.reloc.exp.X_add_number;
	      return 1;
	    }
	}
      else
	{
	  int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
	  if (value != FAIL)
	    {
	      /* This can be done with a mov instruction.  */
	      inst.instruction &= LITERAL_MASK;
	      inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
	      inst.instruction |= value & 0xfff;
	      return 1;
	    }

	  value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
	  if (value != FAIL)
	    {
	      /* This can be done with a mvn instruction.  */
	      inst.instruction &= LITERAL_MASK;
	      inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
	      inst.instruction |= value & 0xfff;
	      return 1;
	    }
	}
    }

  if (add_to_lit_pool () == FAIL)
    {
      inst.error = _("literal pool insertion failed");
      return 1;
    }
  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.reloc.pc_rel = 1;
  inst.reloc.type = (thumb_p
		     ? BFD_RELOC_ARM_THUMB_OFFSET
		     : (mode_3
			? BFD_RELOC_ARM_HWLITERAL
			: BFD_RELOC_ARM_LITERAL));
  return 0;
}
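/* For example, "ldr r0, =0xff" is rewritten as "mov r0, #0xff" by the
   O_constant path above, while "ldr r0, =0x12345678" (not expressible
   as a rotated 8-bit constant, nor as the complement of one) falls
   through to add_to_lit_pool and becomes a PC-relative literal load.  */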
6487 /* Functions for instruction encoding, sorted by subarchitecture.
6488 First some generics; their names are taken from the conventional
6489 bit positions for register arguments in ARM format instructions. */
6499 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6505 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6506 inst
.instruction
|= inst
.operands
[1].reg
;
6512 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6513 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6519 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6520 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
6526 unsigned Rn
= inst
.operands
[2].reg
;
6527 /* Enforce restrictions on SWP instruction. */
6528 if ((inst
.instruction
& 0x0fbfffff) == 0x01000090)
6529 constraint (Rn
== inst
.operands
[0].reg
|| Rn
== inst
.operands
[1].reg
,
6530 _("Rn must not overlap other operands"));
6531 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6532 inst
.instruction
|= inst
.operands
[1].reg
;
6533 inst
.instruction
|= Rn
<< 16;
6539 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6540 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6541 inst
.instruction
|= inst
.operands
[2].reg
;
6547 inst
.instruction
|= inst
.operands
[0].reg
;
6548 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
6549 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
6555 inst
.instruction
|= inst
.operands
[0].imm
;
6561 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6562 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
/* ARM instructions, in alphabetical order by function name (except
   that wrapper functions appear immediately after the function they
   call).  */

/* This is a pseudo-op of the form "adr rd, label" to be converted
   into a relative address of the form "add rd, pc, #label-.-8".  */

static void
do_adr (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 1;
  inst.reloc.exp.X_add_number -= 8;
}

/* This is a pseudo-op of the form "adrl rd, label" to be converted
   into a relative address of the form:
   add rd, pc, #low(label-.-8)"
   add rd, rd, #high(label-.-8)"  */

static void
do_adrl (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
  inst.reloc.pc_rel = 1;
  inst.size = INSN_SIZE * 2;
  inst.reloc.exp.X_add_number -= 8;
}
6605 if (!inst
.operands
[1].present
)
6606 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
6607 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6608 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6609 encode_arm_shifter_operand (2);
6615 if (inst
.operands
[0].present
)
6617 constraint ((inst
.instruction
& 0xf0) != 0x40
6618 && inst
.operands
[0].imm
!= 0xf,
6619 _("bad barrier type"));
6620 inst
.instruction
|= inst
.operands
[0].imm
;
6623 inst
.instruction
|= 0xf;
6629 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
6630 constraint (msb
> 32, _("bit-field extends past end of register"));
6631 /* The instruction encoding stores the LSB and MSB,
6632 not the LSB and width. */
6633 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6634 inst
.instruction
|= inst
.operands
[1].imm
<< 7;
6635 inst
.instruction
|= (msb
- 1) << 16;
6643 /* #0 in second position is alternative syntax for bfc, which is
6644 the same instruction but with REG_PC in the Rm field. */
6645 if (!inst
.operands
[1].isreg
)
6646 inst
.operands
[1].reg
= REG_PC
;
6648 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
6649 constraint (msb
> 32, _("bit-field extends past end of register"));
6650 /* The instruction encoding stores the LSB and MSB,
6651 not the LSB and width. */
6652 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6653 inst
.instruction
|= inst
.operands
[1].reg
;
6654 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
6655 inst
.instruction
|= (msb
- 1) << 16;
6661 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
6662 _("bit-field extends past end of register"));
6663 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6664 inst
.instruction
|= inst
.operands
[1].reg
;
6665 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
6666 inst
.instruction
|= (inst
.operands
[3].imm
- 1) << 16;
/* ARM V5 breakpoint instruction (argument parse)
     BKPT <16 bit unsigned immediate>
   Instruction is not conditional.
	The bit pattern given in insns[] has the COND_ALWAYS condition,
	and it is an error if the caller tried to override that.  */

static void
do_bkpt (void)
{
  /* Top 12 of 16 bits to bits 19:8.  */
  inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;

  /* Bottom 4 of 16 bits to bits 3:0.  */
  inst.instruction |= inst.operands[0].imm & 0xf;
}
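/* For example, "bkpt 0x1234" places 0x123 in bits 19:8 and 0x4 in bits
   3:0 of the instruction word.  */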
6686 encode_branch (int default_reloc
)
6688 if (inst
.operands
[0].hasreloc
)
6690 constraint (inst
.operands
[0].imm
!= BFD_RELOC_ARM_PLT32
,
6691 _("the only suffix valid here is '(plt)'"));
6692 inst
.reloc
.type
= BFD_RELOC_ARM_PLT32
;
6696 inst
.reloc
.type
= default_reloc
;
6698 inst
.reloc
.pc_rel
= 1;
6705 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
6706 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
6709 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
6716 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
6718 if (inst
.cond
== COND_ALWAYS
)
6719 encode_branch (BFD_RELOC_ARM_PCREL_CALL
);
6721 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
6725 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
6728 /* ARM V5 branch-link-exchange instruction (argument parse)
6729 BLX <target_addr> ie BLX(1)
6730 BLX{<condition>} <Rm> ie BLX(2)
6731 Unfortunately, there are two different opcodes for this mnemonic.
6732 So, the insns[].value is not used, and the code here zaps values
6733 into inst.instruction.
6734 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
6739 if (inst
.operands
[0].isreg
)
6741 /* Arg is a register; the opcode provided by insns[] is correct.
6742 It is not illegal to do "blx pc", just useless. */
6743 if (inst
.operands
[0].reg
== REG_PC
)
6744 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
6746 inst
.instruction
|= inst
.operands
[0].reg
;
6750 /* Arg is an address; this instruction cannot be executed
6751 conditionally, and the opcode must be adjusted. */
6752 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
6753 inst
.instruction
= 0xfa000000;
6755 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
6756 encode_branch (BFD_RELOC_ARM_PCREL_CALL
);
6759 encode_branch (BFD_RELOC_ARM_PCREL_BLX
);
6766 if (inst
.operands
[0].reg
== REG_PC
)
6767 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
6769 inst
.instruction
|= inst
.operands
[0].reg
;
6773 /* ARM v5TEJ. Jump to Jazelle code. */
6778 if (inst
.operands
[0].reg
== REG_PC
)
6779 as_tsktsk (_("use of r15 in bxj is not really useful"));
6781 inst
.instruction
|= inst
.operands
[0].reg
;
6784 /* Co-processor data operation:
6785 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
6786 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
6790 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
6791 inst
.instruction
|= inst
.operands
[1].imm
<< 20;
6792 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
6793 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
6794 inst
.instruction
|= inst
.operands
[4].reg
;
6795 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
6801 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6802 encode_arm_shifter_operand (1);
6805 /* Transfer between coprocessor and ARM registers.
6806 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
6811 No special properties. */
6816 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
6817 inst
.instruction
|= inst
.operands
[1].imm
<< 21;
6818 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
6819 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
6820 inst
.instruction
|= inst
.operands
[4].reg
;
6821 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
/* Transfer between coprocessor register and pair of ARM registers.
     MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
     MRRC{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.

   Two XScale instructions are special cases of these:

     MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
     MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0

   Result unpredictable if Rd or Rn is R15.  */
6840 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
6841 inst
.instruction
|= inst
.operands
[1].imm
<< 4;
6842 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
6843 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
6844 inst
.instruction
|= inst
.operands
[4].reg
;
6850 inst
.instruction
|= inst
.operands
[0].imm
<< 6;
6851 if (inst
.operands
[1].present
)
6853 inst
.instruction
|= CPSI_MMOD
;
6854 inst
.instruction
|= inst
.operands
[1].imm
;
6861 inst
.instruction
|= inst
.operands
[0].imm
;
6867 /* There is no IT instruction in ARM mode. We
6868 process it but do not generate code for it. */
6875 int base_reg
= inst
.operands
[0].reg
;
6876 int range
= inst
.operands
[1].imm
;
6878 inst
.instruction
|= base_reg
<< 16;
6879 inst
.instruction
|= range
;
6881 if (inst
.operands
[1].writeback
)
6882 inst
.instruction
|= LDM_TYPE_2_OR_3
;
6884 if (inst
.operands
[0].writeback
)
6886 inst
.instruction
|= WRITE_BACK
;
6887 /* Check for unpredictable uses of writeback. */
6888 if (inst
.instruction
& LOAD_BIT
)
6890 /* Not allowed in LDM type 2. */
6891 if ((inst
.instruction
& LDM_TYPE_2_OR_3
)
6892 && ((range
& (1 << REG_PC
)) == 0))
6893 as_warn (_("writeback of base register is UNPREDICTABLE"));
6894 /* Only allowed if base reg not in list for other types. */
6895 else if (range
& (1 << base_reg
))
6896 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
6900 /* Not allowed for type 2. */
6901 if (inst
.instruction
& LDM_TYPE_2_OR_3
)
6902 as_warn (_("writeback of base register is UNPREDICTABLE"));
6903 /* Only allowed if base reg not in list, or first in list. */
6904 else if ((range
& (1 << base_reg
))
6905 && (range
& ((1 << base_reg
) - 1)))
6906 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
6911 /* ARMv5TE load-consecutive (argument parse)
6920 constraint (inst
.operands
[0].reg
% 2 != 0,
6921 _("first destination register must be even"));
6922 constraint (inst
.operands
[1].present
6923 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
6924 _("can only load two consecutive registers"));
6925 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
6926 constraint (!inst
.operands
[2].isreg
, _("'[' expected"));
6928 if (!inst
.operands
[1].present
)
6929 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
6931 if (inst
.instruction
& LOAD_BIT
)
6933 /* encode_arm_addr_mode_3 will diagnose overlap between the base
6934 register and the first register written; we have to diagnose
6935 overlap between the base and the second register written here. */
6937 if (inst
.operands
[2].reg
== inst
.operands
[1].reg
6938 && (inst
.operands
[2].writeback
|| inst
.operands
[2].postind
))
6939 as_warn (_("base register written back, and overlaps "
6940 "second destination register"));
6942 /* For an index-register load, the index register must not overlap the
6943 destination (even if not write-back). */
6944 else if (inst
.operands
[2].immisreg
6945 && ((unsigned) inst
.operands
[2].imm
== inst
.operands
[0].reg
6946 || (unsigned) inst
.operands
[2].imm
== inst
.operands
[1].reg
))
6947 as_warn (_("index register overlaps destination register"));
6950 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6951 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE
);
6957 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
6958 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
6959 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
6960 || inst
.operands
[1].negative
6961 /* This can arise if the programmer has written
6963 or if they have mistakenly used a register name as the last
6966 It is very difficult to distinguish between these two cases
6967 because "rX" might actually be a label. ie the register
6968 name has been occluded by a symbol of the same name. So we
6969 just generate a general 'bad addressing mode' type error
6970 message and leave it up to the programmer to discover the
6971 true cause and fix their mistake. */
6972 || (inst
.operands
[1].reg
== REG_PC
),
6975 constraint (inst
.reloc
.exp
.X_op
!= O_constant
6976 || inst
.reloc
.exp
.X_add_number
!= 0,
6977 _("offset must be zero in ARM encoding"));
6979 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6980 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6981 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
6987 constraint (inst
.operands
[0].reg
% 2 != 0,
6988 _("even register required"));
6989 constraint (inst
.operands
[1].present
6990 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
6991 _("can only load two consecutive registers"));
6992 /* If op 1 were present and equal to PC, this function wouldn't
6993 have been called in the first place. */
6994 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
6996 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6997 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
7003 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7004 if (!inst
.operands
[1].isreg
)
7005 if (move_or_literal_pool (0, /*thumb_p=*/FALSE
, /*mode_3=*/FALSE
))
7007 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE
);
7013 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
7015 if (inst
.operands
[1].preind
)
7017 constraint (inst
.reloc
.exp
.X_op
!= O_constant
7018 || inst
.reloc
.exp
.X_add_number
!= 0,
7019 _("this instruction requires a post-indexed address"));
7021 inst
.operands
[1].preind
= 0;
7022 inst
.operands
[1].postind
= 1;
7023 inst
.operands
[1].writeback
= 1;
7025 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7026 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE
);
7029 /* Halfword and signed-byte load/store operations. */
7034 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7035 if (!inst
.operands
[1].isreg
)
7036 if (move_or_literal_pool (0, /*thumb_p=*/FALSE
, /*mode_3=*/TRUE
))
7038 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE
);
7044 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
7046 if (inst
.operands
[1].preind
)
7048 constraint (inst
.reloc
.exp
.X_op
!= O_constant
7049 || inst
.reloc
.exp
.X_add_number
!= 0,
7050 _("this instruction requires a post-indexed address"));
7052 inst
.operands
[1].preind
= 0;
7053 inst
.operands
[1].postind
= 1;
7054 inst
.operands
[1].writeback
= 1;
7056 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7057 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE
);
7060 /* Co-processor register load/store.
7061 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
7065 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
7066 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
7067 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
7073 /* This restriction does not apply to mls (nor to mla in v6 or later). */
7074 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
7075 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
)
7076 && !(inst
.instruction
& 0x00400000))
7077 as_tsktsk (_("Rd and Rm should be different in mla"));
7079 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7080 inst
.instruction
|= inst
.operands
[1].reg
;
7081 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
7082 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
7088 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7089 encode_arm_shifter_operand (1);
/* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>.  */

static void
do_mov16 (void)
{
  bfd_vma imm;
  bfd_boolean top;

  top = (inst.instruction & 0x00400000) != 0;
  constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
	      _(":lower16: not allowed in this instruction"));
  constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
	      _(":upper16: not allowed in this instruction"));
  inst.instruction |= inst.operands[0].reg << 12;
  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      imm = inst.reloc.exp.X_add_number;
      /* The value is in two pieces: 0:11, 16:19.  */
      inst.instruction |= (imm & 0x00000fff);
      inst.instruction |= (imm & 0x0000f000) << 4;
    }
}
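/* For example, "movw r0, #0xabcd" puts 0xbcd in bits 11:0 and 0xa in
   bits 19:16 of the instruction (the imm4:imm12 split of the 16-bit
   value).  */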
7114 static void do_vfp_nsyn_opcode (const char *);
7117 do_vfp_nsyn_mrs (void)
7119 if (inst
.operands
[0].isvec
)
7121 if (inst
.operands
[1].reg
!= 1)
7122 first_error (_("operand 1 must be FPSCR"));
7123 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
7124 memset (&inst
.operands
[1], '\0', sizeof (inst
.operands
[1]));
7125 do_vfp_nsyn_opcode ("fmstat");
7127 else if (inst
.operands
[1].isvec
)
7128 do_vfp_nsyn_opcode ("fmrx");
7136 do_vfp_nsyn_msr (void)
7138 if (inst
.operands
[0].isvec
)
7139 do_vfp_nsyn_opcode ("fmxr");
7149 if (do_vfp_nsyn_mrs () == SUCCESS
)
7152 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
7153 constraint ((inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
))
7155 _("'CPSR' or 'SPSR' expected"));
7156 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7157 inst
.instruction
|= (inst
.operands
[1].imm
& SPSR_BIT
);
7160 /* Two possible forms:
7161 "{C|S}PSR_<field>, Rm",
7162 "{C|S}PSR_f, #expression". */
7167 if (do_vfp_nsyn_msr () == SUCCESS
)
7170 inst
.instruction
|= inst
.operands
[0].imm
;
7171 if (inst
.operands
[1].isreg
)
7172 inst
.instruction
|= inst
.operands
[1].reg
;
7175 inst
.instruction
|= INST_IMMEDIATE
;
7176 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
7177 inst
.reloc
.pc_rel
= 0;
7184 if (!inst
.operands
[2].present
)
7185 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
7186 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7187 inst
.instruction
|= inst
.operands
[1].reg
;
7188 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
7190 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
7191 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
7192 as_tsktsk (_("Rd and Rm should be different in mul"));
7195 /* Long Multiply Parser
7196 UMULL RdLo, RdHi, Rm, Rs
7197 SMULL RdLo, RdHi, Rm, Rs
7198 UMLAL RdLo, RdHi, Rm, Rs
7199 SMLAL RdLo, RdHi, Rm, Rs. */
7204 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7205 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7206 inst
.instruction
|= inst
.operands
[2].reg
;
7207 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
7209 /* rdhi, rdlo and rm must all be different. */
7210 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
7211 || inst
.operands
[0].reg
== inst
.operands
[2].reg
7212 || inst
.operands
[1].reg
== inst
.operands
[2].reg
)
7213 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
7219 if (inst
.operands
[0].present
)
7221 /* Architectural NOP hints are CPSR sets with no bits selected. */
7222 inst
.instruction
&= 0xf0000000;
7223 inst
.instruction
|= 0x0320f000 + inst
.operands
[0].imm
;
7227 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
7228 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
7229 Condition defaults to COND_ALWAYS.
7230 Error if Rd, Rn or Rm are R15. */
7235 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7236 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7237 inst
.instruction
|= inst
.operands
[2].reg
;
7238 if (inst
.operands
[3].present
)
7239 encode_arm_shift (3);
7242 /* ARM V6 PKHTB (Argument Parse). */
7247 if (!inst
.operands
[3].present
)
7249 /* If the shift specifier is omitted, turn the instruction
7250 into pkhbt rd, rm, rn. */
7251 inst
.instruction
&= 0xfff00010;
7252 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7253 inst
.instruction
|= inst
.operands
[1].reg
;
7254 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
7258 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7259 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7260 inst
.instruction
|= inst
.operands
[2].reg
;
7261 encode_arm_shift (3);
7265 /* ARMv5TE: Preload-Cache
7269 Syntactically, like LDR with B=1, W=0, L=1. */
7274 constraint (!inst
.operands
[0].isreg
,
7275 _("'[' expected after PLD mnemonic"));
7276 constraint (inst
.operands
[0].postind
,
7277 _("post-indexed expression used in preload instruction"));
7278 constraint (inst
.operands
[0].writeback
,
7279 _("writeback used in preload instruction"));
7280 constraint (!inst
.operands
[0].preind
,
7281 _("unindexed addressing used in preload instruction"));
7282 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
7285 /* ARMv7: PLI <addr_mode> */
7289 constraint (!inst
.operands
[0].isreg
,
7290 _("'[' expected after PLI mnemonic"));
7291 constraint (inst
.operands
[0].postind
,
7292 _("post-indexed expression used in preload instruction"));
7293 constraint (inst
.operands
[0].writeback
,
7294 _("writeback used in preload instruction"));
7295 constraint (!inst
.operands
[0].preind
,
7296 _("unindexed addressing used in preload instruction"));
7297 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
7298 inst
.instruction
&= ~PRE_INDEX
;
7304 inst
.operands
[1] = inst
.operands
[0];
7305 memset (&inst
.operands
[0], 0, sizeof inst
.operands
[0]);
7306 inst
.operands
[0].isreg
= 1;
7307 inst
.operands
[0].writeback
= 1;
7308 inst
.operands
[0].reg
= REG_SP
;
7312 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
7313 word at the specified address and the following word
7315 Unconditionally executed.
7316 Error if Rn is R15. */
7321 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7322 if (inst
.operands
[0].writeback
)
7323 inst
.instruction
|= WRITE_BACK
;
7326 /* ARM V6 ssat (argument parse). */
7331 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7332 inst
.instruction
|= (inst
.operands
[1].imm
- 1) << 16;
7333 inst
.instruction
|= inst
.operands
[2].reg
;
7335 if (inst
.operands
[3].present
)
7336 encode_arm_shift (3);
7339 /* ARM V6 usat (argument parse). */
7344 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7345 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
7346 inst
.instruction
|= inst
.operands
[2].reg
;
7348 if (inst
.operands
[3].present
)
7349 encode_arm_shift (3);
7352 /* ARM V6 ssat16 (argument parse). */
7357 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7358 inst
.instruction
|= ((inst
.operands
[1].imm
- 1) << 16);
7359 inst
.instruction
|= inst
.operands
[2].reg
;
7365 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7366 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
7367 inst
.instruction
|= inst
.operands
[2].reg
;
7370 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
7371 preserving the other bits.
7373 setend <endian_specifier>, where <endian_specifier> is either
7379 if (inst
.operands
[0].imm
)
7380 inst
.instruction
|= 0x200;
7386 unsigned int Rm
= (inst
.operands
[1].present
7387 ? inst
.operands
[1].reg
7388 : inst
.operands
[0].reg
);
7390 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7391 inst
.instruction
|= Rm
;
7392 if (inst
.operands
[2].isreg
) /* Rd, {Rm,} Rs */
7394 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
7395 inst
.instruction
|= SHIFT_BY_REG
;
7398 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
7404 inst
.reloc
.type
= BFD_RELOC_ARM_SMC
;
7405 inst
.reloc
.pc_rel
= 0;
7411 inst
.reloc
.type
= BFD_RELOC_ARM_SWI
;
7412 inst
.reloc
.pc_rel
= 0;
7415 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
7416 SMLAxy{cond} Rd,Rm,Rs,Rn
7417 SMLAWy{cond} Rd,Rm,Rs,Rn
7418 Error if any register is R15. */
7423 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7424 inst
.instruction
|= inst
.operands
[1].reg
;
7425 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
7426 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
7429 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
7430 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
7431 Error if any register is R15.
7432 Warning if Rdlo == Rdhi. */
7437 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7438 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7439 inst
.instruction
|= inst
.operands
[2].reg
;
7440 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
7442 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
7443 as_tsktsk (_("rdhi and rdlo must be different"));
7446 /* ARM V5E (El Segundo) signed-multiply (argument parse)
7447 SMULxy{cond} Rd,Rm,Rs
7448 Error if any register is R15. */
7453 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7454 inst
.instruction
|= inst
.operands
[1].reg
;
7455 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
7458 /* ARM V6 srs (argument parse). The variable fields in the encoding are
7459 the same for both ARM and Thumb-2. */
7466 if (inst
.operands
[0].present
)
7468 reg
= inst
.operands
[0].reg
;
7469 constraint (reg
!= 13, _("SRS base register must be r13"));
7474 inst
.instruction
|= reg
<< 16;
7475 inst
.instruction
|= inst
.operands
[1].imm
;
7476 if (inst
.operands
[0].writeback
|| inst
.operands
[1].writeback
)
7477 inst
.instruction
|= WRITE_BACK
;
7480 /* ARM V6 strex (argument parse). */
7485 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
7486 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
7487 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
7488 || inst
.operands
[2].negative
7489 /* See comment in do_ldrex(). */
7490 || (inst
.operands
[2].reg
== REG_PC
),
7493 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
7494 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
7496 constraint (inst
.reloc
.exp
.X_op
!= O_constant
7497 || inst
.reloc
.exp
.X_add_number
!= 0,
7498 _("offset must be zero in ARM encoding"));
7500 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7501 inst
.instruction
|= inst
.operands
[1].reg
;
7502 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
7503 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
7509 constraint (inst
.operands
[1].reg
% 2 != 0,
7510 _("even register required"));
7511 constraint (inst
.operands
[2].present
7512 && inst
.operands
[2].reg
!= inst
.operands
[1].reg
+ 1,
7513 _("can only store two consecutive registers"));
7514 /* If op 2 were present and equal to PC, this function wouldn't
7515 have been called in the first place. */
7516 constraint (inst
.operands
[1].reg
== REG_LR
, _("r14 not allowed here"));
7518 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
7519 || inst
.operands
[0].reg
== inst
.operands
[1].reg
+ 1
7520 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
7523 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7524 inst
.instruction
|= inst
.operands
[1].reg
;
7525 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
7528 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
7529 extends it to 32-bits, and adds the result to a value in another
7530 register. You can specify a rotation by 0, 8, 16, or 24 bits
7531 before extracting the 16-bit value.
7532 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
7533 Condition defaults to COND_ALWAYS.
7534 Error if any register uses R15. */
7539 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7540 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7541 inst
.instruction
|= inst
.operands
[2].reg
;
7542 inst
.instruction
|= inst
.operands
[3].imm
<< 10;
7547 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
7548 Condition defaults to COND_ALWAYS.
7549 Error if any register uses R15. */
7554 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7555 inst
.instruction
|= inst
.operands
[1].reg
;
7556 inst
.instruction
|= inst
.operands
[2].imm
<< 10;
7559 /* VFP instructions. In a logical order: SP variant first, monad
7560 before dyad, arithmetic then move then load/store. */
7563 do_vfp_sp_monadic (void)
7565 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
7566 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
7570 do_vfp_sp_dyadic (void)
7572 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
7573 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
7574 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
7578 do_vfp_sp_compare_z (void)
7580 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
7584 do_vfp_dp_sp_cvt (void)
7586 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
7587 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
7591 do_vfp_sp_dp_cvt (void)
7593 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
7594 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
7598 do_vfp_reg_from_sp (void)
7600 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7601 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
7605 do_vfp_reg2_from_sp2 (void)
7607 constraint (inst
.operands
[2].imm
!= 2,
7608 _("only two consecutive VFP SP registers allowed here"));
7609 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7610 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7611 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
7615 do_vfp_sp_from_reg (void)
7617 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sn
);
7618 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
7622 do_vfp_sp2_from_reg2 (void)
7624 constraint (inst
.operands
[0].imm
!= 2,
7625 _("only two consecutive VFP SP registers allowed here"));
7626 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sm
);
7627 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
7628 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
7632 do_vfp_sp_ldst (void)
7634 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
7635 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
7639 do_vfp_dp_ldst (void)
7641 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
7642 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
7647 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type
)
7649 if (inst
.operands
[0].writeback
)
7650 inst
.instruction
|= WRITE_BACK
;
7652 constraint (ldstm_type
!= VFP_LDSTMIA
,
7653 _("this addressing mode requires base-register writeback"));
7654 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7655 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sd
);
7656 inst
.instruction
|= inst
.operands
[1].imm
;
7660 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type
)
7664 if (inst
.operands
[0].writeback
)
7665 inst
.instruction
|= WRITE_BACK
;
7667 constraint (ldstm_type
!= VFP_LDSTMIA
&& ldstm_type
!= VFP_LDSTMIAX
,
7668 _("this addressing mode requires base-register writeback"));
7670 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7671 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
7673 count
= inst
.operands
[1].imm
<< 1;
7674 if (ldstm_type
== VFP_LDSTMIAX
|| ldstm_type
== VFP_LDSTMDBX
)
7677 inst
.instruction
|= count
;
7681 do_vfp_sp_ldstmia (void)
7683 vfp_sp_ldstm (VFP_LDSTMIA
);
7687 do_vfp_sp_ldstmdb (void)
7689 vfp_sp_ldstm (VFP_LDSTMDB
);
7693 do_vfp_dp_ldstmia (void)
7695 vfp_dp_ldstm (VFP_LDSTMIA
);
7699 do_vfp_dp_ldstmdb (void)
7701 vfp_dp_ldstm (VFP_LDSTMDB
);
7705 do_vfp_xp_ldstmia (void)
7707 vfp_dp_ldstm (VFP_LDSTMIAX
);
7711 do_vfp_xp_ldstmdb (void)
7713 vfp_dp_ldstm (VFP_LDSTMDBX
);
7717 do_vfp_dp_rd_rm (void)
7719 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
7720 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
7724 do_vfp_dp_rn_rd (void)
7726 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dn
);
7727 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
7731 do_vfp_dp_rd_rn (void)
7733 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
7734 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
7738 do_vfp_dp_rd_rn_rm (void)
7740 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
7741 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
7742 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dm
);
7748 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
7752 do_vfp_dp_rm_rd_rn (void)
7754 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dm
);
7755 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
7756 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dn
);
7759 /* VFPv3 instructions. */
7761 do_vfp_sp_const (void)
7763 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
7764 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
7765 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
7769 do_vfp_dp_const (void)
7771 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
7772 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
7773 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
7777 vfp_conv (int srcsize
)
7779 unsigned immbits
= srcsize
- inst
.operands
[1].imm
;
7780 inst
.instruction
|= (immbits
& 1) << 5;
7781 inst
.instruction
|= (immbits
>> 1);
7785 do_vfp_sp_conv_16 (void)
7787 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
7792 do_vfp_dp_conv_16 (void)
7794 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
7799 do_vfp_sp_conv_32 (void)
7801 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
7806 do_vfp_dp_conv_32 (void)
7808 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
7813 /* FPA instructions. Also in a logical order. */
7818 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7819 inst
.instruction
|= inst
.operands
[1].reg
;
7823 do_fpa_ldmstm (void)
7825 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7826 switch (inst
.operands
[1].imm
)
7828 case 1: inst
.instruction
|= CP_T_X
; break;
7829 case 2: inst
.instruction
|= CP_T_Y
; break;
7830 case 3: inst
.instruction
|= CP_T_Y
| CP_T_X
; break;
7835 if (inst
.instruction
& (PRE_INDEX
| INDEX_UP
))
7837 /* The instruction specified "ea" or "fd", so we can only accept
7838 [Rn]{!}. The instruction does not really support stacking or
7839 unstacking, so we have to emulate these by setting appropriate
7840 bits and offsets. */
7841 constraint (inst
.reloc
.exp
.X_op
!= O_constant
7842 || inst
.reloc
.exp
.X_add_number
!= 0,
7843 _("this instruction does not support indexing"));
7845 if ((inst
.instruction
& PRE_INDEX
) || inst
.operands
[2].writeback
)
7846 inst
.reloc
.exp
.X_add_number
= 12 * inst
.operands
[1].imm
;
7848 if (!(inst
.instruction
& INDEX_UP
))
7849 inst
.reloc
.exp
.X_add_number
= -inst
.reloc
.exp
.X_add_number
;
7851 if (!(inst
.instruction
& PRE_INDEX
) && inst
.operands
[2].writeback
)
7853 inst
.operands
[2].preind
= 0;
7854 inst
.operands
[2].postind
= 1;
7858 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
7862 /* iWMMXt instructions: strictly in alphabetical order. */
7865 do_iwmmxt_tandorc (void)
7867 constraint (inst
.operands
[0].reg
!= REG_PC
, _("only r15 allowed here"));
7871 do_iwmmxt_textrc (void)
7873 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7874 inst
.instruction
|= inst
.operands
[1].imm
;
7878 do_iwmmxt_textrm (void)
7880 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7881 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7882 inst
.instruction
|= inst
.operands
[2].imm
;
7886 do_iwmmxt_tinsr (void)
7888 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7889 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
7890 inst
.instruction
|= inst
.operands
[2].imm
;
7894 do_iwmmxt_tmia (void)
7896 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
7897 inst
.instruction
|= inst
.operands
[1].reg
;
7898 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
7902 do_iwmmxt_waligni (void)
7904 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7905 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7906 inst
.instruction
|= inst
.operands
[2].reg
;
7907 inst
.instruction
|= inst
.operands
[3].imm
<< 20;
7911 do_iwmmxt_wmerge (void)
7913 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7914 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7915 inst
.instruction
|= inst
.operands
[2].reg
;
7916 inst
.instruction
|= inst
.operands
[3].imm
<< 21;
7920 do_iwmmxt_wmov (void)
7922 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
7923 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7924 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7925 inst
.instruction
|= inst
.operands
[1].reg
;
7929 do_iwmmxt_wldstbh (void)
7932 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7934 reloc
= BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
;
7936 reloc
= BFD_RELOC_ARM_CP_OFF_IMM_S2
;
7937 encode_arm_cp_address (1, TRUE
, FALSE
, reloc
);
7941 do_iwmmxt_wldstw (void)
7943 /* RIWR_RIWC clears .isreg for a control register. */
7944 if (!inst
.operands
[0].isreg
)
7946 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
7947 inst
.instruction
|= 0xf0000000;
7950 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7951 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
7955 do_iwmmxt_wldstd (void)
7957 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7958 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
)
7959 && inst
.operands
[1].immisreg
)
7961 inst
.instruction
&= ~0x1a000ff;
7962 inst
.instruction
|= (0xf << 28);
7963 if (inst
.operands
[1].preind
)
7964 inst
.instruction
|= PRE_INDEX
;
7965 if (!inst
.operands
[1].negative
)
7966 inst
.instruction
|= INDEX_UP
;
7967 if (inst
.operands
[1].writeback
)
7968 inst
.instruction
|= WRITE_BACK
;
7969 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7970 inst
.instruction
|= inst
.reloc
.exp
.X_add_number
<< 4;
7971 inst
.instruction
|= inst
.operands
[1].imm
;
7974 encode_arm_cp_address (1, TRUE
, FALSE
, 0);
7978 do_iwmmxt_wshufh (void)
7980 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7981 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7982 inst
.instruction
|= ((inst
.operands
[2].imm
& 0xf0) << 16);
7983 inst
.instruction
|= (inst
.operands
[2].imm
& 0x0f);
7987 do_iwmmxt_wzero (void)
7989 /* WZERO reg is an alias for WANDN reg, reg, reg. */
7990 inst
.instruction
|= inst
.operands
[0].reg
;
7991 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7992 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7996 do_iwmmxt_wrwrwr_or_imm5 (void)
7998 if (inst
.operands
[2].isreg
)
8001 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
),
8002 _("immediate operand requires iWMMXt2"));
8004 if (inst
.operands
[2].imm
== 0)
8006 switch ((inst
.instruction
>> 20) & 0xf)
8012 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
8013 inst
.operands
[2].imm
= 16;
8014 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0x7 << 20);
8020 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
8021 inst
.operands
[2].imm
= 32;
8022 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0xb << 20);
8029 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
8031 wrn
= (inst
.instruction
>> 16) & 0xf;
8032 inst
.instruction
&= 0xff0fff0f;
8033 inst
.instruction
|= wrn
;
8034 /* Bail out here; the instruction is now assembled. */
8039 /* Map 32 -> 0, etc. */
8040 inst
.operands
[2].imm
&= 0x1f;
8041 inst
.instruction
|= (0xf << 28) | ((inst
.operands
[2].imm
& 0x10) << 4) | (inst
.operands
[2].imm
& 0xf);
8045 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
8046 operations first, then control, shift, and load/store. */
8048 /* Insns like "foo X,Y,Z". */
8051 do_mav_triple (void)
8053 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8054 inst
.instruction
|= inst
.operands
[1].reg
;
8055 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
8058 /* Insns like "foo W,X,Y,Z".
8059 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
8064 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
8065 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8066 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8067 inst
.instruction
|= inst
.operands
[3].reg
;
8070 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
8074 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8077 /* Maverick shift immediate instructions.
8078 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
8079 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
8084 int imm
= inst
.operands
[2].imm
;
8086 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8087 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8089 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
8090 Bits 5-7 of the insn should have bits 4-6 of the immediate.
8091 Bit 4 should be 0. */
8092 imm
= (imm
& 0xf) | ((imm
& 0x70) << 1);
8094 inst
.instruction
|= imm
;
8097 /* XScale instructions. Also sorted arithmetic before move. */
8099 /* Xscale multiply-accumulate (argument parse)
8102 MIAxycc acc0,Rm,Rs. */
8107 inst
.instruction
|= inst
.operands
[1].reg
;
8108 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
8111 /* Xscale move-accumulator-register (argument parse)
8113 MARcc acc0,RdLo,RdHi. */
8118 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8119 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8122 /* Xscale move-register-accumulator (argument parse)
8124 MRAcc RdLo,RdHi,acc0. */
8129 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
, BAD_OVERLAP
);
8130 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8131 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8134 /* Encoding functions relevant only to Thumb. */
8136 /* inst.operands[i] is a shifted-register operand; encode
8137 it into inst.instruction in the format used by Thumb32. */
8140 encode_thumb32_shifted_operand (int i
)
8142 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
8143 unsigned int shift
= inst
.operands
[i
].shift_kind
;
8145 constraint (inst
.operands
[i
].immisreg
,
8146 _("shift by register not allowed in thumb mode"));
8147 inst
.instruction
|= inst
.operands
[i
].reg
;
8148 if (shift
== SHIFT_RRX
)
8149 inst
.instruction
|= SHIFT_ROR
<< 4;
8152 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
8153 _("expression too complex"));
8155 constraint (value
> 32
8156 || (value
== 32 && (shift
== SHIFT_LSL
8157 || shift
== SHIFT_ROR
)),
8158 _("shift expression is too large"));
8162 else if (value
== 32)
8165 inst
.instruction
|= shift
<< 4;
8166 inst
.instruction
|= (value
& 0x1c) << 10;
8167 inst
.instruction
|= (value
& 0x03) << 6;
8172 /* inst.operands[i] was set up by parse_address. Encode it into a
8173 Thumb32 format load or store instruction. Reject forms that cannot
8174 be used with such instructions. If is_t is true, reject forms that
8175 cannot be used with a T instruction; if is_d is true, reject forms
8176 that cannot be used with a D instruction. */
8179 encode_thumb32_addr_mode (int i
, bfd_boolean is_t
, bfd_boolean is_d
)
8181 bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
8183 constraint (!inst
.operands
[i
].isreg
,
8184 _("Instruction does not support =N addresses"));
8186 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
8187 if (inst
.operands
[i
].immisreg
)
8189 constraint (is_pc
, _("cannot use register index with PC-relative addressing"));
8190 constraint (is_t
|| is_d
, _("cannot use register index with this instruction"));
8191 constraint (inst
.operands
[i
].negative
,
8192 _("Thumb does not support negative register indexing"));
8193 constraint (inst
.operands
[i
].postind
,
8194 _("Thumb does not support register post-indexing"));
8195 constraint (inst
.operands
[i
].writeback
,
8196 _("Thumb does not support register indexing with writeback"));
8197 constraint (inst
.operands
[i
].shifted
&& inst
.operands
[i
].shift_kind
!= SHIFT_LSL
,
8198 _("Thumb supports only LSL in shifted register indexing"));
8200 inst
.instruction
|= inst
.operands
[i
].imm
;
8201 if (inst
.operands
[i
].shifted
)
8203 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
8204 _("expression too complex"));
8205 constraint (inst
.reloc
.exp
.X_add_number
< 0
8206 || inst
.reloc
.exp
.X_add_number
> 3,
8207 _("shift out of range"));
8208 inst
.instruction
|= inst
.reloc
.exp
.X_add_number
<< 4;
8210 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
8212 else if (inst
.operands
[i
].preind
)
8214 constraint (is_pc
&& inst
.operands
[i
].writeback
,
8215 _("cannot use writeback with PC-relative addressing"));
8216 constraint (is_t
&& inst
.operands
[i
].writeback
,
8217 _("cannot use writeback with this instruction"));
8221 inst
.instruction
|= 0x01000000;
8222 if (inst
.operands
[i
].writeback
)
8223 inst
.instruction
|= 0x00200000;
8227 inst
.instruction
|= 0x00000c00;
8228 if (inst
.operands
[i
].writeback
)
8229 inst
.instruction
|= 0x00000100;
8231 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
8233 else if (inst
.operands
[i
].postind
)
8235 assert (inst
.operands
[i
].writeback
);
8236 constraint (is_pc
, _("cannot use post-indexing with PC-relative addressing"));
8237 constraint (is_t
, _("cannot use post-indexing with this instruction"));
8240 inst
.instruction
|= 0x00200000;
8242 inst
.instruction
|= 0x00000900;
8243 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
8245 else /* unindexed - only for coprocessor */
8246 inst
.error
= _("instruction does not accept unindexed addressing");
8249 /* Table of Thumb instructions which exist in both 16- and 32-bit
8250 encodings (the latter only in post-V6T2 cores). The index is the
8251 value used in the insns table below. When there is more than one
8252 possible 16-bit encoding for the instruction, this table always
8254 Also contains several pseudo-instructions used during relaxation. */
8255 #define T16_32_TAB \
8256 X(adc, 4140, eb400000), \
8257 X(adcs, 4140, eb500000), \
8258 X(add, 1c00, eb000000), \
8259 X(adds, 1c00, eb100000), \
8260 X(addi, 0000, f1000000), \
8261 X(addis, 0000, f1100000), \
8262 X(add_pc,000f, f20f0000), \
8263 X(add_sp,000d, f10d0000), \
8264 X(adr, 000f, f20f0000), \
8265 X(and, 4000, ea000000), \
8266 X(ands, 4000, ea100000), \
8267 X(asr, 1000, fa40f000), \
8268 X(asrs, 1000, fa50f000), \
8269 X(b, e000, f000b000), \
8270 X(bcond, d000, f0008000), \
8271 X(bic, 4380, ea200000), \
8272 X(bics, 4380, ea300000), \
8273 X(cmn, 42c0, eb100f00), \
8274 X(cmp, 2800, ebb00f00), \
8275 X(cpsie, b660, f3af8400), \
8276 X(cpsid, b670, f3af8600), \
8277 X(cpy, 4600, ea4f0000), \
8278 X(dec_sp,80dd, f1ad0d00), \
8279 X(eor, 4040, ea800000), \
8280 X(eors, 4040, ea900000), \
8281 X(inc_sp,00dd, f10d0d00), \
8282 X(ldmia, c800, e8900000), \
8283 X(ldr, 6800, f8500000), \
8284 X(ldrb, 7800, f8100000), \
8285 X(ldrh, 8800, f8300000), \
8286 X(ldrsb, 5600, f9100000), \
8287 X(ldrsh, 5e00, f9300000), \
8288 X(ldr_pc,4800, f85f0000), \
8289 X(ldr_pc2,4800, f85f0000), \
8290 X(ldr_sp,9800, f85d0000), \
8291 X(lsl, 0000, fa00f000), \
8292 X(lsls, 0000, fa10f000), \
8293 X(lsr, 0800, fa20f000), \
8294 X(lsrs, 0800, fa30f000), \
8295 X(mov, 2000, ea4f0000), \
8296 X(movs, 2000, ea5f0000), \
8297 X(mul, 4340, fb00f000), \
8298 X(muls, 4340, ffffffff), /* no 32b muls */ \
8299 X(mvn, 43c0, ea6f0000), \
8300 X(mvns, 43c0, ea7f0000), \
8301 X(neg, 4240, f1c00000), /* rsb #0 */ \
8302 X(negs, 4240, f1d00000), /* rsbs #0 */ \
8303 X(orr, 4300, ea400000), \
8304 X(orrs, 4300, ea500000), \
8305 X(pop, bc00, e8bd0000), /* ldmia sp!,... */ \
8306 X(push, b400, e92d0000), /* stmdb sp!,... */ \
8307 X(rev, ba00, fa90f080), \
8308 X(rev16, ba40, fa90f090), \
8309 X(revsh, bac0, fa90f0b0), \
8310 X(ror, 41c0, fa60f000), \
8311 X(rors, 41c0, fa70f000), \
8312 X(sbc, 4180, eb600000), \
8313 X(sbcs, 4180, eb700000), \
8314 X(stmia, c000, e8800000), \
8315 X(str, 6000, f8400000), \
8316 X(strb, 7000, f8000000), \
8317 X(strh, 8000, f8200000), \
8318 X(str_sp,9000, f84d0000), \
8319 X(sub, 1e00, eba00000), \
8320 X(subs, 1e00, ebb00000), \
8321 X(subi, 8000, f1a00000), \
8322 X(subis, 8000, f1b00000), \
8323 X(sxtb, b240, fa4ff080), \
8324 X(sxth, b200, fa0ff080), \
8325 X(tst, 4200, ea100f00), \
8326 X(uxtb, b2c0, fa5ff080), \
8327 X(uxth, b280, fa1ff080), \
8328 X(nop, bf00, f3af8000), \
8329 X(yield, bf10, f3af8001), \
8330 X(wfe, bf20, f3af8002), \
8331 X(wfi, bf30, f3af8003), \
8332 X(sev, bf40, f3af9004), /* typo, 8004? */
8334 /* To catch errors in encoding functions, the codes are all offset by
8335 0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
8336 as 16-bit instructions. */
8337 #define X(a,b,c) T_MNEM_##a
8338 enum t16_32_codes
{ T16_32_OFFSET
= 0xF7FF, T16_32_TAB
};
8341 #define X(a,b,c) 0x##b
8342 static const unsigned short thumb_op16
[] = { T16_32_TAB
};
8343 #define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
8346 #define X(a,b,c) 0x##c
8347 static const unsigned int thumb_op32
[] = { T16_32_TAB
};
8348 #define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
8349 #define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
8353 /* Thumb instruction encoders, in alphabetical order. */
8357 do_t_add_sub_w (void)
8361 Rd
= inst
.operands
[0].reg
;
8362 Rn
= inst
.operands
[1].reg
;
8364 constraint (Rd
== 15, _("PC not allowed as destination"));
8365 inst
.instruction
|= (Rn
<< 16) | (Rd
<< 8);
8366 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMM12
;
8369 /* Parse an add or subtract instruction. We get here with inst.instruction
8370 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
8377 Rd
= inst
.operands
[0].reg
;
8378 Rs
= (inst
.operands
[1].present
8379 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
8380 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
8388 flags
= (inst
.instruction
== T_MNEM_adds
8389 || inst
.instruction
== T_MNEM_subs
);
8391 narrow
= (current_it_mask
== 0);
8393 narrow
= (current_it_mask
!= 0);
8394 if (!inst
.operands
[2].isreg
)
8398 add
= (inst
.instruction
== T_MNEM_add
8399 || inst
.instruction
== T_MNEM_adds
);
8401 if (inst
.size_req
!= 4)
8403 /* Attempt to use a narrow opcode, with relaxation if
8405 if (Rd
== REG_SP
&& Rs
== REG_SP
&& !flags
)
8406 opcode
= add
? T_MNEM_inc_sp
: T_MNEM_dec_sp
;
8407 else if (Rd
<= 7 && Rs
== REG_SP
&& add
&& !flags
)
8408 opcode
= T_MNEM_add_sp
;
8409 else if (Rd
<= 7 && Rs
== REG_PC
&& add
&& !flags
)
8410 opcode
= T_MNEM_add_pc
;
8411 else if (Rd
<= 7 && Rs
<= 7 && narrow
)
8414 opcode
= add
? T_MNEM_addis
: T_MNEM_subis
;
8416 opcode
= add
? T_MNEM_addi
: T_MNEM_subi
;
8420 inst
.instruction
= THUMB_OP16(opcode
);
8421 inst
.instruction
|= (Rd
<< 4) | Rs
;
8422 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
8423 if (inst
.size_req
!= 2)
8424 inst
.relax
= opcode
;
8427 constraint (inst
.size_req
== 2, BAD_HIREG
);
8429 if (inst
.size_req
== 4
8430 || (inst
.size_req
!= 2 && !opcode
))
8434 constraint (Rs
!= REG_LR
|| inst
.instruction
!= T_MNEM_subs
,
8435 _("only SUBS PC, LR, #const allowed"));
8436 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
8437 _("expression too complex"));
8438 constraint (inst
.reloc
.exp
.X_add_number
< 0
8439 || inst
.reloc
.exp
.X_add_number
> 0xff,
8440 _("immediate value out of range"));
8441 inst
.instruction
= T2_SUBS_PC_LR
8442 | inst
.reloc
.exp
.X_add_number
;
8443 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
8446 else if (Rs
== REG_PC
)
8448 /* Always use addw/subw. */
8449 inst
.instruction
= add
? 0xf20f0000 : 0xf2af0000;
8450 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMM12
;
8454 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8455 inst
.instruction
= (inst
.instruction
& 0xe1ffffff)
8458 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
8460 inst
.reloc
.type
= BFD_RELOC_ARM_T32_ADD_IMM
;
8462 inst
.instruction
|= Rd
<< 8;
8463 inst
.instruction
|= Rs
<< 16;
8468 Rn
= inst
.operands
[2].reg
;
8469 /* See if we can do this with a 16-bit instruction. */
8470 if (!inst
.operands
[2].shifted
&& inst
.size_req
!= 4)
8472 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
8477 inst
.instruction
= ((inst
.instruction
== T_MNEM_adds
8478 || inst
.instruction
== T_MNEM_add
)
8481 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
8485 if (inst
.instruction
== T_MNEM_add
)
8489 inst
.instruction
= T_OPCODE_ADD_HI
;
8490 inst
.instruction
|= (Rd
& 8) << 4;
8491 inst
.instruction
|= (Rd
& 7);
8492 inst
.instruction
|= Rn
<< 3;
8495 /* ... because addition is commutative! */
8498 inst
.instruction
= T_OPCODE_ADD_HI
;
8499 inst
.instruction
|= (Rd
& 8) << 4;
8500 inst
.instruction
|= (Rd
& 7);
8501 inst
.instruction
|= Rs
<< 3;
8506 /* If we get here, it can't be done in 16 bits. */
8507 constraint (inst
.operands
[2].shifted
&& inst
.operands
[2].immisreg
,
8508 _("shift must be constant"));
8509 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8510 inst
.instruction
|= Rd
<< 8;
8511 inst
.instruction
|= Rs
<< 16;
8512 encode_thumb32_shifted_operand (2);
8517 constraint (inst
.instruction
== T_MNEM_adds
8518 || inst
.instruction
== T_MNEM_subs
,
8521 if (!inst
.operands
[2].isreg
) /* Rd, Rs, #imm */
8523 constraint ((Rd
> 7 && (Rd
!= REG_SP
|| Rs
!= REG_SP
))
8524 || (Rs
> 7 && Rs
!= REG_SP
&& Rs
!= REG_PC
),
8527 inst
.instruction
= (inst
.instruction
== T_MNEM_add
8529 inst
.instruction
|= (Rd
<< 4) | Rs
;
8530 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
8534 Rn
= inst
.operands
[2].reg
;
8535 constraint (inst
.operands
[2].shifted
, _("unshifted register required"));
8537 /* We now have Rd, Rs, and Rn set to registers. */
8538 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
8540 /* Can't do this for SUB. */
8541 constraint (inst
.instruction
== T_MNEM_sub
, BAD_HIREG
);
8542 inst
.instruction
= T_OPCODE_ADD_HI
;
8543 inst
.instruction
|= (Rd
& 8) << 4;
8544 inst
.instruction
|= (Rd
& 7);
8546 inst
.instruction
|= Rn
<< 3;
8548 inst
.instruction
|= Rs
<< 3;
8550 constraint (1, _("dest must overlap one source register"));
8554 inst
.instruction
= (inst
.instruction
== T_MNEM_add
8555 ? T_OPCODE_ADD_R3
: T_OPCODE_SUB_R3
);
8556 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
8564 if (unified_syntax
&& inst
.size_req
== 0 && inst
.operands
[0].reg
<= 7)
8566 /* Defer to section relaxation. */
8567 inst
.relax
= inst
.instruction
;
8568 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8569 inst
.instruction
|= inst
.operands
[0].reg
<< 4;
8571 else if (unified_syntax
&& inst
.size_req
!= 2)
8573 /* Generate a 32-bit opcode. */
8574 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8575 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8576 inst
.reloc
.type
= BFD_RELOC_ARM_T32_ADD_PC12
;
8577 inst
.reloc
.pc_rel
= 1;
8581 /* Generate a 16-bit opcode. */
8582 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8583 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
8584 inst
.reloc
.exp
.X_add_number
-= 4; /* PC relative adjust. */
8585 inst
.reloc
.pc_rel
= 1;
8587 inst
.instruction
|= inst
.operands
[0].reg
<< 4;
8591 /* Arithmetic instructions for which there is just one 16-bit
8592 instruction encoding, and it allows only two low registers.
8593 For maximal compatibility with ARM syntax, we allow three register
8594 operands even when Thumb-32 instructions are not available, as long
8595 as the first two are identical. For instance, both "sbc r0,r1" and
8596 "sbc r0,r0,r1" are allowed. */
8602 Rd
= inst
.operands
[0].reg
;
8603 Rs
= (inst
.operands
[1].present
8604 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
8605 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
8606 Rn
= inst
.operands
[2].reg
;
8610 if (!inst
.operands
[2].isreg
)
8612 /* For an immediate, we always generate a 32-bit opcode;
8613 section relaxation will shrink it later if possible. */
8614 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8615 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
8616 inst
.instruction
|= Rd
<< 8;
8617 inst
.instruction
|= Rs
<< 16;
8618 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
8624 /* See if we can do this with a 16-bit instruction. */
8625 if (THUMB_SETS_FLAGS (inst
.instruction
))
8626 narrow
= current_it_mask
== 0;
8628 narrow
= current_it_mask
!= 0;
8630 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
8632 if (inst
.operands
[2].shifted
)
8634 if (inst
.size_req
== 4)
8640 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8641 inst
.instruction
|= Rd
;
8642 inst
.instruction
|= Rn
<< 3;
8646 /* If we get here, it can't be done in 16 bits. */
8647 constraint (inst
.operands
[2].shifted
8648 && inst
.operands
[2].immisreg
,
8649 _("shift must be constant"));
8650 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8651 inst
.instruction
|= Rd
<< 8;
8652 inst
.instruction
|= Rs
<< 16;
8653 encode_thumb32_shifted_operand (2);
8658 /* On its face this is a lie - the instruction does set the
8659 flags. However, the only supported mnemonic in this mode
8661 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
8663 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
8664 _("unshifted register required"));
8665 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
8666 constraint (Rd
!= Rs
,
8667 _("dest and source1 must be the same register"));
8669 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8670 inst
.instruction
|= Rd
;
8671 inst
.instruction
|= Rn
<< 3;
8675 /* Similarly, but for instructions where the arithmetic operation is
8676 commutative, so we can allow either of them to be different from
8677 the destination operand in a 16-bit instruction. For instance, all
8678 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
8685 Rd
= inst
.operands
[0].reg
;
8686 Rs
= (inst
.operands
[1].present
8687 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
8688 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
8689 Rn
= inst
.operands
[2].reg
;
8693 if (!inst
.operands
[2].isreg
)
8695 /* For an immediate, we always generate a 32-bit opcode;
8696 section relaxation will shrink it later if possible. */
8697 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8698 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
8699 inst
.instruction
|= Rd
<< 8;
8700 inst
.instruction
|= Rs
<< 16;
8701 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
8707 /* See if we can do this with a 16-bit instruction. */
8708 if (THUMB_SETS_FLAGS (inst
.instruction
))
8709 narrow
= current_it_mask
== 0;
8711 narrow
= current_it_mask
!= 0;
8713 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
8715 if (inst
.operands
[2].shifted
)
8717 if (inst
.size_req
== 4)
8724 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8725 inst
.instruction
|= Rd
;
8726 inst
.instruction
|= Rn
<< 3;
8731 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8732 inst
.instruction
|= Rd
;
8733 inst
.instruction
|= Rs
<< 3;
8738 /* If we get here, it can't be done in 16 bits. */
8739 constraint (inst
.operands
[2].shifted
8740 && inst
.operands
[2].immisreg
,
8741 _("shift must be constant"));
8742 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8743 inst
.instruction
|= Rd
<< 8;
8744 inst
.instruction
|= Rs
<< 16;
8745 encode_thumb32_shifted_operand (2);
8750 /* On its face this is a lie - the instruction does set the
8751 flags. However, the only supported mnemonic in this mode
8753 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
8755 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
8756 _("unshifted register required"));
8757 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
8759 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8760 inst
.instruction
|= Rd
;
8763 inst
.instruction
|= Rn
<< 3;
8765 inst
.instruction
|= Rs
<< 3;
8767 constraint (1, _("dest must overlap one source register"));
8774 if (inst
.operands
[0].present
)
8776 constraint ((inst
.instruction
& 0xf0) != 0x40
8777 && inst
.operands
[0].imm
!= 0xf,
8778 _("bad barrier type"));
8779 inst
.instruction
|= inst
.operands
[0].imm
;
8782 inst
.instruction
|= 0xf;
8788 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
8789 constraint (msb
> 32, _("bit-field extends past end of register"));
8790 /* The instruction encoding stores the LSB and MSB,
8791 not the LSB and width. */
8792 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8793 inst
.instruction
|= (inst
.operands
[1].imm
& 0x1c) << 10;
8794 inst
.instruction
|= (inst
.operands
[1].imm
& 0x03) << 6;
8795 inst
.instruction
|= msb
- 1;
8803 /* #0 in second position is alternative syntax for bfc, which is
8804 the same instruction but with REG_PC in the Rm field. */
8805 if (!inst
.operands
[1].isreg
)
8806 inst
.operands
[1].reg
= REG_PC
;
8808 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
8809 constraint (msb
> 32, _("bit-field extends past end of register"));
8810 /* The instruction encoding stores the LSB and MSB,
8811 not the LSB and width. */
8812 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8813 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8814 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
8815 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
8816 inst
.instruction
|= msb
- 1;
8822 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
8823 _("bit-field extends past end of register"));
8824 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8825 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8826 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
8827 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
8828 inst
.instruction
|= inst
.operands
[3].imm
- 1;
8831 /* ARM V5 Thumb BLX (argument parse)
8832 BLX <target_addr> which is BLX(1)
8833 BLX <Rm> which is BLX(2)
8834 Unfortunately, there are two different opcodes for this mnemonic.
8835 So, the insns[].value is not used, and the code here zaps values
8836 into inst.instruction.
8838 ??? How to take advantage of the additional two bits of displacement
8839 available in Thumb32 mode? Need new relocation? */
8844 constraint (current_it_mask
&& current_it_mask
!= 0x10, BAD_BRANCH
);
8845 if (inst
.operands
[0].isreg
)
8846 /* We have a register, so this is BLX(2). */
8847 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
8850 /* No register. This must be BLX(1). */
8851 inst
.instruction
= 0xf000e800;
8853 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
8854 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
8857 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BLX
;
8858 inst
.reloc
.pc_rel
= 1;
8868 if (current_it_mask
)
8870 /* Conditional branches inside IT blocks are encoded as unconditional
8873 /* A branch must be the last instruction in an IT block. */
8874 constraint (current_it_mask
!= 0x10, BAD_BRANCH
);
8879 if (cond
!= COND_ALWAYS
)
8880 opcode
= T_MNEM_bcond
;
8882 opcode
= inst
.instruction
;
8884 if (unified_syntax
&& inst
.size_req
== 4)
8886 inst
.instruction
= THUMB_OP32(opcode
);
8887 if (cond
== COND_ALWAYS
)
8888 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
8891 assert (cond
!= 0xF);
8892 inst
.instruction
|= cond
<< 22;
8893 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
8898 inst
.instruction
= THUMB_OP16(opcode
);
8899 if (cond
== COND_ALWAYS
)
8900 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
8903 inst
.instruction
|= cond
<< 8;
8904 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
8906 /* Allow section relaxation. */
8907 if (unified_syntax
&& inst
.size_req
!= 2)
8908 inst
.relax
= opcode
;
8911 inst
.reloc
.pc_rel
= 1;
8917 constraint (inst
.cond
!= COND_ALWAYS
,
8918 _("instruction is always unconditional"));
8919 if (inst
.operands
[0].present
)
8921 constraint (inst
.operands
[0].imm
> 255,
8922 _("immediate value out of range"));
8923 inst
.instruction
|= inst
.operands
[0].imm
;
8928 do_t_branch23 (void)
8930 constraint (current_it_mask
&& current_it_mask
!= 0x10, BAD_BRANCH
);
8931 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
8932 inst
.reloc
.pc_rel
= 1;
8934 /* If the destination of the branch is a defined symbol which does not have
8935 the THUMB_FUNC attribute, then we must be calling a function which has
8936 the (interfacearm) attribute. We look for the Thumb entry point to that
8937 function and change the branch to refer to that function instead. */
8938 if ( inst
.reloc
.exp
.X_op
== O_symbol
8939 && inst
.reloc
.exp
.X_add_symbol
!= NULL
8940 && S_IS_DEFINED (inst
.reloc
.exp
.X_add_symbol
)
8941 && ! THUMB_IS_FUNC (inst
.reloc
.exp
.X_add_symbol
))
8942 inst
.reloc
.exp
.X_add_symbol
=
8943 find_real_start (inst
.reloc
.exp
.X_add_symbol
);
8949 constraint (current_it_mask
&& current_it_mask
!= 0x10, BAD_BRANCH
);
8950 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
8951 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
8952 should cause the alignment to be checked once it is known. This is
8953 because BX PC only works if the instruction is word aligned. */
8959 constraint (current_it_mask
&& current_it_mask
!= 0x10, BAD_BRANCH
);
8960 if (inst
.operands
[0].reg
== REG_PC
)
8961 as_tsktsk (_("use of r15 in bxj is not really useful"));
8963 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8969 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8970 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8971 inst
.instruction
|= inst
.operands
[1].reg
;
8977 constraint (current_it_mask
, BAD_NOT_IT
);
8978 inst
.instruction
|= inst
.operands
[0].imm
;
8984 constraint (current_it_mask
, BAD_NOT_IT
);
8986 && (inst
.operands
[1].present
|| inst
.size_req
== 4)
8987 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6_notm
))
8989 unsigned int imod
= (inst
.instruction
& 0x0030) >> 4;
8990 inst
.instruction
= 0xf3af8000;
8991 inst
.instruction
|= imod
<< 9;
8992 inst
.instruction
|= inst
.operands
[0].imm
<< 5;
8993 if (inst
.operands
[1].present
)
8994 inst
.instruction
|= 0x100 | inst
.operands
[1].imm
;
8998 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
)
8999 && (inst
.operands
[0].imm
& 4),
9000 _("selected processor does not support 'A' form "
9001 "of this instruction"));
9002 constraint (inst
.operands
[1].present
|| inst
.size_req
== 4,
9003 _("Thumb does not support the 2-argument "
9004 "form of this instruction"));
9005 inst
.instruction
|= inst
.operands
[0].imm
;
9009 /* THUMB CPY instruction (argument parse). */
9014 if (inst
.size_req
== 4)
9016 inst
.instruction
= THUMB_OP32 (T_MNEM_mov
);
9017 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9018 inst
.instruction
|= inst
.operands
[1].reg
;
9022 inst
.instruction
|= (inst
.operands
[0].reg
& 0x8) << 4;
9023 inst
.instruction
|= (inst
.operands
[0].reg
& 0x7);
9024 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9031 constraint (current_it_mask
, BAD_NOT_IT
);
9032 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
9033 inst
.instruction
|= inst
.operands
[0].reg
;
9034 inst
.reloc
.pc_rel
= 1;
9035 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH7
;
9041 inst
.instruction
|= inst
.operands
[0].imm
;
9047 if (!inst
.operands
[1].present
)
9048 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
9049 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9050 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9051 inst
.instruction
|= inst
.operands
[2].reg
;
9057 if (unified_syntax
&& inst
.size_req
== 4)
9058 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9060 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9066 unsigned int cond
= inst
.operands
[0].imm
;
9068 constraint (current_it_mask
, BAD_NOT_IT
);
9069 current_it_mask
= (inst
.instruction
& 0xf) | 0x10;
9072 /* If the condition is a negative condition, invert the mask. */
9073 if ((cond
& 0x1) == 0x0)
9075 unsigned int mask
= inst
.instruction
& 0x000f;
9077 if ((mask
& 0x7) == 0)
9078 /* no conversion needed */;
9079 else if ((mask
& 0x3) == 0)
9081 else if ((mask
& 0x1) == 0)
9086 inst
.instruction
&= 0xfff0;
9087 inst
.instruction
|= mask
;
9090 inst
.instruction
|= cond
<< 4;
9093 /* Helper function used for both push/pop and ldm/stm. */
9095 encode_thumb2_ldmstm (int base
, unsigned mask
, bfd_boolean writeback
)
9099 load
= (inst
.instruction
& (1 << 20)) != 0;
9101 if (mask
& (1 << 13))
9102 inst
.error
= _("SP not allowed in register list");
9105 if (mask
& (1 << 14)
9106 && mask
& (1 << 15))
9107 inst
.error
= _("LR and PC should not both be in register list");
9109 if ((mask
& (1 << base
)) != 0
9111 as_warn (_("base register should not be in register list "
9112 "when written back"));
9116 if (mask
& (1 << 15))
9117 inst
.error
= _("PC not allowed in register list");
9119 if (mask
& (1 << base
))
9120 as_warn (_("value stored for r%d is UNPREDICTABLE"), base
);
9123 if ((mask
& (mask
- 1)) == 0)
9125 /* Single register transfers implemented as str/ldr. */
9128 if (inst
.instruction
& (1 << 23))
9129 inst
.instruction
= 0x00000b04; /* ia! -> [base], #4 */
9131 inst
.instruction
= 0x00000d04; /* db! -> [base, #-4]! */
9135 if (inst
.instruction
& (1 << 23))
9136 inst
.instruction
= 0x00800000; /* ia -> [base] */
9138 inst
.instruction
= 0x00000c04; /* db -> [base, #-4] */
9141 inst
.instruction
|= 0xf8400000;
9143 inst
.instruction
|= 0x00100000;
9145 mask
= ffs(mask
) - 1;
9149 inst
.instruction
|= WRITE_BACK
;
9151 inst
.instruction
|= mask
;
9152 inst
.instruction
|= base
<< 16;
9158 /* This really doesn't seem worth it. */
9159 constraint (inst
.reloc
.type
!= BFD_RELOC_UNUSED
,
9160 _("expression too complex"));
9161 constraint (inst
.operands
[1].writeback
,
9162 _("Thumb load/store multiple does not support {reglist}^"));
9170 /* See if we can use a 16-bit instruction. */
9171 if (inst
.instruction
< 0xffff /* not ldmdb/stmdb */
9172 && inst
.size_req
!= 4
9173 && !(inst
.operands
[1].imm
& ~0xff))
9175 mask
= 1 << inst
.operands
[0].reg
;
9177 if (inst
.operands
[0].reg
<= 7
9178 && (inst
.instruction
== T_MNEM_stmia
9179 ? inst
.operands
[0].writeback
9180 : (inst
.operands
[0].writeback
9181 == !(inst
.operands
[1].imm
& mask
))))
9183 if (inst
.instruction
== T_MNEM_stmia
9184 && (inst
.operands
[1].imm
& mask
)
9185 && (inst
.operands
[1].imm
& (mask
- 1)))
9186 as_warn (_("value stored for r%d is UNPREDICTABLE"),
9187 inst
.operands
[0].reg
);
9189 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9190 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9191 inst
.instruction
|= inst
.operands
[1].imm
;
9194 else if (inst
.operands
[0] .reg
== REG_SP
9195 && inst
.operands
[0].writeback
)
9197 inst
.instruction
= THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
9198 ? T_MNEM_push
: T_MNEM_pop
);
9199 inst
.instruction
|= inst
.operands
[1].imm
;
9206 if (inst
.instruction
< 0xffff)
9207 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9209 encode_thumb2_ldmstm(inst
.operands
[0].reg
, inst
.operands
[1].imm
,
9210 inst
.operands
[0].writeback
);
9215 constraint (inst
.operands
[0].reg
> 7
9216 || (inst
.operands
[1].imm
& ~0xff), BAD_HIREG
);
9217 constraint (inst
.instruction
!= T_MNEM_ldmia
9218 && inst
.instruction
!= T_MNEM_stmia
,
9219 _("Thumb-2 instruction only valid in unified syntax"));
9220 if (inst
.instruction
== T_MNEM_stmia
)
9222 if (!inst
.operands
[0].writeback
)
9223 as_warn (_("this instruction will write back the base register"));
9224 if ((inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
))
9225 && (inst
.operands
[1].imm
& ((1 << inst
.operands
[0].reg
) - 1)))
9226 as_warn (_("value stored for r%d is UNPREDICTABLE"),
9227 inst
.operands
[0].reg
);
9231 if (!inst
.operands
[0].writeback
9232 && !(inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
9233 as_warn (_("this instruction will write back the base register"));
9234 else if (inst
.operands
[0].writeback
9235 && (inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
9236 as_warn (_("this instruction will not write back the base register"));
9239 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9240 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9241 inst
.instruction
|= inst
.operands
[1].imm
;
9248 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
9249 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
9250 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
9251 || inst
.operands
[1].negative
,
9254 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9255 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9256 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
9262 if (!inst
.operands
[1].present
)
9264 constraint (inst
.operands
[0].reg
== REG_LR
,
9265 _("r14 not allowed as first register "
9266 "when second register is omitted"));
9267 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
9269 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
,
9272 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9273 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
9274 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9280 unsigned long opcode
;
9283 opcode
= inst
.instruction
;
9286 if (!inst
.operands
[1].isreg
)
9288 if (opcode
<= 0xffff)
9289 inst
.instruction
= THUMB_OP32 (opcode
);
9290 if (move_or_literal_pool (0, /*thumb_p=*/TRUE
, /*mode_3=*/FALSE
))
9293 if (inst
.operands
[1].isreg
9294 && !inst
.operands
[1].writeback
9295 && !inst
.operands
[1].shifted
&& !inst
.operands
[1].postind
9296 && !inst
.operands
[1].negative
&& inst
.operands
[0].reg
<= 7
9298 && inst
.size_req
!= 4)
9300 /* Insn may have a 16-bit form. */
9301 Rn
= inst
.operands
[1].reg
;
9302 if (inst
.operands
[1].immisreg
)
9304 inst
.instruction
= THUMB_OP16 (opcode
);
9306 if (Rn
<= 7 && inst
.operands
[1].imm
<= 7)
9309 else if ((Rn
<= 7 && opcode
!= T_MNEM_ldrsh
9310 && opcode
!= T_MNEM_ldrsb
)
9311 || ((Rn
== REG_PC
|| Rn
== REG_SP
) && opcode
== T_MNEM_ldr
)
9312 || (Rn
== REG_SP
&& opcode
== T_MNEM_str
))
9319 if (inst
.reloc
.pc_rel
)
9320 opcode
= T_MNEM_ldr_pc2
;
9322 opcode
= T_MNEM_ldr_pc
;
9326 if (opcode
== T_MNEM_ldr
)
9327 opcode
= T_MNEM_ldr_sp
;
9329 opcode
= T_MNEM_str_sp
;
9331 inst
.instruction
= inst
.operands
[0].reg
<< 8;
9335 inst
.instruction
= inst
.operands
[0].reg
;
9336 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9338 inst
.instruction
|= THUMB_OP16 (opcode
);
9339 if (inst
.size_req
== 2)
9340 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
9342 inst
.relax
= opcode
;
9346 /* Definitely a 32-bit variant. */
9347 inst
.instruction
= THUMB_OP32 (opcode
);
9348 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9349 encode_thumb32_addr_mode (1, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
9353 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
9355 if (inst
.instruction
== T_MNEM_ldrsh
|| inst
.instruction
== T_MNEM_ldrsb
)
9357 /* Only [Rn,Rm] is acceptable. */
9358 constraint (inst
.operands
[1].reg
> 7 || inst
.operands
[1].imm
> 7, BAD_HIREG
);
9359 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].immisreg
9360 || inst
.operands
[1].postind
|| inst
.operands
[1].shifted
9361 || inst
.operands
[1].negative
,
9362 _("Thumb does not support this addressing mode"));
9363 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9367 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9368 if (!inst
.operands
[1].isreg
)
9369 if (move_or_literal_pool (0, /*thumb_p=*/TRUE
, /*mode_3=*/FALSE
))
9372 constraint (!inst
.operands
[1].preind
9373 || inst
.operands
[1].shifted
9374 || inst
.operands
[1].writeback
,
9375 _("Thumb does not support this addressing mode"));
9376 if (inst
.operands
[1].reg
== REG_PC
|| inst
.operands
[1].reg
== REG_SP
)
9378 constraint (inst
.instruction
& 0x0600,
9379 _("byte or halfword not valid for base register"));
9380 constraint (inst
.operands
[1].reg
== REG_PC
9381 && !(inst
.instruction
& THUMB_LOAD_BIT
),
9382 _("r15 based store not allowed"));
9383 constraint (inst
.operands
[1].immisreg
,
9384 _("invalid base register for register offset"));
9386 if (inst
.operands
[1].reg
== REG_PC
)
9387 inst
.instruction
= T_OPCODE_LDR_PC
;
9388 else if (inst
.instruction
& THUMB_LOAD_BIT
)
9389 inst
.instruction
= T_OPCODE_LDR_SP
;
9391 inst
.instruction
= T_OPCODE_STR_SP
;
9393 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9394 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
9398 constraint (inst
.operands
[1].reg
> 7, BAD_HIREG
);
9399 if (!inst
.operands
[1].immisreg
)
9401 /* Immediate offset. */
9402 inst
.instruction
|= inst
.operands
[0].reg
;
9403 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9404 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
9408 /* Register offset. */
9409 constraint (inst
.operands
[1].imm
> 7, BAD_HIREG
);
9410 constraint (inst
.operands
[1].negative
,
9411 _("Thumb does not support this addressing mode"));
9414 switch (inst
.instruction
)
9416 case T_OPCODE_STR_IW
: inst
.instruction
= T_OPCODE_STR_RW
; break;
9417 case T_OPCODE_STR_IH
: inst
.instruction
= T_OPCODE_STR_RH
; break;
9418 case T_OPCODE_STR_IB
: inst
.instruction
= T_OPCODE_STR_RB
; break;
9419 case T_OPCODE_LDR_IW
: inst
.instruction
= T_OPCODE_LDR_RW
; break;
9420 case T_OPCODE_LDR_IH
: inst
.instruction
= T_OPCODE_LDR_RH
; break;
9421 case T_OPCODE_LDR_IB
: inst
.instruction
= T_OPCODE_LDR_RB
; break;
9422 case 0x5600 /* ldrsb */:
9423 case 0x5e00 /* ldrsh */: break;
9427 inst
.instruction
|= inst
.operands
[0].reg
;
9428 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9429 inst
.instruction
|= inst
.operands
[1].imm
<< 6;
9435 if (!inst
.operands
[1].present
)
9437 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
9438 constraint (inst
.operands
[0].reg
== REG_LR
,
9439 _("r14 not allowed here"));
9441 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9442 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
9443 encode_thumb32_addr_mode (2, /*is_t=*/FALSE
, /*is_d=*/TRUE
);
9450 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9451 encode_thumb32_addr_mode (1, /*is_t=*/TRUE
, /*is_d=*/FALSE
);
9457 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9458 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9459 inst
.instruction
|= inst
.operands
[2].reg
;
9460 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
9466 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9467 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
9468 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9469 inst
.instruction
|= inst
.operands
[3].reg
;
9477 int r0off
= (inst
.instruction
== T_MNEM_mov
9478 || inst
.instruction
== T_MNEM_movs
) ? 8 : 16;
9479 unsigned long opcode
;
9481 bfd_boolean low_regs
;
9483 low_regs
= (inst
.operands
[0].reg
<= 7 && inst
.operands
[1].reg
<= 7);
9484 opcode
= inst
.instruction
;
9485 if (current_it_mask
)
9486 narrow
= opcode
!= T_MNEM_movs
;
9488 narrow
= opcode
!= T_MNEM_movs
|| low_regs
;
9489 if (inst
.size_req
== 4
9490 || inst
.operands
[1].shifted
)
9493 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
9494 if (opcode
== T_MNEM_movs
&& inst
.operands
[1].isreg
9495 && !inst
.operands
[1].shifted
9496 && inst
.operands
[0].reg
== REG_PC
9497 && inst
.operands
[1].reg
== REG_LR
)
9499 inst
.instruction
= T2_SUBS_PC_LR
;
9503 if (!inst
.operands
[1].isreg
)
9505 /* Immediate operand. */
9506 if (current_it_mask
== 0 && opcode
== T_MNEM_mov
)
9508 if (low_regs
&& narrow
)
9510 inst
.instruction
= THUMB_OP16 (opcode
);
9511 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9512 if (inst
.size_req
== 2)
9513 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_IMM
;
9515 inst
.relax
= opcode
;
9519 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9520 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
9521 inst
.instruction
|= inst
.operands
[0].reg
<< r0off
;
9522 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
9525 else if (inst
.operands
[1].shifted
&& inst
.operands
[1].immisreg
9526 && (inst
.instruction
== T_MNEM_mov
9527 || inst
.instruction
== T_MNEM_movs
))
9529 /* Register shifts are encoded as separate shift instructions. */
9530 bfd_boolean flags
= (inst
.instruction
== T_MNEM_movs
);
9532 if (current_it_mask
)
9537 if (inst
.size_req
== 4)
9540 if (!low_regs
|| inst
.operands
[1].imm
> 7)
9543 if (inst
.operands
[0].reg
!= inst
.operands
[1].reg
)
9546 switch (inst
.operands
[1].shift_kind
)
9549 opcode
= narrow
? T_OPCODE_LSL_R
: THUMB_OP32 (T_MNEM_lsl
);
9552 opcode
= narrow
? T_OPCODE_ASR_R
: THUMB_OP32 (T_MNEM_asr
);
9555 opcode
= narrow
? T_OPCODE_LSR_R
: THUMB_OP32 (T_MNEM_lsr
);
9558 opcode
= narrow
? T_OPCODE_ROR_R
: THUMB_OP32 (T_MNEM_ror
);
9564 inst
.instruction
= opcode
;
9567 inst
.instruction
|= inst
.operands
[0].reg
;
9568 inst
.instruction
|= inst
.operands
[1].imm
<< 3;
9573 inst
.instruction
|= CONDS_BIT
;
9575 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9576 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9577 inst
.instruction
|= inst
.operands
[1].imm
;
9582 /* Some mov with immediate shift have narrow variants.
9583 Register shifts are handled above. */
9584 if (low_regs
&& inst
.operands
[1].shifted
9585 && (inst
.instruction
== T_MNEM_mov
9586 || inst
.instruction
== T_MNEM_movs
))
9588 if (current_it_mask
)
9589 narrow
= (inst
.instruction
== T_MNEM_mov
);
9591 narrow
= (inst
.instruction
== T_MNEM_movs
);
9596 switch (inst
.operands
[1].shift_kind
)
9598 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
9599 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
9600 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
9601 default: narrow
= FALSE
; break;
9607 inst
.instruction
|= inst
.operands
[0].reg
;
9608 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9609 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
9613 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9614 inst
.instruction
|= inst
.operands
[0].reg
<< r0off
;
9615 encode_thumb32_shifted_operand (1);
9619 switch (inst
.instruction
)
9622 inst
.instruction
= T_OPCODE_MOV_HR
;
9623 inst
.instruction
|= (inst
.operands
[0].reg
& 0x8) << 4;
9624 inst
.instruction
|= (inst
.operands
[0].reg
& 0x7);
9625 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9629 /* We know we have low registers at this point.
9630 Generate ADD Rd, Rs, #0. */
9631 inst
.instruction
= T_OPCODE_ADD_I3
;
9632 inst
.instruction
|= inst
.operands
[0].reg
;
9633 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9639 inst
.instruction
= T_OPCODE_CMP_LR
;
9640 inst
.instruction
|= inst
.operands
[0].reg
;
9641 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9645 inst
.instruction
= T_OPCODE_CMP_HR
;
9646 inst
.instruction
|= (inst
.operands
[0].reg
& 0x8) << 4;
9647 inst
.instruction
|= (inst
.operands
[0].reg
& 0x7);
9648 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9655 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9656 if (inst
.operands
[1].isreg
)
9658 if (inst
.operands
[0].reg
< 8 && inst
.operands
[1].reg
< 8)
9660 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
9661 since a MOV instruction produces unpredictable results. */
9662 if (inst
.instruction
== T_OPCODE_MOV_I8
)
9663 inst
.instruction
= T_OPCODE_ADD_I3
;
9665 inst
.instruction
= T_OPCODE_CMP_LR
;
9667 inst
.instruction
|= inst
.operands
[0].reg
;
9668 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9672 if (inst
.instruction
== T_OPCODE_MOV_I8
)
9673 inst
.instruction
= T_OPCODE_MOV_HR
;
9675 inst
.instruction
= T_OPCODE_CMP_HR
;
9681 constraint (inst
.operands
[0].reg
> 7,
9682 _("only lo regs allowed with immediate"));
9683 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9684 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_IMM
;
9694 top
= (inst
.instruction
& 0x00800000) != 0;
9695 if (inst
.reloc
.type
== BFD_RELOC_ARM_MOVW
)
9697 constraint (top
, _(":lower16: not allowed this instruction"));
9698 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_MOVW
;
9700 else if (inst
.reloc
.type
== BFD_RELOC_ARM_MOVT
)
9702 constraint (!top
, _(":upper16: not allowed this instruction"));
9703 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_MOVT
;
9706 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9707 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
9709 imm
= inst
.reloc
.exp
.X_add_number
;
9710 inst
.instruction
|= (imm
& 0xf000) << 4;
9711 inst
.instruction
|= (imm
& 0x0800) << 15;
9712 inst
.instruction
|= (imm
& 0x0700) << 4;
9713 inst
.instruction
|= (imm
& 0x00ff);
9722 int r0off
= (inst
.instruction
== T_MNEM_mvn
9723 || inst
.instruction
== T_MNEM_mvns
) ? 8 : 16;
9726 if (inst
.size_req
== 4
9727 || inst
.instruction
> 0xffff
9728 || inst
.operands
[1].shifted
9729 || inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
9731 else if (inst
.instruction
== T_MNEM_cmn
)
9733 else if (THUMB_SETS_FLAGS (inst
.instruction
))
9734 narrow
= (current_it_mask
== 0);
9736 narrow
= (current_it_mask
!= 0);
9738 if (!inst
.operands
[1].isreg
)
9740 /* For an immediate, we always generate a 32-bit opcode;
9741 section relaxation will shrink it later if possible. */
9742 if (inst
.instruction
< 0xffff)
9743 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9744 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
9745 inst
.instruction
|= inst
.operands
[0].reg
<< r0off
;
9746 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
9750 /* See if we can do this with a 16-bit instruction. */
9753 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9754 inst
.instruction
|= inst
.operands
[0].reg
;
9755 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9759 constraint (inst
.operands
[1].shifted
9760 && inst
.operands
[1].immisreg
,
9761 _("shift must be constant"));
9762 if (inst
.instruction
< 0xffff)
9763 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9764 inst
.instruction
|= inst
.operands
[0].reg
<< r0off
;
9765 encode_thumb32_shifted_operand (1);
9771 constraint (inst
.instruction
> 0xffff
9772 || inst
.instruction
== T_MNEM_mvns
, BAD_THUMB32
);
9773 constraint (!inst
.operands
[1].isreg
|| inst
.operands
[1].shifted
,
9774 _("unshifted register required"));
9775 constraint (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7,
9778 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9779 inst
.instruction
|= inst
.operands
[0].reg
;
9780 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9789 if (do_vfp_nsyn_mrs () == SUCCESS
)
9792 flags
= inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
9795 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7m
),
9796 _("selected processor does not support "
9797 "requested special purpose register"));
9801 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
),
9802 _("selected processor does not support "
9803 "requested special purpose register %x"));
9804 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
9805 constraint ((flags
& ~SPSR_BIT
) != (PSR_c
|PSR_f
),
9806 _("'CPSR' or 'SPSR' expected"));
9809 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9810 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
9811 inst
.instruction
|= inst
.operands
[1].imm
& 0xff;
static void
do_t_msr (void)
{
  int flags;

  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  constraint (!inst.operands[1].isreg,
	      _("Thumb encoding does not support an immediate here"));
  flags = inst.operands[0].imm;
  if (flags & ~0xff)
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
		  _("selected processor does not support "
		    "requested special purpose register"));
    }
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m),
		  _("selected processor does not support "
		    "requested special purpose register"));
      flags |= PSR_f;
    }
  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & ~SPSR_BIT) >> 8;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= inst.operands[1].reg << 16;
}
static void
do_t_mul (void)
{
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;

  /* There is no 32-bit MULS and no 16-bit MUL.  */
  if (unified_syntax && inst.instruction == T_MNEM_mul)
    {
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[2].reg << 0;
    }
  else
    {
      constraint (!unified_syntax
		  && inst.instruction == T_MNEM_muls, BAD_THUMB32);
      constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg;

      if (inst.operands[0].reg == inst.operands[1].reg)
	inst.instruction |= inst.operands[2].reg << 3;
      else if (inst.operands[0].reg == inst.operands[2].reg)
	inst.instruction |= inst.operands[1].reg << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
}
static void
do_t_mull (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 8;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.instruction |= inst.operands[3].reg;

  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));
}
static void
do_t_nop (void)
{
  if (unified_syntax)
    {
      if (inst.size_req == 4 || inst.operands[0].imm > 15)
	{
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].imm;
	}
      else
	{
	  inst.instruction = THUMB_OP16 (inst.instruction);
	  inst.instruction |= inst.operands[0].imm << 4;
	}
    }
  else
    {
      constraint (inst.operands[0].present,
		  _("Thumb does not support NOP with hints"));
      inst.instruction = 0x46c0;
    }
}
static void
do_t_neg (void)
{
  if (unified_syntax)
    {
      bfd_boolean narrow;

      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = (current_it_mask == 0);
      else
	narrow = (current_it_mask != 0);
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      if (!narrow)
	{
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg << 8;
	  inst.instruction |= inst.operands[1].reg << 16;
	}
      else
	{
	  inst.instruction = THUMB_OP16 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
  else
    {
      constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
		  BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
    }
}
static void
do_t_pkhbt (void)
{
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  if (inst.operands[3].present)
    {
      unsigned int val = inst.reloc.exp.X_add_number;
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));
      inst.instruction |= (val & 0x1c) << 10;
      inst.instruction |= (val & 0x03) << 6;
    }
}

static void
do_t_pkhtb (void)
{
  if (!inst.operands[3].present)
    inst.instruction &= ~0x00000020;
  do_t_pkhbt ();
}

static void
do_t_pld (void)
{
  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
}
static void
do_t_push_pop (void)
{
  unsigned mask;

  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));

  mask = inst.operands[0].imm;
  if ((mask & ~0xff) == 0)
    inst.instruction = THUMB_OP16 (inst.instruction) | mask;
  else if ((inst.instruction == T_MNEM_push
	    && (mask & ~0xff) == 1 << REG_LR)
	   || (inst.instruction == T_MNEM_pop
	       && (mask & ~0xff) == 1 << REG_PC))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= THUMB_PP_PC_LR;
      inst.instruction |= mask & 0xff;
    }
  else if (unified_syntax)
    {
      inst.instruction = THUMB_OP32 (inst.instruction);
      encode_thumb2_ldmstm(13, mask, TRUE);
    }
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }
}
static void
do_t_rbit (void)
{
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
}

static void
do_t_rev (void)
{
  if (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7
      && inst.size_req != 4)
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
    }
  else if (unified_syntax)
    {
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[1].reg;
    }
  else
    inst.error = BAD_HIREG;
}
static void
do_t_rsb (void)
{
  int Rd, Rs;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  inst.instruction |= Rd << 8;
  inst.instruction |= Rs << 16;
  if (!inst.operands[2].isreg)
    {
      bfd_boolean narrow;

      if ((inst.instruction & 0x00100000) != 0)
	narrow = (current_it_mask == 0);
      else
	narrow = (current_it_mask != 0);

      if (Rd > 7 || Rs > 7)
	narrow = FALSE;

      if (inst.size_req == 4 || !unified_syntax)
	narrow = FALSE;

      if (inst.reloc.exp.X_op != O_constant
	  || inst.reloc.exp.X_add_number != 0)
	narrow = FALSE;

      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
	 relaxation, but it doesn't seem worth the hassle.  */
      if (narrow)
	{
	  inst.reloc.type = BFD_RELOC_UNUSED;
	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
	  inst.instruction |= Rs << 3;
	  inst.instruction |= Rd;
	}
      else
	{
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
    }
  else
    encode_thumb32_shifted_operand (2);
}
static void
do_t_setend (void)
{
  constraint (current_it_mask, BAD_NOT_IT);
  if (inst.operands[0].imm)
    inst.instruction |= 0x8;
}
10107 if (!inst
.operands
[1].present
)
10108 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
10110 if (unified_syntax
)
10112 bfd_boolean narrow
;
10115 switch (inst
.instruction
)
10118 case T_MNEM_asrs
: shift_kind
= SHIFT_ASR
; break;
10120 case T_MNEM_lsls
: shift_kind
= SHIFT_LSL
; break;
10122 case T_MNEM_lsrs
: shift_kind
= SHIFT_LSR
; break;
10124 case T_MNEM_rors
: shift_kind
= SHIFT_ROR
; break;
10128 if (THUMB_SETS_FLAGS (inst
.instruction
))
10129 narrow
= (current_it_mask
== 0);
10131 narrow
= (current_it_mask
!= 0);
10132 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
10134 if (!inst
.operands
[2].isreg
&& shift_kind
== SHIFT_ROR
)
10136 if (inst
.operands
[2].isreg
10137 && (inst
.operands
[1].reg
!= inst
.operands
[0].reg
10138 || inst
.operands
[2].reg
> 7))
10140 if (inst
.size_req
== 4)
10145 if (inst
.operands
[2].isreg
)
10147 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10148 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
10149 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10150 inst
.instruction
|= inst
.operands
[2].reg
;
10154 inst
.operands
[1].shifted
= 1;
10155 inst
.operands
[1].shift_kind
= shift_kind
;
10156 inst
.instruction
= THUMB_OP32 (THUMB_SETS_FLAGS (inst
.instruction
)
10157 ? T_MNEM_movs
: T_MNEM_mov
);
10158 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
10159 encode_thumb32_shifted_operand (1);
10160 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
10161 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
10166 if (inst
.operands
[2].isreg
)
10168 switch (shift_kind
)
10170 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_R
; break;
10171 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_R
; break;
10172 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_R
; break;
10173 case SHIFT_ROR
: inst
.instruction
= T_OPCODE_ROR_R
; break;
10177 inst
.instruction
|= inst
.operands
[0].reg
;
10178 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
10182 switch (shift_kind
)
10184 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
10185 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
10186 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
10189 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
10190 inst
.instruction
|= inst
.operands
[0].reg
;
10191 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
10197 constraint (inst
.operands
[0].reg
> 7
10198 || inst
.operands
[1].reg
> 7, BAD_HIREG
);
10199 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
10201 if (inst
.operands
[2].isreg
) /* Rd, {Rs,} Rn */
10203 constraint (inst
.operands
[2].reg
> 7, BAD_HIREG
);
10204 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
10205 _("source1 and dest must be same register"));
10207 switch (inst
.instruction
)
10209 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_R
; break;
10210 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_R
; break;
10211 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_R
; break;
10212 case T_MNEM_ror
: inst
.instruction
= T_OPCODE_ROR_R
; break;
10216 inst
.instruction
|= inst
.operands
[0].reg
;
10217 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
10221 switch (inst
.instruction
)
10223 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_I
; break;
10224 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_I
; break;
10225 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_I
; break;
10226 case T_MNEM_ror
: inst
.error
= _("ror #imm not supported"); return;
10229 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
10230 inst
.instruction
|= inst
.operands
[0].reg
;
10231 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
10239 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
10240 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10241 inst
.instruction
|= inst
.operands
[2].reg
;
10247 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
10248 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
10249 _("expression too complex"));
10250 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
10251 inst
.instruction
|= (value
& 0xf000) >> 12;
10252 inst
.instruction
|= (value
& 0x0ff0);
10253 inst
.instruction
|= (value
& 0x000f) << 16;
10259 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
10260 inst
.instruction
|= inst
.operands
[1].imm
- 1;
10261 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10263 if (inst
.operands
[3].present
)
10265 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
10266 _("expression too complex"));
10268 if (inst
.reloc
.exp
.X_add_number
!= 0)
10270 if (inst
.operands
[3].shift_kind
== SHIFT_ASR
)
10271 inst
.instruction
|= 0x00200000; /* sh bit */
10272 inst
.instruction
|= (inst
.reloc
.exp
.X_add_number
& 0x1c) << 10;
10273 inst
.instruction
|= (inst
.reloc
.exp
.X_add_number
& 0x03) << 6;
10275 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
10282 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
10283 inst
.instruction
|= inst
.operands
[1].imm
- 1;
10284 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10290 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
10291 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
10292 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
10293 || inst
.operands
[2].negative
,
10296 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
10297 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10298 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10299 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
10305 if (!inst
.operands
[2].present
)
10306 inst
.operands
[2].reg
= inst
.operands
[1].reg
+ 1;
10308 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
10309 || inst
.operands
[0].reg
== inst
.operands
[2].reg
10310 || inst
.operands
[0].reg
== inst
.operands
[3].reg
10311 || inst
.operands
[1].reg
== inst
.operands
[2].reg
,
10314 inst
.instruction
|= inst
.operands
[0].reg
;
10315 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10316 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
10317 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
10323 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
10324 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10325 inst
.instruction
|= inst
.operands
[2].reg
;
10326 inst
.instruction
|= inst
.operands
[3].imm
<< 4;
10332 if (inst
.instruction
<= 0xffff && inst
.size_req
!= 4
10333 && inst
.operands
[0].reg
<= 7 && inst
.operands
[1].reg
<= 7
10334 && (!inst
.operands
[2].present
|| inst
.operands
[2].imm
== 0))
10336 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10337 inst
.instruction
|= inst
.operands
[0].reg
;
10338 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
10340 else if (unified_syntax
)
10342 if (inst
.instruction
<= 0xffff)
10343 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10344 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
10345 inst
.instruction
|= inst
.operands
[1].reg
;
10346 inst
.instruction
|= inst
.operands
[2].imm
<< 4;
10350 constraint (inst
.operands
[2].present
&& inst
.operands
[2].imm
!= 0,
10351 _("Thumb encoding does not support rotation"));
10352 constraint (1, BAD_HIREG
);
static void
do_t_swi (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SWI;
}

static void
do_t_tb (void)
{
  int half;

  half = (inst.instruction & 0x10) != 0;
  constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
  constraint (inst.operands[0].immisreg,
	      _("instruction requires register index"));
  constraint (inst.operands[0].imm == 15,
	      _("PC is not a valid index register"));
  constraint (!half && inst.operands[0].shifted,
	      _("instruction does not allow shifted index"));
  inst.instruction |= (inst.operands[0].reg << 16) | inst.operands[0].imm;
}
10381 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
10382 inst
.instruction
|= inst
.operands
[1].imm
;
10383 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10385 if (inst
.operands
[3].present
)
10387 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
10388 _("expression too complex"));
10389 if (inst
.reloc
.exp
.X_add_number
!= 0)
10391 if (inst
.operands
[3].shift_kind
== SHIFT_ASR
)
10392 inst
.instruction
|= 0x00200000; /* sh bit */
10394 inst
.instruction
|= (inst
.reloc
.exp
.X_add_number
& 0x1c) << 10;
10395 inst
.instruction
|= (inst
.reloc
.exp
.X_add_number
& 0x03) << 6;
10397 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
10404 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
10405 inst
.instruction
|= inst
.operands
[1].imm
;
10406 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
/* Neon instruction encoder helpers.  */

/* Encodings for the different types for various Neon opcodes.  */

/* An "invalid" code for the following tables.  */
#define N_INV -1u

struct neon_tab_entry
{
  unsigned integer;
  unsigned float_or_poly;
  unsigned scalar_or_imm;
};

/* Map overloaded Neon opcodes to their respective encodings.  */
#define NEON_ENC_TAB					\
  X(vabd,	0x0000700, 0x1200d00, N_INV),		\
  X(vmax,	0x0000600, 0x0000f00, N_INV),		\
  X(vmin,	0x0000610, 0x0200f00, N_INV),		\
  X(vpadd,	0x0000b10, 0x1000d00, N_INV),		\
  X(vpmax,	0x0000a00, 0x1000f00, N_INV),		\
  X(vpmin,	0x0000a10, 0x1200f00, N_INV),		\
  X(vadd,	0x0000800, 0x0000d00, N_INV),		\
  X(vsub,	0x1000800, 0x0200d00, N_INV),		\
  X(vceq,	0x1000810, 0x0000e00, 0x1b10100),	\
  X(vcge,	0x0000310, 0x1000e00, 0x1b10080),	\
  X(vcgt,	0x0000300, 0x1200e00, 0x1b10000),	\
  /* Register variants of the following two instructions are encoded as
     vcge / vcgt with the operands reversed.  */	\
  X(vclt,	0x0000300, 0x1200e00, 0x1b10200),	\
  X(vcle,	0x0000310, 0x1000e00, 0x1b10180),	\
  X(vmla,	0x0000900, 0x0000d10, 0x0800040),	\
  X(vmls,	0x1000900, 0x0200d10, 0x0800440),	\
  X(vmul,	0x0000910, 0x1000d10, 0x0800840),	\
  X(vmull,	0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
  X(vmlal,	0x0800800, N_INV,     0x0800240),	\
  X(vmlsl,	0x0800a00, N_INV,     0x0800640),	\
  X(vqdmlal,	0x0800900, N_INV,     0x0800340),	\
  X(vqdmlsl,	0x0800b00, N_INV,     0x0800740),	\
  X(vqdmull,	0x0800d00, N_INV,     0x0800b40),	\
  X(vqdmulh,    0x0000b00, N_INV,     0x0800c40),	\
  X(vqrdmulh,   0x1000b00, N_INV,     0x0800d40),	\
  X(vshl,	0x0000400, N_INV,     0x0800510),	\
  X(vqshl,	0x0000410, N_INV,     0x0800710),	\
  X(vand,	0x0000110, N_INV,     0x0800030),	\
  X(vbic,	0x0100110, N_INV,     0x0800030),	\
  X(veor,	0x1000110, N_INV,     N_INV),		\
  X(vorn,	0x0300110, N_INV,     0x0800010),	\
  X(vorr,	0x0200110, N_INV,     0x0800010),	\
  X(vmvn,	0x1b00580, N_INV,     0x0800030),	\
  X(vshll,	0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
  X(vcvt,       0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
  X(vdup,       0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
  X(vld1,       0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
  X(vst1,	0x0000000, 0x0800000, N_INV),		\
  X(vld2,	0x0200100, 0x0a00100, 0x0a00d00),	\
  X(vst2,	0x0000100, 0x0800100, N_INV),		\
  X(vld3,	0x0200200, 0x0a00200, 0x0a00e00),	\
  X(vst3,	0x0000200, 0x0800200, N_INV),		\
  X(vld4,	0x0200300, 0x0a00300, 0x0a00f00),	\
  X(vst4,	0x0000300, 0x0800300, N_INV),		\
  X(vmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vtrn,	0x1b20080, N_INV,     N_INV),		\
  X(vqmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vqmovun,	0x1b20240, N_INV,     N_INV),		\
  X(vnmul,      0xe200a40, 0xe200b40, N_INV),		\
  X(vnmla,      0xe000a40, 0xe000b40, N_INV),		\
  X(vnmls,      0xe100a40, 0xe100b40, N_INV),		\
  X(vcmp,	0xeb40a40, 0xeb40b40, N_INV),		\
  X(vcmpz,	0xeb50a40, 0xeb50b40, N_INV),		\
  X(vcmpe,	0xeb40ac0, 0xeb40bc0, N_INV),		\
  X(vcmpez,     0xeb50ac0, 0xeb50bc0, N_INV)

enum neon_opc
{
#define X(OPC,I,F,S) N_MNEM_##OPC
NEON_ENC_TAB
#undef X
};

static const struct neon_tab_entry neon_enc_tab[] =
{
#define X(OPC,I,F,S) { (I), (F), (S) }
NEON_ENC_TAB
#undef X
};

#define NEON_ENC_INTEGER(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_SINGLE(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
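/* Illustration of the X macro trick above: with the first definition of X,
   the entry "X(vadd, 0x0000800, 0x0000d00, N_INV)" contributes the
   enumerator N_MNEM_vadd, and with the second definition it contributes the
   initializer { 0x0000800, 0x0000d00, N_INV } at the same index, so
   NEON_ENC_INTEGER (N_MNEM_vadd) picks out 0x0000800.  */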
/* Define shapes for instruction operands. The following mnemonic characters
   are used in this table:

     F - VFP S<n> register
     D - Neon D<n> register
     Q - Neon Q<n> register
     I - Immediate
     S - Scalar
     R - ARM register
     L - D<n> register list

   This table is used to generate various data:
     - enumerations of the form NS_DDR to be used as arguments to
       neon_select_shape.
     - a table classifying shapes into single, double, quad, mixed.
     - a table used to drive neon_select_shape.  */

#define NEON_SHAPE_DEF			\
  X(3, (D, D, D), DOUBLE),		\
  X(3, (Q, Q, Q), QUAD),		\
  X(3, (D, D, I), DOUBLE),		\
  X(3, (Q, Q, I), QUAD),		\
  X(3, (D, D, S), DOUBLE),		\
  X(3, (Q, Q, S), QUAD),		\
  X(2, (D, D), DOUBLE),			\
  X(2, (Q, Q), QUAD),			\
  X(2, (D, S), DOUBLE),			\
  X(2, (Q, S), QUAD),			\
  X(2, (D, R), DOUBLE),			\
  X(2, (Q, R), QUAD),			\
  X(2, (D, I), DOUBLE),			\
  X(2, (Q, I), QUAD),			\
  X(3, (D, L, D), DOUBLE),		\
  X(2, (D, Q), MIXED),			\
  X(2, (Q, D), MIXED),			\
  X(3, (D, Q, I), MIXED),		\
  X(3, (Q, D, I), MIXED),		\
  X(3, (Q, D, D), MIXED),		\
  X(3, (D, Q, Q), MIXED),		\
  X(3, (Q, Q, D), MIXED),		\
  X(3, (Q, D, S), MIXED),		\
  X(3, (D, Q, S), MIXED),		\
  X(4, (D, D, D, I), DOUBLE),		\
  X(4, (Q, Q, Q, I), QUAD),		\
  X(2, (F, F), SINGLE),			\
  X(3, (F, F, F), SINGLE),		\
  X(2, (F, I), SINGLE),			\
  X(2, (F, D), MIXED),			\
  X(2, (D, F), MIXED),			\
  X(3, (F, F, I), MIXED),		\
  X(4, (R, R, F, F), SINGLE),		\
  X(4, (F, F, R, R), SINGLE),		\
  X(3, (D, R, R), DOUBLE),		\
  X(3, (R, R, D), DOUBLE),		\
  X(2, (S, R), SINGLE),			\
  X(2, (R, S), SINGLE),			\
  X(2, (F, R), SINGLE),			\
  X(2, (R, F), SINGLE)

#define S2(A,B)		NS_##A##B
#define S3(A,B,C)	NS_##A##B##C
#define S4(A,B,C,D)	NS_##A##B##C##D

#define X(N, L, C) S##N L
10587 enum neon_shape_class
10595 #define X(N, L, C) SC_##C
10597 static enum neon_shape_class neon_shape_class
[] =
10615 /* Register widths of above. */
10616 static unsigned neon_shape_el_size
[] =
10627 struct neon_shape_info
10630 enum neon_shape_el el
[NEON_MAX_TYPE_ELS
];
10633 #define S2(A,B) { SE_##A, SE_##B }
10634 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
10635 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
10637 #define X(N, L, C) { N, S##N L }
10639 static struct neon_shape_info neon_shape_tab
[] =
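/* Illustration: for the entry "X(3, (D, D, D), DOUBLE)", the X/S3 macros
   above expand to the shape enumerator NS_DDD, the classification
   SC_DOUBLE, and the neon_shape_tab element { 3, { SE_D, SE_D, SE_D } }
   respectively.  */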
/* Bit masks used in type checking given instructions.
  'N_EQK' means the type must be the same as (or based on in some way) the key
   type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
   set, various other bits can be set as well in order to modify the meaning of
   the type constraint.  */

enum neon_type_mask
{
  N_KEY = 0x100000, /* key element (main type specifier).  */
  N_EQK = 0x200000, /* given operand has the same type & size as the key.  */
  N_VFP = 0x400000, /* VFP mode: operand size must match register width.  */
  N_DBL = 0x000001, /* if N_EQK, this operand is twice the size.  */
  N_HLF = 0x000002, /* if N_EQK, this operand is half the size.  */
  N_SGN = 0x000004, /* if N_EQK, this operand is forced to be signed.  */
  N_UNS = 0x000008, /* if N_EQK, this operand is forced to be unsigned.  */
  N_INT = 0x000010, /* if N_EQK, this operand is forced to be integer.  */
  N_FLT = 0x000020, /* if N_EQK, this operand is forced to be float.  */
  N_SIZ = 0x000040, /* if N_EQK, this operand is forced to be size-only.  */
  N_MAX_NONSPECIAL = N_F64
};

#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_SUF_32   (N_SU_32 | N_F32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F32)

/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
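/* For instance, N_SU_32 above accepts any of S8/S16/S32/U8/U16/U32, which is
   what most 8/16/32-bit integer dyadic operations advertise, while N_SUF_32
   additionally admits F32 for the instructions that also have a
   single-precision float form.  */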
10704 /* Select a "shape" for the current instruction (describing register types or
10705 sizes) from a list of alternatives. Return NS_NULL if the current instruction
10706 doesn't fit. For non-polymorphic shapes, checking is usually done as a
10707 function of operand parsing, so this function doesn't need to be called.
10708 Shapes should be listed in order of decreasing length. */
10710 static enum neon_shape
10711 neon_select_shape (enum neon_shape shape
, ...)
10714 enum neon_shape first_shape
= shape
;
10716 /* Fix missing optional operands. FIXME: we don't know at this point how
10717 many arguments we should have, so this makes the assumption that we have
10718 > 1. This is true of all current Neon opcodes, I think, but may not be
10719 true in the future. */
10720 if (!inst
.operands
[1].present
)
10721 inst
.operands
[1] = inst
.operands
[0];
10723 va_start (ap
, shape
);
10725 for (; shape
!= NS_NULL
; shape
= va_arg (ap
, int))
10730 for (j
= 0; j
< neon_shape_tab
[shape
].els
; j
++)
10732 if (!inst
.operands
[j
].present
)
10738 switch (neon_shape_tab
[shape
].el
[j
])
10741 if (!(inst
.operands
[j
].isreg
10742 && inst
.operands
[j
].isvec
10743 && inst
.operands
[j
].issingle
10744 && !inst
.operands
[j
].isquad
))
10749 if (!(inst
.operands
[j
].isreg
10750 && inst
.operands
[j
].isvec
10751 && !inst
.operands
[j
].isquad
10752 && !inst
.operands
[j
].issingle
))
10757 if (!(inst
.operands
[j
].isreg
10758 && !inst
.operands
[j
].isvec
))
10763 if (!(inst
.operands
[j
].isreg
10764 && inst
.operands
[j
].isvec
10765 && inst
.operands
[j
].isquad
10766 && !inst
.operands
[j
].issingle
))
10771 if (!(!inst
.operands
[j
].isreg
10772 && !inst
.operands
[j
].isscalar
))
10777 if (!(!inst
.operands
[j
].isreg
10778 && inst
.operands
[j
].isscalar
))
10792 if (shape
== NS_NULL
&& first_shape
!= NS_NULL
)
10793 first_error (_("invalid instruction shape"));
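/* Typical use, as seen in the encoders further down: a call such as
   "rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);" tries each candidate
   shape in turn against the parsed operands and returns the first one that
   matches (or NS_NULL, after reporting "invalid instruction shape").  */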
/* True if SHAPE is predominantly a quadword operation (most of the time, this
   means the Q bit should be set).  */

static int
neon_quad (enum neon_shape shape)
{
  return neon_shape_class[shape] == SC_QUAD;
}

static void
neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
		       unsigned *g_size)
{
  /* Allow modification to be made to types which are constrained to be
     based on the key element, based on bits set alongside N_EQK.  */
  if ((typebits & N_EQK) != 0)
    {
      if ((typebits & N_HLF) != 0)
	*g_size /= 2;
      else if ((typebits & N_DBL) != 0)
	*g_size *= 2;
      if ((typebits & N_SGN) != 0)
	*g_type = NT_signed;
      else if ((typebits & N_UNS) != 0)
	*g_type = NT_unsigned;
      else if ((typebits & N_INT) != 0)
	*g_type = NT_integer;
      else if ((typebits & N_FLT) != 0)
	*g_type = NT_float;
      else if ((typebits & N_SIZ) != 0)
	*g_type = NT_untyped;
    }
}

/* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
   operand type, i.e. the single type specified in a Neon instruction when it
   is the only one given.  */

static struct neon_type_el
neon_type_promote (struct neon_type_el *key, unsigned thisarg)
{
  struct neon_type_el dest = *key;

  assert ((thisarg & N_EQK) != 0);

  neon_modify_type_size (thisarg, &dest.type, &dest.size);

  return dest;
}
10848 /* Convert Neon type and size into compact bitmask representation. */
10850 static enum neon_type_mask
10851 type_chk_of_el_type (enum neon_el_type type
, unsigned size
)
10858 case 8: return N_8
;
10859 case 16: return N_16
;
10860 case 32: return N_32
;
10861 case 64: return N_64
;
10869 case 8: return N_I8
;
10870 case 16: return N_I16
;
10871 case 32: return N_I32
;
10872 case 64: return N_I64
;
10880 case 32: return N_F32
;
10881 case 64: return N_F64
;
10889 case 8: return N_P8
;
10890 case 16: return N_P16
;
10898 case 8: return N_S8
;
10899 case 16: return N_S16
;
10900 case 32: return N_S32
;
10901 case 64: return N_S64
;
10909 case 8: return N_U8
;
10910 case 16: return N_U16
;
10911 case 32: return N_U32
;
10912 case 64: return N_U64
;
10923 /* Convert compact Neon bitmask type representation to a type and size. Only
10924 handles the case where a single bit is set in the mask. */
10927 el_type_of_type_chk (enum neon_el_type
*type
, unsigned *size
,
10928 enum neon_type_mask mask
)
10930 if ((mask
& N_EQK
) != 0)
10933 if ((mask
& (N_S8
| N_U8
| N_I8
| N_8
| N_P8
)) != 0)
10935 else if ((mask
& (N_S16
| N_U16
| N_I16
| N_16
| N_P16
)) != 0)
10937 else if ((mask
& (N_S32
| N_U32
| N_I32
| N_32
| N_F32
)) != 0)
10939 else if ((mask
& (N_S64
| N_U64
| N_I64
| N_64
| N_F64
)) != 0)
10944 if ((mask
& (N_S8
| N_S16
| N_S32
| N_S64
)) != 0)
10946 else if ((mask
& (N_U8
| N_U16
| N_U32
| N_U64
)) != 0)
10947 *type
= NT_unsigned
;
10948 else if ((mask
& (N_I8
| N_I16
| N_I32
| N_I64
)) != 0)
10949 *type
= NT_integer
;
10950 else if ((mask
& (N_8
| N_16
| N_32
| N_64
)) != 0)
10951 *type
= NT_untyped
;
10952 else if ((mask
& (N_P8
| N_P16
)) != 0)
10954 else if ((mask
& (N_F32
| N_F64
)) != 0)
10962 /* Modify a bitmask of allowed types. This is only needed for type
10966 modify_types_allowed (unsigned allowed
, unsigned mods
)
10969 enum neon_el_type type
;
10975 for (i
= 1; i
<= N_MAX_NONSPECIAL
; i
<<= 1)
10977 if (el_type_of_type_chk (&type
, &size
, allowed
& i
) == SUCCESS
)
10979 neon_modify_type_size (mods
, &type
, &size
);
10980 destmask
|= type_chk_of_el_type (type
, size
);
10987 /* Check type and return type classification.
10988 The manual states (paraphrase): If one datatype is given, it indicates the
10990 - the second operand, if there is one
10991 - the operand, if there is no second operand
10992 - the result, if there are no operands.
10993 This isn't quite good enough though, so we use a concept of a "key" datatype
10994 which is set on a per-instruction basis, which is the one which matters when
10995 only one data type is written.
10996 Note: this function has side-effects (e.g. filling in missing operands). All
10997 Neon instructions should call it before performing bit encoding. */
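/* Example of the "key" rule: for VADD.I16 D0, D1, D2 the single .I16
   specifier attaches to the key operand, and the N_EQK operands inherit the
   same 16-bit integer type, so all three registers are checked as holding
   16-bit elements.  */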
10999 static struct neon_type_el
11000 neon_check_type (unsigned els
, enum neon_shape ns
, ...)
11003 unsigned i
, pass
, key_el
= 0;
11004 unsigned types
[NEON_MAX_TYPE_ELS
];
11005 enum neon_el_type k_type
= NT_invtype
;
11006 unsigned k_size
= -1u;
11007 struct neon_type_el badtype
= {NT_invtype
, -1};
11008 unsigned key_allowed
= 0;
11010 /* Optional registers in Neon instructions are always (not) in operand 1.
11011 Fill in the missing operand here, if it was omitted. */
11012 if (els
> 1 && !inst
.operands
[1].present
)
11013 inst
.operands
[1] = inst
.operands
[0];
11015 /* Suck up all the varargs. */
11017 for (i
= 0; i
< els
; i
++)
11019 unsigned thisarg
= va_arg (ap
, unsigned);
11020 if (thisarg
== N_IGNORE_TYPE
)
11025 types
[i
] = thisarg
;
11026 if ((thisarg
& N_KEY
) != 0)
11031 if (inst
.vectype
.elems
> 0)
11032 for (i
= 0; i
< els
; i
++)
11033 if (inst
.operands
[i
].vectype
.type
!= NT_invtype
)
11035 first_error (_("types specified in both the mnemonic and operands"));
11039 /* Duplicate inst.vectype elements here as necessary.
11040 FIXME: No idea if this is exactly the same as the ARM assembler,
11041 particularly when an insn takes one register and one non-register
11043 if (inst
.vectype
.elems
== 1 && els
> 1)
11046 inst
.vectype
.elems
= els
;
11047 inst
.vectype
.el
[key_el
] = inst
.vectype
.el
[0];
11048 for (j
= 0; j
< els
; j
++)
11050 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
11053 else if (inst
.vectype
.elems
== 0 && els
> 0)
11056 /* No types were given after the mnemonic, so look for types specified
11057 after each operand. We allow some flexibility here; as long as the
11058 "key" operand has a type, we can infer the others. */
11059 for (j
= 0; j
< els
; j
++)
11060 if (inst
.operands
[j
].vectype
.type
!= NT_invtype
)
11061 inst
.vectype
.el
[j
] = inst
.operands
[j
].vectype
;
11063 if (inst
.operands
[key_el
].vectype
.type
!= NT_invtype
)
11065 for (j
= 0; j
< els
; j
++)
11066 if (inst
.operands
[j
].vectype
.type
== NT_invtype
)
11067 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
11072 first_error (_("operand types can't be inferred"));
11076 else if (inst
.vectype
.elems
!= els
)
11078 first_error (_("type specifier has the wrong number of parts"));
11082 for (pass
= 0; pass
< 2; pass
++)
11084 for (i
= 0; i
< els
; i
++)
11086 unsigned thisarg
= types
[i
];
11087 unsigned types_allowed
= ((thisarg
& N_EQK
) != 0 && pass
!= 0)
11088 ? modify_types_allowed (key_allowed
, thisarg
) : thisarg
;
11089 enum neon_el_type g_type
= inst
.vectype
.el
[i
].type
;
11090 unsigned g_size
= inst
.vectype
.el
[i
].size
;
11092 /* Decay more-specific signed & unsigned types to sign-insensitive
11093 integer types if sign-specific variants are unavailable. */
11094 if ((g_type
== NT_signed
|| g_type
== NT_unsigned
)
11095 && (types_allowed
& N_SU_ALL
) == 0)
11096 g_type
= NT_integer
;
11098 /* If only untyped args are allowed, decay any more specific types to
11099 them. Some instructions only care about signs for some element
11100 sizes, so handle that properly. */
11101 if ((g_size
== 8 && (types_allowed
& N_8
) != 0)
11102 || (g_size
== 16 && (types_allowed
& N_16
) != 0)
11103 || (g_size
== 32 && (types_allowed
& N_32
) != 0)
11104 || (g_size
== 64 && (types_allowed
& N_64
) != 0))
11105 g_type
= NT_untyped
;
11109 if ((thisarg
& N_KEY
) != 0)
11113 key_allowed
= thisarg
& ~N_KEY
;
11118 if ((thisarg
& N_VFP
) != 0)
11120 enum neon_shape_el regshape
= neon_shape_tab
[ns
].el
[i
];
11121 unsigned regwidth
= neon_shape_el_size
[regshape
], match
;
11123 /* In VFP mode, operands must match register widths. If we
11124 have a key operand, use its width, else use the width of
11125 the current operand. */
11131 if (regwidth
!= match
)
11133 first_error (_("operand size must match register width"));
11138 if ((thisarg
& N_EQK
) == 0)
11140 unsigned given_type
= type_chk_of_el_type (g_type
, g_size
);
11142 if ((given_type
& types_allowed
) == 0)
11144 first_error (_("bad type in Neon instruction"));
11150 enum neon_el_type mod_k_type
= k_type
;
11151 unsigned mod_k_size
= k_size
;
11152 neon_modify_type_size (thisarg
, &mod_k_type
, &mod_k_size
);
11153 if (g_type
!= mod_k_type
|| g_size
!= mod_k_size
)
11155 first_error (_("inconsistent types in Neon instruction"));
11163 return inst
.vectype
.el
[key_el
];
11166 /* Neon-style VFP instruction forwarding. */
11168 /* Thumb VFP instructions have 0xE in the condition field. */
11171 do_vfp_cond_or_thumb (void)
11174 inst
.instruction
|= 0xe0000000;
11176 inst
.instruction
|= inst
.cond
<< 28;
11179 /* Look up and encode a simple mnemonic, for use as a helper function for the
11180 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
11181 etc. It is assumed that operand parsing has already been done, and that the
11182 operands are in the form expected by the given opcode (this isn't necessarily
11183 the same as the form in which they were parsed, hence some massaging must
11184 take place before this function is called).
11185 Checks current arch version against that in the looked-up opcode. */
11188 do_vfp_nsyn_opcode (const char *opname
)
11190 const struct asm_opcode
*opcode
;
11192 opcode
= hash_find (arm_ops_hsh
, opname
);
11197 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
,
11198 thumb_mode
? *opcode
->tvariant
: *opcode
->avariant
),
11203 inst
.instruction
= opcode
->tvalue
;
11204 opcode
->tencode ();
11208 inst
.instruction
= (inst
.cond
<< 28) | opcode
->avalue
;
11209 opcode
->aencode ();
11214 do_vfp_nsyn_add_sub (enum neon_shape rs
)
11216 int is_add
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vadd
;
11221 do_vfp_nsyn_opcode ("fadds");
11223 do_vfp_nsyn_opcode ("fsubs");
11228 do_vfp_nsyn_opcode ("faddd");
11230 do_vfp_nsyn_opcode ("fsubd");
11234 /* Check operand types to see if this is a VFP instruction, and if so call
11238 try_vfp_nsyn (int args
, void (*pfn
) (enum neon_shape
))
11240 enum neon_shape rs
;
11241 struct neon_type_el et
;
11246 rs
= neon_select_shape (NS_FF
, NS_DD
, NS_NULL
);
11247 et
= neon_check_type (2, rs
,
11248 N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
11252 rs
= neon_select_shape (NS_FFF
, NS_DDD
, NS_NULL
);
11253 et
= neon_check_type (3, rs
,
11254 N_EQK
| N_VFP
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
11261 if (et
.type
!= NT_invtype
)
11273 do_vfp_nsyn_mla_mls (enum neon_shape rs
)
11275 int is_mla
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vmla
;
11280 do_vfp_nsyn_opcode ("fmacs");
11282 do_vfp_nsyn_opcode ("fmscs");
11287 do_vfp_nsyn_opcode ("fmacd");
11289 do_vfp_nsyn_opcode ("fmscd");
11294 do_vfp_nsyn_mul (enum neon_shape rs
)
11297 do_vfp_nsyn_opcode ("fmuls");
11299 do_vfp_nsyn_opcode ("fmuld");
11303 do_vfp_nsyn_abs_neg (enum neon_shape rs
)
11305 int is_neg
= (inst
.instruction
& 0x80) != 0;
11306 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_VFP
| N_KEY
);
11311 do_vfp_nsyn_opcode ("fnegs");
11313 do_vfp_nsyn_opcode ("fabss");
11318 do_vfp_nsyn_opcode ("fnegd");
11320 do_vfp_nsyn_opcode ("fabsd");
11324 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
11325 insns belong to Neon, and are handled elsewhere. */
11328 do_vfp_nsyn_ldm_stm (int is_dbmode
)
11330 int is_ldm
= (inst
.instruction
& (1 << 20)) != 0;
11334 do_vfp_nsyn_opcode ("fldmdbs");
11336 do_vfp_nsyn_opcode ("fldmias");
11341 do_vfp_nsyn_opcode ("fstmdbs");
11343 do_vfp_nsyn_opcode ("fstmias");
11348 do_vfp_nsyn_sqrt (void)
11350 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_DD
, NS_NULL
);
11351 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
11354 do_vfp_nsyn_opcode ("fsqrts");
11356 do_vfp_nsyn_opcode ("fsqrtd");
11360 do_vfp_nsyn_div (void)
11362 enum neon_shape rs
= neon_select_shape (NS_FFF
, NS_DDD
, NS_NULL
);
11363 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
11364 N_F32
| N_F64
| N_KEY
| N_VFP
);
11367 do_vfp_nsyn_opcode ("fdivs");
11369 do_vfp_nsyn_opcode ("fdivd");
11373 do_vfp_nsyn_nmul (void)
11375 enum neon_shape rs
= neon_select_shape (NS_FFF
, NS_DDD
, NS_NULL
);
11376 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
11377 N_F32
| N_F64
| N_KEY
| N_VFP
);
11381 inst
.instruction
= NEON_ENC_SINGLE (inst
.instruction
);
11382 do_vfp_sp_dyadic ();
11386 inst
.instruction
= NEON_ENC_DOUBLE (inst
.instruction
);
11387 do_vfp_dp_rd_rn_rm ();
11389 do_vfp_cond_or_thumb ();
11393 do_vfp_nsyn_cmp (void)
11395 if (inst
.operands
[1].isreg
)
11397 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_DD
, NS_NULL
);
11398 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
11402 inst
.instruction
= NEON_ENC_SINGLE (inst
.instruction
);
11403 do_vfp_sp_monadic ();
11407 inst
.instruction
= NEON_ENC_DOUBLE (inst
.instruction
);
11408 do_vfp_dp_rd_rm ();
11413 enum neon_shape rs
= neon_select_shape (NS_FI
, NS_DI
, NS_NULL
);
11414 neon_check_type (2, rs
, N_F32
| N_F64
| N_KEY
| N_VFP
, N_EQK
);
11416 switch (inst
.instruction
& 0x0fffffff)
11419 inst
.instruction
+= N_MNEM_vcmpz
- N_MNEM_vcmp
;
11422 inst
.instruction
+= N_MNEM_vcmpez
- N_MNEM_vcmpe
;
11430 inst
.instruction
= NEON_ENC_SINGLE (inst
.instruction
);
11431 do_vfp_sp_compare_z ();
11435 inst
.instruction
= NEON_ENC_DOUBLE (inst
.instruction
);
11439 do_vfp_cond_or_thumb ();
static void
nsyn_insert_sp (void)
{
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
  inst.operands[0].reg = 13;
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].present = 1;
}

static void
do_vfp_nsyn_push (void)
{
  nsyn_insert_sp ();
  if (inst.operands[1].issingle)
    do_vfp_nsyn_opcode ("fstmdbs");
  else
    do_vfp_nsyn_opcode ("fstmdbd");
}

static void
do_vfp_nsyn_pop (void)
{
  nsyn_insert_sp ();
  if (inst.operands[1].issingle)
    do_vfp_nsyn_opcode ("fldmias");
  else
    do_vfp_nsyn_opcode ("fldmiad");
}
/* Fix up Neon data-processing instructions, ORing in the correct bits for
   ARM mode or Thumb mode and moving the encoded bit 24 to bit 28.  */

static unsigned
neon_dp_fixup (unsigned i)
{
  if (thumb_mode)
    {
      /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode.  */
      if (i & (1 << 24))
	i |= 1 << 28;

      i &= ~(1 << 24);

      i |= 0xef000000;
    }
  else
    i |= 0xf2000000;

  return i;
}

/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3).  */

static unsigned
neon_logbits (unsigned x)
{
  return ffs (x) - 4;
}
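/* So neon_logbits (8) == 0, neon_logbits (16) == 1, neon_logbits (32) == 2
   and neon_logbits (64) == 3, which are exactly the values the Neon size
   fields expect.  */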
#define LOW4(R) ((R) & 0xf)
#define HI1(R) (((R) >> 4) & 1)
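/* Neon D/Q registers are encoded as a 5-bit field split across the
   instruction: for d18, LOW4 (18) == 2 supplies the 4-bit part and
   HI1 (18) == 1 supplies the extra D/N/M bit.  */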
/* Encode insns with bit pattern:

  |28/24|23|22 |21 20|19 16|15 12|11    8|7|6|5|4|3  0|
  |  U  |x |D  |size | Rn  | Rd  |x x x x|N|Q|M|x| Rm |

  SIZE is passed in bits. -1 means size field isn't changed, in case it has a
  different meaning for some instruction.  */

static void
neon_three_same (int isquad, int ubit, int size)
{
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= (ubit != 0) << 24;
  if (size != -1)
    inst.instruction |= neon_logbits (size) << 20;

  inst.instruction = neon_dp_fixup (inst.instruction);
}

/* Encode instructions of the form:

  |28/24|23|22|21 20|19 18|17 16|15 12|11      7|6|5|4|3  0|
  |  U  |x |D |x  x |size |x  x | Rd  |x x x x x|Q|M|x| Rm |

  Don't write size if SIZE == -1.  */

static void
neon_two_same (int qbit, int ubit, int size)
{
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (qbit != 0) << 6;
  inst.instruction |= (ubit != 0) << 24;

  if (size != -1)
    inst.instruction |= neon_logbits (size) << 18;

  inst.instruction = neon_dp_fixup (inst.instruction);
}

/* Neon instruction encoders, in approximate order of appearance.  */
11558 do_neon_dyadic_i_su (void)
11560 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
11561 struct neon_type_el et
= neon_check_type (3, rs
,
11562 N_EQK
, N_EQK
, N_SU_32
| N_KEY
);
11563 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
11567 do_neon_dyadic_i64_su (void)
11569 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
11570 struct neon_type_el et
= neon_check_type (3, rs
,
11571 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
11572 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
11576 neon_imm_shift (int write_ubit
, int uval
, int isquad
, struct neon_type_el et
,
11579 unsigned size
= et
.size
>> 3;
11580 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11581 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
11582 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
11583 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
11584 inst
.instruction
|= (isquad
!= 0) << 6;
11585 inst
.instruction
|= immbits
<< 16;
11586 inst
.instruction
|= (size
>> 3) << 7;
11587 inst
.instruction
|= (size
& 0x7) << 19;
11589 inst
.instruction
|= (uval
!= 0) << 24;
11591 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
11595 do_neon_shl_imm (void)
11597 if (!inst
.operands
[2].isreg
)
11599 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
11600 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_KEY
| N_I_ALL
);
11601 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
11602 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, inst
.operands
[2].imm
);
11606 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
11607 struct neon_type_el et
= neon_check_type (3, rs
,
11608 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
11611 /* VSHL/VQSHL 3-register variants have syntax such as:
11613 whereas other 3-register operations encoded by neon_three_same have
11616 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
11618 tmp
= inst
.operands
[2].reg
;
11619 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
11620 inst
.operands
[1].reg
= tmp
;
11621 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
11622 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
11627 do_neon_qshl_imm (void)
11629 if (!inst
.operands
[2].isreg
)
11631 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
11632 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
11634 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
11635 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
,
11636 inst
.operands
[2].imm
);
11640 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
11641 struct neon_type_el et
= neon_check_type (3, rs
,
11642 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
11645 /* See note in do_neon_shl_imm. */
11646 tmp
= inst
.operands
[2].reg
;
11647 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
11648 inst
.operands
[1].reg
= tmp
;
11649 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
11650 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
11655 do_neon_rshl (void)
11657 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
11658 struct neon_type_el et
= neon_check_type (3, rs
,
11659 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
11662 tmp
= inst
.operands
[2].reg
;
11663 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
11664 inst
.operands
[1].reg
= tmp
;
11665 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
11669 neon_cmode_for_logic_imm (unsigned immediate
, unsigned *immbits
, int size
)
11671 /* Handle .I8 pseudo-instructions. */
11674 /* Unfortunately, this will make everything apart from zero out-of-range.
11675 FIXME is this the intended semantics? There doesn't seem much point in
11676 accepting .I8 if so. */
11677 immediate
|= immediate
<< 8;
11683 if (immediate
== (immediate
& 0x000000ff))
11685 *immbits
= immediate
;
11688 else if (immediate
== (immediate
& 0x0000ff00))
11690 *immbits
= immediate
>> 8;
11693 else if (immediate
== (immediate
& 0x00ff0000))
11695 *immbits
= immediate
>> 16;
11698 else if (immediate
== (immediate
& 0xff000000))
11700 *immbits
= immediate
>> 24;
11703 if ((immediate
& 0xffff) != (immediate
>> 16))
11704 goto bad_immediate
;
11705 immediate
&= 0xffff;
11708 if (immediate
== (immediate
& 0x000000ff))
11710 *immbits
= immediate
;
11713 else if (immediate
== (immediate
& 0x0000ff00))
11715 *immbits
= immediate
>> 8;
11720 first_error (_("immediate value out of range"));
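/* Rough summary of the cases above: a logic immediate is accepted when the
   nonzero byte of a 32-bit (or 16-bit) value sits in exactly one byte
   position; *immbits receives that byte, and the returned cmode tells the
   caller which byte position (and element size) it came from.  */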
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
	 && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
	 && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
	 && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
}

/* For immediate of above form, return 0bABCD.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
	 | ((imm & 0x01000000) >> 21);
}

/* Compress quarter-float representation to 0b...000 abcdefgh.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
}
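/* The "quarter-float" form keeps the sign, three exponent bits and four
   fraction bits of a single-precision value: for the bit pattern of 1.0
   (0x3f800000), neon_qfloat_bits returns 0x70.  */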
11753 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
11754 the instruction. *OP is passed as the initial value of the op field, and
11755 may be set to a different value depending on the constant (i.e.
11756 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
11757 MVN). If the immediate looks like a repeated parttern then also
11758 try smaller element sizes. */
11761 neon_cmode_for_move_imm (unsigned immlo
, unsigned immhi
, int float_p
,
11762 unsigned *immbits
, int *op
, int size
,
11763 enum neon_el_type type
)
11765 /* Only permit float immediates (including 0.0/-0.0) if the operand type is
11767 if (type
== NT_float
&& !float_p
)
11770 if (type
== NT_float
&& is_quarter_float (immlo
) && immhi
== 0)
11772 if (size
!= 32 || *op
== 1)
11774 *immbits
= neon_qfloat_bits (immlo
);
11780 if (neon_bits_same_in_bytes (immhi
)
11781 && neon_bits_same_in_bytes (immlo
))
11785 *immbits
= (neon_squash_bits (immhi
) << 4)
11786 | neon_squash_bits (immlo
);
11791 if (immhi
!= immlo
)
11797 if (immlo
== (immlo
& 0x000000ff))
11802 else if (immlo
== (immlo
& 0x0000ff00))
11804 *immbits
= immlo
>> 8;
11807 else if (immlo
== (immlo
& 0x00ff0000))
11809 *immbits
= immlo
>> 16;
11812 else if (immlo
== (immlo
& 0xff000000))
11814 *immbits
= immlo
>> 24;
11817 else if (immlo
== ((immlo
& 0x0000ff00) | 0x000000ff))
11819 *immbits
= (immlo
>> 8) & 0xff;
11822 else if (immlo
== ((immlo
& 0x00ff0000) | 0x0000ffff))
11824 *immbits
= (immlo
>> 16) & 0xff;
11828 if ((immlo
& 0xffff) != (immlo
>> 16))
11835 if (immlo
== (immlo
& 0x000000ff))
11840 else if (immlo
== (immlo
& 0x0000ff00))
11842 *immbits
= immlo
>> 8;
11846 if ((immlo
& 0xff) != (immlo
>> 8))
11851 if (immlo
== (immlo
& 0x000000ff))
11853 /* Don't allow MVN with 8-bit immediate. */
11863 /* Write immediate bits [7:0] to the following locations:
11865 |28/24|23 19|18 16|15 4|3 0|
11866 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
11868 This function is used by VMOV/VMVN/VORR/VBIC. */
11871 neon_write_immbits (unsigned immbits
)
11873 inst
.instruction
|= immbits
& 0xf;
11874 inst
.instruction
|= ((immbits
>> 4) & 0x7) << 16;
11875 inst
.instruction
|= ((immbits
>> 7) & 0x1) << 24;
11878 /* Invert low-order SIZE bits of XHI:XLO. */
11881 neon_invert_size (unsigned *xlo
, unsigned *xhi
, int size
)
11883 unsigned immlo
= xlo
? *xlo
: 0;
11884 unsigned immhi
= xhi
? *xhi
: 0;
11889 immlo
= (~immlo
) & 0xff;
11893 immlo
= (~immlo
) & 0xffff;
11897 immhi
= (~immhi
) & 0xffffffff;
11898 /* fall through. */
11901 immlo
= (~immlo
) & 0xffffffff;
11916 do_neon_logic (void)
11918 if (inst
.operands
[2].present
&& inst
.operands
[2].isreg
)
11920 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
11921 neon_check_type (3, rs
, N_IGNORE_TYPE
);
11922 /* U bit and size field were set as part of the bitmask. */
11923 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
11924 neon_three_same (neon_quad (rs
), 0, -1);
11928 enum neon_shape rs
= neon_select_shape (NS_DI
, NS_QI
, NS_NULL
);
11929 struct neon_type_el et
= neon_check_type (2, rs
,
11930 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
11931 enum neon_opc opcode
= inst
.instruction
& 0x0fffffff;
11935 if (et
.type
== NT_invtype
)
11938 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
11940 immbits
= inst
.operands
[1].imm
;
11943 /* .i64 is a pseudo-op, so the immediate must be a repeating
11945 if (immbits
!= (inst
.operands
[1].regisimm
?
11946 inst
.operands
[1].reg
: 0))
11948 /* Set immbits to an invalid constant. */
11949 immbits
= 0xdeadbeef;
11956 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
11960 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
11964 /* Pseudo-instruction for VBIC. */
11965 neon_invert_size (&immbits
, 0, et
.size
);
11966 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
11970 /* Pseudo-instruction for VORR. */
11971 neon_invert_size (&immbits
, 0, et
.size
);
11972 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
11982 inst
.instruction
|= neon_quad (rs
) << 6;
11983 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11984 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
11985 inst
.instruction
|= cmode
<< 8;
11986 neon_write_immbits (immbits
);
11988 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
11993 do_neon_bitfield (void)
11995 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
11996 neon_check_type (3, rs
, N_IGNORE_TYPE
);
11997 neon_three_same (neon_quad (rs
), 0, -1);
12001 neon_dyadic_misc (enum neon_el_type ubit_meaning
, unsigned types
,
12004 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
12005 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
| destbits
, N_EQK
,
12007 if (et
.type
== NT_float
)
12009 inst
.instruction
= NEON_ENC_FLOAT (inst
.instruction
);
12010 neon_three_same (neon_quad (rs
), 0, -1);
12014 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
12015 neon_three_same (neon_quad (rs
), et
.type
== ubit_meaning
, et
.size
);
12020 do_neon_dyadic_if_su (void)
12022 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
12026 do_neon_dyadic_if_su_d (void)
12028 /* This version only allow D registers, but that constraint is enforced during
12029 operand parsing so we don't need to do anything extra here. */
12030 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
12034 do_neon_dyadic_if_i_d (void)
12036 /* The "untyped" case can't happen. Do this to stop the "U" bit being
12037 affected if we specify unsigned args. */
12038 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
12041 enum vfp_or_neon_is_neon_bits
12044 NEON_CHECK_ARCH
= 2
12047 /* Call this function if an instruction which may have belonged to the VFP or
12048 Neon instruction sets, but turned out to be a Neon instruction (due to the
12049 operand types involved, etc.). We have to check and/or fix-up a couple of
12052 - Make sure the user hasn't attempted to make a Neon instruction
12054 - Alter the value in the condition code field if necessary.
12055 - Make sure that the arch supports Neon instructions.
12057 Which of these operations take place depends on bits from enum
12058 vfp_or_neon_is_neon_bits.
12060 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
12061 current instruction's condition is COND_ALWAYS, the condition field is
12062 changed to inst.uncond_value. This is necessary because instructions shared
12063 between VFP and Neon may be conditional for the VFP variants only, and the
12064 unconditional Neon version must have, e.g., 0xF in the condition field. */
12067 vfp_or_neon_is_neon (unsigned check
)
12069 /* Conditions are always legal in Thumb mode (IT blocks). */
12070 if (!thumb_mode
&& (check
& NEON_CHECK_CC
))
12072 if (inst
.cond
!= COND_ALWAYS
)
12074 first_error (_(BAD_COND
));
12077 if (inst
.uncond_value
!= -1)
12078 inst
.instruction
|= inst
.uncond_value
<< 28;
12081 if ((check
& NEON_CHECK_ARCH
)
12082 && !ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
))
12084 first_error (_(BAD_FPU
));
12092 do_neon_addsub_if_i (void)
12094 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub
) == SUCCESS
)
12097 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
12100 /* The "untyped" case can't happen. Do this to stop the "U" bit being
12101 affected if we specify unsigned args. */
12102 neon_dyadic_misc (NT_untyped
, N_IF_32
| N_I64
, 0);
12105 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
12107 V<op> A,B (A is operand 0, B is operand 2)
12112 so handle that case specially. */
12115 neon_exchange_operands (void)
12117 void *scratch
= alloca (sizeof (inst
.operands
[0]));
12118 if (inst
.operands
[1].present
)
12120 /* Swap operands[1] and operands[2]. */
12121 memcpy (scratch
, &inst
.operands
[1], sizeof (inst
.operands
[0]));
12122 inst
.operands
[1] = inst
.operands
[2];
12123 memcpy (&inst
.operands
[2], scratch
, sizeof (inst
.operands
[0]));
12127 inst
.operands
[1] = inst
.operands
[2];
12128 inst
.operands
[2] = inst
.operands
[0];
12133 neon_compare (unsigned regtypes
, unsigned immtypes
, int invert
)
12135 if (inst
.operands
[2].isreg
)
12138 neon_exchange_operands ();
12139 neon_dyadic_misc (NT_unsigned
, regtypes
, N_SIZ
);
12143 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
12144 struct neon_type_el et
= neon_check_type (2, rs
,
12145 N_EQK
| N_SIZ
, immtypes
| N_KEY
);
12147 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
12148 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12149 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12150 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
12151 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
12152 inst
.instruction
|= neon_quad (rs
) << 6;
12153 inst
.instruction
|= (et
.type
== NT_float
) << 10;
12154 inst
.instruction
|= neon_logbits (et
.size
) << 18;
12156 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
12163 neon_compare (N_SUF_32
, N_S8
| N_S16
| N_S32
| N_F32
, FALSE
);
12167 do_neon_cmp_inv (void)
12169 neon_compare (N_SUF_32
, N_S8
| N_S16
| N_S32
| N_F32
, TRUE
);
12175 neon_compare (N_IF_32
, N_IF_32
, FALSE
);
12178 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
12179 scalars, which are encoded in 5 bits, M : Rm.
12180 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
12181 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
12185 neon_scalar_for_mul (unsigned scalar
, unsigned elsize
)
12187 unsigned regno
= NEON_SCALAR_REG (scalar
);
12188 unsigned elno
= NEON_SCALAR_INDEX (scalar
);
12193 if (regno
> 7 || elno
> 3)
12195 return regno
| (elno
<< 3);
12198 if (regno
> 15 || elno
> 1)
12200 return regno
| (elno
<< 4);
12204 first_error (_("scalar out of range for multiply instruction"));
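/* For example, a 16-bit scalar d1[2] is packed as regno | (elno << 3), i.e.
   0x11, giving the M:Vm field expected by the multiply-by-scalar forms;
   32-bit scalars use regno | (elno << 4) instead.  */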
/* Encode multiply / multiply-accumulate scalar instructions.  */

static void
neon_mul_mac (struct neon_type_el et, int ubit)
{
  unsigned scalar;

  /* Give a more helpful error message if we have an invalid type.  */
  if (et.type == NT_invtype)
    return;

  scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (scalar);
  inst.instruction |= HI1 (scalar) << 5;
  inst.instruction |= (et.type == NT_float) << 8;
  inst.instruction |= neon_logbits (et.size) << 20;
  inst.instruction |= (ubit != 0) << 24;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
12236 do_neon_mac_maybe_scalar (void)
12238 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls
) == SUCCESS
)
12241 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
12244 if (inst
.operands
[2].isscalar
)
12246 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
12247 struct neon_type_el et
= neon_check_type (3, rs
,
12248 N_EQK
, N_EQK
, N_I16
| N_I32
| N_F32
| N_KEY
);
12249 inst
.instruction
= NEON_ENC_SCALAR (inst
.instruction
);
12250 neon_mul_mac (et
, neon_quad (rs
));
12254 /* The "untyped" case can't happen. Do this to stop the "U" bit being
12255 affected if we specify unsigned args. */
12256 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
12263 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
12264 struct neon_type_el et
= neon_check_type (3, rs
,
12265 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
12266 neon_three_same (neon_quad (rs
), 0, et
.size
);
12269 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
12270 same types as the MAC equivalents. The polynomial type for this instruction
12271 is encoded the same as the integer type. */
12276 if (try_vfp_nsyn (3, do_vfp_nsyn_mul
) == SUCCESS
)
12279 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
12282 if (inst
.operands
[2].isscalar
)
12283 do_neon_mac_maybe_scalar ();
12285 neon_dyadic_misc (NT_poly
, N_I8
| N_I16
| N_I32
| N_F32
| N_P8
, 0);
12289 do_neon_qdmulh (void)
12291 if (inst
.operands
[2].isscalar
)
12293 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
12294 struct neon_type_el et
= neon_check_type (3, rs
,
12295 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
12296 inst
.instruction
= NEON_ENC_SCALAR (inst
.instruction
);
12297 neon_mul_mac (et
, neon_quad (rs
));
12301 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
12302 struct neon_type_el et
= neon_check_type (3, rs
,
12303 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
12304 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
12305 /* The U bit (rounding) comes from bit mask. */
12306 neon_three_same (neon_quad (rs
), 0, et
.size
);
static void
do_neon_fcmp_absolute (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
  /* Size field comes from bit mask.  */
  neon_three_same (neon_quad (rs), 1, -1);
}

static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}

static void
do_neon_step (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
  neon_three_same (neon_quad (rs), 0, -1);
}
12335 do_neon_abs_neg (void)
12337 enum neon_shape rs
;
12338 struct neon_type_el et
;
12340 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg
) == SUCCESS
)
12343 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
12346 rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
12347 et
= neon_check_type (2, rs
, N_EQK
, N_S8
| N_S16
| N_S32
| N_F32
| N_KEY
);
12349 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12350 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12351 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
12352 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
12353 inst
.instruction
|= neon_quad (rs
) << 6;
12354 inst
.instruction
|= (et
.type
== NT_float
) << 10;
12355 inst
.instruction
|= neon_logbits (et
.size
) << 18;
12357 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
12363 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
12364 struct neon_type_el et
= neon_check_type (2, rs
,
12365 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
12366 int imm
= inst
.operands
[2].imm
;
12367 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
12368 _("immediate out of range for insert"));
12369 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
12375 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
12376 struct neon_type_el et
= neon_check_type (2, rs
,
12377 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
12378 int imm
= inst
.operands
[2].imm
;
12379 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
12380 _("immediate out of range for insert"));
12381 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, et
.size
- imm
);
12385 do_neon_qshlu_imm (void)
12387 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
12388 struct neon_type_el et
= neon_check_type (2, rs
,
12389 N_EQK
| N_UNS
, N_S8
| N_S16
| N_S32
| N_S64
| N_KEY
);
12390 int imm
= inst
.operands
[2].imm
;
12391 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
12392 _("immediate out of range for shift"));
12393 /* Only encodes the 'U present' variant of the instruction.
12394 In this case, signed types have OP (bit 8) set to 0.
12395 Unsigned types have OP set to 1. */
12396 inst
.instruction
|= (et
.type
== NT_unsigned
) << 8;
12397 /* The rest of the bits are the same as other immediate shifts. */
12398 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
static void
do_neon_qmovn (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  /* Saturating move where operands can be signed or unsigned, and the
     destination has the same signedness.  */
  inst.instruction = NEON_ENC_INTEGER (inst.instruction);
  if (et.type == NT_unsigned)
    inst.instruction |= 0xc0;
  else
    inst.instruction |= 0x80;
  neon_two_same (0, 1, et.size / 2);
}

static void
do_neon_qmovun (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  /* Saturating move with unsigned results.  Operands must be signed.  */
  inst.instruction = NEON_ENC_INTEGER (inst.instruction);
  neon_two_same (0, 1, et.size / 2);
}
12427 do_neon_rshift_sat_narrow (void)
12429 /* FIXME: Types for narrowing. If operands are signed, results can be signed
12430 or unsigned. If operands are unsigned, results must also be unsigned. */
12431 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
12432 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
12433 int imm
= inst
.operands
[2].imm
;
12434 /* This gets the bounds check, size encoding and immediate bits calculation
12438 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
12439 VQMOVN.I<size> <Dd>, <Qm>. */
12442 inst
.operands
[2].present
= 0;
12443 inst
.instruction
= N_MNEM_vqmovn
;
12448 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
12449 _("immediate out of range"));
12450 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, et
.size
- imm
);
12454 do_neon_rshift_sat_narrow_u (void)
12456 /* FIXME: Types for narrowing. If operands are signed, results can be signed
12457 or unsigned. If operands are unsigned, results must also be unsigned. */
12458 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
12459 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
12460 int imm
= inst
.operands
[2].imm
;
12461 /* This gets the bounds check, size encoding and immediate bits calculation
12465 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
12466 VQMOVUN.I<size> <Dd>, <Qm>. */
12469 inst
.operands
[2].present
= 0;
12470 inst
.instruction
= N_MNEM_vqmovun
;
12475 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
12476 _("immediate out of range"));
12477 /* FIXME: The manual is kind of unclear about what value U should have in
12478 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
12480 neon_imm_shift (TRUE
, 1, 0, et
, et
.size
- imm
);
12484 do_neon_movn (void)
12486 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
12487 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
12488 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
12489 neon_two_same (0, 1, et
.size
/ 2);
12493 do_neon_rshift_narrow (void)
12495 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
12496 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
12497 int imm
= inst
.operands
[2].imm
;
12498 /* This gets the bounds check, size encoding and immediate bits calculation
12502 /* If immediate is zero then we are a pseudo-instruction for
12503 VMOVN.I<size> <Dd>, <Qm> */
12506 inst
.operands
[2].present
= 0;
12507 inst
.instruction
= N_MNEM_vmovn
;
12512 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
12513 _("immediate out of range for narrowing operation"));
12514 neon_imm_shift (FALSE
, 0, 0, et
, et
.size
- imm
);
12518 do_neon_shll (void)
12520 /* FIXME: Type checking when lengthening. */
12521 struct neon_type_el et
= neon_check_type (2, NS_QDI
,
12522 N_EQK
| N_DBL
, N_I8
| N_I16
| N_I32
| N_KEY
);
12523 unsigned imm
= inst
.operands
[2].imm
;
12525 if (imm
== et
.size
)
12527 /* Maximum shift variant. */
12528 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
12529 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12530 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12531 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
12532 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
12533 inst
.instruction
|= neon_logbits (et
.size
) << 18;
12535 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
12539 /* A more-specific type check for non-max versions. */
12540 et
= neon_check_type (2, NS_QDI
,
12541 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
12542 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
12543 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, imm
);
12547 /* Check the various types for the VCVT instruction, and return which version
12548 the current instruction is. */
12551 neon_cvt_flavour (enum neon_shape rs
)
12553 #define CVT_VAR(C,X,Y) \
12554 et = neon_check_type (2, rs, whole_reg | (X), whole_reg | (Y)); \
12555 if (et.type != NT_invtype) \
12557 inst.error = NULL; \
12560 struct neon_type_el et
;
12561 unsigned whole_reg
= (rs
== NS_FFI
|| rs
== NS_FD
|| rs
== NS_DF
12562 || rs
== NS_FF
) ? N_VFP
: 0;
12563 /* The instruction versions which take an immediate take one register
12564 argument, which is extended to the width of the full register. Thus the
12565 "source" and "destination" registers must have the same width. Hack that
12566 here by making the size equal to the key (wider, in this case) operand. */
12567 unsigned key
= (rs
== NS_QQI
|| rs
== NS_DDI
|| rs
== NS_FFI
) ? N_KEY
: 0;
12569 CVT_VAR (0, N_S32
, N_F32
);
12570 CVT_VAR (1, N_U32
, N_F32
);
12571 CVT_VAR (2, N_F32
, N_S32
);
12572 CVT_VAR (3, N_F32
, N_U32
);
12576 /* VFP instructions. */
12577 CVT_VAR (4, N_F32
, N_F64
);
12578 CVT_VAR (5, N_F64
, N_F32
);
12579 CVT_VAR (6, N_S32
, N_F64
| key
);
12580 CVT_VAR (7, N_U32
, N_F64
| key
);
12581 CVT_VAR (8, N_F64
| key
, N_S32
);
12582 CVT_VAR (9, N_F64
| key
, N_U32
);
12583 /* VFP instructions with bitshift. */
12584 CVT_VAR (10, N_F32
| key
, N_S16
);
12585 CVT_VAR (11, N_F32
| key
, N_U16
);
12586 CVT_VAR (12, N_F64
| key
, N_S16
);
12587 CVT_VAR (13, N_F64
| key
, N_U16
);
12588 CVT_VAR (14, N_S16
, N_F32
| key
);
12589 CVT_VAR (15, N_U16
, N_F32
| key
);
12590 CVT_VAR (16, N_S16
, N_F64
| key
);
12591 CVT_VAR (17, N_U16
, N_F64
| key
);
12597 /* Neon-syntax VFP conversions. */
12600 do_vfp_nsyn_cvt (enum neon_shape rs
, int flavour
)
12602 const char *opname
= 0;
12604 if (rs
== NS_DDI
|| rs
== NS_QQI
|| rs
== NS_FFI
)
12606 /* Conversions with immediate bitshift. */
12607 const char *enc
[] =
12629 if (flavour
>= 0 && flavour
< (int) ARRAY_SIZE (enc
))
12631 opname
= enc
[flavour
];
12632 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
12633 _("operands 0 and 1 must be the same register"));
12634 inst
.operands
[1] = inst
.operands
[2];
12635 memset (&inst
.operands
[2], '\0', sizeof (inst
.operands
[2]));
12640 /* Conversions without bitshift. */
12641 const char *enc
[] =
12655 if (flavour
>= 0 && flavour
< (int) ARRAY_SIZE (enc
))
12656 opname
= enc
[flavour
];
12660 do_vfp_nsyn_opcode (opname
);
12664 do_vfp_nsyn_cvtz (void)
12666 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_FD
, NS_NULL
);
12667 int flavour
= neon_cvt_flavour (rs
);
12668 const char *enc
[] =
12680 if (flavour
>= 0 && flavour
< (int) ARRAY_SIZE (enc
) && enc
[flavour
])
12681 do_vfp_nsyn_opcode (enc
[flavour
]);
12687 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_FFI
, NS_DD
, NS_QQ
,
12688 NS_FD
, NS_DF
, NS_FF
, NS_NULL
);
12689 int flavour
= neon_cvt_flavour (rs
);
12691 /* VFP rather than Neon conversions. */
12694 do_vfp_nsyn_cvt (rs
, flavour
);
12703 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
12706 /* Fixed-point conversion with #0 immediate is encoded as an
12707 integer conversion. */
12708 if (inst
.operands
[2].present
&& inst
.operands
[2].imm
== 0)
12710 unsigned immbits
= 32 - inst
.operands
[2].imm
;
12711 unsigned enctab
[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
12712 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
12714 inst
.instruction
|= enctab
[flavour
];
12715 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12716 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12717 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
12718 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
12719 inst
.instruction
|= neon_quad (rs
) << 6;
12720 inst
.instruction
|= 1 << 21;
12721 inst
.instruction
|= immbits
<< 16;
12723 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
12731 unsigned enctab
[] = { 0x100, 0x180, 0x0, 0x080 };
12733 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
12735 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
12739 inst
.instruction
|= enctab
[flavour
];
12741 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12742 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12743 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
12744 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
12745 inst
.instruction
|= neon_quad (rs
) << 6;
12746 inst
.instruction
|= 2 << 18;
12748 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
12753 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
12754 do_vfp_nsyn_cvt (rs
, flavour
);
12759 neon_move_immediate (void)
12761 enum neon_shape rs
= neon_select_shape (NS_DI
, NS_QI
, NS_NULL
);
12762 struct neon_type_el et
= neon_check_type (2, rs
,
12763 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
12764 unsigned immlo
, immhi
= 0, immbits
;
12765 int op
, cmode
, float_p
;
12767 constraint (et
.type
== NT_invtype
,
12768 _("operand size must be specified for immediate VMOV"));
12770 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
12771 op
= (inst
.instruction
& (1 << 5)) != 0;
12773 immlo
= inst
.operands
[1].imm
;
12774 if (inst
.operands
[1].regisimm
)
12775 immhi
= inst
.operands
[1].reg
;
12777 constraint (et
.size
< 32 && (immlo
& ~((1 << et
.size
) - 1)) != 0,
12778 _("immediate has bits set outside the operand size"));
12780 float_p
= inst
.operands
[1].immisfloat
;
12782 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
, &op
,
12783 et
.size
, et
.type
)) == FAIL
)
12785 /* Invert relevant bits only. */
12786 neon_invert_size (&immlo
, &immhi
, et
.size
);
12787 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
12788 with one or the other; those cases are caught by
12789 neon_cmode_for_move_imm. */
12791 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
,
12792 &op
, et
.size
, et
.type
)) == FAIL
)
12794 first_error (_("immediate out of range"));
12799 inst
.instruction
&= ~(1 << 5);
12800 inst
.instruction
|= op
<< 5;
12802 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12803 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12804 inst
.instruction
|= neon_quad (rs
) << 6;
12805 inst
.instruction
|= cmode
<< 8;
12807 neon_write_immbits (immbits
);
12813 if (inst
.operands
[1].isreg
)
12815 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
12817 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
12818 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12819 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12820 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
12821 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
12822 inst
.instruction
|= neon_quad (rs
) << 6;
12826 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
12827 neon_move_immediate ();
12830 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
/* Encode instructions of form:

     |28/24|23|22|21 20|19 16|15 12|11    8|7|6|5|4|3  0|
     |  U  |x |D |size | Rn  | Rd  |x x x x|N|x|M|x| Rm |  */

static void
neon_mixed_length (struct neon_type_el et, unsigned size)
{
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= (et.type == NT_unsigned) << 24;
  inst.instruction |= neon_logbits (size) << 20;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
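
/* Illustrative sketch only (hypothetical helper and register numbers, kept
   out of the build): packing the layout shown above neon_mixed_length for
   destination register 2, Rn = 3, Rm = 4, an unsigned type and 16-bit
   elements (size code 1).  Each 5-bit register number contributes a 4-bit
   field plus one high bit, as with LOW4/HI1 above.  */
#if 0
static unsigned long
sketch_mixed_length_fields (void)
{
  unsigned long bits = 0;
  unsigned rd = 2, rn = 3, rm = 4;

  bits |= (unsigned long) (rd & 0xf) << 12;       /* Rd field.  */
  bits |= (unsigned long) ((rd >> 4) & 1) << 22;  /* D bit.  */
  bits |= (unsigned long) (rn & 0xf) << 16;       /* Rn field.  */
  bits |= (unsigned long) ((rn >> 4) & 1) << 7;   /* N bit.  */
  bits |= (unsigned long) (rm & 0xf);             /* Rm field.  */
  bits |= (unsigned long) ((rm >> 4) & 1) << 5;   /* M bit.  */
  bits |= 1UL << 24;                              /* U: unsigned type.  */
  bits |= 1UL << 20;                              /* Size code for 16-bit.  */
  return bits;
}
#endif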
12856 do_neon_dyadic_long (void)
12858 /* FIXME: Type checking for lengthening op. */
12859 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
12860 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
12861 neon_mixed_length (et
, et
.size
);
12865 do_neon_abal (void)
12867 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
12868 N_EQK
| N_INT
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
12869 neon_mixed_length (et
, et
.size
);
12873 neon_mac_reg_scalar_long (unsigned regtypes
, unsigned scalartypes
)
12875 if (inst
.operands
[2].isscalar
)
12877 struct neon_type_el et
= neon_check_type (3, NS_QDS
,
12878 N_EQK
| N_DBL
, N_EQK
, regtypes
| N_KEY
);
12879 inst
.instruction
= NEON_ENC_SCALAR (inst
.instruction
);
12880 neon_mul_mac (et
, et
.type
== NT_unsigned
);
12884 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
12885 N_EQK
| N_DBL
, N_EQK
, scalartypes
| N_KEY
);
12886 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
12887 neon_mixed_length (et
, et
.size
);
12892 do_neon_mac_maybe_scalar_long (void)
12894 neon_mac_reg_scalar_long (N_S16
| N_S32
| N_U16
| N_U32
, N_SU_32
);
12898 do_neon_dyadic_wide (void)
12900 struct neon_type_el et
= neon_check_type (3, NS_QQD
,
12901 N_EQK
| N_DBL
, N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
12902 neon_mixed_length (et
, et
.size
);
12906 do_neon_dyadic_narrow (void)
12908 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
12909 N_EQK
| N_DBL
, N_EQK
, N_I16
| N_I32
| N_I64
| N_KEY
);
12910 /* Operand sign is unimportant, and the U bit is part of the opcode,
12911 so force the operand type to integer. */
12912 et
.type
= NT_integer
;
12913 neon_mixed_length (et
, et
.size
/ 2);
12917 do_neon_mul_sat_scalar_long (void)
12919 neon_mac_reg_scalar_long (N_S16
| N_S32
, N_S16
| N_S32
);
12923 do_neon_vmull (void)
12925 if (inst
.operands
[2].isscalar
)
12926 do_neon_mac_maybe_scalar_long ();
12929 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
12930 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_P8
| N_KEY
);
12931 if (et
.type
== NT_poly
)
12932 inst
.instruction
= NEON_ENC_POLY (inst
.instruction
);
12934 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
12935 /* For polynomial encoding, size field must be 0b00 and the U bit must be
12936 zero. Should be OK as-is. */
12937 neon_mixed_length (et
, et
.size
);
12944 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
12945 struct neon_type_el et
= neon_check_type (3, rs
,
12946 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
12947 unsigned imm
= (inst
.operands
[3].imm
* et
.size
) / 8;
12948 constraint (imm
>= (neon_quad (rs
) ? 16 : 8), _("shift out of range"));
12949 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12950 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12951 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
12952 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
12953 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
12954 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
12955 inst
.instruction
|= neon_quad (rs
) << 6;
12956 inst
.instruction
|= imm
<< 8;
12958 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
12964 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
12965 struct neon_type_el et
= neon_check_type (2, rs
,
12966 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
12967 unsigned op
= (inst
.instruction
>> 7) & 3;
12968 /* N (width of reversed regions) is encoded as part of the bitmask. We
12969 extract it here to check the elements to be reversed are smaller.
12970 Otherwise we'd get a reserved instruction. */
12971 unsigned elsize
= (op
== 2) ? 16 : (op
== 1) ? 32 : (op
== 0) ? 64 : 0;
12972 assert (elsize
!= 0);
12973 constraint (et
.size
>= elsize
,
12974 _("elements must be smaller than reversal region"));
12975 neon_two_same (neon_quad (rs
), 1, et
.size
);
12981 if (inst
.operands
[1].isscalar
)
12983 enum neon_shape rs
= neon_select_shape (NS_DS
, NS_QS
, NS_NULL
);
12984 struct neon_type_el et
= neon_check_type (2, rs
,
12985 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
12986 unsigned sizebits
= et
.size
>> 3;
12987 unsigned dm
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
12988 int logsize
= neon_logbits (et
.size
);
12989 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
) << logsize
;
12991 if (vfp_or_neon_is_neon (NEON_CHECK_CC
) == FAIL
)
12994 inst
.instruction
= NEON_ENC_SCALAR (inst
.instruction
);
12995 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12996 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12997 inst
.instruction
|= LOW4 (dm
);
12998 inst
.instruction
|= HI1 (dm
) << 5;
12999 inst
.instruction
|= neon_quad (rs
) << 6;
13000 inst
.instruction
|= x
<< 17;
13001 inst
.instruction
|= sizebits
<< 16;
13003 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
13007 enum neon_shape rs
= neon_select_shape (NS_DR
, NS_QR
, NS_NULL
);
13008 struct neon_type_el et
= neon_check_type (2, rs
,
13009 N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
13010 /* Duplicate ARM register to lanes of vector. */
13011 inst
.instruction
= NEON_ENC_ARMREG (inst
.instruction
);
13014 case 8: inst
.instruction
|= 0x400000; break;
13015 case 16: inst
.instruction
|= 0x000020; break;
13016 case 32: inst
.instruction
|= 0x000000; break;
13019 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
13020 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 16;
13021 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 7;
13022 inst
.instruction
|= neon_quad (rs
) << 21;
13023 /* The encoding for this instruction is identical for the ARM and Thumb
13024 variants, except for the condition field. */
13025 do_vfp_cond_or_thumb ();
/* VMOV has particularly many variations.  It can be one of:
     0. VMOV<c><q> <Qd>, <Qm>
     1. VMOV<c><q> <Dd>, <Dm>
   (Register operations, which are VORR with Rm = Rn.)
     2. VMOV<c><q>.<dt> <Qd>, #<imm>
     3. VMOV<c><q>.<dt> <Dd>, #<imm>
   (Immediate loads.)
     4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
   (ARM register to scalar.)
     5. VMOV<c><q> <Dm>, <Rd>, <Rn>
   (Two ARM registers to vector.)
     6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
   (Scalar to ARM register.)
     7. VMOV<c><q> <Rd>, <Rn>, <Dm>
   (Vector to two ARM registers.)
     8. VMOV.F32 <Sd>, <Sm>
     9. VMOV.F64 <Dd>, <Dm>
   (VFP register moves.)
    10. VMOV.F32 <Sd>, #imm
    11. VMOV.F64 <Dd>, #imm
   (VFP float immediate load.)
    12. VMOV <Rd>, <Sm>
   (VFP single to ARM reg.)
    13. VMOV <Sd>, <Rm>
   (ARM reg to VFP single.)
    14. VMOV <Rd>, <Re>, <Sn>, <Sm>
   (Two ARM regs to two VFP singles.)
    15. VMOV <Sd>, <Se>, <Rn>, <Rm>
   (Two VFP singles to two ARM regs.)

   These cases can be disambiguated using neon_select_shape, except cases 1/9
   and 3/11 which depend on the operand type too.

   All the encoded bits are hardcoded by this function.

   Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
   Cases 5, 7 may be used with VFPv2 and above.

   FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
   can specify a type where it doesn't make sense to, and is ignored).  */
13074 enum neon_shape rs
= neon_select_shape (NS_RRFF
, NS_FFRR
, NS_DRR
, NS_RRD
,
13075 NS_QQ
, NS_DD
, NS_QI
, NS_DI
, NS_SR
, NS_RS
, NS_FF
, NS_FI
, NS_RF
, NS_FR
,
13077 struct neon_type_el et
;
13078 const char *ldconst
= 0;
13082 case NS_DD
: /* case 1/9. */
13083 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
13084 /* It is not an error here if no type is given. */
13086 if (et
.type
== NT_float
&& et
.size
== 64)
13088 do_vfp_nsyn_opcode ("fcpyd");
13091 /* fall through. */
13093 case NS_QQ
: /* case 0/1. */
13095 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
13097 /* The architecture manual I have doesn't explicitly state which
13098 value the U bit should have for register->register moves, but
13099 the equivalent VORR instruction has U = 0, so do that. */
13100 inst
.instruction
= 0x0200110;
13101 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
13102 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
13103 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
13104 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
13105 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
13106 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
13107 inst
.instruction
|= neon_quad (rs
) << 6;
13109 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
13113 case NS_DI
: /* case 3/11. */
13114 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
13116 if (et
.type
== NT_float
&& et
.size
== 64)
13118 /* case 11 (fconstd). */
13119 ldconst
= "fconstd";
13120 goto encode_fconstd
;
13122 /* fall through. */
13124 case NS_QI
: /* case 2/3. */
13125 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
13127 inst
.instruction
= 0x0800010;
13128 neon_move_immediate ();
13129 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
13132 case NS_SR
: /* case 4. */
13134 unsigned bcdebits
= 0;
13135 struct neon_type_el et
= neon_check_type (2, NS_NULL
,
13136 N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
13137 int logsize
= neon_logbits (et
.size
);
13138 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[0].reg
);
13139 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[0].reg
);
13141 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
13143 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
13144 && et
.size
!= 32, _(BAD_FPU
));
13145 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
13146 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
13150 case 8: bcdebits
= 0x8; break;
13151 case 16: bcdebits
= 0x1; break;
13152 case 32: bcdebits
= 0x0; break;
13156 bcdebits
|= x
<< logsize
;
13158 inst
.instruction
= 0xe000b10;
13159 do_vfp_cond_or_thumb ();
13160 inst
.instruction
|= LOW4 (dn
) << 16;
13161 inst
.instruction
|= HI1 (dn
) << 7;
13162 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
13163 inst
.instruction
|= (bcdebits
& 3) << 5;
13164 inst
.instruction
|= (bcdebits
>> 2) << 21;
13168 case NS_DRR
: /* case 5 (fmdrr). */
13169 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
13172 inst
.instruction
= 0xc400b10;
13173 do_vfp_cond_or_thumb ();
13174 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
);
13175 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 5;
13176 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
13177 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
13180 case NS_RS
: /* case 6. */
13182 struct neon_type_el et
= neon_check_type (2, NS_NULL
,
13183 N_EQK
, N_S8
| N_S16
| N_U8
| N_U16
| N_32
| N_KEY
);
13184 unsigned logsize
= neon_logbits (et
.size
);
13185 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
13186 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
);
13187 unsigned abcdebits
= 0;
13189 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
13191 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
13192 && et
.size
!= 32, _(BAD_FPU
));
13193 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
13194 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
13198 case 8: abcdebits
= (et
.type
== NT_signed
) ? 0x08 : 0x18; break;
13199 case 16: abcdebits
= (et
.type
== NT_signed
) ? 0x01 : 0x11; break;
13200 case 32: abcdebits
= 0x00; break;
13204 abcdebits
|= x
<< logsize
;
13205 inst
.instruction
= 0xe100b10;
13206 do_vfp_cond_or_thumb ();
13207 inst
.instruction
|= LOW4 (dn
) << 16;
13208 inst
.instruction
|= HI1 (dn
) << 7;
13209 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
13210 inst
.instruction
|= (abcdebits
& 3) << 5;
13211 inst
.instruction
|= (abcdebits
>> 2) << 21;
13215 case NS_RRD
: /* case 7 (fmrrd). */
13216 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
13219 inst
.instruction
= 0xc500b10;
13220 do_vfp_cond_or_thumb ();
13221 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
13222 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
13223 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
13224 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
13227 case NS_FF
: /* case 8 (fcpys). */
13228 do_vfp_nsyn_opcode ("fcpys");
13231 case NS_FI
: /* case 10 (fconsts). */
13232 ldconst
= "fconsts";
13234 if (is_quarter_float (inst
.operands
[1].imm
))
13236 inst
.operands
[1].imm
= neon_qfloat_bits (inst
.operands
[1].imm
);
13237 do_vfp_nsyn_opcode (ldconst
);
13240 first_error (_("immediate out of range"));
13243 case NS_RF
: /* case 12 (fmrs). */
13244 do_vfp_nsyn_opcode ("fmrs");
13247 case NS_FR
: /* case 13 (fmsr). */
13248 do_vfp_nsyn_opcode ("fmsr");
13251 /* The encoders for the fmrrs and fmsrr instructions expect three operands
13252 (one of which is a list), but we have parsed four. Do some fiddling to
13253 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
13255 case NS_RRFF
: /* case 14 (fmrrs). */
13256 constraint (inst
.operands
[3].reg
!= inst
.operands
[2].reg
+ 1,
13257 _("VFP registers must be adjacent"));
13258 inst
.operands
[2].imm
= 2;
13259 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
13260 do_vfp_nsyn_opcode ("fmrrs");
13263 case NS_FFRR
: /* case 15 (fmsrr). */
13264 constraint (inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
13265 _("VFP registers must be adjacent"));
13266 inst
.operands
[1] = inst
.operands
[2];
13267 inst
.operands
[2] = inst
.operands
[3];
13268 inst
.operands
[0].imm
= 2;
13269 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
13270 do_vfp_nsyn_opcode ("fmsrr");
13279 do_neon_rshift_round_imm (void)
13281 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
13282 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
13283 int imm
= inst
.operands
[2].imm
;
13285 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
13288 inst
.operands
[2].present
= 0;
13293 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
13294 _("immediate out of range for shift"));
13295 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
,
13300 do_neon_movl (void)
13302 struct neon_type_el et
= neon_check_type (2, NS_QD
,
13303 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
13304 unsigned sizebits
= et
.size
>> 3;
13305 inst
.instruction
|= sizebits
<< 19;
13306 neon_two_same (0, et
.type
== NT_unsigned
, -1);
13312 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
13313 struct neon_type_el et
= neon_check_type (2, rs
,
13314 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
13315 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
13316 neon_two_same (neon_quad (rs
), 1, et
.size
);
13320 do_neon_zip_uzp (void)
13322 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
13323 struct neon_type_el et
= neon_check_type (2, rs
,
13324 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
13325 if (rs
== NS_DD
&& et
.size
== 32)
13327 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
13328 inst
.instruction
= N_MNEM_vtrn
;
13332 neon_two_same (neon_quad (rs
), 1, et
.size
);
13336 do_neon_sat_abs_neg (void)
13338 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
13339 struct neon_type_el et
= neon_check_type (2, rs
,
13340 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
13341 neon_two_same (neon_quad (rs
), 1, et
.size
);
static void
do_neon_pair_long (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
  /* Unsigned is encoded in the OP field (bit 7) for these instructions.  */
  inst.instruction |= (et.type == NT_unsigned) << 7;
  neon_two_same (neon_quad (rs), 1, et.size);
}
13355 do_neon_recip_est (void)
13357 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
13358 struct neon_type_el et
= neon_check_type (2, rs
,
13359 N_EQK
| N_FLT
, N_F32
| N_U32
| N_KEY
);
13360 inst
.instruction
|= (et
.type
== NT_float
) << 8;
13361 neon_two_same (neon_quad (rs
), 1, et
.size
);
13367 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
13368 struct neon_type_el et
= neon_check_type (2, rs
,
13369 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
13370 neon_two_same (neon_quad (rs
), 1, et
.size
);
13376 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
13377 struct neon_type_el et
= neon_check_type (2, rs
,
13378 N_EQK
, N_I8
| N_I16
| N_I32
| N_KEY
);
13379 neon_two_same (neon_quad (rs
), 1, et
.size
);
13385 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
13386 struct neon_type_el et
= neon_check_type (2, rs
,
13387 N_EQK
| N_INT
, N_8
| N_KEY
);
13388 neon_two_same (neon_quad (rs
), 1, et
.size
);
13394 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
13395 neon_two_same (neon_quad (rs
), 1, -1);
13399 do_neon_tbl_tbx (void)
13401 unsigned listlenbits
;
13402 neon_check_type (3, NS_DLD
, N_EQK
, N_EQK
, N_8
| N_KEY
);
13404 if (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 4)
13406 first_error (_("bad list length for table lookup"));
13410 listlenbits
= inst
.operands
[1].imm
- 1;
13411 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
13412 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
13413 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
13414 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
13415 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
13416 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
13417 inst
.instruction
|= listlenbits
<< 8;
13419 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
13423 do_neon_ldm_stm (void)
13425 /* P, U and L bits are part of bitmask. */
13426 int is_dbmode
= (inst
.instruction
& (1 << 24)) != 0;
13427 unsigned offsetbits
= inst
.operands
[1].imm
* 2;
13429 if (inst
.operands
[1].issingle
)
13431 do_vfp_nsyn_ldm_stm (is_dbmode
);
13435 constraint (is_dbmode
&& !inst
.operands
[0].writeback
,
13436 _("writeback (!) must be used for VLDMDB and VSTMDB"));
13438 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
13439 _("register list must contain at least 1 and at most 16 "
13442 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
13443 inst
.instruction
|= inst
.operands
[0].writeback
<< 21;
13444 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
13445 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 22;
13447 inst
.instruction
|= offsetbits
;
13449 do_vfp_cond_or_thumb ();
13453 do_neon_ldr_str (void)
13455 int is_ldr
= (inst
.instruction
& (1 << 20)) != 0;
13457 if (inst
.operands
[0].issingle
)
13460 do_vfp_nsyn_opcode ("flds");
13462 do_vfp_nsyn_opcode ("fsts");
13467 do_vfp_nsyn_opcode ("fldd");
13469 do_vfp_nsyn_opcode ("fstd");
13473 /* "interleave" version also handles non-interleaving register VLD1/VST1
13477 do_neon_ld_st_interleave (void)
13479 struct neon_type_el et
= neon_check_type (1, NS_NULL
,
13480 N_8
| N_16
| N_32
| N_64
);
13481 unsigned alignbits
= 0;
13483 /* The bits in this table go:
13484 0: register stride of one (0) or two (1)
13485 1,2: register list length, minus one (1, 2, 3, 4).
13486 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
13487 We use -1 for invalid entries. */
13488 const int typetable
[] =
13490 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
13491 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
13492 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
13493 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
13497 if (et
.type
== NT_invtype
)
13500 if (inst
.operands
[1].immisalign
)
13501 switch (inst
.operands
[1].imm
>> 8)
13503 case 64: alignbits
= 1; break;
13505 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) == 3)
13506 goto bad_alignment
;
13510 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) == 3)
13511 goto bad_alignment
;
13516 first_error (_("bad alignment"));
13520 inst
.instruction
|= alignbits
<< 4;
13521 inst
.instruction
|= neon_logbits (et
.size
) << 6;
13523 /* Bits [4:6] of the immediate in a list specifier encode register stride
13524 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
13525 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
13526 up the right value for "type" in a table based on this value and the given
13527 list style, then stick it back. */
13528 idx
= ((inst
.operands
[0].imm
>> 4) & 7)
13529 | (((inst
.instruction
>> 8) & 3) << 3);
13531 typebits
= typetable
[idx
];
13533 constraint (typebits
== -1, _("bad list type for instruction"));
13535 inst
.instruction
&= ~0xf00;
13536 inst
.instruction
|= typebits
<< 8;
/* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
   *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
   otherwise.  The variable arguments are a list of pairs of legal
   (size, align) values, terminated with -1.  */

static int
neon_alignment_bit (int size, int align, int *do_align, ...)
{
  va_list ap;
  int result = FAIL, thissize, thisalign;

  if (!inst.operands[1].immisalign)
    {
      *do_align = 0;
      return SUCCESS;
    }

  va_start (ap, do_align);

  do
    {
      thissize = va_arg (ap, int);
      if (thissize == -1)
        break;
      thisalign = va_arg (ap, int);

      if (size == thissize && align == thisalign)
        result = SUCCESS;
    }
  while (result != SUCCESS);

  va_end (ap);

  if (result == SUCCESS)
    *do_align = 1;
  else
    first_error (_("unsupported alignment for instruction"));

  return result;
}
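
/* Illustrative sketch only (hypothetical helper, kept out of the build): the
   same scan over a -1-terminated list of (size, align) pairs that the
   variable arguments above describe.  For the list 16, 16, 32, 32, -1 (as in
   several of the calls below) the pair (16, 16) is accepted and (16, 32) is
   not.  */
#if 0
static int
sketch_pair_accepted (int size, int align, const int *pairs)
{
  while (pairs[0] != -1)
    {
      if (pairs[0] == size && pairs[1] == align)
        return 1;
      pairs += 2;
    }
  return 0;
}
#endif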
13581 do_neon_ld_st_lane (void)
13583 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
13584 int align_good
, do_align
= 0;
13585 int logsize
= neon_logbits (et
.size
);
13586 int align
= inst
.operands
[1].imm
>> 8;
13587 int n
= (inst
.instruction
>> 8) & 3;
13588 int max_el
= 64 / et
.size
;
13590 if (et
.type
== NT_invtype
)
13593 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != n
+ 1,
13594 _("bad list length"));
13595 constraint (NEON_LANE (inst
.operands
[0].imm
) >= max_el
,
13596 _("scalar index out of range"));
13597 constraint (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2
13599 _("stride of 2 unavailable when element size is 8"));
13603 case 0: /* VLD1 / VST1. */
13604 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 16, 16,
13606 if (align_good
== FAIL
)
13610 unsigned alignbits
= 0;
13613 case 16: alignbits
= 0x1; break;
13614 case 32: alignbits
= 0x3; break;
13617 inst
.instruction
|= alignbits
<< 4;
13621 case 1: /* VLD2 / VST2. */
13622 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 8, 16, 16, 32,
13624 if (align_good
== FAIL
)
13627 inst
.instruction
|= 1 << 4;
13630 case 2: /* VLD3 / VST3. */
13631 constraint (inst
.operands
[1].immisalign
,
13632 _("can't use alignment with this instruction"));
13635 case 3: /* VLD4 / VST4. */
13636 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 8, 32,
13637 16, 64, 32, 64, 32, 128, -1);
13638 if (align_good
== FAIL
)
13642 unsigned alignbits
= 0;
13645 case 8: alignbits
= 0x1; break;
13646 case 16: alignbits
= 0x1; break;
13647 case 32: alignbits
= (align
== 64) ? 0x1 : 0x2; break;
13650 inst
.instruction
|= alignbits
<< 4;
13657 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
13658 if (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
13659 inst
.instruction
|= 1 << (4 + logsize
);
13661 inst
.instruction
|= NEON_LANE (inst
.operands
[0].imm
) << (logsize
+ 5);
13662 inst
.instruction
|= logsize
<< 10;
13665 /* Encode single n-element structure to all lanes VLD<n> instructions. */
13668 do_neon_ld_dup (void)
13670 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
13671 int align_good
, do_align
= 0;
13673 if (et
.type
== NT_invtype
)
13676 switch ((inst
.instruction
>> 8) & 3)
13678 case 0: /* VLD1. */
13679 assert (NEON_REG_STRIDE (inst
.operands
[0].imm
) != 2);
13680 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
13681 &do_align
, 16, 16, 32, 32, -1);
13682 if (align_good
== FAIL
)
13684 switch (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
))
13687 case 2: inst
.instruction
|= 1 << 5; break;
13688 default: first_error (_("bad list length")); return;
13690 inst
.instruction
|= neon_logbits (et
.size
) << 6;
13693 case 1: /* VLD2. */
13694 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
13695 &do_align
, 8, 16, 16, 32, 32, 64, -1);
13696 if (align_good
== FAIL
)
13698 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2,
13699 _("bad list length"));
13700 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
13701 inst
.instruction
|= 1 << 5;
13702 inst
.instruction
|= neon_logbits (et
.size
) << 6;
13705 case 2: /* VLD3. */
13706 constraint (inst
.operands
[1].immisalign
,
13707 _("can't use alignment with this instruction"));
13708 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 3,
13709 _("bad list length"));
13710 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
13711 inst
.instruction
|= 1 << 5;
13712 inst
.instruction
|= neon_logbits (et
.size
) << 6;
13715 case 3: /* VLD4. */
13717 int align
= inst
.operands
[1].imm
>> 8;
13718 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 8, 32,
13719 16, 64, 32, 64, 32, 128, -1);
13720 if (align_good
== FAIL
)
13722 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4,
13723 _("bad list length"));
13724 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
13725 inst
.instruction
|= 1 << 5;
13726 if (et
.size
== 32 && align
== 128)
13727 inst
.instruction
|= 0x3 << 6;
13729 inst
.instruction
|= neon_logbits (et
.size
) << 6;
13736 inst
.instruction
|= do_align
<< 4;
13739 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
13740 apart from bits [11:4]. */
13743 do_neon_ldx_stx (void)
13745 switch (NEON_LANE (inst
.operands
[0].imm
))
13747 case NEON_INTERLEAVE_LANES
:
13748 inst
.instruction
= NEON_ENC_INTERLV (inst
.instruction
);
13749 do_neon_ld_st_interleave ();
13752 case NEON_ALL_LANES
:
13753 inst
.instruction
= NEON_ENC_DUP (inst
.instruction
);
13758 inst
.instruction
= NEON_ENC_LANE (inst
.instruction
);
13759 do_neon_ld_st_lane ();
13762 /* L bit comes from bit mask. */
13763 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
13764 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
13765 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
13767 if (inst
.operands
[1].postind
)
13769 int postreg
= inst
.operands
[1].imm
& 0xf;
13770 constraint (!inst
.operands
[1].immisreg
,
13771 _("post-index must be a register"));
13772 constraint (postreg
== 0xd || postreg
== 0xf,
13773 _("bad register for post-index"));
13774 inst
.instruction
|= postreg
;
13776 else if (inst
.operands
[1].writeback
)
13778 inst
.instruction
|= 0xd;
13781 inst
.instruction
|= 0xf;
13784 inst
.instruction
|= 0xf9000000;
13786 inst
.instruction
|= 0xf4000000;
/* Overall per-instruction processing.  */

/* We need to be able to fix up arbitrary expressions in some statements.
   This is so that we can handle symbols that are an arbitrary distance from
   the pc.  The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
   which returns part of an address in a form which will be valid for
   a data instruction.  We do this by pushing the expression into a symbol
   in the expr_section, and creating a fix for that.  */
13800 fix_new_arm (fragS
* frag
,
13815 new_fix
= fix_new_exp (frag
, where
, size
, exp
, pc_rel
, reloc
);
13819 new_fix
= fix_new (frag
, where
, size
, make_expr_symbol (exp
), 0,
13824 /* Mark whether the fix is to a THUMB instruction, or an ARM
13826 new_fix
->tc_fix_data
= thumb_mode
;
13829 /* Create a frg for an instruction requiring relaxation. */
13831 output_relax_insn (void)
13837 /* The size of the instruction is unknown, so tie the debug info to the
13838 start of the instruction. */
13839 dwarf2_emit_insn (0);
13841 switch (inst
.reloc
.exp
.X_op
)
13844 sym
= inst
.reloc
.exp
.X_add_symbol
;
13845 offset
= inst
.reloc
.exp
.X_add_number
;
13849 offset
= inst
.reloc
.exp
.X_add_number
;
13852 sym
= make_expr_symbol (&inst
.reloc
.exp
);
13856 to
= frag_var (rs_machine_dependent
, INSN_SIZE
, THUMB_SIZE
,
13857 inst
.relax
, sym
, offset
, NULL
/*offset, opcode*/);
13858 md_number_to_chars (to
, inst
.instruction
, THUMB_SIZE
);
/* Write a 32-bit thumb instruction to buf.  */

static void
put_thumb32_insn (char * buf, unsigned long insn)
{
  md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
}
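
/* Illustrative sketch only (hypothetical helper and value, kept out of the
   build; assumes a little-endian target, whereas md_number_to_chars above
   honours the real target byte order): a 32-bit Thumb-2 encoding such as
   0x12345678 is emitted as the halfword 0x1234 followed by the halfword
   0x5678, each written in target byte order.  */
#if 0
static void
sketch_put_thumb32_le (unsigned char *buf, unsigned long insn)
{
  buf[0] = (insn >> 16) & 0xff;   /* Low byte of the first (high) halfword.  */
  buf[1] = (insn >> 24) & 0xff;
  buf[2] = insn & 0xff;           /* Low byte of the second (low) halfword.  */
  buf[3] = (insn >> 8) & 0xff;
}
#endif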
13870 output_inst (const char * str
)
13876 as_bad ("%s -- `%s'", inst
.error
, str
);
13880 output_relax_insn();
13883 if (inst
.size
== 0)
13886 to
= frag_more (inst
.size
);
13888 if (thumb_mode
&& (inst
.size
> THUMB_SIZE
))
13890 assert (inst
.size
== (2 * THUMB_SIZE
));
13891 put_thumb32_insn (to
, inst
.instruction
);
13893 else if (inst
.size
> INSN_SIZE
)
13895 assert (inst
.size
== (2 * INSN_SIZE
));
13896 md_number_to_chars (to
, inst
.instruction
, INSN_SIZE
);
13897 md_number_to_chars (to
+ INSN_SIZE
, inst
.instruction
, INSN_SIZE
);
13900 md_number_to_chars (to
, inst
.instruction
, inst
.size
);
13902 if (inst
.reloc
.type
!= BFD_RELOC_UNUSED
)
13903 fix_new_arm (frag_now
, to
- frag_now
->fr_literal
,
13904 inst
.size
, & inst
.reloc
.exp
, inst
.reloc
.pc_rel
,
13907 dwarf2_emit_insn (inst
.size
);
/* Tag values used in struct asm_opcode's tag field.  */
enum opcode_tag
{
  OT_unconditional,     /* Instruction cannot be conditionalized.
                           The ARM condition field is still 0xE.  */
  OT_unconditionalF,    /* Instruction cannot be conditionalized
                           and carries 0xF in its ARM condition field.  */
  OT_csuffix,           /* Instruction takes a conditional suffix.  */
  OT_csuffixF,          /* Some forms of the instruction take a conditional
                           suffix, others place 0xF where the condition field
                           would be.  */
  OT_cinfix3,           /* Instruction takes a conditional infix,
                           beginning at character index 3.  (In
                           unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
                            tsts, cmps, cmns, and teqs.  */
  OT_cinfix3_legacy,    /* Legacy instruction takes a conditional infix at
                           character index 3, even in unified mode.  Used for
                           legacy instructions where suffix and infix forms
                           may be ambiguous.  */
  OT_csuf_or_in3,       /* Instruction takes either a conditional
                           suffix or an infix at character index 3.  */
  OT_odd_infix_unc,     /* This is the unconditional variant of an
                           instruction that takes a conditional infix
                           at an unusual position.  In unified mode,
                           this variant will accept a suffix.  */
  OT_odd_infix_0        /* Values greater than or equal to OT_odd_infix_0
                           are the conditional variants of instructions that
                           take conditional infixes in unusual positions.
                           The infix appears at character index
                           (tag - OT_odd_infix_0).  These are not accepted
                           in unified mode.  */
};
/* Subroutine of md_assemble, responsible for looking up the primary
   opcode from the mnemonic the user wrote.  STR points to the
   beginning of the mnemonic.

   This is not simply a hash table lookup, because of conditional
   variants.  Most instructions have conditional variants, which are
   expressed with a _conditional affix_ to the mnemonic.  If we were
   to encode each conditional variant as a literal string in the opcode
   table, it would have approximately 20,000 entries.

   Most mnemonics take this affix as a suffix, and in unified syntax,
   'most' is upgraded to 'all'.  However, in the divided syntax, some
   instructions take the affix as an infix, notably the s-variants of
   the arithmetic instructions.  Of those instructions, all but six
   have the infix appear after the third character of the mnemonic.

   Accordingly, the algorithm for looking up primary opcodes given
   an identifier is:

   1. Look up the identifier in the opcode table.
      If we find a match, go to step U.

   2. Look up the last two characters of the identifier in the
      conditions table.  If we find a match, look up the first N-2
      characters of the identifier in the opcode table.  If we
      find a match, go to step CE.

   3. Look up the fourth and fifth characters of the identifier in
      the conditions table.  If we find a match, extract those
      characters from the identifier, and look up the remaining
      characters in the opcode table.  If we find a match, go
      to step CM.

   4. Fail.

   U. Examine the tag field of the opcode structure, in case this is
      one of the six instructions with its conditional infix in an
      unusual place.  If it is, the tag tells us where to find the
      infix; look it up in the conditions table and set inst.cond
      accordingly.  Otherwise, this is an unconditional instruction.
      Again set inst.cond accordingly.  Return the opcode structure.

  CE. Examine the tag field to make sure this is an instruction that
      should receive a conditional suffix.  If it is not, fail.
      Otherwise, set inst.cond from the suffix we already looked up,
      and return the opcode structure.

  CM. Examine the tag field to make sure this is an instruction that
      should receive a conditional infix after the third character.
      If it is not, fail.  Otherwise, undo the edits to the current
      line of input and proceed as for case CE.  */
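
/* Illustrative sketch only (kept out of the build): the lookup order
   described above, restated outside the real hash tables.  The tiny
   condition/opcode lists and the helper names are hypothetical stand-ins,
   not the assembler's own tables.  For "addseq", step 2 peels the "eq"
   suffix and finds "adds"; for the divided-syntax infix form "addeqs",
   step 3 lifts "eq" out of character index 3 and finds "adds".  */
#if 0
static int
sketch_is_cond (const char *p)
{
  static const char *const conds[] = { "eq", "ne", "cs", "cc", "mi", "pl" };
  size_t i;
  for (i = 0; i < sizeof conds / sizeof conds[0]; i++)
    if (strncmp (p, conds[i], 2) == 0)
      return 1;
  return 0;
}

static int
sketch_is_opcode (const char *base, size_t len)
{
  static const char *const ops[] = { "add", "adds", "ldr", "b" };
  size_t i;
  for (i = 0; i < sizeof ops / sizeof ops[0]; i++)
    if (strlen (ops[i]) == len && strncmp (base, ops[i], len) == 0)
      return 1;
  return 0;
}

/* Returns 1 (unaffixed), 2 (suffixed), 3 (infixed) or 0 (no match).  */
static int
sketch_lookup (const char *mnem)
{
  size_t len = strlen (mnem);
  char base[16];

  if (sketch_is_opcode (mnem, len))                     /* Step 1.  */
    return 1;
  if (len >= 3 && sketch_is_cond (mnem + len - 2)
      && sketch_is_opcode (mnem, len - 2))              /* Step 2.  */
    return 2;
  if (len >= 6 && len < sizeof base && sketch_is_cond (mnem + 3))
    {                                                   /* Step 3.  */
      memcpy (base, mnem, 3);
      memcpy (base + 3, mnem + 5, len - 5);
      if (sketch_is_opcode (base, len - 2))
        return 3;
    }
  return 0;                                             /* Step 4: fail.  */
}
#endif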
13996 static const struct asm_opcode
*
13997 opcode_lookup (char **str
)
14001 const struct asm_opcode
*opcode
;
14002 const struct asm_cond
*cond
;
14004 bfd_boolean neon_supported
;
14006 neon_supported
= ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
);
14008 /* Scan up to the end of the mnemonic, which must end in white space,
14009 '.' (in unified mode, or for Neon instructions), or end of string. */
14010 for (base
= end
= *str
; *end
!= '\0'; end
++)
14011 if (*end
== ' ' || ((unified_syntax
|| neon_supported
) && *end
== '.'))
14017 /* Handle a possible width suffix and/or Neon type suffix. */
14022 /* The .w and .n suffixes are only valid if the unified syntax is in
14024 if (unified_syntax
&& end
[1] == 'w')
14026 else if (unified_syntax
&& end
[1] == 'n')
14031 inst
.vectype
.elems
= 0;
14033 *str
= end
+ offset
;
14035 if (end
[offset
] == '.')
14037 /* See if we have a Neon type suffix (possible in either unified or
14038 non-unified ARM syntax mode). */
14039 if (parse_neon_type (&inst
.vectype
, str
) == FAIL
)
14042 else if (end
[offset
] != '\0' && end
[offset
] != ' ')
14048 /* Look for unaffixed or special-case affixed mnemonic. */
14049 opcode
= hash_find_n (arm_ops_hsh
, base
, end
- base
);
14053 if (opcode
->tag
< OT_odd_infix_0
)
14055 inst
.cond
= COND_ALWAYS
;
14059 if (unified_syntax
)
14060 as_warn (_("conditional infixes are deprecated in unified syntax"));
14061 affix
= base
+ (opcode
->tag
- OT_odd_infix_0
);
14062 cond
= hash_find_n (arm_cond_hsh
, affix
, 2);
14065 inst
.cond
= cond
->value
;
14069 /* Cannot have a conditional suffix on a mnemonic of less than two
14071 if (end
- base
< 3)
14074 /* Look for suffixed mnemonic. */
14076 cond
= hash_find_n (arm_cond_hsh
, affix
, 2);
14077 opcode
= hash_find_n (arm_ops_hsh
, base
, affix
- base
);
14078 if (opcode
&& cond
)
14081 switch (opcode
->tag
)
14083 case OT_cinfix3_legacy
:
14084 /* Ignore conditional suffixes matched on infix only mnemonics. */
14088 case OT_cinfix3_deprecated
:
14089 case OT_odd_infix_unc
:
14090 if (!unified_syntax
)
14092 /* else fall through */
14096 case OT_csuf_or_in3
:
14097 inst
.cond
= cond
->value
;
14100 case OT_unconditional
:
14101 case OT_unconditionalF
:
14104 inst
.cond
= cond
->value
;
14108 /* delayed diagnostic */
14109 inst
.error
= BAD_COND
;
14110 inst
.cond
= COND_ALWAYS
;
14119 /* Cannot have a usual-position infix on a mnemonic of less than
14120 six characters (five would be a suffix). */
14121 if (end
- base
< 6)
14124 /* Look for infixed mnemonic in the usual position. */
14126 cond
= hash_find_n (arm_cond_hsh
, affix
, 2);
14130 memcpy (save
, affix
, 2);
14131 memmove (affix
, affix
+ 2, (end
- affix
) - 2);
14132 opcode
= hash_find_n (arm_ops_hsh
, base
, (end
- base
) - 2);
14133 memmove (affix
+ 2, affix
, (end
- affix
) - 2);
14134 memcpy (affix
, save
, 2);
14137 && (opcode
->tag
== OT_cinfix3
14138 || opcode
->tag
== OT_cinfix3_deprecated
14139 || opcode
->tag
== OT_csuf_or_in3
14140 || opcode
->tag
== OT_cinfix3_legacy
))
14144 && (opcode
->tag
== OT_cinfix3
14145 || opcode
->tag
== OT_cinfix3_deprecated
))
14146 as_warn (_("conditional infixes are deprecated in unified syntax"));
14148 inst
.cond
= cond
->value
;
14156 md_assemble (char *str
)
14159 const struct asm_opcode
* opcode
;
14161 /* Align the previous label if needed. */
14162 if (last_label_seen
!= NULL
)
14164 symbol_set_frag (last_label_seen
, frag_now
);
14165 S_SET_VALUE (last_label_seen
, (valueT
) frag_now_fix ());
14166 S_SET_SEGMENT (last_label_seen
, now_seg
);
14169 memset (&inst
, '\0', sizeof (inst
));
14170 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
14172 opcode
= opcode_lookup (&p
);
14175 /* It wasn't an instruction, but it might be a register alias of
14176 the form alias .req reg, or a Neon .dn/.qn directive. */
14177 if (!create_register_alias (str
, p
)
14178 && !create_neon_reg_alias (str
, p
))
14179 as_bad (_("bad instruction `%s'"), str
);
14184 if (opcode
->tag
== OT_cinfix3_deprecated
)
14185 as_warn (_("s suffix on comparison instruction is deprecated"));
14187 /* The value which unconditional instructions should have in place of the
14188 condition field. */
14189 inst
.uncond_value
= (opcode
->tag
== OT_csuffixF
) ? 0xf : -1;
14193 arm_feature_set variant
;
14195 variant
= cpu_variant
;
14196 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
14197 if (!ARM_CPU_HAS_FEATURE (variant
, arm_arch_t2
))
14198 ARM_CLEAR_FEATURE (variant
, variant
, fpu_any_hard
);
14199 /* Check that this instruction is supported for this CPU. */
14200 if (!opcode
->tvariant
14201 || (thumb_mode
== 1
14202 && !ARM_CPU_HAS_FEATURE (variant
, *opcode
->tvariant
)))
14204 as_bad (_("selected processor does not support `%s'"), str
);
14207 if (inst
.cond
!= COND_ALWAYS
&& !unified_syntax
14208 && opcode
->tencode
!= do_t_branch
)
14210 as_bad (_("Thumb does not support conditional execution"));
14214 if (!ARM_CPU_HAS_FEATURE (variant
, arm_ext_v6t2
) && !inst
.size_req
)
14216 /* Implicit require narrow instructions on Thumb-1. This avoids
14217 relaxation accidentally introducing Thumb-2 instructions. */
14218 if (opcode
->tencode
!= do_t_blx
&& opcode
->tencode
!= do_t_branch23
)
14222 /* Check conditional suffixes. */
14223 if (current_it_mask
)
14226 cond
= current_cc
^ ((current_it_mask
>> 4) & 1) ^ 1;
14227 current_it_mask
<<= 1;
14228 current_it_mask
&= 0x1f;
14229 /* The BKPT instruction is unconditional even in an IT block. */
14231 && cond
!= inst
.cond
&& opcode
->tencode
!= do_t_bkpt
)
14233 as_bad (_("incorrect condition in IT block"));
14237 else if (inst
.cond
!= COND_ALWAYS
&& opcode
->tencode
!= do_t_branch
)
14239 as_bad (_("thumb conditional instruction not in IT block"));
14243 mapping_state (MAP_THUMB
);
14244 inst
.instruction
= opcode
->tvalue
;
14246 if (!parse_operands (p
, opcode
->operands
))
14247 opcode
->tencode ();
14249 /* Clear current_it_mask at the end of an IT block. */
14250 if (current_it_mask
== 0x10)
14251 current_it_mask
= 0;
14253 if (!(inst
.error
|| inst
.relax
))
14255 assert (inst
.instruction
< 0xe800 || inst
.instruction
> 0xffff);
14256 inst
.size
= (inst
.instruction
> 0xffff ? 4 : 2);
14257 if (inst
.size_req
&& inst
.size_req
!= inst
.size
)
14259 as_bad (_("cannot honor width suffix -- `%s'"), str
);
14264 /* Something has gone badly wrong if we try to relax a fixed size
14266 assert (inst
.size_req
== 0 || !inst
.relax
);
14268 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
14269 *opcode
->tvariant
);
14270 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
14271 set those bits when Thumb-2 32-bit instructions are seen. ie.
14272 anything other than bl/blx.
14273 This is overly pessimistic for relaxable instructions. */
14274 if ((inst
.size
== 4 && (inst
.instruction
& 0xf800e800) != 0xf000e800)
14276 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
14279 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
14281 /* Check that this instruction is supported for this CPU. */
14282 if (!opcode
->avariant
||
14283 !ARM_CPU_HAS_FEATURE (cpu_variant
, *opcode
->avariant
))
14285 as_bad (_("selected processor does not support `%s'"), str
);
14290 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str
);
14294 mapping_state (MAP_ARM
);
14295 inst
.instruction
= opcode
->avalue
;
14296 if (opcode
->tag
== OT_unconditionalF
)
14297 inst
.instruction
|= 0xF << 28;
14299 inst
.instruction
|= inst
.cond
<< 28;
14300 inst
.size
= INSN_SIZE
;
14301 if (!parse_operands (p
, opcode
->operands
))
14302 opcode
->aencode ();
14303 /* Arm mode bx is marked as both v4T and v5 because it's still required
14304 on a hypothetical non-thumb v5 core. */
14305 if (ARM_CPU_HAS_FEATURE (*opcode
->avariant
, arm_ext_v4t
)
14306 || ARM_CPU_HAS_FEATURE (*opcode
->avariant
, arm_ext_v5
))
14307 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, arm_ext_v4t
);
14309 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
14310 *opcode
->avariant
);
14314 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
14321 /* Various frobbings of labels and their addresses. */
14324 arm_start_line_hook (void)
14326 last_label_seen
= NULL
;
14330 arm_frob_label (symbolS
* sym
)
14332 last_label_seen
= sym
;
14334 ARM_SET_THUMB (sym
, thumb_mode
);
14336 #if defined OBJ_COFF || defined OBJ_ELF
14337 ARM_SET_INTERWORK (sym
, support_interwork
);
14340 /* Note - do not allow local symbols (.Lxxx) to be labeled
14341 as Thumb functions. This is because these labels, whilst
14342 they exist inside Thumb code, are not the entry points for
14343 possible ARM->Thumb calls. Also, these labels can be used
14344 as part of a computed goto or switch statement. eg gcc
14345 can generate code that looks like this:
14347 ldr r2, [pc, .Laaa]
14357 The first instruction loads the address of the jump table.
14358 The second instruction converts a table index into a byte offset.
14359 The third instruction gets the jump address out of the table.
14360 The fourth instruction performs the jump.
14362 If the address stored at .Laaa is that of a symbol which has the
14363 Thumb_Func bit set, then the linker will arrange for this address
14364 to have the bottom bit set, which in turn would mean that the
14365 address computation performed by the third instruction would end
14366 up with the bottom bit set. Since the ARM is capable of unaligned
14367 word loads, the instruction would then load the incorrect address
14368 out of the jump table, and chaos would ensue. */
14369 if (label_is_thumb_function_name
14370 && (S_GET_NAME (sym
)[0] != '.' || S_GET_NAME (sym
)[1] != 'L')
14371 && (bfd_get_section_flags (stdoutput
, now_seg
) & SEC_CODE
) != 0)
14373 /* When the address of a Thumb function is taken the bottom
14374 bit of that address should be set. This will allow
14375 interworking between Arm and Thumb functions to work
14378 THUMB_SET_FUNC (sym
, 1);
14380 label_is_thumb_function_name
= FALSE
;
14383 dwarf2_emit_label (sym
);
14387 arm_data_in_code (void)
14389 if (thumb_mode
&& ! strncmp (input_line_pointer
+ 1, "data:", 5))
14391 *input_line_pointer
= '/';
14392 input_line_pointer
+= 5;
14393 *input_line_pointer
= 0;
14401 arm_canonicalize_symbol_name (char * name
)
14405 if (thumb_mode
&& (len
= strlen (name
)) > 5
14406 && streq (name
+ len
- 5, "/data"))
14407 *(name
+ len
- 5) = 0;
14412 /* Table of all register names defined by default. The user can
14413 define additional names with .req. Note that all register names
14414 should appear in both upper and lowercase variants. Some registers
14415 also have mixed-case names. */
14417 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
14418 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
14419 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
14420 #define REGSET(p,t) \
14421 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
14422 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
14423 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
14424 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
14425 #define REGSETH(p,t) \
14426 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
14427 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
14428 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
14429 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
14430 #define REGSET2(p,t) \
14431 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
14432 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
14433 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
14434 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
14436 static const struct reg_entry reg_names
[] =
14438 /* ARM integer registers. */
14439 REGSET(r
, RN
), REGSET(R
, RN
),
14441 /* ATPCS synonyms. */
14442 REGDEF(a1
,0,RN
), REGDEF(a2
,1,RN
), REGDEF(a3
, 2,RN
), REGDEF(a4
, 3,RN
),
14443 REGDEF(v1
,4,RN
), REGDEF(v2
,5,RN
), REGDEF(v3
, 6,RN
), REGDEF(v4
, 7,RN
),
14444 REGDEF(v5
,8,RN
), REGDEF(v6
,9,RN
), REGDEF(v7
,10,RN
), REGDEF(v8
,11,RN
),
14446 REGDEF(A1
,0,RN
), REGDEF(A2
,1,RN
), REGDEF(A3
, 2,RN
), REGDEF(A4
, 3,RN
),
14447 REGDEF(V1
,4,RN
), REGDEF(V2
,5,RN
), REGDEF(V3
, 6,RN
), REGDEF(V4
, 7,RN
),
14448 REGDEF(V5
,8,RN
), REGDEF(V6
,9,RN
), REGDEF(V7
,10,RN
), REGDEF(V8
,11,RN
),
14450 /* Well-known aliases. */
14451 REGDEF(wr
, 7,RN
), REGDEF(sb
, 9,RN
), REGDEF(sl
,10,RN
), REGDEF(fp
,11,RN
),
14452 REGDEF(ip
,12,RN
), REGDEF(sp
,13,RN
), REGDEF(lr
,14,RN
), REGDEF(pc
,15,RN
),
14454 REGDEF(WR
, 7,RN
), REGDEF(SB
, 9,RN
), REGDEF(SL
,10,RN
), REGDEF(FP
,11,RN
),
14455 REGDEF(IP
,12,RN
), REGDEF(SP
,13,RN
), REGDEF(LR
,14,RN
), REGDEF(PC
,15,RN
),
14457 /* Coprocessor numbers. */
14458 REGSET(p
, CP
), REGSET(P
, CP
),
14460 /* Coprocessor register numbers. The "cr" variants are for backward
14462 REGSET(c
, CN
), REGSET(C
, CN
),
14463 REGSET(cr
, CN
), REGSET(CR
, CN
),
14465 /* FPA registers. */
14466 REGNUM(f
,0,FN
), REGNUM(f
,1,FN
), REGNUM(f
,2,FN
), REGNUM(f
,3,FN
),
14467 REGNUM(f
,4,FN
), REGNUM(f
,5,FN
), REGNUM(f
,6,FN
), REGNUM(f
,7, FN
),
14469 REGNUM(F
,0,FN
), REGNUM(F
,1,FN
), REGNUM(F
,2,FN
), REGNUM(F
,3,FN
),
14470 REGNUM(F
,4,FN
), REGNUM(F
,5,FN
), REGNUM(F
,6,FN
), REGNUM(F
,7, FN
),
14472 /* VFP SP registers. */
14473 REGSET(s
,VFS
), REGSET(S
,VFS
),
14474 REGSETH(s
,VFS
), REGSETH(S
,VFS
),
14476 /* VFP DP Registers. */
14477 REGSET(d
,VFD
), REGSET(D
,VFD
),
14478 /* Extra Neon DP registers. */
14479 REGSETH(d
,VFD
), REGSETH(D
,VFD
),
14481 /* Neon QP registers. */
14482 REGSET2(q
,NQ
), REGSET2(Q
,NQ
),
14484 /* VFP control registers. */
14485 REGDEF(fpsid
,0,VFC
), REGDEF(fpscr
,1,VFC
), REGDEF(fpexc
,8,VFC
),
14486 REGDEF(FPSID
,0,VFC
), REGDEF(FPSCR
,1,VFC
), REGDEF(FPEXC
,8,VFC
),
14487 REGDEF(fpinst
,9,VFC
), REGDEF(fpinst2
,10,VFC
),
14488 REGDEF(FPINST
,9,VFC
), REGDEF(FPINST2
,10,VFC
),
14489 REGDEF(mvfr0
,7,VFC
), REGDEF(mvfr1
,6,VFC
),
14490 REGDEF(MVFR0
,7,VFC
), REGDEF(MVFR1
,6,VFC
),
14492 /* Maverick DSP coprocessor registers. */
14493 REGSET(mvf
,MVF
), REGSET(mvd
,MVD
), REGSET(mvfx
,MVFX
), REGSET(mvdx
,MVDX
),
14494 REGSET(MVF
,MVF
), REGSET(MVD
,MVD
), REGSET(MVFX
,MVFX
), REGSET(MVDX
,MVDX
),
14496 REGNUM(mvax
,0,MVAX
), REGNUM(mvax
,1,MVAX
),
14497 REGNUM(mvax
,2,MVAX
), REGNUM(mvax
,3,MVAX
),
14498 REGDEF(dspsc
,0,DSPSC
),
14500 REGNUM(MVAX
,0,MVAX
), REGNUM(MVAX
,1,MVAX
),
14501 REGNUM(MVAX
,2,MVAX
), REGNUM(MVAX
,3,MVAX
),
14502 REGDEF(DSPSC
,0,DSPSC
),
14504 /* iWMMXt data registers - p0, c0-15. */
14505 REGSET(wr
,MMXWR
), REGSET(wR
,MMXWR
), REGSET(WR
, MMXWR
),
14507 /* iWMMXt control registers - p1, c0-3. */
14508 REGDEF(wcid
, 0,MMXWC
), REGDEF(wCID
, 0,MMXWC
), REGDEF(WCID
, 0,MMXWC
),
14509 REGDEF(wcon
, 1,MMXWC
), REGDEF(wCon
, 1,MMXWC
), REGDEF(WCON
, 1,MMXWC
),
14510 REGDEF(wcssf
, 2,MMXWC
), REGDEF(wCSSF
, 2,MMXWC
), REGDEF(WCSSF
, 2,MMXWC
),
14511 REGDEF(wcasf
, 3,MMXWC
), REGDEF(wCASF
, 3,MMXWC
), REGDEF(WCASF
, 3,MMXWC
),
14513 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
14514 REGDEF(wcgr0
, 8,MMXWCG
), REGDEF(wCGR0
, 8,MMXWCG
), REGDEF(WCGR0
, 8,MMXWCG
),
14515 REGDEF(wcgr1
, 9,MMXWCG
), REGDEF(wCGR1
, 9,MMXWCG
), REGDEF(WCGR1
, 9,MMXWCG
),
14516 REGDEF(wcgr2
,10,MMXWCG
), REGDEF(wCGR2
,10,MMXWCG
), REGDEF(WCGR2
,10,MMXWCG
),
14517 REGDEF(wcgr3
,11,MMXWCG
), REGDEF(wCGR3
,11,MMXWCG
), REGDEF(WCGR3
,11,MMXWCG
),
14519 /* XScale accumulator registers. */
14520 REGNUM(acc
,0,XSCALE
), REGNUM(ACC
,0,XSCALE
),
14526 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
14527 within psr_required_here. */
14528 static const struct asm_psr psrs
[] =
14530 /* Backward compatibility notation. Note that "all" is no longer
14531 truly all possible PSR bits. */
14532 {"all", PSR_c
| PSR_f
},
14536 /* Individual flags. */
14541 /* Combinations of flags. */
14542 {"fs", PSR_f
| PSR_s
},
14543 {"fx", PSR_f
| PSR_x
},
14544 {"fc", PSR_f
| PSR_c
},
14545 {"sf", PSR_s
| PSR_f
},
14546 {"sx", PSR_s
| PSR_x
},
14547 {"sc", PSR_s
| PSR_c
},
14548 {"xf", PSR_x
| PSR_f
},
14549 {"xs", PSR_x
| PSR_s
},
14550 {"xc", PSR_x
| PSR_c
},
14551 {"cf", PSR_c
| PSR_f
},
14552 {"cs", PSR_c
| PSR_s
},
14553 {"cx", PSR_c
| PSR_x
},
14554 {"fsx", PSR_f
| PSR_s
| PSR_x
},
14555 {"fsc", PSR_f
| PSR_s
| PSR_c
},
14556 {"fxs", PSR_f
| PSR_x
| PSR_s
},
14557 {"fxc", PSR_f
| PSR_x
| PSR_c
},
14558 {"fcs", PSR_f
| PSR_c
| PSR_s
},
14559 {"fcx", PSR_f
| PSR_c
| PSR_x
},
14560 {"sfx", PSR_s
| PSR_f
| PSR_x
},
14561 {"sfc", PSR_s
| PSR_f
| PSR_c
},
14562 {"sxf", PSR_s
| PSR_x
| PSR_f
},
14563 {"sxc", PSR_s
| PSR_x
| PSR_c
},
14564 {"scf", PSR_s
| PSR_c
| PSR_f
},
14565 {"scx", PSR_s
| PSR_c
| PSR_x
},
14566 {"xfs", PSR_x
| PSR_f
| PSR_s
},
14567 {"xfc", PSR_x
| PSR_f
| PSR_c
},
14568 {"xsf", PSR_x
| PSR_s
| PSR_f
},
14569 {"xsc", PSR_x
| PSR_s
| PSR_c
},
14570 {"xcf", PSR_x
| PSR_c
| PSR_f
},
14571 {"xcs", PSR_x
| PSR_c
| PSR_s
},
14572 {"cfs", PSR_c
| PSR_f
| PSR_s
},
14573 {"cfx", PSR_c
| PSR_f
| PSR_x
},
14574 {"csf", PSR_c
| PSR_s
| PSR_f
},
14575 {"csx", PSR_c
| PSR_s
| PSR_x
},
14576 {"cxf", PSR_c
| PSR_x
| PSR_f
},
14577 {"cxs", PSR_c
| PSR_x
| PSR_s
},
14578 {"fsxc", PSR_f
| PSR_s
| PSR_x
| PSR_c
},
14579 {"fscx", PSR_f
| PSR_s
| PSR_c
| PSR_x
},
14580 {"fxsc", PSR_f
| PSR_x
| PSR_s
| PSR_c
},
14581 {"fxcs", PSR_f
| PSR_x
| PSR_c
| PSR_s
},
14582 {"fcsx", PSR_f
| PSR_c
| PSR_s
| PSR_x
},
14583 {"fcxs", PSR_f
| PSR_c
| PSR_x
| PSR_s
},
14584 {"sfxc", PSR_s
| PSR_f
| PSR_x
| PSR_c
},
14585 {"sfcx", PSR_s
| PSR_f
| PSR_c
| PSR_x
},
14586 {"sxfc", PSR_s
| PSR_x
| PSR_f
| PSR_c
},
14587 {"sxcf", PSR_s
| PSR_x
| PSR_c
| PSR_f
},
14588 {"scfx", PSR_s
| PSR_c
| PSR_f
| PSR_x
},
14589 {"scxf", PSR_s
| PSR_c
| PSR_x
| PSR_f
},
14590 {"xfsc", PSR_x
| PSR_f
| PSR_s
| PSR_c
},
14591 {"xfcs", PSR_x
| PSR_f
| PSR_c
| PSR_s
},
14592 {"xsfc", PSR_x
| PSR_s
| PSR_f
| PSR_c
},
14593 {"xscf", PSR_x
| PSR_s
| PSR_c
| PSR_f
},
14594 {"xcfs", PSR_x
| PSR_c
| PSR_f
| PSR_s
},
14595 {"xcsf", PSR_x
| PSR_c
| PSR_s
| PSR_f
},
14596 {"cfsx", PSR_c
| PSR_f
| PSR_s
| PSR_x
},
14597 {"cfxs", PSR_c
| PSR_f
| PSR_x
| PSR_s
},
14598 {"csfx", PSR_c
| PSR_s
| PSR_f
| PSR_x
},
14599 {"csxf", PSR_c
| PSR_s
| PSR_x
| PSR_f
},
14600 {"cxfs", PSR_c
| PSR_x
| PSR_f
| PSR_s
},
14601 {"cxsf", PSR_c
| PSR_x
| PSR_s
| PSR_f
},
14604 /* Table of V7M psr names. */
14605 static const struct asm_psr v7m_psrs
[] =
14607 {"apsr", 0 }, {"APSR", 0 },
14608 {"iapsr", 1 }, {"IAPSR", 1 },
14609 {"eapsr", 2 }, {"EAPSR", 2 },
14610 {"psr", 3 }, {"PSR", 3 },
14611 {"xpsr", 3 }, {"XPSR", 3 }, {"xPSR", 3 },
14612 {"ipsr", 5 }, {"IPSR", 5 },
14613 {"epsr", 6 }, {"EPSR", 6 },
14614 {"iepsr", 7 }, {"IEPSR", 7 },
14615 {"msp", 8 }, {"MSP", 8 },
14616 {"psp", 9 }, {"PSP", 9 },
14617 {"primask", 16}, {"PRIMASK", 16},
14618 {"basepri", 17}, {"BASEPRI", 17},
14619 {"basepri_max", 18}, {"BASEPRI_MAX", 18},
14620 {"faultmask", 19}, {"FAULTMASK", 19},
14621 {"control", 20}, {"CONTROL", 20}
14624 /* Table of all shift-in-operand names. */
14625 static const struct asm_shift_name shift_names
[] =
14627 { "asl", SHIFT_LSL
}, { "ASL", SHIFT_LSL
},
14628 { "lsl", SHIFT_LSL
}, { "LSL", SHIFT_LSL
},
14629 { "lsr", SHIFT_LSR
}, { "LSR", SHIFT_LSR
},
14630 { "asr", SHIFT_ASR
}, { "ASR", SHIFT_ASR
},
14631 { "ror", SHIFT_ROR
}, { "ROR", SHIFT_ROR
},
14632 { "rrx", SHIFT_RRX
}, { "RRX", SHIFT_RRX
}
14635 /* Table of all explicit relocation names. */
14637 static struct reloc_entry reloc_names
[] =
14639 { "got", BFD_RELOC_ARM_GOT32
}, { "GOT", BFD_RELOC_ARM_GOT32
},
14640 { "gotoff", BFD_RELOC_ARM_GOTOFF
}, { "GOTOFF", BFD_RELOC_ARM_GOTOFF
},
14641 { "plt", BFD_RELOC_ARM_PLT32
}, { "PLT", BFD_RELOC_ARM_PLT32
},
14642 { "target1", BFD_RELOC_ARM_TARGET1
}, { "TARGET1", BFD_RELOC_ARM_TARGET1
},
14643 { "target2", BFD_RELOC_ARM_TARGET2
}, { "TARGET2", BFD_RELOC_ARM_TARGET2
},
14644 { "sbrel", BFD_RELOC_ARM_SBREL32
}, { "SBREL", BFD_RELOC_ARM_SBREL32
},
14645 { "tlsgd", BFD_RELOC_ARM_TLS_GD32
}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32
},
14646 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32
}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32
},
14647 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32
}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32
},
14648 { "gottpoff",BFD_RELOC_ARM_TLS_IE32
}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32
},
14649 { "tpoff", BFD_RELOC_ARM_TLS_LE32
}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32
}
14653 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
14654 static const struct asm_cond conds
[] =
14658 {"cs", 0x2}, {"hs", 0x2},
14659 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
14673 static struct asm_barrier_opt barrier_opt_names
[] =
14681 /* Table of ARM-format instructions. */
14683 /* Macros for gluing together operand strings. N.B. In all cases
14684 other than OPS0, the trailing OP_stop comes from default
14685 zero-initialization of the unspecified elements of the array. */
14686 #define OPS0() { OP_stop, }
14687 #define OPS1(a) { OP_##a, }
14688 #define OPS2(a,b) { OP_##a,OP_##b, }
14689 #define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
14690 #define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
14691 #define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
14692 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
14694 /* These macros abstract out the exact format of the mnemonic table and
14695 save some repeated characters. */
14697 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
14698 #define TxCE(mnem, op, top, nops, ops, ae, te) \
14699 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
14700 THUMB_VARIANT, do_##ae, do_##te }
14702 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
14703 a T_MNEM_xyz enumerator. */
14704 #define TCE(mnem, aop, top, nops, ops, ae, te) \
14705 TxCE(mnem, aop, 0x##top, nops, ops, ae, te)
14706 #define tCE(mnem, aop, top, nops, ops, ae, te) \
14707 TxCE(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
14709 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
14710 infix after the third character. */
14711 #define TxC3(mnem, op, top, nops, ops, ae, te) \
14712 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
14713 THUMB_VARIANT, do_##ae, do_##te }
14714 #define TxC3w(mnem, op, top, nops, ops, ae, te) \
14715 { #mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
14716 THUMB_VARIANT, do_##ae, do_##te }
14717 #define TC3(mnem, aop, top, nops, ops, ae, te) \
14718 TxC3(mnem, aop, 0x##top, nops, ops, ae, te)
14719 #define TC3w(mnem, aop, top, nops, ops, ae, te) \
14720 TxC3w(mnem, aop, 0x##top, nops, ops, ae, te)
14721 #define tC3(mnem, aop, top, nops, ops, ae, te) \
14722 TxC3(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
14723 #define tC3w(mnem, aop, top, nops, ops, ae, te) \
14724 TxC3w(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
14726 /* Mnemonic with a conditional infix in an unusual place. Each and every variant has to
14727 appear in the condition table. */
14728 #define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te) \
14729 { #m1 #m2 #m3, OPS##nops ops, sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
14730 0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }
14732 #define TxCM(m1, m2, op, top, nops, ops, ae, te) \
14733 TxCM_(m1, , m2, op, top, nops, ops, ae, te), \
14734 TxCM_(m1, eq, m2, op, top, nops, ops, ae, te), \
14735 TxCM_(m1, ne, m2, op, top, nops, ops, ae, te), \
14736 TxCM_(m1, cs, m2, op, top, nops, ops, ae, te), \
14737 TxCM_(m1, hs, m2, op, top, nops, ops, ae, te), \
14738 TxCM_(m1, cc, m2, op, top, nops, ops, ae, te), \
14739 TxCM_(m1, ul, m2, op, top, nops, ops, ae, te), \
14740 TxCM_(m1, lo, m2, op, top, nops, ops, ae, te), \
14741 TxCM_(m1, mi, m2, op, top, nops, ops, ae, te), \
14742 TxCM_(m1, pl, m2, op, top, nops, ops, ae, te), \
14743 TxCM_(m1, vs, m2, op, top, nops, ops, ae, te), \
14744 TxCM_(m1, vc, m2, op, top, nops, ops, ae, te), \
14745 TxCM_(m1, hi, m2, op, top, nops, ops, ae, te), \
14746 TxCM_(m1, ls, m2, op, top, nops, ops, ae, te), \
14747 TxCM_(m1, ge, m2, op, top, nops, ops, ae, te), \
14748 TxCM_(m1, lt, m2, op, top, nops, ops, ae, te), \
14749 TxCM_(m1, gt, m2, op, top, nops, ops, ae, te), \
14750 TxCM_(m1, le, m2, op, top, nops, ops, ae, te), \
14751 TxCM_(m1, al, m2, op, top, nops, ops, ae, te)
14753 #define TCM(m1,m2, aop, top, nops, ops, ae, te) \
14754 TxCM(m1,m2, aop, 0x##top, nops, ops, ae, te)
14755 #define tCM(m1,m2, aop, top, nops, ops, ae, te) \
14756 TxCM(m1,m2, aop, T_MNEM_##top, nops, ops, ae, te)
14758 /* Mnemonic that cannot be conditionalized. The ARM condition-code
14759 field is still 0xE. Many of the Thumb variants can be executed
14760 conditionally, so this is checked separately. */
14761 #define TUE(mnem, op, top, nops, ops, ae, te) \
14762 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
14763 THUMB_VARIANT, do_##ae, do_##te }
14765 /* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
14766 condition code field. */
14767 #define TUF(mnem, op, top, nops, ops, ae, te) \
14768 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
14769 THUMB_VARIANT, do_##ae, do_##te }
14771 /* ARM-only variants of all the above. */
14772 #define CE(mnem, op, nops, ops, ae) \
14773 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
14775 #define C3(mnem, op, nops, ops, ae) \
14776 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
14778 /* Legacy mnemonics that always have conditional infix after the third
14780 #define CL(mnem, op, nops, ops, ae) \
14781 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
14782 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
14784 /* Coprocessor instructions. Isomorphic between Arm and Thumb-2. */
14785 #define cCE(mnem, op, nops, ops, ae) \
14786 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
14788 /* Legacy coprocessor instructions where conditional infix and conditional
14789 suffix are ambiguous. For consistency this includes all FPA instructions,
14790 not just the potentially ambiguous ones. */
14791 #define cCL(mnem, op, nops, ops, ae) \
14792 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
14793 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
14795 /* Coprocessor, takes either a suffix or a position-3 infix
14796 (for an FPA corner case). */
14797 #define C3E(mnem, op, nops, ops, ae) \
14798 { #mnem, OPS##nops ops, OT_csuf_or_in3, \
14799 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
14801 #define xCM_(m1, m2, m3, op, nops, ops, ae) \
14802 { #m1 #m2 #m3, OPS##nops ops, \
14803 sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
14804 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
14806 #define CM(m1, m2, op, nops, ops, ae) \
14807 xCM_(m1, , m2, op, nops, ops, ae), \
14808 xCM_(m1, eq, m2, op, nops, ops, ae), \
14809 xCM_(m1, ne, m2, op, nops, ops, ae), \
14810 xCM_(m1, cs, m2, op, nops, ops, ae), \
14811 xCM_(m1, hs, m2, op, nops, ops, ae), \
14812 xCM_(m1, cc, m2, op, nops, ops, ae), \
14813 xCM_(m1, ul, m2, op, nops, ops, ae), \
14814 xCM_(m1, lo, m2, op, nops, ops, ae), \
14815 xCM_(m1, mi, m2, op, nops, ops, ae), \
14816 xCM_(m1, pl, m2, op, nops, ops, ae), \
14817 xCM_(m1, vs, m2, op, nops, ops, ae), \
14818 xCM_(m1, vc, m2, op, nops, ops, ae), \
14819 xCM_(m1, hi, m2, op, nops, ops, ae), \
14820 xCM_(m1, ls, m2, op, nops, ops, ae), \
14821 xCM_(m1, ge, m2, op, nops, ops, ae), \
14822 xCM_(m1, lt, m2, op, nops, ops, ae), \
14823 xCM_(m1, gt, m2, op, nops, ops, ae), \
14824 xCM_(m1, le, m2, op, nops, ops, ae), \
14825 xCM_(m1, al, m2, op, nops, ops, ae)
14827 #define UE(mnem, op, nops, ops, ae) \
14828 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
14830 #define UF(mnem, op, nops, ops, ae) \
14831 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
14833 /* Neon data-processing. ARM versions are unconditional with cond=0xf.
14834 The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
14835 use the same encoding function for each. */
14836 #define NUF(mnem, op, nops, ops, enc) \
14837 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
14838 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
14840 /* Neon data processing, version which indirects through neon_enc_tab for
14841 the various overloaded versions of opcodes. */
14842 #define nUF(mnem, op, nops, ops, enc) \
14843 { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM_##op, N_MNEM_##op, \
14844 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
14846 /* Neon insn with conditional suffix for the ARM version, non-overloaded
14848 #define NCE_tag(mnem, op, nops, ops, enc, tag) \
14849 { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
14850 THUMB_VARIANT, do_##enc, do_##enc }
14852 #define NCE(mnem, op, nops, ops, enc) \
14853 NCE_tag(mnem, op, nops, ops, enc, OT_csuffix)
14855 #define NCEF(mnem, op, nops, ops, enc) \
14856 NCE_tag(mnem, op, nops, ops, enc, OT_csuffixF)
14858 /* Neon insn with conditional suffix for the ARM version, overloaded types. */
14859 #define nCE_tag(mnem, op, nops, ops, enc, tag) \
14860 { #mnem, OPS##nops ops, tag, N_MNEM_##op, N_MNEM_##op, \
14861 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
14863 #define nCE(mnem, op, nops, ops, enc) \
14864 nCE_tag(mnem, op, nops, ops, enc, OT_csuffix)
14866 #define nCEF(mnem, op, nops, ops, enc) \
14867 nCE_tag(mnem, op, nops, ops, enc, OT_csuffixF)
14871 /* Thumb-only, unconditional. */
14872 #define UT(mnem, op, nops, ops, te) TUE(mnem, 0, op, nops, ops, 0, te)
14874 static const struct asm_opcode insns
[] =
14876 #define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions. */
14877 #define THUMB_VARIANT &arm_ext_v4t
14878 tCE(and, 0000000, and, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
14879 tC3(ands
, 0100000, ands
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
14880 tCE(eor
, 0200000, eor
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
14881 tC3(eors
, 0300000, eors
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
14882 tCE(sub
, 0400000, sub
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
14883 tC3(subs
, 0500000, subs
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
14884 tCE(add
, 0800000, add
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
14885 tC3(adds
, 0900000, adds
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
14886 tCE(adc
, 0a00000
, adc
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
14887 tC3(adcs
, 0b00000, adcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
14888 tCE(sbc
, 0c00000
, sbc
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
14889 tC3(sbcs
, 0d00000
, sbcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
14890 tCE(orr
, 1800000, orr
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
14891 tC3(orrs
, 1900000, orrs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
14892 tCE(bic
, 1c00000
, bic
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
14893 tC3(bics
, 1d00000
, bics
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
14895 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
14896 for setting PSR flag bits. They are obsolete in V6 and do not
14897 have Thumb equivalents. */
14898 tCE(tst
, 1100000, tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
14899 tC3w(tsts
, 1100000, tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
14900 CL(tstp
, 110f000
, 2, (RR
, SH
), cmp
),
14901 tCE(cmp
, 1500000, cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
14902 tC3w(cmps
, 1500000, cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
14903 CL(cmpp
, 150f000
, 2, (RR
, SH
), cmp
),
14904 tCE(cmn
, 1700000, cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
14905 tC3w(cmns
, 1700000, cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
14906 CL(cmnp
, 170f000
, 2, (RR
, SH
), cmp
),
14908 tCE(mov
, 1a00000
, mov
, 2, (RR
, SH
), mov
, t_mov_cmp
),
14909 tC3(movs
, 1b00000
, movs
, 2, (RR
, SH
), mov
, t_mov_cmp
),
14910 tCE(mvn
, 1e00000
, mvn
, 2, (RR
, SH
), mov
, t_mvn_tst
),
14911 tC3(mvns
, 1f00000
, mvns
, 2, (RR
, SH
), mov
, t_mvn_tst
),
14913 tCE(ldr
, 4100000, ldr
, 2, (RR
, ADDRGLDR
),ldst
, t_ldst
),
14914 tC3(ldrb
, 4500000, ldrb
, 2, (RR
, ADDRGLDR
),ldst
, t_ldst
),
14915 tCE(str
, 4000000, str
, 2, (RR
, ADDRGLDR
),ldst
, t_ldst
),
14916 tC3(strb
, 4400000, strb
, 2, (RR
, ADDRGLDR
),ldst
, t_ldst
),
14918 tCE(stm
, 8800000, stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
14919 tC3(stmia
, 8800000, stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
14920 tC3(stmea
, 8800000, stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
14921 tCE(ldm
, 8900000, ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
14922 tC3(ldmia
, 8900000, ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
14923 tC3(ldmfd
, 8900000, ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
14925 TCE(swi
, f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
14926 TCE(svc
, f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
14927 tCE(b
, a000000
, b
, 1, (EXPr
), branch
, t_branch
),
14928 TCE(bl
, b000000
, f000f800
, 1, (EXPr
), bl
, t_branch23
),
14931 tCE(adr
, 28f0000
, adr
, 2, (RR
, EXP
), adr
, t_adr
),
14932 C3(adrl
, 28f0000
, 2, (RR
, EXP
), adrl
),
14933 tCE(nop
, 1a00000
, nop
, 1, (oI255c
), nop
, t_nop
),
14935 /* Thumb-compatibility pseudo ops. */
14936 tCE(lsl
, 1a00000
, lsl
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
14937 tC3(lsls
, 1b00000
, lsls
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
14938 tCE(lsr
, 1a00020
, lsr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
14939 tC3(lsrs
, 1b00020
, lsrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
14940 tCE(asr
, 1a00040
, asr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
14941 tC3(asrs
, 1b00040
, asrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
14942 tCE(ror
, 1a00060
, ror
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
14943 tC3(rors
, 1b00060
, rors
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
14944 tCE(neg
, 2600000, neg
, 2, (RR
, RR
), rd_rn
, t_neg
),
14945 tC3(negs
, 2700000, negs
, 2, (RR
, RR
), rd_rn
, t_neg
),
14946 tCE(push
, 92d0000
, push
, 1, (REGLST
), push_pop
, t_push_pop
),
14947 tCE(pop
, 8bd0000
, pop
, 1, (REGLST
), push_pop
, t_push_pop
),
14949 /* These may simplify to neg. */
14950 TCE(rsb
, 0600000, ebc00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
14951 TC3(rsbs
, 0700000, ebd00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
14953 #undef THUMB_VARIANT
14954 #define THUMB_VARIANT &arm_ext_v6
14955 TCE(cpy
, 1a00000
, 4600, 2, (RR
, RR
), rd_rm
, t_cpy
),
14957 /* V1 instructions with no Thumb analogue prior to V6T2. */
14958 #undef THUMB_VARIANT
14959 #define THUMB_VARIANT &arm_ext_v6t2
14960 TCE(teq
, 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
14961 TC3w(teqs
, 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
14962 CL(teqp
, 130f000
, 2, (RR
, SH
), cmp
),
14964 TC3(ldrt
, 4300000, f8500e00
, 2, (RR
, ADDR
), ldstt
, t_ldstt
),
14965 TC3(ldrbt
, 4700000, f8100e00
, 2, (RR
, ADDR
), ldstt
, t_ldstt
),
14966 TC3(strt
, 4200000, f8400e00
, 2, (RR
, ADDR
), ldstt
, t_ldstt
),
14967 TC3(strbt
, 4600000, f8000e00
, 2, (RR
, ADDR
), ldstt
, t_ldstt
),
14969 TC3(stmdb
, 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
14970 TC3(stmfd
, 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
14972 TC3(ldmdb
, 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
14973 TC3(ldmea
, 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
14975 /* V1 instructions with no Thumb analogue at all. */
14976 CE(rsc
, 0e00000
, 3, (RR
, oRR
, SH
), arit
),
14977 C3(rscs
, 0f00000
, 3, (RR
, oRR
, SH
), arit
),
14979 C3(stmib
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
14980 C3(stmfa
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
14981 C3(stmda
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
14982 C3(stmed
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
14983 C3(ldmib
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
14984 C3(ldmed
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
14985 C3(ldmda
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
14986 C3(ldmfa
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
14989 #define ARM_VARIANT &arm_ext_v2 /* ARM 2 - multiplies. */
14990 #undef THUMB_VARIANT
14991 #define THUMB_VARIANT &arm_ext_v4t
14992 tCE(mul
, 0000090, mul
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
14993 tC3(muls
, 0100090, muls
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
14995 #undef THUMB_VARIANT
14996 #define THUMB_VARIANT &arm_ext_v6t2
14997 TCE(mla
, 0200090, fb000000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
14998 C3(mlas
, 0300090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
),
15000 /* Generic coprocessor instructions. */
15001 TCE(cdp
, e000000
, ee000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
15002 TCE(ldc
, c100000
, ec100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
15003 TC3(ldcl
, c500000
, ec500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
15004 TCE(stc
, c000000
, ec000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
15005 TC3(stcl
, c400000
, ec400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
15006 TCE(mcr
, e000010
, ee000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
15007 TCE(mrc
, e100010
, ee100010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
15010 #define ARM_VARIANT &arm_ext_v2s /* ARM 3 - swp instructions. */
15011 CE(swp
, 1000090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
15012 C3(swpb
, 1400090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
15015 #define ARM_VARIANT &arm_ext_v3 /* ARM 6 Status register instructions. */
15016 TCE(mrs
, 10f0000
, f3ef8000
, 2, (APSR_RR
, RVC_PSR
), mrs
, t_mrs
),
15017 TCE(msr
, 120f000
, f3808000
, 2, (RVC_PSR
, RR_EXi
), msr
, t_msr
),
15020 #define ARM_VARIANT &arm_ext_v3m /* ARM 7M long multiplies. */
15021 TCE(smull
, 0c00090
, fb800000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
15022 CM(smull
,s
, 0d00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
15023 TCE(umull
, 0800090, fba00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
15024 CM(umull
,s
, 0900090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
15025 TCE(smlal
, 0e00090
, fbc00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
15026 CM(smlal
,s
, 0f00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
15027 TCE(umlal
, 0a00090
, fbe00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
15028 CM(umlal
,s
, 0b00090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
15031 #define ARM_VARIANT &arm_ext_v4 /* ARM Architecture 4. */
15032 #undef THUMB_VARIANT
15033 #define THUMB_VARIANT &arm_ext_v4t
15034 tC3(ldrh
, 01000b0
, ldrh
, 2, (RR
, ADDRGLDRS
), ldstv4
, t_ldst
),
15035 tC3(strh
, 00000b0
, strh
, 2, (RR
, ADDRGLDRS
), ldstv4
, t_ldst
),
15036 tC3(ldrsh
, 01000f0
, ldrsh
, 2, (RR
, ADDRGLDRS
), ldstv4
, t_ldst
),
15037 tC3(ldrsb
, 01000d0
, ldrsb
, 2, (RR
, ADDRGLDRS
), ldstv4
, t_ldst
),
15038 tCM(ld
,sh
, 01000f0
, ldrsh
, 2, (RR
, ADDRGLDRS
), ldstv4
, t_ldst
),
15039 tCM(ld
,sb
, 01000d0
, ldrsb
, 2, (RR
, ADDRGLDRS
), ldstv4
, t_ldst
),
15042 #define ARM_VARIANT &arm_ext_v4t_5
15043 /* ARM Architecture 4T. */
15044 /* Note: bx (and blx) are required on V5, even if the processor does
15045 not support Thumb. */
15046 TCE(bx
, 12fff10
, 4700, 1, (RR
), bx
, t_bx
),
15049 #define ARM_VARIANT &arm_ext_v5 /* ARM Architecture 5T. */
15050 #undef THUMB_VARIANT
15051 #define THUMB_VARIANT &arm_ext_v5t
15052 /* Note: blx has 2 variants; the .value coded here is for
15053 BLX(2). Only this variant has conditional execution. */
15054 TCE(blx
, 12fff30
, 4780, 1, (RR_EXr
), blx
, t_blx
),
15055 TUE(bkpt
, 1200070, be00
, 1, (oIffffb
), bkpt
, t_bkpt
),
15057 #undef THUMB_VARIANT
15058 #define THUMB_VARIANT &arm_ext_v6t2
15059 TCE(clz
, 16f0f10
, fab0f080
, 2, (RRnpc
, RRnpc
), rd_rm
, t_clz
),
15060 TUF(ldc2
, c100000
, fc100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
15061 TUF(ldc2l
, c500000
, fc500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
15062 TUF(stc2
, c000000
, fc000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
15063 TUF(stc2l
, c400000
, fc400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
15064 TUF(cdp2
, e000000
, fe000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
15065 TUF(mcr2
, e000010
, fe000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
15066 TUF(mrc2
, e100010
, fe100010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
15069 #define ARM_VARIANT &arm_ext_v5exp /* ARM Architecture 5TExP. */
15070 TCE(smlabb
, 1000080, fb100000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
15071 TCE(smlatb
, 10000a0
, fb100020
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
15072 TCE(smlabt
, 10000c0
, fb100010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
15073 TCE(smlatt
, 10000e0
, fb100030
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
15075 TCE(smlawb
, 1200080, fb300000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
15076 TCE(smlawt
, 12000c0
, fb300010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
15078 TCE(smlalbb
, 1400080, fbc00080
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
15079 TCE(smlaltb
, 14000a0
, fbc000a0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
15080 TCE(smlalbt
, 14000c0
, fbc00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
15081 TCE(smlaltt
, 14000e0
, fbc000b0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
15083 TCE(smulbb
, 1600080, fb10f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
15084 TCE(smultb
, 16000a0
, fb10f020
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
15085 TCE(smulbt
, 16000c0
, fb10f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
15086 TCE(smultt
, 16000e0
, fb10f030
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
15088 TCE(smulwb
, 12000a0
, fb30f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
15089 TCE(smulwt
, 12000e0
, fb30f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
15091 TCE(qadd
, 1000050, fa80f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, rd_rm_rn
),
15092 TCE(qdadd
, 1400050, fa80f090
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, rd_rm_rn
),
15093 TCE(qsub
, 1200050, fa80f0a0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, rd_rm_rn
),
15094 TCE(qdsub
, 1600050, fa80f0b0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, rd_rm_rn
),
15097 #define ARM_VARIANT &arm_ext_v5e /* ARM Architecture 5TE. */
15098 TUF(pld
, 450f000
, f810f000
, 1, (ADDR
), pld
, t_pld
),
15099 TC3(ldrd
, 00000d0
, e8500000
, 3, (RRnpc
, oRRnpc
, ADDRGLDRS
), ldrd
, t_ldstd
),
15100 TC3(strd
, 00000f0
, e8400000
, 3, (RRnpc
, oRRnpc
, ADDRGLDRS
), ldrd
, t_ldstd
),
15102 TCE(mcrr
, c400000
, ec400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
15103 TCE(mrrc
, c500000
, ec500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
15106 #define ARM_VARIANT &arm_ext_v5j /* ARM Architecture 5TEJ. */
15107 TCE(bxj
, 12fff20
, f3c08f00
, 1, (RR
), bxj
, t_bxj
),
15110 #define ARM_VARIANT &arm_ext_v6 /* ARM V6. */
15111 #undef THUMB_VARIANT
15112 #define THUMB_VARIANT &arm_ext_v6
15113 TUF(cpsie
, 1080000, b660
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
15114 TUF(cpsid
, 10c0000
, b670
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
15115 tCE(rev
, 6bf0f30
, rev
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
15116 tCE(rev16
, 6bf0fb0
, rev16
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
15117 tCE(revsh
, 6ff0fb0
, revsh
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
15118 tCE(sxth
, 6bf0070
, sxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
15119 tCE(uxth
, 6ff0070
, uxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
15120 tCE(sxtb
, 6af0070
, sxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
15121 tCE(uxtb
, 6ef0070
, uxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
15122 TUF(setend
, 1010000, b650
, 1, (ENDI
), setend
, t_setend
),
15124 #undef THUMB_VARIANT
15125 #define THUMB_VARIANT &arm_ext_v6t2
15126 TCE(ldrex
, 1900f9f
, e8500f00
, 2, (RRnpc
, ADDR
), ldrex
, t_ldrex
),
15127 TCE(strex
, 1800f90
, e8400000
, 3, (RRnpc
, RRnpc
, ADDR
), strex
, t_strex
),
15128 TUF(mcrr2
, c400000
, fc400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
15129 TUF(mrrc2
, c500000
, fc500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
15131 TCE(ssat
, 6a00010
, f3000000
, 4, (RRnpc
, I32
, RRnpc
, oSHllar
),ssat
, t_ssat
),
15132 TCE(usat
, 6e00010
, f3800000
, 4, (RRnpc
, I31
, RRnpc
, oSHllar
),usat
, t_usat
),
15134 /* ARM V6 not included in V7M (eg. integer SIMD). */
15135 #undef THUMB_VARIANT
15136 #define THUMB_VARIANT &arm_ext_v6_notm
15137 TUF(cps
, 1020000, f3af8100
, 1, (I31b
), imm0
, t_cps
),
15138 TCE(pkhbt
, 6800010, eac00000
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHll
), pkhbt
, t_pkhbt
),
15139 TCE(pkhtb
, 6800050, eac00020
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHar
), pkhtb
, t_pkhtb
),
15140 TCE(qadd16
, 6200f10
, fa90f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15141 TCE(qadd8
, 6200f90
, fa80f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15142 TCE(qaddsubx
, 6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15143 TCE(qsub16
, 6200f70
, fad0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15144 TCE(qsub8
, 6200ff0
, fac0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15145 TCE(qsubaddx
, 6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15146 TCE(sadd16
, 6100f10
, fa90f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15147 TCE(sadd8
, 6100f90
, fa80f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15148 TCE(saddsubx
, 6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15149 TCE(shadd16
, 6300f10
, fa90f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15150 TCE(shadd8
, 6300f90
, fa80f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15151 TCE(shaddsubx
, 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15152 TCE(shsub16
, 6300f70
, fad0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15153 TCE(shsub8
, 6300ff0
, fac0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15154 TCE(shsubaddx
, 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15155 TCE(ssub16
, 6100f70
, fad0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15156 TCE(ssub8
, 6100ff0
, fac0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15157 TCE(ssubaddx
, 6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15158 TCE(uadd16
, 6500f10
, fa90f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15159 TCE(uadd8
, 6500f90
, fa80f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15160 TCE(uaddsubx
, 6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15161 TCE(uhadd16
, 6700f10
, fa90f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15162 TCE(uhadd8
, 6700f90
, fa80f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15163 TCE(uhaddsubx
, 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15164 TCE(uhsub16
, 6700f70
, fad0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15165 TCE(uhsub8
, 6700ff0
, fac0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15166 TCE(uhsubaddx
, 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15167 TCE(uqadd16
, 6600f10
, fa90f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15168 TCE(uqadd8
, 6600f90
, fa80f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15169 TCE(uqaddsubx
, 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15170 TCE(uqsub16
, 6600f70
, fad0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15171 TCE(uqsub8
, 6600ff0
, fac0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15172 TCE(uqsubaddx
, 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15173 TCE(usub16
, 6500f70
, fad0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15174 TCE(usub8
, 6500ff0
, fac0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15175 TCE(usubaddx
, 6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15176 TUF(rfeia
, 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
15177 UF(rfeib
, 9900a00
, 1, (RRw
), rfe
),
15178 UF(rfeda
, 8100a00
, 1, (RRw
), rfe
),
15179 TUF(rfedb
, 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
15180 TUF(rfefd
, 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
15181 UF(rfefa
, 9900a00
, 1, (RRw
), rfe
),
15182 UF(rfeea
, 8100a00
, 1, (RRw
), rfe
),
15183 TUF(rfeed
, 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
15184 TCE(sxtah
, 6b00070
, fa00f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
15185 TCE(sxtab16
, 6800070, fa20f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
15186 TCE(sxtab
, 6a00070
, fa40f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
15187 TCE(sxtb16
, 68f0070
, fa2ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
15188 TCE(uxtah
, 6f00070
, fa10f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
15189 TCE(uxtab16
, 6c00070
, fa30f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
15190 TCE(uxtab
, 6e00070
, fa50f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
15191 TCE(uxtb16
, 6cf0070
, fa3ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
15192 TCE(sel
, 6800fb0
, faa0f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15193 TCE(smlad
, 7000010, fb200000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
15194 TCE(smladx
, 7000030, fb200010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
15195 TCE(smlald
, 7400010, fbc000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
15196 TCE(smlaldx
, 7400030, fbc000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
15197 TCE(smlsd
, 7000050, fb400000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
15198 TCE(smlsdx
, 7000070, fb400010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
15199 TCE(smlsld
, 7400050, fbd000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
15200 TCE(smlsldx
, 7400070, fbd000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
15201 TCE(smmla
, 7500010, fb500000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
15202 TCE(smmlar
, 7500030, fb500010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
15203 TCE(smmls
, 75000d0
, fb600000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
15204 TCE(smmlsr
, 75000f0
, fb600010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
15205 TCE(smmul
, 750f010
, fb50f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
15206 TCE(smmulr
, 750f030
, fb50f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
15207 TCE(smuad
, 700f010
, fb20f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
15208 TCE(smuadx
, 700f030
, fb20f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
15209 TCE(smusd
, 700f050
, fb40f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
15210 TCE(smusdx
, 700f070
, fb40f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
15211 TUF(srsia
, 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
15212 UF(srsib
, 9c00500
, 2, (oRRw
, I31w
), srs
),
15213 UF(srsda
, 8400500, 2, (oRRw
, I31w
), srs
),
15214 TUF(srsdb
, 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
15215 TCE(ssat16
, 6a00f30
, f3200000
, 3, (RRnpc
, I16
, RRnpc
), ssat16
, t_ssat16
),
15216 TCE(umaal
, 0400090, fbe00060
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
, t_mlal
),
15217 TCE(usad8
, 780f010
, fb70f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
15218 TCE(usada8
, 7800010, fb700000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
15219 TCE(usat16
, 6e00f30
, f3a00000
, 3, (RRnpc
, I15
, RRnpc
), usat16
, t_usat16
),
15222 #define ARM_VARIANT &arm_ext_v6k
15223 #undef THUMB_VARIANT
15224 #define THUMB_VARIANT &arm_ext_v6k
15225 tCE(yield
, 320f001
, yield
, 0, (), noargs
, t_hint
),
15226 tCE(wfe
, 320f002
, wfe
, 0, (), noargs
, t_hint
),
15227 tCE(wfi
, 320f003
, wfi
, 0, (), noargs
, t_hint
),
15228 tCE(sev
, 320f004
, sev
, 0, (), noargs
, t_hint
),
15230 #undef THUMB_VARIANT
15231 #define THUMB_VARIANT &arm_ext_v6_notm
15232 TCE(ldrexd
, 1b00f9f
, e8d0007f
, 3, (RRnpc
, oRRnpc
, RRnpcb
), ldrexd
, t_ldrexd
),
15233 TCE(strexd
, 1a00f90
, e8c00070
, 4, (RRnpc
, RRnpc
, oRRnpc
, RRnpcb
), strexd
, t_strexd
),
15235 #undef THUMB_VARIANT
15236 #define THUMB_VARIANT &arm_ext_v6t2
15237 TCE(ldrexb
, 1d00f9f
, e8d00f4f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
15238 TCE(ldrexh
, 1f00f9f
, e8d00f5f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
15239 TCE(strexb
, 1c00f90
, e8c00f40
, 3, (RRnpc
, RRnpc
, ADDR
), strex
, rm_rd_rn
),
15240 TCE(strexh
, 1e00f90
, e8c00f50
, 3, (RRnpc
, RRnpc
, ADDR
), strex
, rm_rd_rn
),
15241 TUF(clrex
, 57ff01f
, f3bf8f2f
, 0, (), noargs
, noargs
),
15244 #define ARM_VARIANT &arm_ext_v6z
15245 TCE(smc
, 1600070, f7f08000
, 1, (EXPi
), smc
, t_smc
),
15248 #define ARM_VARIANT &arm_ext_v6t2
15249 TCE(bfc
, 7c0001f
, f36f0000
, 3, (RRnpc
, I31
, I32
), bfc
, t_bfc
),
15250 TCE(bfi
, 7c00010
, f3600000
, 4, (RRnpc
, RRnpc_I0
, I31
, I32
), bfi
, t_bfi
),
15251 TCE(sbfx
, 7a00050
, f3400000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
15252 TCE(ubfx
, 7e00050
, f3c00000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
15254 TCE(mls
, 0600090, fb000010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
15255 TCE(movw
, 3000000, f2400000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
15256 TCE(movt
, 3400000, f2c00000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
15257 TCE(rbit
, 6ff0f30
, fa90f0a0
, 2, (RR
, RR
), rd_rm
, t_rbit
),
15259 TC3(ldrht
, 03000b0
, f8300e00
, 2, (RR
, ADDR
), ldsttv4
, t_ldstt
),
15260 TC3(ldrsht
, 03000f0
, f9300e00
, 2, (RR
, ADDR
), ldsttv4
, t_ldstt
),
15261 TC3(ldrsbt
, 03000d0
, f9100e00
, 2, (RR
, ADDR
), ldsttv4
, t_ldstt
),
15262 TC3(strht
, 02000b0
, f8200e00
, 2, (RR
, ADDR
), ldsttv4
, t_ldstt
),
15264 UT(cbnz
, b900
, 2, (RR
, EXP
), t_cbz
),
15265 UT(cbz
, b100
, 2, (RR
, EXP
), t_cbz
),
15266 /* ARM does not really have an IT instruction, so always allow it. */
15268 #define ARM_VARIANT &arm_ext_v1
15269 TUE(it
, 0, bf08
, 1, (COND
), it
, t_it
),
15270 TUE(itt
, 0, bf0c
, 1, (COND
), it
, t_it
),
15271 TUE(ite
, 0, bf04
, 1, (COND
), it
, t_it
),
15272 TUE(ittt
, 0, bf0e
, 1, (COND
), it
, t_it
),
15273 TUE(itet
, 0, bf06
, 1, (COND
), it
, t_it
),
15274 TUE(itte
, 0, bf0a
, 1, (COND
), it
, t_it
),
15275 TUE(itee
, 0, bf02
, 1, (COND
), it
, t_it
),
15276 TUE(itttt
, 0, bf0f
, 1, (COND
), it
, t_it
),
15277 TUE(itett
, 0, bf07
, 1, (COND
), it
, t_it
),
15278 TUE(ittet
, 0, bf0b
, 1, (COND
), it
, t_it
),
15279 TUE(iteet
, 0, bf03
, 1, (COND
), it
, t_it
),
15280 TUE(ittte
, 0, bf0d
, 1, (COND
), it
, t_it
),
15281 TUE(itete
, 0, bf05
, 1, (COND
), it
, t_it
),
15282 TUE(ittee
, 0, bf09
, 1, (COND
), it
, t_it
),
15283 TUE(iteee
, 0, bf01
, 1, (COND
), it
, t_it
),
15285 /* Thumb2 only instructions. */
15287 #define ARM_VARIANT NULL
15289 TCE(addw
, 0, f2000000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
15290 TCE(subw
, 0, f2a00000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
15291 TCE(tbb
, 0, e8d0f000
, 1, (TB
), 0, t_tb
),
15292 TCE(tbh
, 0, e8d0f010
, 1, (TB
), 0, t_tb
),
15294 /* Thumb-2 hardware division instructions (R and M profiles only). */
15295 #undef THUMB_VARIANT
15296 #define THUMB_VARIANT &arm_ext_div
15297 TCE(sdiv
, 0, fb90f0f0
, 3, (RR
, oRR
, RR
), 0, t_div
),
15298 TCE(udiv
, 0, fbb0f0f0
, 3, (RR
, oRR
, RR
), 0, t_div
),
15300 /* ARM V7 instructions. */
15302 #define ARM_VARIANT &arm_ext_v7
15303 #undef THUMB_VARIANT
15304 #define THUMB_VARIANT &arm_ext_v7
15305 TUF(pli
, 450f000
, f910f000
, 1, (ADDR
), pli
, t_pld
),
15306 TCE(dbg
, 320f0f0
, f3af80f0
, 1, (I15
), dbg
, t_dbg
),
15307 TUF(dmb
, 57ff050
, f3bf8f50
, 1, (oBARRIER
), barrier
, t_barrier
),
15308 TUF(dsb
, 57ff040
, f3bf8f40
, 1, (oBARRIER
), barrier
, t_barrier
),
15309 TUF(isb
, 57ff060
, f3bf8f60
, 1, (oBARRIER
), barrier
, t_barrier
),
15312 #define ARM_VARIANT &fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
15313 cCE(wfs
, e200110
, 1, (RR
), rd
),
15314 cCE(rfs
, e300110
, 1, (RR
), rd
),
15315 cCE(wfc
, e400110
, 1, (RR
), rd
),
15316 cCE(rfc
, e500110
, 1, (RR
), rd
),
15318 cCL(ldfs
, c100100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
15319 cCL(ldfd
, c108100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
15320 cCL(ldfe
, c500100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
15321 cCL(ldfp
, c508100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
15323 cCL(stfs
, c000100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
15324 cCL(stfd
, c008100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
15325 cCL(stfe
, c400100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
15326 cCL(stfp
, c408100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
15328 cCL(mvfs
, e008100
, 2, (RF
, RF_IF
), rd_rm
),
15329 cCL(mvfsp
, e008120
, 2, (RF
, RF_IF
), rd_rm
),
15330 cCL(mvfsm
, e008140
, 2, (RF
, RF_IF
), rd_rm
),
15331 cCL(mvfsz
, e008160
, 2, (RF
, RF_IF
), rd_rm
),
15332 cCL(mvfd
, e008180
, 2, (RF
, RF_IF
), rd_rm
),
15333 cCL(mvfdp
, e0081a0
, 2, (RF
, RF_IF
), rd_rm
),
15334 cCL(mvfdm
, e0081c0
, 2, (RF
, RF_IF
), rd_rm
),
15335 cCL(mvfdz
, e0081e0
, 2, (RF
, RF_IF
), rd_rm
),
15336 cCL(mvfe
, e088100
, 2, (RF
, RF_IF
), rd_rm
),
15337 cCL(mvfep
, e088120
, 2, (RF
, RF_IF
), rd_rm
),
15338 cCL(mvfem
, e088140
, 2, (RF
, RF_IF
), rd_rm
),
15339 cCL(mvfez
, e088160
, 2, (RF
, RF_IF
), rd_rm
),
15341 cCL(mnfs
, e108100
, 2, (RF
, RF_IF
), rd_rm
),
15342 cCL(mnfsp
, e108120
, 2, (RF
, RF_IF
), rd_rm
),
15343 cCL(mnfsm
, e108140
, 2, (RF
, RF_IF
), rd_rm
),
15344 cCL(mnfsz
, e108160
, 2, (RF
, RF_IF
), rd_rm
),
15345 cCL(mnfd
, e108180
, 2, (RF
, RF_IF
), rd_rm
),
15346 cCL(mnfdp
, e1081a0
, 2, (RF
, RF_IF
), rd_rm
),
15347 cCL(mnfdm
, e1081c0
, 2, (RF
, RF_IF
), rd_rm
),
15348 cCL(mnfdz
, e1081e0
, 2, (RF
, RF_IF
), rd_rm
),
15349 cCL(mnfe
, e188100
, 2, (RF
, RF_IF
), rd_rm
),
15350 cCL(mnfep
, e188120
, 2, (RF
, RF_IF
), rd_rm
),
15351 cCL(mnfem
, e188140
, 2, (RF
, RF_IF
), rd_rm
),
15352 cCL(mnfez
, e188160
, 2, (RF
, RF_IF
), rd_rm
),
15354 cCL(abss
, e208100
, 2, (RF
, RF_IF
), rd_rm
),
15355 cCL(abssp
, e208120
, 2, (RF
, RF_IF
), rd_rm
),
15356 cCL(abssm
, e208140
, 2, (RF
, RF_IF
), rd_rm
),
15357 cCL(abssz
, e208160
, 2, (RF
, RF_IF
), rd_rm
),
15358 cCL(absd
, e208180
, 2, (RF
, RF_IF
), rd_rm
),
15359 cCL(absdp
, e2081a0
, 2, (RF
, RF_IF
), rd_rm
),
15360 cCL(absdm
, e2081c0
, 2, (RF
, RF_IF
), rd_rm
),
15361 cCL(absdz
, e2081e0
, 2, (RF
, RF_IF
), rd_rm
),
15362 cCL(abse
, e288100
, 2, (RF
, RF_IF
), rd_rm
),
15363 cCL(absep
, e288120
, 2, (RF
, RF_IF
), rd_rm
),
15364 cCL(absem
, e288140
, 2, (RF
, RF_IF
), rd_rm
),
15365 cCL(absez
, e288160
, 2, (RF
, RF_IF
), rd_rm
),
15367 cCL(rnds
, e308100
, 2, (RF
, RF_IF
), rd_rm
),
15368 cCL(rndsp
, e308120
, 2, (RF
, RF_IF
), rd_rm
),
15369 cCL(rndsm
, e308140
, 2, (RF
, RF_IF
), rd_rm
),
15370 cCL(rndsz
, e308160
, 2, (RF
, RF_IF
), rd_rm
),
15371 cCL(rndd
, e308180
, 2, (RF
, RF_IF
), rd_rm
),
15372 cCL(rnddp
, e3081a0
, 2, (RF
, RF_IF
), rd_rm
),
15373 cCL(rnddm
, e3081c0
, 2, (RF
, RF_IF
), rd_rm
),
15374 cCL(rnddz
, e3081e0
, 2, (RF
, RF_IF
), rd_rm
),
15375 cCL(rnde
, e388100
, 2, (RF
, RF_IF
), rd_rm
),
15376 cCL(rndep
, e388120
, 2, (RF
, RF_IF
), rd_rm
),
15377 cCL(rndem
, e388140
, 2, (RF
, RF_IF
), rd_rm
),
15378 cCL(rndez
, e388160
, 2, (RF
, RF_IF
), rd_rm
),
15380 cCL(sqts
, e408100
, 2, (RF
, RF_IF
), rd_rm
),
15381 cCL(sqtsp
, e408120
, 2, (RF
, RF_IF
), rd_rm
),
15382 cCL(sqtsm
, e408140
, 2, (RF
, RF_IF
), rd_rm
),
15383 cCL(sqtsz
, e408160
, 2, (RF
, RF_IF
), rd_rm
),
15384 cCL(sqtd
, e408180
, 2, (RF
, RF_IF
), rd_rm
),
15385 cCL(sqtdp
, e4081a0
, 2, (RF
, RF_IF
), rd_rm
),
15386 cCL(sqtdm
, e4081c0
, 2, (RF
, RF_IF
), rd_rm
),
15387 cCL(sqtdz
, e4081e0
, 2, (RF
, RF_IF
), rd_rm
),
15388 cCL(sqte
, e488100
, 2, (RF
, RF_IF
), rd_rm
),
15389 cCL(sqtep
, e488120
, 2, (RF
, RF_IF
), rd_rm
),
15390 cCL(sqtem
, e488140
, 2, (RF
, RF_IF
), rd_rm
),
15391 cCL(sqtez
, e488160
, 2, (RF
, RF_IF
), rd_rm
),
15393 cCL(logs
, e508100
, 2, (RF
, RF_IF
), rd_rm
),
15394 cCL(logsp
, e508120
, 2, (RF
, RF_IF
), rd_rm
),
15395 cCL(logsm
, e508140
, 2, (RF
, RF_IF
), rd_rm
),
15396 cCL(logsz
, e508160
, 2, (RF
, RF_IF
), rd_rm
),
15397 cCL(logd
, e508180
, 2, (RF
, RF_IF
), rd_rm
),
15398 cCL(logdp
, e5081a0
, 2, (RF
, RF_IF
), rd_rm
),
15399 cCL(logdm
, e5081c0
, 2, (RF
, RF_IF
), rd_rm
),
15400 cCL(logdz
, e5081e0
, 2, (RF
, RF_IF
), rd_rm
),
15401 cCL(loge
, e588100
, 2, (RF
, RF_IF
), rd_rm
),
15402 cCL(logep
, e588120
, 2, (RF
, RF_IF
), rd_rm
),
15403 cCL(logem
, e588140
, 2, (RF
, RF_IF
), rd_rm
),
15404 cCL(logez
, e588160
, 2, (RF
, RF_IF
), rd_rm
),
15406 cCL(lgns
, e608100
, 2, (RF
, RF_IF
), rd_rm
),
15407 cCL(lgnsp
, e608120
, 2, (RF
, RF_IF
), rd_rm
),
15408 cCL(lgnsm
, e608140
, 2, (RF
, RF_IF
), rd_rm
),
15409 cCL(lgnsz
, e608160
, 2, (RF
, RF_IF
), rd_rm
),
15410 cCL(lgnd
, e608180
, 2, (RF
, RF_IF
), rd_rm
),
15411 cCL(lgndp
, e6081a0
, 2, (RF
, RF_IF
), rd_rm
),
15412 cCL(lgndm
, e6081c0
, 2, (RF
, RF_IF
), rd_rm
),
15413 cCL(lgndz
, e6081e0
, 2, (RF
, RF_IF
), rd_rm
),
15414 cCL(lgne
, e688100
, 2, (RF
, RF_IF
), rd_rm
),
15415 cCL(lgnep
, e688120
, 2, (RF
, RF_IF
), rd_rm
),
15416 cCL(lgnem
, e688140
, 2, (RF
, RF_IF
), rd_rm
),
15417 cCL(lgnez
, e688160
, 2, (RF
, RF_IF
), rd_rm
),
15419 cCL(exps
, e708100
, 2, (RF
, RF_IF
), rd_rm
),
15420 cCL(expsp
, e708120
, 2, (RF
, RF_IF
), rd_rm
),
15421 cCL(expsm
, e708140
, 2, (RF
, RF_IF
), rd_rm
),
15422 cCL(expsz
, e708160
, 2, (RF
, RF_IF
), rd_rm
),
15423 cCL(expd
, e708180
, 2, (RF
, RF_IF
), rd_rm
),
15424 cCL(expdp
, e7081a0
, 2, (RF
, RF_IF
), rd_rm
),
15425 cCL(expdm
, e7081c0
, 2, (RF
, RF_IF
), rd_rm
),
15426 cCL(expdz
, e7081e0
, 2, (RF
, RF_IF
), rd_rm
),
15427 cCL(expe
, e788100
, 2, (RF
, RF_IF
), rd_rm
),
15428 cCL(expep
, e788120
, 2, (RF
, RF_IF
), rd_rm
),
15429 cCL(expem
, e788140
, 2, (RF
, RF_IF
), rd_rm
),
15430 cCL(expdz
, e788160
, 2, (RF
, RF_IF
), rd_rm
),
 cCL(sins, e808100, 2, (RF, RF_IF), rd_rm),
 cCL(sinsp, e808120, 2, (RF, RF_IF), rd_rm),
 cCL(sinsm, e808140, 2, (RF, RF_IF), rd_rm),
 cCL(sinsz, e808160, 2, (RF, RF_IF), rd_rm),
 cCL(sind, e808180, 2, (RF, RF_IF), rd_rm),
 cCL(sindp, e8081a0, 2, (RF, RF_IF), rd_rm),
 cCL(sindm, e8081c0, 2, (RF, RF_IF), rd_rm),
 cCL(sindz, e8081e0, 2, (RF, RF_IF), rd_rm),
 cCL(sine, e888100, 2, (RF, RF_IF), rd_rm),
 cCL(sinep, e888120, 2, (RF, RF_IF), rd_rm),
 cCL(sinem, e888140, 2, (RF, RF_IF), rd_rm),
 cCL(sinez, e888160, 2, (RF, RF_IF), rd_rm),

 cCL(coss, e908100, 2, (RF, RF_IF), rd_rm),
 cCL(cossp, e908120, 2, (RF, RF_IF), rd_rm),
 cCL(cossm, e908140, 2, (RF, RF_IF), rd_rm),
 cCL(cossz, e908160, 2, (RF, RF_IF), rd_rm),
 cCL(cosd, e908180, 2, (RF, RF_IF), rd_rm),
 cCL(cosdp, e9081a0, 2, (RF, RF_IF), rd_rm),
 cCL(cosdm, e9081c0, 2, (RF, RF_IF), rd_rm),
 cCL(cosdz, e9081e0, 2, (RF, RF_IF), rd_rm),
 cCL(cose, e988100, 2, (RF, RF_IF), rd_rm),
 cCL(cosep, e988120, 2, (RF, RF_IF), rd_rm),
 cCL(cosem, e988140, 2, (RF, RF_IF), rd_rm),
 cCL(cosez, e988160, 2, (RF, RF_IF), rd_rm),
 cCL(tans, ea08100, 2, (RF, RF_IF), rd_rm),
 cCL(tansp, ea08120, 2, (RF, RF_IF), rd_rm),
 cCL(tansm, ea08140, 2, (RF, RF_IF), rd_rm),
 cCL(tansz, ea08160, 2, (RF, RF_IF), rd_rm),
 cCL(tand, ea08180, 2, (RF, RF_IF), rd_rm),
 cCL(tandp, ea081a0, 2, (RF, RF_IF), rd_rm),
 cCL(tandm, ea081c0, 2, (RF, RF_IF), rd_rm),
 cCL(tandz, ea081e0, 2, (RF, RF_IF), rd_rm),
 cCL(tane, ea88100, 2, (RF, RF_IF), rd_rm),
 cCL(tanep, ea88120, 2, (RF, RF_IF), rd_rm),
 cCL(tanem, ea88140, 2, (RF, RF_IF), rd_rm),
 cCL(tanez, ea88160, 2, (RF, RF_IF), rd_rm),

 cCL(asns, eb08100, 2, (RF, RF_IF), rd_rm),
 cCL(asnsp, eb08120, 2, (RF, RF_IF), rd_rm),
 cCL(asnsm, eb08140, 2, (RF, RF_IF), rd_rm),
 cCL(asnsz, eb08160, 2, (RF, RF_IF), rd_rm),
 cCL(asnd, eb08180, 2, (RF, RF_IF), rd_rm),
 cCL(asndp, eb081a0, 2, (RF, RF_IF), rd_rm),
 cCL(asndm, eb081c0, 2, (RF, RF_IF), rd_rm),
 cCL(asndz, eb081e0, 2, (RF, RF_IF), rd_rm),
 cCL(asne, eb88100, 2, (RF, RF_IF), rd_rm),
 cCL(asnep, eb88120, 2, (RF, RF_IF), rd_rm),
 cCL(asnem, eb88140, 2, (RF, RF_IF), rd_rm),
 cCL(asnez, eb88160, 2, (RF, RF_IF), rd_rm),
 cCL(acss, ec08100, 2, (RF, RF_IF), rd_rm),
 cCL(acssp, ec08120, 2, (RF, RF_IF), rd_rm),
 cCL(acssm, ec08140, 2, (RF, RF_IF), rd_rm),
 cCL(acssz, ec08160, 2, (RF, RF_IF), rd_rm),
 cCL(acsd, ec08180, 2, (RF, RF_IF), rd_rm),
 cCL(acsdp, ec081a0, 2, (RF, RF_IF), rd_rm),
 cCL(acsdm, ec081c0, 2, (RF, RF_IF), rd_rm),
 cCL(acsdz, ec081e0, 2, (RF, RF_IF), rd_rm),
 cCL(acse, ec88100, 2, (RF, RF_IF), rd_rm),
 cCL(acsep, ec88120, 2, (RF, RF_IF), rd_rm),
 cCL(acsem, ec88140, 2, (RF, RF_IF), rd_rm),
 cCL(acsez, ec88160, 2, (RF, RF_IF), rd_rm),

 cCL(atns, ed08100, 2, (RF, RF_IF), rd_rm),
 cCL(atnsp, ed08120, 2, (RF, RF_IF), rd_rm),
 cCL(atnsm, ed08140, 2, (RF, RF_IF), rd_rm),
 cCL(atnsz, ed08160, 2, (RF, RF_IF), rd_rm),
 cCL(atnd, ed08180, 2, (RF, RF_IF), rd_rm),
 cCL(atndp, ed081a0, 2, (RF, RF_IF), rd_rm),
 cCL(atndm, ed081c0, 2, (RF, RF_IF), rd_rm),
 cCL(atndz, ed081e0, 2, (RF, RF_IF), rd_rm),
 cCL(atne, ed88100, 2, (RF, RF_IF), rd_rm),
 cCL(atnep, ed88120, 2, (RF, RF_IF), rd_rm),
 cCL(atnem, ed88140, 2, (RF, RF_IF), rd_rm),
 cCL(atnez, ed88160, 2, (RF, RF_IF), rd_rm),
 cCL(urds, ee08100, 2, (RF, RF_IF), rd_rm),
 cCL(urdsp, ee08120, 2, (RF, RF_IF), rd_rm),
 cCL(urdsm, ee08140, 2, (RF, RF_IF), rd_rm),
 cCL(urdsz, ee08160, 2, (RF, RF_IF), rd_rm),
 cCL(urdd, ee08180, 2, (RF, RF_IF), rd_rm),
 cCL(urddp, ee081a0, 2, (RF, RF_IF), rd_rm),
 cCL(urddm, ee081c0, 2, (RF, RF_IF), rd_rm),
 cCL(urddz, ee081e0, 2, (RF, RF_IF), rd_rm),
 cCL(urde, ee88100, 2, (RF, RF_IF), rd_rm),
 cCL(urdep, ee88120, 2, (RF, RF_IF), rd_rm),
 cCL(urdem, ee88140, 2, (RF, RF_IF), rd_rm),
 cCL(urdez, ee88160, 2, (RF, RF_IF), rd_rm),

 cCL(nrms, ef08100, 2, (RF, RF_IF), rd_rm),
 cCL(nrmsp, ef08120, 2, (RF, RF_IF), rd_rm),
 cCL(nrmsm, ef08140, 2, (RF, RF_IF), rd_rm),
 cCL(nrmsz, ef08160, 2, (RF, RF_IF), rd_rm),
 cCL(nrmd, ef08180, 2, (RF, RF_IF), rd_rm),
 cCL(nrmdp, ef081a0, 2, (RF, RF_IF), rd_rm),
 cCL(nrmdm, ef081c0, 2, (RF, RF_IF), rd_rm),
 cCL(nrmdz, ef081e0, 2, (RF, RF_IF), rd_rm),
 cCL(nrme, ef88100, 2, (RF, RF_IF), rd_rm),
 cCL(nrmep, ef88120, 2, (RF, RF_IF), rd_rm),
 cCL(nrmem, ef88140, 2, (RF, RF_IF), rd_rm),
 cCL(nrmez, ef88160, 2, (RF, RF_IF), rd_rm),
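
 /* Illustrative only, not part of the upstream table: the FPA monadic
    entries above accept forms such as "sqtdz f0, f1" or "sine f2, #1.0",
    i.e. an optionally conditionalised mnemonic with precision (s/d/e) and
    rounding (p/m/z) suffixes, a destination register, and a register or
    one of the FPA float immediates.  */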
 cCL(adfs, e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfsp, e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfsm, e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfsz, e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfd, e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfdp, e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfdm, e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfdz, e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfe, e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfep, e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfem, e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfez, e080160, 3, (RF, RF, RF_IF), rd_rn_rm),

 cCL(sufs, e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufsp, e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufsm, e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufsz, e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufd, e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufdp, e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufdm, e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufdz, e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufe, e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufep, e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufem, e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufez, e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfs, e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfsp, e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfsm, e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfsz, e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfd, e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfdp, e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfdm, e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfdz, e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfe, e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfep, e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfem, e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfez, e380160, 3, (RF, RF, RF_IF), rd_rn_rm),

 cCL(mufs, e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufsp, e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufsm, e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufsz, e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufd, e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufdp, e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufdm, e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufdz, e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufe, e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufep, e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufem, e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufez, e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfs, e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfsp, e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfsm, e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfsz, e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfd, e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfdp, e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfdm, e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfdz, e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfe, e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfep, e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfem, e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfez, e480160, 3, (RF, RF, RF_IF), rd_rn_rm),

 cCL(rdfs, e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfsp, e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfsm, e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfsz, e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfd, e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfdp, e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfdm, e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfdz, e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfe, e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfep, e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfem, e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfez, e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(pows, e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powsp, e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powsm, e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powsz, e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powd, e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powdp, e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powdm, e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powdz, e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powe, e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powep, e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powem, e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powez, e680160, 3, (RF, RF, RF_IF), rd_rn_rm),

 cCL(rpws, e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwsp, e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwsm, e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwsz, e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwd, e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwdp, e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwdm, e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwdz, e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwe, e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwep, e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwem, e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwez, e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfs, e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfsp, e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfsm, e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfsz, e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfd, e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfdp, e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfdm, e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfdz, e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfe, e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfep, e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfem, e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfez, e880160, 3, (RF, RF, RF_IF), rd_rn_rm),

 cCL(fmls, e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmlsp, e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmlsm, e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmlsz, e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmld, e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmldp, e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmldm, e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmldz, e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmle, e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmlep, e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmlem, e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmlez, e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvs, ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvsp, ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvsm, ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvsz, ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvd, ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvdp, ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvdm, ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvdz, ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdve, ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvep, ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvem, ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvez, ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),

 cCL(frds, eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frdsp, eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frdsm, eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frdsz, eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frdd, eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frddp, eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frddm, eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frddz, eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frde, eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frdep, eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frdem, eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frdez, eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(pols, ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(polsp, ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(polsm, ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(polsz, ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(pold, ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(poldp, ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(poldm, ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(poldz, ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(pole, ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(polep, ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(polem, ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(polez, ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
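
 /* Illustrative only, not part of the upstream table: the FPA dyadic
    entries above take two sources, e.g. "adfd f0, f1, f2" or
    "mufs f1, f2, #2.0".  */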
 cCE(cmf, e90f110, 2, (RF, RF_IF), fpa_cmp),
 C3E(cmfe, ed0f110, 2, (RF, RF_IF), fpa_cmp),
 cCE(cnf, eb0f110, 2, (RF, RF_IF), fpa_cmp),
 C3E(cnfe, ef0f110, 2, (RF, RF_IF), fpa_cmp),

 cCL(flts, e000110, 2, (RF, RR), rn_rd),
 cCL(fltsp, e000130, 2, (RF, RR), rn_rd),
 cCL(fltsm, e000150, 2, (RF, RR), rn_rd),
 cCL(fltsz, e000170, 2, (RF, RR), rn_rd),
 cCL(fltd, e000190, 2, (RF, RR), rn_rd),
 cCL(fltdp, e0001b0, 2, (RF, RR), rn_rd),
 cCL(fltdm, e0001d0, 2, (RF, RR), rn_rd),
 cCL(fltdz, e0001f0, 2, (RF, RR), rn_rd),
 cCL(flte, e080110, 2, (RF, RR), rn_rd),
 cCL(fltep, e080130, 2, (RF, RR), rn_rd),
 cCL(fltem, e080150, 2, (RF, RR), rn_rd),
 cCL(fltez, e080170, 2, (RF, RR), rn_rd),
 /* The implementation of the FIX instruction is broken on some
    assemblers, in that it accepts a precision specifier as well as a
    rounding specifier, despite the fact that this is meaningless.
    To be more compatible, we accept it as well, though of course it
    does not set any bits.  */
 cCE(fix, e100110, 2, (RR, RF), rd_rm),
 cCL(fixp, e100130, 2, (RR, RF), rd_rm),
 cCL(fixm, e100150, 2, (RR, RF), rd_rm),
 cCL(fixz, e100170, 2, (RR, RF), rd_rm),
 cCL(fixsp, e100130, 2, (RR, RF), rd_rm),
 cCL(fixsm, e100150, 2, (RR, RF), rd_rm),
 cCL(fixsz, e100170, 2, (RR, RF), rd_rm),
 cCL(fixdp, e100130, 2, (RR, RF), rd_rm),
 cCL(fixdm, e100150, 2, (RR, RF), rd_rm),
 cCL(fixdz, e100170, 2, (RR, RF), rd_rm),
 cCL(fixep, e100130, 2, (RR, RF), rd_rm),
 cCL(fixem, e100150, 2, (RR, RF), rd_rm),
 cCL(fixez, e100170, 2, (RR, RF), rd_rm),
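
 /* Illustrative only, not part of the upstream table: "fltd f0, r1"
    converts an integer in r1 to a double in f0, and "fixz r0, f1"
    converts back, rounding towards zero.  */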
 /* Instructions that were new with the real FPA, call them V2.  */
#undef ARM_VARIANT
#define ARM_VARIANT &fpu_fpa_ext_v2
 cCE(lfm, c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
 cCL(lfmfd, c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
 cCL(lfmea, d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
 cCE(sfm, c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
 cCL(sfmfd, d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
 cCL(sfmea, c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
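
 /* Illustrative only, not part of the upstream table: "sfmfd f4, 4, [sp]!"
    and "lfmfd f4, 4, [sp]!" are the usual APCS save/restore of f4-f7.  */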
#undef ARM_VARIANT
#define ARM_VARIANT &fpu_vfp_ext_v1xd /* VFP V1xD (single precision).  */
 /* Moves and type conversions.  */
 cCE(fcpys, eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(fmrs, e100a10, 2, (RR, RVS), vfp_reg_from_sp),
 cCE(fmsr, e000a10, 2, (RVS, RR), vfp_sp_from_reg),
 cCE(fmstat, ef1fa10, 0, (), noargs),
 cCE(fsitos, eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(fuitos, eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(ftosis, ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(ftosizs, ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(ftouis, ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(ftouizs, ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(fmrx, ef00a10, 2, (RR, RVC), rd_rn),
 cCE(fmxr, ee00a10, 2, (RVC, RR), rn_rd),
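
 /* Illustrative only, not part of the upstream table: "fmsr s0, r0",
    "fsitos s0, s0" and "fmrs r1, s0" move and convert between core and
    single-precision registers; "fmrx r0, fpscr" reads a VFP system
    register.  */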
 /* Memory operations.  */
 cCE(flds, d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
 cCE(fsts, d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
 cCE(fldmias, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
 cCE(fldmfds, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
 cCE(fldmdbs, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
 cCE(fldmeas, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
 cCE(fldmiax, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
 cCE(fldmfdx, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
 cCE(fldmdbx, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
 cCE(fldmeax, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
 cCE(fstmias, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
 cCE(fstmeas, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
 cCE(fstmdbs, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
 cCE(fstmfds, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
 cCE(fstmiax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
 cCE(fstmeax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
 cCE(fstmdbx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
 cCE(fstmfdx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
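
 /* Illustrative only, not part of the upstream table: "flds s1, [r0, #4]",
    "fldmias r0!, {s8-s11}" and "fstmfds sp!, {s0-s3}" are typical forms
    accepted by the load/store entries above.  */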
 /* Monadic operations.  */
 cCE(fabss, eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(fnegs, eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(fsqrts, eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),

 /* Dyadic operations.  */
 cCE(fadds, e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
 cCE(fsubs, e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
 cCE(fmuls, e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
 cCE(fdivs, e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
 cCE(fmacs, e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
 cCE(fmscs, e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
 cCE(fnmuls, e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
 cCE(fnmacs, e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
 cCE(fnmscs, e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),

 cCE(fcmps, eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(fcmpzs, eb50a40, 1, (RVS), vfp_sp_compare_z),
 cCE(fcmpes, eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(fcmpezs, eb50ac0, 1, (RVS), vfp_sp_compare_z),
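
 /* Illustrative only, not part of the upstream table: "fadds s0, s1, s2",
    "fmacs s0, s1, s2" and "fcmpzs s4" (followed by "fmstat" to copy the
    flags) are typical single-precision forms.  */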
#undef ARM_VARIANT
#define ARM_VARIANT &fpu_vfp_ext_v1 /* VFP V1 (Double precision).  */
 /* Moves and type conversions.  */
 cCE(fcpyd, eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
 cCE(fcvtds, eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
 cCE(fcvtsd, eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
 cCE(fmdhr, e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
 cCE(fmdlr, e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
 cCE(fmrdh, e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
 cCE(fmrdl, e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
 cCE(fsitod, eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
 cCE(fuitod, eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
 cCE(ftosid, ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
 cCE(ftosizd, ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
 cCE(ftouid, ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
 cCE(ftouizd, ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),

 /* Memory operations.  */
 cCE(fldd, d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
 cCE(fstd, d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
 cCE(fldmiad, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
 cCE(fldmfdd, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
 cCE(fldmdbd, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
 cCE(fldmead, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
 cCE(fstmiad, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
 cCE(fstmead, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
 cCE(fstmdbd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
 cCE(fstmfdd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),

 /* Monadic operations.  */
 cCE(fabsd, eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
 cCE(fnegd, eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
 cCE(fsqrtd, eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),

 /* Dyadic operations.  */
 cCE(faddd, e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
 cCE(fsubd, e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
 cCE(fmuld, e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
 cCE(fdivd, e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
 cCE(fmacd, e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
 cCE(fmscd, e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
 cCE(fnmuld, e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
 cCE(fnmacd, e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
 cCE(fnmscd, e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),

 cCE(fcmpd, eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
 cCE(fcmpzd, eb50b40, 1, (RVD), vfp_dp_rd),
 cCE(fcmped, eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
 cCE(fcmpezd, eb50bc0, 1, (RVD), vfp_dp_rd),
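
 /* Illustrative only, not part of the upstream table: "faddd d0, d1, d2",
    "fcvtds d0, s1" and "fcmpzd d5" are typical double-precision forms.  */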
#undef ARM_VARIANT
#define ARM_VARIANT &fpu_vfp_ext_v2
 cCE(fmsrr, c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
 cCE(fmrrs, c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
 cCE(fmdrr, c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
 cCE(fmrrd, c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
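
 /* Illustrative only, not part of the upstream table: "fmdrr d0, r0, r1"
    and "fmrrd r2, r3, d1" move a 64-bit value between a core register
    pair and a double register.  */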
 /* Instructions which may belong to either the Neon or VFP instruction sets.
    Individual encoder functions perform additional architecture checks.  */
#undef ARM_VARIANT
#define ARM_VARIANT &fpu_vfp_ext_v1xd
#undef THUMB_VARIANT
#define THUMB_VARIANT &fpu_vfp_ext_v1xd
 /* These mnemonics are unique to VFP.  */
 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
 nCE(vnmul, vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
 nCE(vnmla, vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
 nCE(vnmls, vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
 nCE(vcmp, vcmp, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
 nCE(vcmpe, vcmpe, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
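
 /* Illustrative only, not part of the upstream table: "vsqrt.f32 s0, s1",
    "vcmp.f64 d0, #0.0" and "vpush {d8-d15}" use the unified (v*) spellings
    of these VFP-only operations.  */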
 /* Mnemonics shared by Neon and VFP.  */
 nCEF(vmul, vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
 nCEF(vmla, vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
 nCEF(vmls, vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),

 nCEF(vadd, vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
 nCEF(vsub, vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),

 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),

 NCE(vldm, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm),
 NCE(vldmia, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm),
 NCE(vldmdb, d100b00, 2, (RRw, VRSDLST), neon_ldm_stm),
 NCE(vstm, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm),
 NCE(vstmia, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm),
 NCE(vstmdb, d000b00, 2, (RRw, VRSDLST), neon_ldm_stm),
 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),

 nCEF(vcvt, vcvt, 3, (RNSDQ, RNSDQ, oI32b), neon_cvt),

 /* NOTE: All VMOV encoding is special-cased!  */
 NCE(vmov, 0, 1, (VMOV), neon_mov),
 NCE(vmovq, 0, 1, (VMOV), neon_mov),
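
 /* Illustrative only, not part of the upstream table: "vadd.f32 d0, d1, d2",
    "vldr d0, [r0, #8]" and "vmov r0, s0" are accepted here and routed to
    either a VFP or a Neon encoding by the per-insn encoder functions.  */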
#undef THUMB_VARIANT
#define THUMB_VARIANT &fpu_neon_ext_v1
#undef ARM_VARIANT
#define ARM_VARIANT &fpu_neon_ext_v1
 /* Data processing with three registers of the same length.  */
 /* integer ops, valid types S8 S16 S32 U8 U16 U32.  */
 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64.  */
 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
 /* If not immediate, fall back to neon_dyadic_i64_su.
    shl_imm should accept I8 I16 I32 I64,
    qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64.  */
 nUF(vshl, vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
 nUF(vshlq, vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
 nUF(vqshl, vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
 nUF(vqshlq, vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
 /* Logic ops, types optional & ignored.  */
 nUF(vand, vand, 2, (RNDQ, NILO), neon_logic),
 nUF(vandq, vand, 2, (RNQ, NILO), neon_logic),
 nUF(vbic, vbic, 2, (RNDQ, NILO), neon_logic),
 nUF(vbicq, vbic, 2, (RNQ, NILO), neon_logic),
 nUF(vorr, vorr, 2, (RNDQ, NILO), neon_logic),
 nUF(vorrq, vorr, 2, (RNQ, NILO), neon_logic),
 nUF(vorn, vorn, 2, (RNDQ, NILO), neon_logic),
 nUF(vornq, vorn, 2, (RNQ, NILO), neon_logic),
 nUF(veor, veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
 nUF(veorq, veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
 /* Bitfield ops, untyped.  */
 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32.  */
 nUF(vabd, vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
 nUF(vabdq, vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
 nUF(vmax, vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
 nUF(vmaxq, vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
 nUF(vmin, vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
 nUF(vminq, vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
    back to neon_dyadic_if_su.  */
 nUF(vcge, vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
 nUF(vcgeq, vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
 nUF(vcgt, vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
 nUF(vcgtq, vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
 nUF(vclt, vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
 nUF(vcltq, vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
 nUF(vcle, vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
 nUF(vcleq, vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
 /* Comparison. Type I8 I16 I32 F32.  */
 nUF(vceq, vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
 nUF(vceqq, vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
 /* As above, D registers only.  */
 nUF(vpmax, vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
 nUF(vpmin, vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
 /* Int and float variants, signedness unimportant.  */
 nUF(vmlaq, vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
 nUF(vmlsq, vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
 nUF(vpadd, vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
 /* Add/sub take types I8 I16 I32 I64 F32.  */
 nUF(vaddq, vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
 nUF(vsubq, vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
 /* vtst takes sizes 8, 16, 32.  */
 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
 /* VMUL takes I8 I16 I32 F32 P8.  */
 nUF(vmulq, vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
 /* VQD{R}MULH takes S16 S32.  */
 nUF(vqdmulh, vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
 nUF(vqdmulhq, vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
 nUF(vqrdmulh, vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
 nUF(vqrdmulhq, vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
 /* Two address, int/float. Types S8 S16 S32 F32.  */
 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),

 /* Data processing with two registers and a shift amount.  */
 /* Right shifts, and variants with rounding.
    Types accepted S8 S16 S32 S64 U8 U16 U32 U64.  */
 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
 /* Shift and insert. Sizes accepted 8 16 32 64.  */
 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64.  */
 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
 /* Right shift immediate, saturating & narrowing, with rounding variants.
    Types accepted S16 S32 S64 U16 U32 U64.  */
 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
 /* As above, unsigned. Types accepted S16 S32 S64.  */
 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
 /* Right shift narrowing. Types accepted I16 I32 I64.  */
 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant.  */
 nUF(vshll, vshll, 3, (RNQ, RND, I32), neon_shll),
 /* CVT with optional immediate for fixed-point variant.  */
 nUF(vcvtq, vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),

 nUF(vmvn, vmvn, 2, (RNDQ, RNDQ_IMVNb), neon_mvn),
 nUF(vmvnq, vmvn, 2, (RNQ, RNDQ_IMVNb), neon_mvn),
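
 /* Illustrative only, not part of the upstream table: "vshr.s16 q0, q1, #3",
    "vsra.u32 d0, d1, #8" and "vqshrn.s32 d0, q1, #16" are typical
    shift-by-immediate forms handled by the entries above.  */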
 /* Data processing, three registers of different lengths.  */
 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32.  */
 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
 /* If not scalar, fall back to neon_dyadic_long.
    Vector types as above, scalar types S16 S32 U16 U32.  */
 nUF(vmlal, vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
 nUF(vmlsl, vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32.  */
 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
 /* Dyadic, narrowing insns. Types I16 I32 I64.  */
 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
 /* Saturating doubling multiplies. Types S16 S32.  */
 nUF(vqdmlal, vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
 nUF(vqdmlsl, vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
 nUF(vqdmull, vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
    S16 S32 U16 U32.  */
 nUF(vmull, vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),

 /* Extract. Size 8.  */
 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),

 /* Two registers, miscellaneous.  */
 /* Reverse. Sizes 8 16 32 (must be < size in opcode).  */
 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
 /* Vector replicate. Sizes 8 16 32.  */
 nCE(vdup, vdup, 2, (RNDQ, RR_RNSC), neon_dup),
 nCE(vdupq, vdup, 2, (RNQ, RR_RNSC), neon_dup),
 /* VMOVL. Types S8 S16 S32 U8 U16 U32.  */
 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
 /* VMOVN. Types I16 I32 I64.  */
 nUF(vmovn, vmovn, 2, (RND, RNQ), neon_movn),
 /* VQMOVN. Types S16 S32 S64 U16 U32 U64.  */
 nUF(vqmovn, vqmovn, 2, (RND, RNQ), neon_qmovn),
 /* VQMOVUN. Types S16 S32 S64.  */
 nUF(vqmovun, vqmovun, 2, (RND, RNQ), neon_qmovun),
 /* VZIP / VUZP. Sizes 8 16 32.  */
 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
 /* VQABS / VQNEG. Types S8 S16 S32.  */
 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32.  */
 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
 /* Reciprocal estimates. Types U32 F32.  */
 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
 /* VCLS. Types S8 S16 S32.  */
 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
 /* VCLZ. Types I8 I16 I32.  */
 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
 /* VCNT. Size 8.  */
 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
 /* Two address, untyped.  */
 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
 /* VTRN. Sizes 8 16 32.  */
 nUF(vtrn, vtrn, 2, (RNDQ, RNDQ), neon_trn),
 nUF(vtrnq, vtrn, 2, (RNQ, RNQ), neon_trn),

 /* Table lookup. Size 8.  */
 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
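
 /* Illustrative only, not part of the upstream table: "vrev32.16 d0, d1",
    "vdup.8 q0, d1[3]" and "vtbl.8 d0, {d2-d3}, d4" are typical two-register
    miscellaneous and table-lookup forms.  */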
#undef THUMB_VARIANT
#define THUMB_VARIANT &fpu_vfp_v3_or_neon_ext
#undef ARM_VARIANT
#define ARM_VARIANT &fpu_vfp_v3_or_neon_ext
 /* Neon element/structure load/store.  */
 nUF(vld1, vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vst1, vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vld2, vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vst2, vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vld3, vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vst3, vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vld4, vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vst4, vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
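
 /* Illustrative only, not part of the upstream table: "vld1.32 {d0-d1}, [r0]!"
    and "vst2.8 {d0, d1}, [r1]" are typical element/structure accesses.  */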
#undef THUMB_VARIANT
#define THUMB_VARIANT &fpu_vfp_ext_v3
#undef ARM_VARIANT
#define ARM_VARIANT &fpu_vfp_ext_v3
 cCE(fconsts, eb00a00, 2, (RVS, I255), vfp_sp_const),
 cCE(fconstd, eb00b00, 2, (RVD, I255), vfp_dp_const),
 cCE(fshtos, eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
 cCE(fshtod, eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
 cCE(fsltos, eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
 cCE(fsltod, eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
 cCE(fuhtos, ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
 cCE(fuhtod, ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
 cCE(fultos, ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
 cCE(fultod, ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
 cCE(ftoshs, ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
 cCE(ftoshd, ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
 cCE(ftosls, ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
 cCE(ftosld, ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
 cCE(ftouhs, ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
 cCE(ftouhd, ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
 cCE(ftouls, ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
 cCE(ftould, ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
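
 /* Illustrative only, not part of the upstream table: these VFPv3 forms
    take small immediates, e.g. "fconsts s0, #0x70" (the 8-bit constant
    encoding) or "ftosls s1, #16" (number of fraction bits).  */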
#undef THUMB_VARIANT
#undef ARM_VARIANT
#define ARM_VARIANT &arm_cext_xscale /* Intel XScale extensions.  */
 cCE(mia, e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
 cCE(miaph, e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
 cCE(miabb, e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
 cCE(miabt, e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
 cCE(miatb, e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
 cCE(miatt, e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
 cCE(mar, c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
 cCE(mra, c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
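
 /* Illustrative only, not part of the upstream table: "mia acc0, r2, r3"
    accumulates into the XScale accumulator; "mar acc0, r2, r3" and
    "mra r2, r3, acc0" move 64 bits to and from it.  */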
#undef ARM_VARIANT
#define ARM_VARIANT &arm_cext_iwmmxt /* Intel Wireless MMX technology.  */
 cCE(tandcb, e13f130, 1, (RR), iwmmxt_tandorc),
 cCE(tandch, e53f130, 1, (RR), iwmmxt_tandorc),
 cCE(tandcw, e93f130, 1, (RR), iwmmxt_tandorc),
 cCE(tbcstb, e400010, 2, (RIWR, RR), rn_rd),
 cCE(tbcsth, e400050, 2, (RIWR, RR), rn_rd),
 cCE(tbcstw, e400090, 2, (RIWR, RR), rn_rd),
 cCE(textrcb, e130170, 2, (RR, I7), iwmmxt_textrc),
 cCE(textrch, e530170, 2, (RR, I7), iwmmxt_textrc),
 cCE(textrcw, e930170, 2, (RR, I7), iwmmxt_textrc),
 cCE(textrmub, e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
 cCE(textrmuh, e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
 cCE(textrmuw, e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
 cCE(textrmsb, e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
 cCE(textrmsh, e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
 cCE(textrmsw, e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
 cCE(tinsrb, e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
 cCE(tinsrh, e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
 cCE(tinsrw, e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
 cCE(tmcr, e000110, 2, (RIWC_RIWG, RR), rn_rd),
 cCE(tmcrr, c400000, 3, (RIWR, RR, RR), rm_rd_rn),
 cCE(tmia, e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
 cCE(tmiaph, e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
 cCE(tmiabb, e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
 cCE(tmiabt, e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
 cCE(tmiatb, e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
 cCE(tmiatt, e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
 cCE(tmovmskb, e100030, 2, (RR, RIWR), rd_rn),
 cCE(tmovmskh, e500030, 2, (RR, RIWR), rd_rn),
 cCE(tmovmskw, e900030, 2, (RR, RIWR), rd_rn),
 cCE(tmrc, e100110, 2, (RR, RIWC_RIWG), rd_rn),
 cCE(tmrrc, c500000, 3, (RR, RR, RIWR), rd_rn_rm),
 cCE(torcb, e13f150, 1, (RR), iwmmxt_tandorc),
 cCE(torch, e53f150, 1, (RR), iwmmxt_tandorc),
 cCE(torcw, e93f150, 1, (RR), iwmmxt_tandorc),
 cCE(waccb, e0001c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wacch, e4001c0, 2, (RIWR, RIWR), rd_rn),
 cCE(waccw, e8001c0, 2, (RIWR, RIWR), rd_rn),
 cCE(waddbss, e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddb, e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddbus, e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddhss, e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddh, e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddhus, e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddwss, eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddw, e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddwus, e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waligni, e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
 cCE(walignr0, e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(walignr1, e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(walignr2, ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(walignr3, eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wand, e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wandn, e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wavg2b, e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wavg2br, e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wavg2h, ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wavg2hr, ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpeqb, e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpeqh, e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpeqw, e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpgtub, e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpgtuh, e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpgtuw, e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpgtsb, e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpgtsh, e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpgtsw, eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wldrb, c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
 cCE(wldrh, c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
 cCE(wldrw, c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
 cCE(wldrd, c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
 cCE(wmacs, e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmacsz, e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmacu, e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmacuz, e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmadds, ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaddu, e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaxsb, e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaxsh, e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaxsw, ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaxub, e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaxuh, e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaxuw, e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wminsb, e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wminsh, e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wminsw, eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wminub, e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wminuh, e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wminuw, e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmov, e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
 cCE(wmulsm, e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulsl, e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulum, e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulul, e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wor, e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wpackhss, e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wpackhus, e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wpackwss, eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wpackwus, e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wpackdss, ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wpackdus, ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wrorh, e700040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wrorhg, e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wrorw, eb00040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wrorwg, eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wrord, ef00040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wrordg, ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wsadb, e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsadbz, e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsadh, e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsadhz, e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wshufh, e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
 cCE(wsllh, e500040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wsllhg, e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wsllw, e900040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wsllwg, e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wslld, ed00040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wslldg, ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wsrah, e400040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wsrahg, e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wsraw, e800040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wsrawg, e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wsrad, ec00040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wsradg, ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wsrlh, e600040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wsrlhg, e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wsrlw, ea00040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wsrlwg, ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wsrld, ee00040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wsrldg, ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wstrb, c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
 cCE(wstrh, c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
 cCE(wstrw, c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
 cCE(wstrd, c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
 cCE(wsubbss, e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsubb, e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsubbus, e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsubhss, e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsubh, e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsubhus, e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsubwss, eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsubw, e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsubwus, e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wunpckehub, e0000c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckehuh, e4000c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckehuw, e8000c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckehsb, e2000c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckehsh, e6000c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckehsw, ea000c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckihb, e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wunpckihh, e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wunpckihw, e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wunpckelub, e0000e0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckeluh, e4000e0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckeluw, e8000e0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckelsb, e2000e0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckelsh, e6000e0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckelsw, ea000e0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckilb, e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wunpckilh, e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wunpckilw, e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wxor, e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wzero, e300000, 1, (RIWR), iwmmxt_wzero),
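
 /* Illustrative only, not part of the upstream table: "wzero wr0",
    "waddb wr0, wr1, wr2", "wldrd wr1, [r0]" and "tmcrr wr2, r2, r3" are
    typical iWMMXt forms.  */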
#undef ARM_VARIANT
#define ARM_VARIANT &arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2.  */
 cCE(torvscb, e13f190, 1, (RR), iwmmxt_tandorc),
 cCE(torvsch, e53f190, 1, (RR), iwmmxt_tandorc),
 cCE(torvscw, e93f190, 1, (RR), iwmmxt_tandorc),
 cCE(wabsb, e2001c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wabsh, e6001c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wabsw, ea001c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wabsdiffb, e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wabsdiffh, e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wabsdiffw, e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddbhusl, e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddbhusm, e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddhc, e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddwc, ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddsubhx, ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wavg4, e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wavg4r, e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaddsn, ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaddsx, eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaddun, ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaddux, e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmerge, e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
 cCE(wmiabb, e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiabt, e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiatb, e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiatt, e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiabbn, e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiabtn, e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiatbn, e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiattn, e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiawbb, e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiawbt, e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiawtb, ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiawtt, eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiawbbn, ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiawbtn, ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiawtbn, ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiawttn, ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulsmr, ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulumr, ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulwumr, ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulwsmr, ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulwum, ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulwsm, ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulwl, eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmiabb, e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmiabt, e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmiatb, ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmiatt, eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmiabbn, ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmiabtn, ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmiatbn, ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmiattn, ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmulm, e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmulmr, e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmulwm, ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmulwmr, ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsubaddhx, ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
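
 /* Illustrative only, not part of the upstream table: "wabsb wr0, wr1" and
    "wmerge wr0, wr1, wr2, #4" are typical iWMMXt2 forms.  */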
#define ARM_VARIANT &arm_cext_maverick /* Cirrus Maverick instructions.  */
 cCE(cfldrs,     c100400, 2, (RMF, ADDRGLDC),           rd_cpaddr),
 cCE(cfldrd,     c500400, 2, (RMD, ADDRGLDC),           rd_cpaddr),
 cCE(cfldr32,    c100500, 2, (RMFX, ADDRGLDC),          rd_cpaddr),
 cCE(cfldr64,    c500500, 2, (RMDX, ADDRGLDC),          rd_cpaddr),
 cCE(cfstrs,     c000400, 2, (RMF, ADDRGLDC),           rd_cpaddr),
 cCE(cfstrd,     c400400, 2, (RMD, ADDRGLDC),           rd_cpaddr),
 cCE(cfstr32,    c000500, 2, (RMFX, ADDRGLDC),          rd_cpaddr),
 cCE(cfstr64,    c400500, 2, (RMDX, ADDRGLDC),          rd_cpaddr),
 cCE(cfmvsr,     e000450, 2, (RMF, RR),                 rn_rd),
 cCE(cfmvrs,     e100450, 2, (RR, RMF),                 rd_rn),
 cCE(cfmvdlr,    e000410, 2, (RMD, RR),                 rn_rd),
 cCE(cfmvrdl,    e100410, 2, (RR, RMD),                 rd_rn),
 cCE(cfmvdhr,    e000430, 2, (RMD, RR),                 rn_rd),
 cCE(cfmvrdh,    e100430, 2, (RR, RMD),                 rd_rn),
 cCE(cfmv64lr,   e000510, 2, (RMDX, RR),                rn_rd),
 cCE(cfmvr64l,   e100510, 2, (RR, RMDX),                rd_rn),
 cCE(cfmv64hr,   e000530, 2, (RMDX, RR),                rn_rd),
 cCE(cfmvr64h,   e100530, 2, (RR, RMDX),                rd_rn),
 cCE(cfmval32,   e200440, 2, (RMAX, RMFX),              rd_rn),
 cCE(cfmv32al,   e100440, 2, (RMFX, RMAX),              rd_rn),
 cCE(cfmvam32,   e200460, 2, (RMAX, RMFX),              rd_rn),
 cCE(cfmv32am,   e100460, 2, (RMFX, RMAX),              rd_rn),
 cCE(cfmvah32,   e200480, 2, (RMAX, RMFX),              rd_rn),
 cCE(cfmv32ah,   e100480, 2, (RMFX, RMAX),              rd_rn),
 cCE(cfmva32,    e2004a0, 2, (RMAX, RMFX),              rd_rn),
 cCE(cfmv32a,    e1004a0, 2, (RMFX, RMAX),              rd_rn),
 cCE(cfmva64,    e2004c0, 2, (RMAX, RMDX),              rd_rn),
 cCE(cfmv64a,    e1004c0, 2, (RMDX, RMAX),              rd_rn),
 cCE(cfmvsc32,   e2004e0, 2, (RMDS, RMDX),              mav_dspsc),
 cCE(cfmv32sc,   e1004e0, 2, (RMDX, RMDS),              rd),
 cCE(cfcpys,     e000400, 2, (RMF, RMF),                rd_rn),
 cCE(cfcpyd,     e000420, 2, (RMD, RMD),                rd_rn),
 cCE(cfcvtsd,    e000460, 2, (RMD, RMF),                rd_rn),
 cCE(cfcvtds,    e000440, 2, (RMF, RMD),                rd_rn),
 cCE(cfcvt32s,   e000480, 2, (RMF, RMFX),               rd_rn),
 cCE(cfcvt32d,   e0004a0, 2, (RMD, RMFX),               rd_rn),
 cCE(cfcvt64s,   e0004c0, 2, (RMF, RMDX),               rd_rn),
 cCE(cfcvt64d,   e0004e0, 2, (RMD, RMDX),               rd_rn),
 cCE(cfcvts32,   e100580, 2, (RMFX, RMF),               rd_rn),
 cCE(cfcvtd32,   e1005a0, 2, (RMFX, RMD),               rd_rn),
 cCE(cftruncs32, e1005c0, 2, (RMFX, RMF),               rd_rn),
 cCE(cftruncd32, e1005e0, 2, (RMFX, RMD),               rd_rn),
 cCE(cfrshl32,   e000550, 3, (RMFX, RMFX, RR),          mav_triple),
 cCE(cfrshl64,   e000570, 3, (RMDX, RMDX, RR),          mav_triple),
 cCE(cfsh32,     e000500, 3, (RMFX, RMFX, I63s),        mav_shift),
 cCE(cfsh64,     e200500, 3, (RMDX, RMDX, I63s),        mav_shift),
 cCE(cfcmps,     e100490, 3, (RR, RMF, RMF),            rd_rn_rm),
 cCE(cfcmpd,     e1004b0, 3, (RR, RMD, RMD),            rd_rn_rm),
 cCE(cfcmp32,    e100590, 3, (RR, RMFX, RMFX),          rd_rn_rm),
 cCE(cfcmp64,    e1005b0, 3, (RR, RMDX, RMDX),          rd_rn_rm),
 cCE(cfabss,     e300400, 2, (RMF, RMF),                rd_rn),
 cCE(cfabsd,     e300420, 2, (RMD, RMD),                rd_rn),
 cCE(cfnegs,     e300440, 2, (RMF, RMF),                rd_rn),
 cCE(cfnegd,     e300460, 2, (RMD, RMD),                rd_rn),
 cCE(cfadds,     e300480, 3, (RMF, RMF, RMF),           rd_rn_rm),
 cCE(cfaddd,     e3004a0, 3, (RMD, RMD, RMD),           rd_rn_rm),
 cCE(cfsubs,     e3004c0, 3, (RMF, RMF, RMF),           rd_rn_rm),
 cCE(cfsubd,     e3004e0, 3, (RMD, RMD, RMD),           rd_rn_rm),
 cCE(cfmuls,     e100400, 3, (RMF, RMF, RMF),           rd_rn_rm),
 cCE(cfmuld,     e100420, 3, (RMD, RMD, RMD),           rd_rn_rm),
 cCE(cfabs32,    e300500, 2, (RMFX, RMFX),              rd_rn),
 cCE(cfabs64,    e300520, 2, (RMDX, RMDX),              rd_rn),
 cCE(cfneg32,    e300540, 2, (RMFX, RMFX),              rd_rn),
 cCE(cfneg64,    e300560, 2, (RMDX, RMDX),              rd_rn),
 cCE(cfadd32,    e300580, 3, (RMFX, RMFX, RMFX),        rd_rn_rm),
 cCE(cfadd64,    e3005a0, 3, (RMDX, RMDX, RMDX),        rd_rn_rm),
 cCE(cfsub32,    e3005c0, 3, (RMFX, RMFX, RMFX),        rd_rn_rm),
 cCE(cfsub64,    e3005e0, 3, (RMDX, RMDX, RMDX),        rd_rn_rm),
 cCE(cfmul32,    e100500, 3, (RMFX, RMFX, RMFX),        rd_rn_rm),
 cCE(cfmul64,    e100520, 3, (RMDX, RMDX, RMDX),        rd_rn_rm),
 cCE(cfmac32,    e100540, 3, (RMFX, RMFX, RMFX),        rd_rn_rm),
 cCE(cfmsc32,    e100560, 3, (RMFX, RMFX, RMFX),        rd_rn_rm),
 cCE(cfmadd32,   e000600, 4, (RMAX, RMFX, RMFX, RMFX),  mav_quad),
 cCE(cfmsub32,   e100600, 4, (RMAX, RMFX, RMFX, RMFX),  mav_quad),
 cCE(cfmadda32,  e200600, 4, (RMAX, RMAX, RMFX, RMFX),  mav_quad),
 cCE(cfmsuba32,  e300600, 4, (RMAX, RMAX, RMFX, RMFX),  mav_quad),
#undef THUMB_VARIANT

/* MD interface: bits in the object file.  */

/* Turn an integer of n bytes (in val) into a stream of bytes appropriate
   for use in the a.out file, and stores them in the array pointed to by buf.
   This knows about the endian-ness of the target machine and does
   THE RIGHT THING, whatever it is.  Possible values for n are 1 (byte),
   2 (short) and 4 (long).  Floating numbers are put out as a series of
   LITTLENUMS (shorts, here at least).  */

void
md_number_to_chars (char * buf, valueT val, int n)
{
  if (target_big_endian)
    number_to_chars_bigendian (buf, val, n);
  else
    number_to_chars_littleendian (buf, val, n);
}
static valueT
md_chars_to_number (char * buf, int n)
{
  valueT result = 0;
  unsigned char * where = (unsigned char *) buf;

  if (target_big_endian)
    {
      while (n--)
        {
          result <<= 8;
          result |= (*where++ & 255);
        }
    }
  else
    {
      while (n--)
        {
          result <<= 8;
          result |= (where[n] & 255);
        }
    }

  return result;
}
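/* Illustrative worked example (comment added during editing, not in the
   original source): with target_big_endian clear, the little-endian byte
   sequence 0x78 0x56 0x34 0x12 read with n == 4 yields 0x12345678, because
   the loop walks the buffer from index n-1 down to 0, shifting the
   accumulated result left by 8 bits each step.  */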
/* MD interface: Sections.  */

/* Estimate the size of a frag before relaxing.  Assume everything fits in
   2 bytes.  */

int
md_estimate_size_before_relax (fragS * fragp,
                               segT    segtype ATTRIBUTE_UNUSED)
{
  fragp->fr_var = 2;
  return 2;
}
/* Convert a machine dependent frag.  */

void
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
{
  unsigned long insn;
  unsigned long old_op;
  char *buf;
  expressionS exp;
  fixS *fixp;
  int reloc_type;
  int pc_rel;
  int opcode;

  buf = fragp->fr_literal + fragp->fr_fix;

  old_op = bfd_get_16 (abfd, buf);
  if (fragp->fr_symbol)
    {
      exp.X_op = O_symbol;
      exp.X_add_symbol = fragp->fr_symbol;
    }
  else
    {
      exp.X_op = O_constant;
    }
  exp.X_add_number = fragp->fr_offset;
  opcode = fragp->fr_subtype;
  switch (opcode)
    {
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_pc2:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
      if (fragp->fr_var == 4)
        {
          insn = THUMB_OP32 (opcode);
          if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
            {
              insn |= (old_op & 0x700) << 4;
            }
          else
            {
              insn |= (old_op & 7) << 12;
              insn |= (old_op & 0x38) << 13;
            }
          insn |= 0x00000c00;
          put_thumb32_insn (buf, insn);
          reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
        }
      else
        reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
      pc_rel = (opcode == T_MNEM_ldr_pc2);
      break;

    case T_MNEM_adr:
      if (fragp->fr_var == 4)
        {
          insn = THUMB_OP32 (opcode);
          insn |= (old_op & 0xf0) << 4;
          put_thumb32_insn (buf, insn);
          reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
        }
      else
        {
          reloc_type = BFD_RELOC_ARM_THUMB_ADD;
          exp.X_add_number -= 4;
        }
      pc_rel = 1;
      break;

    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      if (fragp->fr_var == 4)
        {
          int r0off = (opcode == T_MNEM_mov
                       || opcode == T_MNEM_movs) ? 0 : 8;
          insn = THUMB_OP32 (opcode);
          insn = (insn & 0xe1ffffff) | 0x10000000;
          insn |= (old_op & 0x700) << r0off;
          put_thumb32_insn (buf, insn);
          reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
        }
      else
        reloc_type = BFD_RELOC_ARM_THUMB_IMM;
      pc_rel = 0;
      break;

    case T_MNEM_b:
      if (fragp->fr_var == 4)
        {
          insn = THUMB_OP32 (opcode);
          put_thumb32_insn (buf, insn);
          reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
        }
      else
        reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      pc_rel = 1;
      break;

    case T_MNEM_bcond:
      if (fragp->fr_var == 4)
        {
          insn = THUMB_OP32 (opcode);
          insn |= (old_op & 0xf00) << 14;
          put_thumb32_insn (buf, insn);
          reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
        }
      else
        reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
      pc_rel = 1;
      break;

    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      if (fragp->fr_var == 4)
        {
          /* ??? Choose between add and addw.  */
          insn = THUMB_OP32 (opcode);
          insn |= (old_op & 0xf0) << 4;
          put_thumb32_insn (buf, insn);
          if (opcode == T_MNEM_add_pc)
            reloc_type = BFD_RELOC_ARM_T32_IMM12;
          else
            reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
        }
      else
        reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;

    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      if (fragp->fr_var == 4)
        {
          insn = THUMB_OP32 (opcode);
          insn |= (old_op & 0xf0) << 4;
          insn |= (old_op & 0xf) << 16;
          put_thumb32_insn (buf, insn);
          if (insn & (1 << 20))
            reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
          else
            reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
        }
      else
        reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;

    default:
      abort ();
    }
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
                      reloc_type);
  fixp->fx_file = fragp->fr_file;
  fixp->fx_line = fragp->fr_line;
  fragp->fr_fix += fragp->fr_var;
}
/* Return the size of a relaxable immediate operand instruction.
   SHIFT and SIZE specify the form of the allowable immediate.  */

static int
relax_immediate (fragS *fragp, int size, int shift)
{
  offsetT offset;
  offsetT mask;
  offsetT low;

  /* ??? Should be able to do better than this.  */
  if (fragp->fr_symbol)
    return 4;

  low = (1 << shift) - 1;
  mask = (1 << (shift + size)) - (1 << shift);
  offset = fragp->fr_offset;
  /* Force misaligned offsets to 32-bit variant.  */
  if (offset & low)
    return 4;
  if (offset & ~mask)
    return 4;
  return 2;
}
/* Get the address of a symbol during relaxation.  */

static addressT
relaxed_symbol_addr (fragS *fragp, long stretch)
{
  fragS *sym_frag;
  addressT addr;
  symbolS *sym;

  sym = fragp->fr_symbol;
  sym_frag = symbol_get_frag (sym);
  know (S_GET_SEGMENT (sym) != absolute_section
        || sym_frag == &zero_address_frag);
  addr = S_GET_VALUE (sym) + fragp->fr_offset;

  /* If frag has yet to be reached on this pass, assume it will
     move by STRETCH just as we did.  If this is not so, it will
     be because some frag between grows, and that will force
     another pass.  */

  if (stretch != 0
      && sym_frag->relax_marker != fragp->relax_marker)
    {
      fragS *f;

      /* Adjust stretch for any alignment frag.  Note that if have
         been expanding the earlier code, the symbol may be
         defined in what appears to be an earlier frag.  FIXME:
         This doesn't handle the fr_subtype field, which specifies
         a maximum number of bytes to skip when doing an
         alignment.  */
      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
        {
          if (f->fr_type == rs_align || f->fr_type == rs_align_code)
            {
              if (stretch < 0)
                stretch = - ((- stretch)
                             & ~ ((1 << (int) f->fr_offset) - 1));
              else
                stretch &= ~ ((1 << (int) f->fr_offset) - 1);
              if (stretch == 0)
                break;
            }
        }
      if (f != NULL)
        addr += stretch;
    }

  return addr;
}
/* Return the size of a relaxable adr pseudo-instruction or PC-relative
   load.  */

static int
relax_adr (fragS *fragp, asection *sec, long stretch)
{
  addressT addr;
  offsetT val;

  /* Assume worst case for symbols not known to be in the same section.  */
  if (!S_IS_DEFINED (fragp->fr_symbol)
      || sec != S_GET_SEGMENT (fragp->fr_symbol))
    return 4;

  val = relaxed_symbol_addr (fragp, stretch);
  addr = fragp->fr_address + fragp->fr_fix;
  addr = (addr + 4) & ~3;
  /* Force misaligned targets to 32-bit variant.  */
  if (val & 3)
    return 4;
  val -= addr;
  if (val < 0 || val > 1020)
    return 4;
  return 2;
}
/* Return the size of a relaxable add/sub immediate instruction.  */

static int
relax_addsub (fragS *fragp, asection *sec)
{
  char *buf;
  int op;

  buf = fragp->fr_literal + fragp->fr_fix;
  op = bfd_get_16 (sec->owner, buf);
  if ((op & 0xf) == ((op >> 4) & 0xf))
    return relax_immediate (fragp, 8, 0);

  return relax_immediate (fragp, 3, 0);
}
/* Return the size of a relaxable branch instruction.  BITS is the
   size of the offset field in the narrow instruction.  */

static int
relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
{
  addressT addr;
  offsetT val;
  offsetT limit;

  /* Assume worst case for symbols not known to be in the same section.  */
  if (!S_IS_DEFINED (fragp->fr_symbol)
      || sec != S_GET_SEGMENT (fragp->fr_symbol))
    return 4;

  val = relaxed_symbol_addr (fragp, stretch);
  addr = fragp->fr_address + fragp->fr_fix + 4;
  val -= addr;

  /* Offset is a signed value *2.  */
  limit = 1 << bits;
  if (val >= limit || val < -limit)
    return 4;
  return 2;
}
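/* Illustrative example (comment added during editing, not in the original
   source): for the narrow unconditional branch, BITS is 11 and the offset
   is stored halfword-scaled, so the 2-byte encoding is kept only while the
   target stays within roughly +/-2 KB of the branch; the narrow conditional
   form (BITS == 8) covers roughly +/-256 bytes.  Anything further away
   forces the 4-byte Thumb-2 encoding.  */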
/* Relax a machine dependent frag.  This returns the amount by which
   the current size of the frag should change.  */

int
arm_relax_frag (asection *sec, fragS *fragp, long stretch)
{
  int oldsize;
  int newsize;

  oldsize = fragp->fr_var;
  switch (fragp->fr_subtype)
    {
    case T_MNEM_ldr_pc2:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_ldr:
    case T_MNEM_str:
      newsize = relax_immediate (fragp, 5, 2);
      break;
    case T_MNEM_ldrh:
    case T_MNEM_strh:
      newsize = relax_immediate (fragp, 5, 1);
      break;
    case T_MNEM_ldrb:
    case T_MNEM_strb:
      newsize = relax_immediate (fragp, 5, 0);
      break;
    case T_MNEM_adr:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      newsize = relax_immediate (fragp, 8, 0);
      break;
    case T_MNEM_b:
      newsize = relax_branch (fragp, sec, 11, stretch);
      break;
    case T_MNEM_bcond:
      newsize = relax_branch (fragp, sec, 8, stretch);
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      newsize = relax_immediate (fragp, 7, 2);
      break;
    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      newsize = relax_addsub (fragp, sec);
      break;
    default:
      abort ();
    }

  fragp->fr_var = newsize;
  /* Freeze wide instructions that are at or before the same location as
     in the previous pass.  This avoids infinite loops.
     Don't freeze them unconditionally because targets may be artificially
     misaligned by the expansion of preceding frags.  */
  if (stretch <= 0 && newsize > 2)
    {
      md_convert_frag (sec->owner, sec, fragp);
      frag_wane (fragp);
    }

  return newsize - oldsize;
}
/* Round up a section size to the appropriate boundary.  */

valueT
md_section_align (segT   segment ATTRIBUTE_UNUSED,
                  valueT size)
{
#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
  if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
    {
      /* For a.out, force the section size to be aligned.  If we don't do
         this, BFD will align it for us, but it will not write out the
         final bytes of the section.  This may be a bug in BFD, but it is
         easier to fix it here since that is how the other a.out targets
         work.  */
      int align;

      align = bfd_get_section_alignment (stdoutput, segment);
      size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
    }
#endif

  return size;
}
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment.  */

void
arm_handle_align (fragS * fragP)
{
  static char const arm_noop[4] = { 0x00, 0x00, 0xa0, 0xe1 };
  static char const thumb_noop[2] = { 0xc0, 0x46 };
  static char const arm_bigend_noop[4] = { 0xe1, 0xa0, 0x00, 0x00 };
  static char const thumb_bigend_noop[2] = { 0x46, 0xc0 };

  int bytes, fix, noop_size;
  char * p;
  const char * noop;

  if (fragP->fr_type != rs_align_code)
    return;

  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;
  fix = 0;

  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;

  if (fragP->tc_frag_data)
    {
      if (target_big_endian)
        noop = thumb_bigend_noop;
      else
        noop = thumb_noop;
      noop_size = sizeof (thumb_noop);
    }
  else
    {
      if (target_big_endian)
        noop = arm_bigend_noop;
      else
        noop = arm_noop;
      noop_size = sizeof (arm_noop);
    }

  if (bytes & (noop_size - 1))
    {
      fix = bytes & (noop_size - 1);
      memset (p, 0, fix);
      p += fix;
      bytes -= fix;
    }

  while (bytes >= noop_size)
    {
      memcpy (p, noop, noop_size);
      p += noop_size;
      bytes -= noop_size;
      fix += noop_size;
    }

  fragP->fr_fix += fix;
  fragP->fr_var = noop_size;
}
/* Called from md_do_align.  Used to create an alignment
   frag in a code section.  */

void
arm_frag_align_code (int n, int max)
{
  char * p;

  /* We assume that there will never be a requirement
     to support alignments greater than 32 bytes.  */
  if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
    as_fatal (_("alignments greater than 32 bytes not supported in .text sections."));

  p = frag_var (rs_align_code,
                MAX_MEM_FOR_RS_ALIGN_CODE,
                1,
                (relax_substateT) max,
                (symbolS *) NULL,
                (offsetT) n,
                (char *) NULL);
  *p = 0;
}

/* Perform target specific initialisation of a frag.  */

void
arm_init_frag (fragS * fragP)
{
  /* Record whether this frag is in an ARM or a THUMB area.  */
  fragP->tc_frag_data = thumb_mode;
}
/* When we change sections we need to issue a new mapping symbol.  */

void
arm_elf_change_section (void)
{
  flagword flags;
  segment_info_type *seginfo;

  /* Link an unlinked unwind index table section to the .text section.  */
  if (elf_section_type (now_seg) == SHT_ARM_EXIDX
      && elf_linked_to_section (now_seg) == NULL)
    elf_linked_to_section (now_seg) = text_section;

  if (!SEG_NORMAL (now_seg))
    return;

  flags = bfd_get_section_flags (stdoutput, now_seg);

  /* We can ignore sections that only contain debug info.  */
  if ((flags & SEC_ALLOC) == 0)
    return;

  seginfo = seg_info (now_seg);
  mapstate = seginfo->tc_segment_info_data.mapstate;
  marked_pr_dependency = seginfo->tc_segment_info_data.marked_pr_dependency;
}

int
arm_elf_section_type (const char * str, size_t len)
{
  if (len == 5 && strncmp (str, "exidx", 5) == 0)
    return SHT_ARM_EXIDX;

  return -1;
}
/* Code to deal with unwinding tables.  */

static void add_unwind_adjustsp (offsetT);

/* Generate any deferred unwind frame offset.  */

static void
flush_pending_unwind (void)
{
  offsetT offset;

  offset = unwind.pending_offset;
  unwind.pending_offset = 0;
  if (offset != 0)
    add_unwind_adjustsp (offset);
}
/* Add an opcode to this list for this function.  Two-byte opcodes should
   be passed as op[0] << 8 | op[1].  The list of opcodes is built in reverse
   order.  */

static void
add_unwind_opcode (valueT op, int length)
{
  /* Add any deferred stack adjustment.  */
  if (unwind.pending_offset)
    flush_pending_unwind ();

  unwind.sp_restored = 0;

  if (unwind.opcode_count + length > unwind.opcode_alloc)
    {
      unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
      if (unwind.opcodes)
        unwind.opcodes = xrealloc (unwind.opcodes,
                                   unwind.opcode_alloc);
      else
        unwind.opcodes = xmalloc (unwind.opcode_alloc);
    }
  while (length > 0)
    {
      length--;
      unwind.opcodes[unwind.opcode_count] = op & 0xff;
      op >>= 8;
      unwind.opcode_count++;
    }
}
/* Add unwind opcodes to adjust the stack pointer.  */

static void
add_unwind_adjustsp (offsetT offset)
{
  valueT op;

  if (offset > 0x200)
    {
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
      char bytes[5];
      int n;
      valueT o;

      /* Long form: 0xb2, uleb128.  */
      /* This might not fit in a word so add the individual bytes,
         remembering the list is built in reverse order.  */
      o = (valueT) ((offset - 0x204) >> 2);
      if (o == 0)
        add_unwind_opcode (0, 1);

      /* Calculate the uleb128 encoding of the offset.  */
      n = 0;
      while (o)
        {
          bytes[n] = o & 0x7f;
          o >>= 7;
          if (o)
            bytes[n] |= 0x80;
          n++;
        }

      /* Add the insn.  */
      for (; n; n--)
        add_unwind_opcode (bytes[n - 1], 1);
      add_unwind_opcode (0xb2, 1);
    }
  else if (offset > 0x100)
    {
      /* Two short opcodes.  */
      add_unwind_opcode (0x3f, 1);
      op = (offset - 0x104) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset > 0)
    {
      /* Short opcode.  */
      op = (offset - 4) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset < 0)
    {
      offset = -offset;
      while (offset > 0x100)
        {
          add_unwind_opcode (0x7f, 1);
          offset -= 0x100;
        }
      op = ((offset - 4) >> 2) | 0x40;
      add_unwind_opcode (op, 1);
    }
}
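/* Illustrative worked examples (comment added during editing, not in the
   original source): an adjustment of 0x208 bytes takes the long form, since
   (0x208 - 0x204) >> 2 == 1 whose uleb128 encoding is the single byte 0x01;
   the resulting stream 0xb2 0x01 means vsp += 0x204 + (1 << 2) == 0x208.
   An adjustment of 0x104 uses two short opcodes: 0x3f (vsp += 0x100) followed
   by 0x00 (vsp += 4).  A small adjustment such as 0x40 is one short opcode,
   (0x40 - 4) >> 2 == 0x0f.  */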
/* Finish the list of unwind opcodes for this function.  */

static void
finish_unwind_opcodes (void)
{
  valueT op;

  if (unwind.fp_used)
    {
      /* Adjust sp as necessary.  */
      unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
      flush_pending_unwind ();

      /* After restoring sp from the frame pointer.  */
      op = 0x90 | unwind.fp_reg;
      add_unwind_opcode (op, 1);
    }
  else
    flush_pending_unwind ();
}
/* Start an exception table entry.  If IDX is nonzero this is an index table
   entry.  */

static void
start_unwind_section (const segT text_seg, int idx)
{
  const char * text_name;
  const char * prefix;
  const char * prefix_once;
  const char * group_name;
  size_t prefix_len;
  size_t text_len;
  char * sec_name;
  size_t sec_name_len;
  int type;
  int flags;
  int linkonce;

  if (idx)
    {
      prefix = ELF_STRING_ARM_unwind;
      prefix_once = ELF_STRING_ARM_unwind_once;
      type = SHT_ARM_EXIDX;
    }
  else
    {
      prefix = ELF_STRING_ARM_unwind_info;
      prefix_once = ELF_STRING_ARM_unwind_info_once;
      type = SHT_PROGBITS;
    }

  text_name = segment_name (text_seg);
  if (streq (text_name, ".text"))
    text_name = "";

  if (strncmp (text_name, ".gnu.linkonce.t.",
               strlen (".gnu.linkonce.t.")) == 0)
    {
      prefix = prefix_once;
      text_name += strlen (".gnu.linkonce.t.");
    }

  prefix_len = strlen (prefix);
  text_len = strlen (text_name);
  sec_name_len = prefix_len + text_len;
  sec_name = xmalloc (sec_name_len + 1);
  memcpy (sec_name, prefix, prefix_len);
  memcpy (sec_name + prefix_len, text_name, text_len);
  sec_name[prefix_len + text_len] = '\0';

  flags = SHF_ALLOC;
  linkonce = 0;
  group_name = 0;

  /* Handle COMDAT group.  */
  if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
    {
      group_name = elf_group_name (text_seg);
      if (group_name == NULL)
        {
          as_bad (_("Group section `%s' has no group signature"),
                  segment_name (text_seg));
          ignore_rest_of_line ();
          return;
        }
      flags |= SHF_GROUP;
      linkonce = 1;
    }

  obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);

  /* Set the section link for index tables.  */
  if (idx)
    elf_linked_to_section (now_seg) = text_seg;
}
/* Start an unwind table entry.  HAVE_DATA is nonzero if we have additional
   personality routine data.  Returns zero, or the index table value for
   an inline entry.  */

static valueT
create_unwind_entry (int have_data)
{
  int size;
  addressT where;
  char *ptr;
  /* The current word of data.  */
  valueT data;
  /* The number of bytes left in this word.  */
  int n;

  finish_unwind_opcodes ();

  /* Remember the current text section.  */
  unwind.saved_seg = now_seg;
  unwind.saved_subseg = now_subseg;

  start_unwind_section (now_seg, 0);

  if (unwind.personality_routine == NULL)
    {
      if (unwind.personality_index == -2)
        {
          if (have_data)
            as_bad (_("handlerdata in cantunwind frame"));
          return 1; /* EXIDX_CANTUNWIND.  */
        }

      /* Use a default personality routine if none is specified.  */
      if (unwind.personality_index == -1)
        {
          if (unwind.opcode_count > 3)
            unwind.personality_index = 1;
          else
            unwind.personality_index = 0;
        }

      /* Space for the personality routine entry.  */
      if (unwind.personality_index == 0)
        {
          if (unwind.opcode_count > 3)
            as_bad (_("too many unwind opcodes for personality routine 0"));

          if (!have_data)
            {
              /* All the data is inline in the index table.  */
              data = 0x80;
              n = 3;
              while (unwind.opcode_count > 0)
                {
                  unwind.opcode_count--;
                  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
                  n--;
                }

              /* Pad with "finish" opcodes.  */
              while (n--)
                data = (data << 8) | 0xb0;

              return data;
            }
          size = 0;
        }
      else
        /* We get two opcodes "free" in the first word.  */
        size = unwind.opcode_count - 2;
    }
  else
    /* An extra byte is required for the opcode count.  */
    size = unwind.opcode_count + 1;

  size = (size + 3) >> 2;
  if (size > 0xff)
    as_bad (_("too many unwind opcodes"));

  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);
  unwind.table_entry = expr_build_dot ();

  /* Allocate the table entry.  */
  ptr = frag_more ((size << 2) + 4);
  where = frag_now_fix () - ((size << 2) + 4);

  switch (unwind.personality_index)
    {
    case -1:
      /* ??? Should this be a PLT generating relocation?  */
      /* Custom personality routine.  */
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
               BFD_RELOC_ARM_PREL31);

      where += 4;
      ptr += 4;

      /* Set the first byte to the number of additional words.  */
      data = size - 1;
      n = 3;
      break;

    /* ABI defined personality routines.  */
    case 0:
      /* Three opcodes bytes are packed into the first word.  */
      data = 0x80;
      n = 3;
      break;

    case 1:
    case 2:
      /* The size and first two opcode bytes go in the first word.  */
      data = ((0x80 + unwind.personality_index) << 8) | size;
      n = 2;
      break;

    default:
      /* Should never happen.  */
      abort ();
    }

  /* Pack the opcodes into words (MSB first), reversing the list at the same
     time.  */
  while (unwind.opcode_count > 0)
    {
      if (n == 0)
        {
          md_number_to_chars (ptr, data, 4);
          ptr += 4;
          n = 4;
          data = 0;
        }
      unwind.opcode_count--;
      n--;
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
    }

  /* Finish off the last word.  */
  if (n < 4)
    {
      /* Pad with "finish" opcodes.  */
      while (n--)
        data = (data << 8) | 0xb0;

      md_number_to_chars (ptr, data, 4);
    }

  if (!have_data)
    {
      /* Add an empty descriptor if there is no user-specified data.  */
      ptr = frag_more (4);
      md_number_to_chars (ptr, 0, 4);
    }

  return 0;
}
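/* Illustrative example (comment added during editing, not in the original
   source): with personality routine 0 and a single opcode 0x9b pending, the
   inline index-table word built above is 0x809bb0b0 -- 0x80 selects
   __aeabi_unwind_cpp_pr0, 0x9b follows, and the remaining bytes are padded
   with 0xb0 "finish" opcodes.  */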
/* Initialize the DWARF-2 unwind information for this procedure.  */

void
tc_arm_frame_initial_instructions (void)
{
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
#endif /* OBJ_ELF */

/* Convert REGNAME to a DWARF-2 register number.  */

int
tc_arm_regname_to_dw2regnum (char *regname)
{
  int reg = arm_reg_parse (&regname, REG_TYPE_RN);

  if (reg == FAIL)
    return -1;

  return reg;
}

void
tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
{
  expressionS expr;

  expr.X_op = O_secrel;
  expr.X_add_symbol = symbol;
  expr.X_add_number = 0;
  emit_expr (&expr, size);
}
17527 /* Return the address within the segment that a PC-relative fixup is
17528 relative to. For ARM, PC-relative fixups applied to instructions
17529 are generally relative to the location of the fixup plus 8 bytes.
17530 Thumb branches are offset by 4, and Thumb loads relative to PC
17531 require special handling. */
17534 md_pcrel_from_section (fixS
* fixP
, segT seg
)
17536 offsetT base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
17538 /* If this is pc-relative and we are going to emit a relocation
17539 then we just want to put out any pipeline compensation that the linker
17540 will need. Otherwise we want to use the calculated base.
17541 For WinCE we skip the bias for externals as well, since this
17542 is how the MS ARM-CE assembler behaves and we want to be compatible. */
17544 && ((fixP
->fx_addsy
&& S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
17545 || (arm_force_relocation (fixP
)
17547 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
17552 switch (fixP
->fx_r_type
)
17554 /* PC relative addressing on the Thumb is slightly odd as the
17555 bottom two bits of the PC are forced to zero for the
17556 calculation. This happens *after* application of the
17557 pipeline offset. However, Thumb adrl already adjusts for
17558 this, so we need not do it again. */
17559 case BFD_RELOC_ARM_THUMB_ADD
:
17562 case BFD_RELOC_ARM_THUMB_OFFSET
:
17563 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
17564 case BFD_RELOC_ARM_T32_ADD_PC12
:
17565 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
17566 return (base
+ 4) & ~3;
17568 /* Thumb branches are simply offset by +4. */
17569 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
17570 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
17571 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
17572 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
17573 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
17574 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
17575 case BFD_RELOC_THUMB_PCREL_BLX
:
17578 /* ARM mode branches are offset by +8. However, the Windows CE
17579 loader expects the relocation not to take this into account. */
17580 case BFD_RELOC_ARM_PCREL_BRANCH
:
17581 case BFD_RELOC_ARM_PCREL_CALL
:
17582 case BFD_RELOC_ARM_PCREL_JUMP
:
17583 case BFD_RELOC_ARM_PCREL_BLX
:
17584 case BFD_RELOC_ARM_PLT32
:
17586 /* When handling fixups immediately, because we have already
17587 discovered the value of a symbol, or the address of the frag involved
17588 we must account for the offset by +8, as the OS loader will never see the reloc.
17589 see fixup_segment() in write.c
17590 The S_IS_EXTERNAL test handles the case of global symbols.
17591 Those need the calculated base, not just the pipe compensation the linker will need. */
17593 && fixP
->fx_addsy
!= NULL
17594 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
17595 && (S_IS_EXTERNAL (fixP
->fx_addsy
) || !arm_force_relocation (fixP
)))
17602 /* ARM mode loads relative to PC are also offset by +8. Unlike
17603 branches, the Windows CE loader *does* expect the relocation
17604 to take this into account. */
17605 case BFD_RELOC_ARM_OFFSET_IMM
:
17606 case BFD_RELOC_ARM_OFFSET_IMM8
:
17607 case BFD_RELOC_ARM_HWLITERAL
:
17608 case BFD_RELOC_ARM_LITERAL
:
17609 case BFD_RELOC_ARM_CP_OFF_IMM
:
17613 /* Other PC-relative relocations are un-offset. */
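/* Illustrative example (comment added during editing, not in the original
   source): for an ARM-state branch whose fixup sits at address 0x1000, the
   value returned above is 0x1008, so a branch whose target is 0x1008 encodes
   an offset field of zero; the equivalent Thumb branch would use 0x1004.  */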
/* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
   Otherwise we have no need to default values of symbols.  */

symbolS *
md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
{
#ifdef OBJ_ELF
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
    {
      if (!GOT_symbol)
        {
          if (symbol_find (name))
            as_bad (_("GOT already in the symbol table"));

          GOT_symbol = symbol_new (name, undefined_section,
                                   (valueT) 0, & zero_address_frag);
        }

      return GOT_symbol;
    }
#endif

  return 0;
}
/* Subroutine of md_apply_fix.  Check to see if an immediate can be
   computed as two separate immediate values, added together.  We
   already know that this value cannot be computed by just one ARM
   instruction.  */

static unsigned int
validate_immediate_twopart (unsigned int   val,
                            unsigned int * highpart)
{
  unsigned int a;
  unsigned int i;

  for (i = 0; i < 32; i += 2)
    if (((a = rotate_left (val, i)) & 0xff) != 0)
      {
        if (a & 0xff00)
          {
            if (a & ~ 0xffff)
              continue;
            * highpart = (a >> 8) | ((i + 24) << 7);
          }
        else if (a & 0xff0000)
          {
            if (a & 0xff000000)
              continue;
            * highpart = (a >> 16) | ((i + 16) << 7);
          }
        else
          {
            assert (a & 0xff000000);
            * highpart = (a >> 24) | ((i + 8) << 7);
          }

        return (a & 0xff) | (i << 7);
      }

  return FAIL;
}

static int
validate_offset_imm (unsigned int val, int hwse)
{
  if ((hwse && val > 255) || val > 4095)
    return FAIL;
  return val;
}
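/* Illustrative example (comment added during editing, not in the original
   source): 0x1004 is not a valid single ARM immediate, but it splits into a
   low part of 4 (no rotation) and a high part of 0x10 rotated right by 24
   (i.e. 0x1000), so two data-processing instructions can build it.  */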
/* Subroutine of md_apply_fix.  Do those data_ops which can take a
   negative immediate constant by altering the instruction.  A bit of
   a hack really.
        MOV <-> MVN
        AND <-> BIC
        ADC <-> SBC
   by inverting the second operand, and
        ADD <-> SUB
        CMP <-> CMN
   by negating the second operand.  */

static int
negate_data_op (unsigned long * instruction,
                unsigned long   value)
{
  int op, new_inst;
  unsigned long negated, inverted;

  negated = encode_arm_immediate (-value);
  inverted = encode_arm_immediate (~value);

  op = (*instruction >> DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
      /* First negates.  */
    case OPCODE_SUB:             /* ADD <-> SUB  */
      new_inst = OPCODE_ADD;
      value = negated;
      break;

    case OPCODE_ADD:
      new_inst = OPCODE_SUB;
      value = negated;
      break;

    case OPCODE_CMP:             /* CMP <-> CMN  */
      new_inst = OPCODE_CMN;
      value = negated;
      break;

    case OPCODE_CMN:
      new_inst = OPCODE_CMP;
      value = negated;
      break;

      /* Now Inverted ops.  */
    case OPCODE_MOV:             /* MOV <-> MVN  */
      new_inst = OPCODE_MVN;
      value = inverted;
      break;

    case OPCODE_MVN:
      new_inst = OPCODE_MOV;
      value = inverted;
      break;

    case OPCODE_AND:             /* AND <-> BIC  */
      new_inst = OPCODE_BIC;
      value = inverted;
      break;

    case OPCODE_BIC:
      new_inst = OPCODE_AND;
      value = inverted;
      break;

    case OPCODE_ADC:             /* ADC <-> SBC  */
      new_inst = OPCODE_SBC;
      value = inverted;
      break;

    case OPCODE_SBC:
      new_inst = OPCODE_ADC;
      value = inverted;
      break;

      /* We cannot do anything.  */
    default:
      return FAIL;
    }

  if (value == (unsigned) FAIL)
    return FAIL;

  *instruction &= OPCODE_MASK;
  *instruction |= new_inst << DATA_OP_SHIFT;
  return value;
}
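/* Illustrative example (comment added during editing, not in the original
   source): if a fixup resolves "add r0, r1, #imm" to an immediate of -4,
   which has no valid ARM encoding, the routine above rewrites the opcode to
   SUB and returns the encoding of +4, producing "sub r0, r1, #4".  */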
/* Like negate_data_op, but for Thumb-2.  */

static unsigned int
thumb32_negate_data_op (offsetT *instruction, unsigned int value)
{
  int op, new_inst;
  int rd;
  unsigned int negated, inverted;

  negated = encode_thumb32_immediate (-value);
  inverted = encode_thumb32_immediate (~value);

  rd = (*instruction >> 8) & 0xf;
  op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
      /* ADD <-> SUB.  Includes CMP <-> CMN.  */
    case T2_OPCODE_SUB:
      new_inst = T2_OPCODE_ADD;
      value = negated;
      break;

    case T2_OPCODE_ADD:
      new_inst = T2_OPCODE_SUB;
      value = negated;
      break;

      /* ORR <-> ORN.  Includes MOV <-> MVN.  */
    case T2_OPCODE_ORR:
      new_inst = T2_OPCODE_ORN;
      value = inverted;
      break;

    case T2_OPCODE_ORN:
      new_inst = T2_OPCODE_ORR;
      value = inverted;
      break;

      /* AND <-> BIC.  TST has no inverted equivalent.  */
    case T2_OPCODE_AND:
      new_inst = T2_OPCODE_BIC;
      if (rd == 15)
        value = FAIL;
      else
        value = inverted;
      break;

    case T2_OPCODE_BIC:
      new_inst = T2_OPCODE_AND;
      value = inverted;
      break;

      /* ADC <-> SBC.  */
    case T2_OPCODE_ADC:
      new_inst = T2_OPCODE_SBC;
      value = inverted;
      break;

    case T2_OPCODE_SBC:
      new_inst = T2_OPCODE_ADC;
      value = inverted;
      break;

      /* We cannot do anything.  */
    default:
      return FAIL;
    }

  if (value == (unsigned int) FAIL)
    return FAIL;

  *instruction &= T2_OPCODE_MASK;
  *instruction |= new_inst << T2_DATA_OP_SHIFT;
  return value;
}
/* Read a 32-bit thumb instruction from buf.  */

static unsigned long
get_thumb32_insn (char * buf)
{
  unsigned long insn;
  insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
  insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);

  return insn;
}
/* We usually want to set the low bit on the address of thumb function
   symbols.  In particular .word foo - . should have the low bit set.
   Generic code tries to fold the difference of two symbols to
   a constant.  Prevent this and force a relocation when the first symbol
   is a thumb function.  */

int
arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
{
  if (op == O_subtract
      && l->X_op == O_symbol
      && r->X_op == O_symbol
      && THUMB_IS_FUNC (l->X_add_symbol))
    {
      l->X_op = O_subtract;
      l->X_op_symbol = r->X_add_symbol;
      l->X_add_number -= r->X_add_number;
      return 1;
    }

  /* Process as normal.  */
  return 0;
}
void
md_apply_fix (fixS *    fixP,
              valueT *  valP,
              segT      seg)
{
  offsetT        value = * valP;
  offsetT        newval;
  unsigned int   newimm;
  unsigned long  temp;
  int            sign;
  char *         buf = fixP->fx_where + fixP->fx_frag->fr_literal;

  assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);

  /* Note whether this will delete the relocation.  */
  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
    fixP->fx_done = 1;

  /* On a 64-bit host, silently truncate 'value' to 32 bits for
     consistency with the behaviour on 32-bit hosts.  Remember value
     for emit_reloc.  */
  value &= 0xffffffff;
  value ^= 0x80000000;
  value -= 0x80000000;

  *valP = value;
  fixP->fx_addnumber = value;

  /* Same treatment for fixP->fx_offset.  */
  fixP->fx_offset &= 0xffffffff;
  fixP->fx_offset ^= 0x80000000;
  fixP->fx_offset -= 0x80000000;

  switch (fixP->fx_r_type)
    {
    case BFD_RELOC_NONE:
      /* This will need to go in the object file.  */
      fixP->fx_done = 0;
      break;

    case BFD_RELOC_ARM_IMMEDIATE:
      /* We claim that this fixup has been processed here,
         even if in fact we generate an error because we do
         not have a reloc for it, so tc_gen_reloc will reject it.  */
      fixP->fx_done = 1;

      if (fixP->fx_addsy
          && ! S_IS_DEFINED (fixP->fx_addsy))
        {
          as_bad_where (fixP->fx_file, fixP->fx_line,
                        _("undefined symbol %s used as an immediate value"),
                        S_GET_NAME (fixP->fx_addsy));
          break;
        }

      newimm = encode_arm_immediate (value);
      temp = md_chars_to_number (buf, INSN_SIZE);

      /* If the instruction will fail, see if we can fix things up by
         changing the opcode.  */
      if (newimm == (unsigned int) FAIL
          && (newimm = negate_data_op (&temp, value)) == (unsigned int) FAIL)
        {
          as_bad_where (fixP->fx_file, fixP->fx_line,
                        _("invalid constant (%lx) after fixup"),
                        (unsigned long) value);
          break;
        }

      newimm |= (temp & 0xfffff000);
      md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
      break;
:
17967 unsigned int highpart
= 0;
17968 unsigned int newinsn
= 0xe1a00000; /* nop. */
17970 newimm
= encode_arm_immediate (value
);
17971 temp
= md_chars_to_number (buf
, INSN_SIZE
);
17973 /* If the instruction will fail, see if we can fix things up by
17974 changing the opcode. */
17975 if (newimm
== (unsigned int) FAIL
17976 && (newimm
= negate_data_op (& temp
, value
)) == (unsigned int) FAIL
)
17978 /* No ? OK - try using two ADD instructions to generate
17980 newimm
= validate_immediate_twopart (value
, & highpart
);
17982 /* Yes - then make sure that the second instruction is
17984 if (newimm
!= (unsigned int) FAIL
)
17986 /* Still No ? Try using a negated value. */
17987 else if ((newimm
= validate_immediate_twopart (- value
, & highpart
)) != (unsigned int) FAIL
)
17988 temp
= newinsn
= (temp
& OPCODE_MASK
) | OPCODE_SUB
<< DATA_OP_SHIFT
;
17989 /* Otherwise - give up. */
17992 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17993 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
17998 /* Replace the first operand in the 2nd instruction (which
17999 is the PC) with the destination register. We have
18000 already added in the PC in the first instruction and we
18001 do not want to do it again. */
18002 newinsn
&= ~ 0xf0000;
18003 newinsn
|= ((newinsn
& 0x0f000) << 4);
18006 newimm
|= (temp
& 0xfffff000);
18007 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
18009 highpart
|= (newinsn
& 0xfffff000);
18010 md_number_to_chars (buf
+ INSN_SIZE
, (valueT
) highpart
, INSN_SIZE
);
18014 case BFD_RELOC_ARM_OFFSET_IMM
:
18015 if (!fixP
->fx_done
&& seg
->use_rela_p
)
18018 case BFD_RELOC_ARM_LITERAL
:
18024 if (validate_offset_imm (value
, 0) == FAIL
)
18026 if (fixP
->fx_r_type
== BFD_RELOC_ARM_LITERAL
)
18027 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18028 _("invalid literal constant: pool needs to be closer"));
18030 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18031 _("bad immediate value for offset (%ld)"),
18036 newval
= md_chars_to_number (buf
, INSN_SIZE
);
18037 newval
&= 0xff7ff000;
18038 newval
|= value
| (sign
? INDEX_UP
: 0);
18039 md_number_to_chars (buf
, newval
, INSN_SIZE
);
18042 case BFD_RELOC_ARM_OFFSET_IMM8
:
18043 case BFD_RELOC_ARM_HWLITERAL
:
18049 if (validate_offset_imm (value
, 1) == FAIL
)
18051 if (fixP
->fx_r_type
== BFD_RELOC_ARM_HWLITERAL
)
18052 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18053 _("invalid literal constant: pool needs to be closer"));
18055 as_bad (_("bad immediate value for 8-bit offset (%ld)"),
18060 newval
= md_chars_to_number (buf
, INSN_SIZE
);
18061 newval
&= 0xff7ff0f0;
18062 newval
|= ((value
>> 4) << 8) | (value
& 0xf) | (sign
? INDEX_UP
: 0);
18063 md_number_to_chars (buf
, newval
, INSN_SIZE
);
18066 case BFD_RELOC_ARM_T32_OFFSET_U8
:
18067 if (value
< 0 || value
> 1020 || value
% 4 != 0)
18068 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18069 _("bad immediate value for offset (%ld)"), (long) value
);
18072 newval
= md_chars_to_number (buf
+2, THUMB_SIZE
);
18074 md_number_to_chars (buf
+2, newval
, THUMB_SIZE
);
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
      /* This is a complicated relocation used for all varieties of Thumb32
         load/store instruction with immediate offset:

         1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
                                                   *4, optional writeback(W)
                                                   (doubleword load/store)

         1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
         1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
         1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
         1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
         1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit

         Uppercase letters indicate bits that are already encoded at
         this point.  Lowercase letters are our problem.  For the
         second block of instructions, the secondary opcode nybble
         (bits 8..11) is present, and bit 23 is zero, even if this is
         a PC-relative operation.  */
      newval = md_chars_to_number (buf, THUMB_SIZE);
      newval <<= 16;
      newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);

      if ((newval & 0xf0000000) == 0xe0000000)
        {
          /* Doubleword load/store: 8-bit offset, scaled by 4.  */
          if (value >= 0)
            newval |= (1 << 23);
          else
            value = -value;
          if (value % 4 != 0)
            {
              as_bad_where (fixP->fx_file, fixP->fx_line,
                            _("offset not a multiple of 4"));
              break;
            }
          value /= 4;
          if (value > 0xff)
            {
              as_bad_where (fixP->fx_file, fixP->fx_line,
                            _("offset out of range"));
              break;
            }
          newval &= ~0xff;
        }
      else if ((newval & 0x000f0000) == 0x000f0000)
        {
          /* PC-relative, 12-bit offset.  */
          if (value >= 0)
            newval |= (1 << 23);
          else
            value = -value;
          if (value > 0xfff)
            {
              as_bad_where (fixP->fx_file, fixP->fx_line,
                            _("offset out of range"));
              break;
            }
          newval &= ~0xfff;
        }
      else if ((newval & 0x00000100) == 0x00000100)
        {
          /* Writeback: 8-bit, +/- offset.  */
          if (value >= 0)
            newval |= (1 << 9);
          else
            value = -value;
          if (value > 0xff)
            {
              as_bad_where (fixP->fx_file, fixP->fx_line,
                            _("offset out of range"));
              break;
            }
          newval &= ~0xff;
        }
      else if ((newval & 0x00000f00) == 0x00000e00)
        {
          /* T-instruction: positive 8-bit offset.  */
          if (value < 0 || value > 0xff)
            {
              as_bad_where (fixP->fx_file, fixP->fx_line,
                            _("offset out of range"));
              break;
            }
          newval &= ~0xff;
        }
      else
        {
          /* Positive 12-bit or negative 8-bit offset.  */
          int limit;

          if (value >= 0)
            {
              newval |= (1 << 23);
              limit = 0xfff;
            }
          else
            {
              value = -value;
              limit = 0xff;
            }
          if (value > limit)
            {
              as_bad_where (fixP->fx_file, fixP->fx_line,
                            _("offset out of range"));
              break;
            }
          newval &= ~limit;
        }

      newval |= value;
      md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
      md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
      break;
:
18193 newval
= md_chars_to_number (buf
, INSN_SIZE
);
18194 if (((unsigned long) value
) > 32
18196 && (((newval
& 0x60) == 0) || (newval
& 0x60) == 0x60)))
18198 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18199 _("shift expression is too large"));
18204 /* Shifts of zero must be done as lsl. */
18206 else if (value
== 32)
18208 newval
&= 0xfffff07f;
18209 newval
|= (value
& 0x1f) << 7;
18210 md_number_to_chars (buf
, newval
, INSN_SIZE
);
18213 case BFD_RELOC_ARM_T32_IMMEDIATE
:
18214 case BFD_RELOC_ARM_T32_ADD_IMM
:
18215 case BFD_RELOC_ARM_T32_IMM12
:
18216 case BFD_RELOC_ARM_T32_ADD_PC12
:
18217 /* We claim that this fixup has been processed here,
18218 even if in fact we generate an error because we do
18219 not have a reloc for it, so tc_gen_reloc will reject it. */
18223 && ! S_IS_DEFINED (fixP
->fx_addsy
))
18225 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18226 _("undefined symbol %s used as an immediate value"),
18227 S_GET_NAME (fixP
->fx_addsy
));
18231 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
18233 newval
|= md_chars_to_number (buf
+2, THUMB_SIZE
);
18236 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
18237 || fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
18239 newimm
= encode_thumb32_immediate (value
);
18240 if (newimm
== (unsigned int) FAIL
)
18241 newimm
= thumb32_negate_data_op (&newval
, value
);
18243 if (fixP
->fx_r_type
!= BFD_RELOC_ARM_T32_IMMEDIATE
18244 && newimm
== (unsigned int) FAIL
)
18246 /* Turn add/sum into addw/subw. */
18247 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
18248 newval
= (newval
& 0xfeffffff) | 0x02000000;
18250 /* 12 bit immediate for addw/subw. */
18254 newval
^= 0x00a00000;
18257 newimm
= (unsigned int) FAIL
;
18262 if (newimm
== (unsigned int)FAIL
)
18264 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18265 _("invalid constant (%lx) after fixup"),
18266 (unsigned long) value
);
18270 newval
|= (newimm
& 0x800) << 15;
18271 newval
|= (newimm
& 0x700) << 4;
18272 newval
|= (newimm
& 0x0ff);
18274 md_number_to_chars (buf
, (valueT
) ((newval
>> 16) & 0xffff), THUMB_SIZE
);
18275 md_number_to_chars (buf
+2, (valueT
) (newval
& 0xffff), THUMB_SIZE
);
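/* Illustrative example (comment added during editing, not in the original
   source): the encoded Thumb-2 immediate is scattered into the instruction
   word as i:imm3:imm8 -- bit 11 of the encoding lands in instruction bit 26,
   bits 10..8 in bits 14..12 and bits 7..0 in bits 7..0, which is exactly
   what the three OR statements above implement.  */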
    case BFD_RELOC_ARM_SMC:
      if (((unsigned long) value) > 0xffff)
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("invalid smc expression"));
      newval = md_chars_to_number (buf, INSN_SIZE);
      newval |= (value & 0xf) | ((value & 0xfff0) << 4);
      md_number_to_chars (buf, newval, INSN_SIZE);
      break;

    case BFD_RELOC_ARM_SWI:
      if (fixP->tc_fix_data != 0)
        {
          if (((unsigned long) value) > 0xff)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("invalid swi expression"));
          newval = md_chars_to_number (buf, THUMB_SIZE);
          newval |= value;
          md_number_to_chars (buf, newval, THUMB_SIZE);
        }
      else
        {
          if (((unsigned long) value) > 0x00ffffff)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("invalid swi expression"));
          newval = md_chars_to_number (buf, INSN_SIZE);
          newval |= value;
          md_number_to_chars (buf, newval, INSN_SIZE);
        }
      break;

    case BFD_RELOC_ARM_MULTI:
      if (((unsigned long) value) > 0xffff)
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("invalid expression in load/store multiple"));
      newval = value | md_chars_to_number (buf, INSN_SIZE);
      md_number_to_chars (buf, newval, INSN_SIZE);
      break;
    case BFD_RELOC_ARM_PCREL_CALL:
      newval = md_chars_to_number (buf, INSN_SIZE);
      if ((newval & 0xf0000000) == 0xf0000000)
        temp = 1;
      else
        temp = 3;
      goto arm_branch_common;

    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_PCREL_BRANCH:
      temp = 3;
      goto arm_branch_common;

    case BFD_RELOC_ARM_PCREL_BLX:
      temp = 1;
    arm_branch_common:
      /* We are going to store value (shifted right by two) in the
         instruction, in a 24 bit, signed field.  Bits 26 through 32 either
         all clear or all set and bit 0 must be clear.  For B/BL bit 1 must
         also be clear.  */
      if (value & temp)
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("misaligned branch destination"));
      if ((value & (offsetT) 0xfe000000) != (offsetT) 0
          && (value & (offsetT) 0xfe000000) != (offsetT) 0xfe000000)
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("branch out of range"));

      if (fixP->fx_done || !seg->use_rela_p)
        {
          newval = md_chars_to_number (buf, INSN_SIZE);
          newval |= (value >> 2) & 0x00ffffff;
          /* Set the H bit on BLX instructions.  */
          if (temp == 1)
            {
              if (value & 2)
                newval |= 0x01000000;
              else
                newval &= ~0x01000000;
            }
          md_number_to_chars (buf, newval, INSN_SIZE);
        }
      break;
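/* Illustrative example (comment added during editing, not in the original
   source): a resolved branch displacement of 0x1000 stores 0x1000 >> 2 ==
   0x400 in the 24-bit field; for BLX, bit 1 of the displacement becomes the
   H bit so that halfword-aligned Thumb targets remain reachable.  */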
    case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
      /* CBZ can only branch forward.  */

      /* Attempts to use CBZ to branch to the next instruction
         (which, strictly speaking, are prohibited) will be turned into
         no-ops.

         FIXME: It may be better to remove the instruction completely and
         perform relaxation.  */
      if (value == -2)
        {
          newval = md_chars_to_number (buf, THUMB_SIZE);
          newval = 0xbf00; /* NOP encoding T1 */
          md_number_to_chars (buf, newval, THUMB_SIZE);
        }
      else
        {
          if (value & ~0x7e)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("branch out of range"));

          if (fixP->fx_done || !seg->use_rela_p)
            {
              newval = md_chars_to_number (buf, THUMB_SIZE);
              newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
              md_number_to_chars (buf, newval, THUMB_SIZE);
            }
        }
      break;
    case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch.  */
      if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("branch out of range"));

      if (fixP->fx_done || !seg->use_rela_p)
        {
          newval = md_chars_to_number (buf, THUMB_SIZE);
          newval |= (value & 0x1ff) >> 1;
          md_number_to_chars (buf, newval, THUMB_SIZE);
        }
      break;

    case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch.  */
      if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("branch out of range"));

      if (fixP->fx_done || !seg->use_rela_p)
        {
          newval = md_chars_to_number (buf, THUMB_SIZE);
          newval |= (value & 0xfff) >> 1;
          md_number_to_chars (buf, newval, THUMB_SIZE);
        }
      break;
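/* Illustrative note (comment added during editing, not in the original
   source): both forms store the displacement halfword-scaled, so the 8-bit
   conditional branch reaches roughly +/-256 bytes and the 11-bit
   unconditional branch roughly +/-2 KB, matching the range checks above.  */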
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
      if ((value & ~0x1fffff) && ((value & ~0x1fffff) != ~0x1fffff))
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("conditional branch out of range"));

      if (fixP->fx_done || !seg->use_rela_p)
        {
          offsetT newval2;
          addressT S, J1, J2, lo, hi;

          S  = (value & 0x00100000) >> 20;
          J2 = (value & 0x00080000) >> 19;
          J1 = (value & 0x00040000) >> 18;
          hi = (value & 0x0003f000) >> 12;
          lo = (value & 0x00000ffe) >> 1;

          newval = md_chars_to_number (buf, THUMB_SIZE);
          newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
          newval  |= (S << 10) | hi;
          newval2 |= (J1 << 13) | (J2 << 11) | lo;
          md_number_to_chars (buf, newval, THUMB_SIZE);
          md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
        }
      break;
:
18445 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
18446 if ((value
& ~0x3fffff) && ((value
& ~0x3fffff) != ~0x3fffff))
18447 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18448 _("branch out of range"));
18450 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
18451 /* For a BLX instruction, make sure that the relocation is rounded up
18452 to a word boundary. This follows the semantics of the instruction
18453 which specifies that bit 1 of the target address will come from bit
18454 1 of the base address. */
18455 value
= (value
+ 1) & ~ 1;
18457 if (fixP
->fx_done
|| !seg
->use_rela_p
)
18461 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
18462 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
18463 newval
|= (value
& 0x7fffff) >> 12;
18464 newval2
|= (value
& 0xfff) >> 1;
18465 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
18466 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
      if ((value & ~0x1ffffff) && ((value & ~0x1ffffff) != ~0x1ffffff))
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("branch out of range"));

      if (fixP->fx_done || !seg->use_rela_p)
        {
          offsetT newval2;
          addressT S, I1, I2, lo, hi;

          S  = (value & 0x01000000) >> 24;
          I1 = (value & 0x00800000) >> 23;
          I2 = (value & 0x00400000) >> 22;
          hi = (value & 0x003ff000) >> 12;
          lo = (value & 0x00000ffe) >> 1;

          I1 = !(I1 ^ S);
          I2 = !(I2 ^ S);

          newval   = md_chars_to_number (buf, THUMB_SIZE);
          newval2  = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
          newval  |= (S << 10) | hi;
          newval2 |= (I1 << 13) | (I2 << 11) | lo;
          md_number_to_chars (buf, newval, THUMB_SIZE);
          md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
        }
      break;

    case BFD_RELOC_8:
      if (fixP->fx_done || !seg->use_rela_p)
        md_number_to_chars (buf, value, 1);
      break;

    case BFD_RELOC_16:
      if (fixP->fx_done || !seg->use_rela_p)
        md_number_to_chars (buf, value, 2);
      break;
    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_LDM32:
    case BFD_RELOC_ARM_TLS_LDO32:
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* fall through */

    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_TARGET2:
      if (fixP->fx_done || !seg->use_rela_p)
        md_number_to_chars (buf, 0, 4);
      break;

    case BFD_RELOC_RVA:
    case BFD_RELOC_32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_32_PCREL:
    case BFD_RELOC_32_SECREL:
      if (fixP->fx_done || !seg->use_rela_p)
#ifdef TE_WINCE
        /* For WinCE we only do this for pcrel fixups.  */
        if (fixP->fx_done || fixP->fx_pcrel)
#endif
          md_number_to_chars (buf, value, 4);
      break;
    case BFD_RELOC_ARM_PREL31:
      if (fixP->fx_done || !seg->use_rela_p)
        {
          newval = md_chars_to_number (buf, 4) & 0x80000000;
          if ((value ^ (value >> 1)) & 0x40000000)
            {
              as_bad_where (fixP->fx_file, fixP->fx_line,
                            _("rel31 relocation overflow"));
            }
          newval |= value & 0x7fffffff;
          md_number_to_chars (buf, newval, 4);
        }
      break;
    case BFD_RELOC_ARM_CP_OFF_IMM:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      if (value < -1023 || value > 1023 || (value & 3))
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("co-processor offset out of range"));
    cp_off_common:
      sign = value >= 0;
      if (value < 0)
        value = -value;
      if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
          || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
        newval = md_chars_to_number (buf, INSN_SIZE);
      else
        newval = get_thumb32_insn (buf);
      newval &= 0xff7fff00;
      newval |= (value >> 2) | (sign ? INDEX_UP : 0);
      if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
          || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
        md_number_to_chars (buf, newval, INSN_SIZE);
      else
        put_thumb32_insn (buf, newval);
      break;

    case BFD_RELOC_ARM_CP_OFF_IMM_S2:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
      if (value < -255 || value > 255)
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("co-processor offset out of range"));
      value *= 4;
      goto cp_off_common;
    case BFD_RELOC_ARM_THUMB_OFFSET:
      newval = md_chars_to_number (buf, THUMB_SIZE);
      /* Exactly what ranges, and where the offset is inserted depends
         on the type of instruction, we can establish this from the
         top 4 bits.  */
      switch (newval >> 12)
        {
        case 4: /* PC load.  */
          /* Thumb PC loads are somewhat odd, bit 1 of the PC is
             forced to zero for these loads; md_pcrel_from has already
             compensated for this.  */
          if (value & 3)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("invalid offset, target not word aligned (0x%08lX)"),
                          (((unsigned long) fixP->fx_frag->fr_address
                            + (unsigned long) fixP->fx_where) & ~3)
                          + (unsigned long) value);

          if (value & ~0x3fc)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("invalid offset, value too big (0x%08lX)"),
                          (long) value);

          newval |= value >> 2;
          break;

        case 9: /* SP load/store.  */
          if (value & ~0x3fc)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("invalid offset, value too big (0x%08lX)"),
                          (long) value);
          newval |= value >> 2;
          break;

        case 6: /* Word load/store.  */
          if (value & ~0x7c)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("invalid offset, value too big (0x%08lX)"),
                          (long) value);
          newval |= value << 4; /* 6 - 2.  */
          break;

        case 7: /* Byte load/store.  */
          if (value & ~0x1f)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("invalid offset, value too big (0x%08lX)"),
                          (long) value);
          newval |= value << 6;
          break;

        case 8: /* Halfword load/store.  */
          if (value & ~0x3e)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("invalid offset, value too big (0x%08lX)"),
                          (long) value);
          newval |= value << 5; /* 6 - 1.  */
          break;

        default:
          as_bad_where (fixP->fx_file, fixP->fx_line,
                        "Unable to process relocation for thumb opcode: %lx",
                        (unsigned long) newval);
          break;
        }
      md_number_to_chars (buf, newval, THUMB_SIZE);
      break;
    case BFD_RELOC_ARM_THUMB_ADD:
      /* This is a complicated relocation, since we use it for all of
         the following immediate relocations:

            3bit ADD/SUB
            8bit ADD/SUB
            9bit ADD/SUB SP word-aligned
           10bit ADD PC/SP word-aligned

         The type of instruction being processed is encoded in the
         instruction field:

           0x8000  SUB
           0x00F0  Rd
           0x000F  Rs
      */
      newval = md_chars_to_number (buf, THUMB_SIZE);
      {
        int rd = (newval >> 4) & 0xf;
        int rs = newval & 0xf;
        int subtract = !!(newval & 0x8000);

        /* Check for HI regs, only very restricted cases allowed:
           Adjusting SP, and using PC or SP to get an address.  */
        if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
            || (rs > 7 && rs != REG_SP && rs != REG_PC))
          as_bad_where (fixP->fx_file, fixP->fx_line,
                        _("invalid Hi register with immediate"));

        /* If value is negative, choose the opposite instruction.  */
        if (value < 0)
          {
            value = -value;
            subtract = !subtract;
            if (value < 0)
              as_bad_where (fixP->fx_file, fixP->fx_line,
                            _("immediate value out of range"));
          }

        if (rd == REG_SP)
          {
            if (value & ~0x1fc)
              as_bad_where (fixP->fx_file, fixP->fx_line,
                            _("invalid immediate for stack address calculation"));
            newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
            newval |= value >> 2;
          }
        else if (rs == REG_PC || rs == REG_SP)
          {
            if (subtract || value & ~0x3fc)
              as_bad_where (fixP->fx_file, fixP->fx_line,
                            _("invalid immediate for address calculation (value = 0x%08lX)"),
                            (unsigned long) value);
            newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
            newval |= rd << 8;
            newval |= value >> 2;
          }
        else if (rs == rd)
          {
            if (value & ~0xff)
              as_bad_where (fixP->fx_file, fixP->fx_line,
                            _("immediate value out of range"));
            newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
            newval |= (rd << 8) | value;
          }
        else
          {
            if (value & ~0x7)
              as_bad_where (fixP->fx_file, fixP->fx_line,
                            _("immediate value out of range"));
            newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
            newval |= rd | (rs << 3) | (value << 6);
          }
      }
      md_number_to_chars (buf, newval, THUMB_SIZE);
      break;
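/* Illustrative example (comment added during editing, not in the original
   source): if "add r1, r2, #imm" resolves to an immediate of -5, the code
   above negates the value and flips the subtract flag, so the emitted
   encoding is the 3-bit immediate form of "sub r1, r2, #5".  */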
    case BFD_RELOC_ARM_THUMB_IMM:
      newval = md_chars_to_number (buf, THUMB_SIZE);
      if (value < 0 || value > 255)
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("invalid immediate: %ld is out of range"),
                      (long) value);
      newval |= value;
      md_number_to_chars (buf, newval, THUMB_SIZE);
      break;

    case BFD_RELOC_ARM_THUMB_SHIFT:
      /* 5bit shift value (0..32).  LSL cannot take 32.  */
      newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
      temp = newval & 0xf800;
      if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("invalid shift value: %ld"), (long) value);
      /* Shifts of zero must be encoded as LSL.  */
      if (value == 0)
        newval = (newval & 0x003f) | T_OPCODE_LSL_I;
      /* Shifts of 32 are encoded as zero.  */
      else if (value == 32)
        value = 0;
      newval |= value << 6;
      md_number_to_chars (buf, newval, THUMB_SIZE);
      break;

    case BFD_RELOC_VTABLE_INHERIT:
    case BFD_RELOC_VTABLE_ENTRY:
      fixP->fx_done = 0;
      return;
    case BFD_RELOC_ARM_MOVW:
    case BFD_RELOC_ARM_MOVT:
    case BFD_RELOC_ARM_THUMB_MOVW:
    case BFD_RELOC_ARM_THUMB_MOVT:
      if (fixP->fx_done || !seg->use_rela_p)
        {
          /* REL format relocations are limited to a 16-bit addend.  */
          if (!fixP->fx_done)
            {
              if (value < -0x1000 || value > 0xffff)
                as_bad_where (fixP->fx_file, fixP->fx_line,
                              _("offset out of range"));
            }
          else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
                   || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
            {
              value >>= 16;
            }

          if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
              || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
            {
              newval = get_thumb32_insn (buf);
              newval &= 0xfbf08f00;
              newval |= (value & 0xf000) << 4;
              newval |= (value & 0x0800) << 15;
              newval |= (value & 0x0700) << 4;
              newval |= (value & 0x00ff);
              put_thumb32_insn (buf, newval);
            }
          else
            {
              newval = md_chars_to_number (buf, 4);
              newval &= 0xfff0f000;
              newval |= value & 0x0fff;
              newval |= (value & 0xf000) << 4;
              md_number_to_chars (buf, newval, 4);
            }
        }
      break;
:
18807 case BFD_RELOC_ARM_ALU_PC_G0
:
18808 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
18809 case BFD_RELOC_ARM_ALU_PC_G1
:
18810 case BFD_RELOC_ARM_ALU_PC_G2
:
18811 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
18812 case BFD_RELOC_ARM_ALU_SB_G0
:
18813 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
18814 case BFD_RELOC_ARM_ALU_SB_G1
:
18815 case BFD_RELOC_ARM_ALU_SB_G2
:
18816 assert (!fixP
->fx_done
);
18817 if (!seg
->use_rela_p
)
18820 bfd_vma encoded_addend
;
18821 bfd_vma addend_abs
= abs (value
);
18823 /* Check that the absolute value of the addend can be
18824 expressed as an 8-bit constant plus a rotation. */
18825 encoded_addend
= encode_arm_immediate (addend_abs
);
18826 if (encoded_addend
== (unsigned int) FAIL
)
18827 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18828 _("the offset 0x%08lX is not representable"),
18829 (unsigned long) addend_abs
);
18831 /* Extract the instruction. */
18832 insn
= md_chars_to_number (buf
, INSN_SIZE
);
18834 /* If the addend is positive, use an ADD instruction.
18835 Otherwise use a SUB. Take care not to destroy the S bit. */
18836 insn
&= 0xff1fffff;
18842 /* Place the encoded addend into the first 12 bits of the
18844 insn
&= 0xfffff000;
18845 insn
|= encoded_addend
;
18847 /* Update the instruction. */
18848 md_number_to_chars (buf
, insn
, INSN_SIZE
);
18852 case BFD_RELOC_ARM_LDR_PC_G0
:
18853 case BFD_RELOC_ARM_LDR_PC_G1
:
18854 case BFD_RELOC_ARM_LDR_PC_G2
:
18855 case BFD_RELOC_ARM_LDR_SB_G0
:
18856 case BFD_RELOC_ARM_LDR_SB_G1
:
18857 case BFD_RELOC_ARM_LDR_SB_G2
:
18858 assert (!fixP
->fx_done
);
18859 if (!seg
->use_rela_p
)
18862 bfd_vma addend_abs
= abs (value
);
18864 /* Check that the absolute value of the addend can be
18865 encoded in 12 bits. */
18866 if (addend_abs
>= 0x1000)
18867 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18868 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
18869 (unsigned long) addend_abs
);
18871 /* Extract the instruction. */
18872 insn
= md_chars_to_number (buf
, INSN_SIZE
);
18874 /* If the addend is negative, clear bit 23 of the instruction.
18875 Otherwise set it. */
18877 insn
&= ~(1 << 23);
18881 /* Place the absolute value of the addend into the first 12 bits
18882 of the instruction. */
18883 insn
&= 0xfffff000;
18884 insn
|= addend_abs
;
18886 /* Update the instruction. */
18887 md_number_to_chars (buf
, insn
, INSN_SIZE
);
18891 case BFD_RELOC_ARM_LDRS_PC_G0
:
18892 case BFD_RELOC_ARM_LDRS_PC_G1
:
18893 case BFD_RELOC_ARM_LDRS_PC_G2
:
18894 case BFD_RELOC_ARM_LDRS_SB_G0
:
18895 case BFD_RELOC_ARM_LDRS_SB_G1
:
18896 case BFD_RELOC_ARM_LDRS_SB_G2
:
18897 assert (!fixP
->fx_done
);
18898 if (!seg
->use_rela_p
)
18901 bfd_vma addend_abs
= abs (value
);
18903 /* Check that the absolute value of the addend can be
18904 encoded in 8 bits. */
18905 if (addend_abs
>= 0x100)
18906 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18907 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
18908 (unsigned long) addend_abs
);
18910 /* Extract the instruction. */
18911 insn
= md_chars_to_number (buf
, INSN_SIZE
);
18913 /* If the addend is negative, clear bit 23 of the instruction.
18914 Otherwise set it. */
18916 insn
&= ~(1 << 23);
18920 /* Place the first four bits of the absolute value of the addend
18921 into the first 4 bits of the instruction, and the remaining
18922 four into bits 8 .. 11. */
18923 insn
&= 0xfffff0f0;
18924 insn
|= (addend_abs
& 0xf) | ((addend_abs
& 0xf0) << 4);
18926 /* Update the instruction. */
18927 md_number_to_chars (buf
, insn
, INSN_SIZE
);
18931 case BFD_RELOC_ARM_LDC_PC_G0
:
18932 case BFD_RELOC_ARM_LDC_PC_G1
:
18933 case BFD_RELOC_ARM_LDC_PC_G2
:
18934 case BFD_RELOC_ARM_LDC_SB_G0
:
18935 case BFD_RELOC_ARM_LDC_SB_G1
:
18936 case BFD_RELOC_ARM_LDC_SB_G2
:
18937 assert (!fixP
->fx_done
);
18938 if (!seg
->use_rela_p
)
18941 bfd_vma addend_abs
= abs (value
);
18943 /* Check that the absolute value of the addend is a multiple of
18944 four and, when divided by four, fits in 8 bits. */
18945 if (addend_abs
& 0x3)
18946 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18947 _("bad offset 0x%08lX (must be word-aligned)"),
18948 (unsigned long) addend_abs
);
18950 if ((addend_abs
>> 2) > 0xff)
18951 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18952 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
18953 (unsigned long) addend_abs
);
18955 /* Extract the instruction. */
18956 insn
= md_chars_to_number (buf
, INSN_SIZE
);
18958 /* If the addend is negative, clear bit 23 of the instruction.
18959 Otherwise set it. */
18961 insn
&= ~(1 << 23);
18965 /* Place the addend (divided by four) into the first eight
18966 bits of the instruction. */
18967 insn
&= 0xfffffff0;
18968 insn
|= addend_abs
>> 2;
18970 /* Update the instruction. */
18971 md_number_to_chars (buf
, insn
, INSN_SIZE
);
18975 case BFD_RELOC_UNUSED
:
18977 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18978 _("bad relocation fixup type (%d)"), fixP
->fx_r_type
);
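
/* Illustrative sketch, not part of the original source: it only restates
   the Thumb-2 MOVW/MOVT bit scatter performed in the fixup code above,
   with the field names spelled out.  The function name is hypothetical
   and the block is disabled.  */
#if 0
static unsigned long
thumb2_movw_scatter_example (unsigned long insn, unsigned long value)
{
  insn &= 0xfbf08f00;             /* Clear imm4, i, imm3 and imm8.  */
  insn |= (value & 0xf000) << 4;  /* imm4 -> bits 19..16.  */
  insn |= (value & 0x0800) << 15; /* i    -> bit 26.       */
  insn |= (value & 0x0700) << 4;  /* imm3 -> bits 14..12.  */
  insn |= (value & 0x00ff);       /* imm8 -> bits 7..0.    */
  return insn;
}
#endif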
/* Translate internal representation of relocation info to BFD target
   format.  */

arelent *
tc_gen_reloc (asection *section, fixS *fixp)
{
  arelent * reloc;
  bfd_reloc_code_real_type code;

  reloc = xmalloc (sizeof (arelent));

  reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;

  if (fixp->fx_pcrel)
    {
      if (section->use_rela_p)
	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
      else
	fixp->fx_offset = reloc->address;
    }
  reloc->addend = fixp->fx_offset;

  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_8:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_8_PCREL;
	  break;
	}

    case BFD_RELOC_16:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_16_PCREL;
	  break;
	}

    case BFD_RELOC_32:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_32_PCREL;
	  break;
	}

    case BFD_RELOC_ARM_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVW_PCREL;
	  break;
	}

    case BFD_RELOC_ARM_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVT_PCREL;
	  break;
	}

    case BFD_RELOC_ARM_THUMB_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
	  break;
	}

    case BFD_RELOC_ARM_THUMB_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
	  break;
	}

    case BFD_RELOC_NONE:
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_RVA:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_THUMB_PCREL_BLX:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
    case BFD_RELOC_32_SECREL:
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_HWLITERAL:
      /* If this is called then a literal has
	 been referenced across a section boundary.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("literal referenced across section boundary"));
      return NULL;

    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_ARM_PREL31:
    case BFD_RELOC_ARM_TARGET2:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_LDO32:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_ALU_PC_G0_NC:
    case BFD_RELOC_ARM_ALU_PC_G0:
    case BFD_RELOC_ARM_ALU_PC_G1_NC:
    case BFD_RELOC_ARM_ALU_PC_G1:
    case BFD_RELOC_ARM_ALU_PC_G2:
    case BFD_RELOC_ARM_LDR_PC_G0:
    case BFD_RELOC_ARM_LDR_PC_G1:
    case BFD_RELOC_ARM_LDR_PC_G2:
    case BFD_RELOC_ARM_LDRS_PC_G0:
    case BFD_RELOC_ARM_LDRS_PC_G1:
    case BFD_RELOC_ARM_LDRS_PC_G2:
    case BFD_RELOC_ARM_LDC_PC_G0:
    case BFD_RELOC_ARM_LDC_PC_G1:
    case BFD_RELOC_ARM_LDC_PC_G2:
    case BFD_RELOC_ARM_ALU_SB_G0_NC:
    case BFD_RELOC_ARM_ALU_SB_G0:
    case BFD_RELOC_ARM_ALU_SB_G1_NC:
    case BFD_RELOC_ARM_ALU_SB_G1:
    case BFD_RELOC_ARM_ALU_SB_G2:
    case BFD_RELOC_ARM_LDR_SB_G0:
    case BFD_RELOC_ARM_LDR_SB_G1:
    case BFD_RELOC_ARM_LDR_SB_G2:
    case BFD_RELOC_ARM_LDRS_SB_G0:
    case BFD_RELOC_ARM_LDRS_SB_G1:
    case BFD_RELOC_ARM_LDRS_SB_G2:
    case BFD_RELOC_ARM_LDC_SB_G0:
    case BFD_RELOC_ARM_LDC_SB_G1:
    case BFD_RELOC_ARM_LDC_SB_G2:
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_LDM32:
      /* BFD will include the symbol's address in the addend.
	 But we don't want that, so subtract it out again here.  */
      if (!S_IS_COMMON (fixp->fx_addsy))
	reloc->addend -= (*reloc->sym_ptr_ptr)->value;
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal relocation (type: IMMEDIATE) not fixed up"));
      return NULL;

    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("ADRL used for a symbol not defined in the same file"));
      return NULL;

    case BFD_RELOC_ARM_OFFSET_IMM:
      if (section->use_rela_p)
	{
	  code = fixp->fx_r_type;
	  break;
	}

      if (fixp->fx_addsy != NULL
	  && !S_IS_DEFINED (fixp->fx_addsy)
	  && S_IS_LOCAL (fixp->fx_addsy))
	{
	  as_bad_where (fixp->fx_file, fixp->fx_line,
			_("undefined local label `%s'"),
			S_GET_NAME (fixp->fx_addsy));
	  return NULL;
	}

      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
      return NULL;

    default:
      {
	char * type;

	switch (fixp->fx_r_type)
	  {
	  case BFD_RELOC_NONE:		     type = "NONE";	    break;
	  case BFD_RELOC_ARM_OFFSET_IMM8:    type = "OFFSET_IMM8";  break;
	  case BFD_RELOC_ARM_SHIFT_IMM:	     type = "SHIFT_IMM";    break;
	  case BFD_RELOC_ARM_SMC:	     type = "SMC";	    break;
	  case BFD_RELOC_ARM_SWI:	     type = "SWI";	    break;
	  case BFD_RELOC_ARM_MULTI:	     type = "MULTI";	    break;
	  case BFD_RELOC_ARM_CP_OFF_IMM:     type = "CP_OFF_IMM";   break;
	  case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
	  case BFD_RELOC_ARM_THUMB_ADD:	     type = "THUMB_ADD";    break;
	  case BFD_RELOC_ARM_THUMB_SHIFT:    type = "THUMB_SHIFT";  break;
	  case BFD_RELOC_ARM_THUMB_IMM:	     type = "THUMB_IMM";    break;
	  case BFD_RELOC_ARM_THUMB_OFFSET:   type = "THUMB_OFFSET"; break;
	  default:			     type = _("<unknown>"); break;
	  }
	as_bad_where (fixp->fx_file, fixp->fx_line,
		      _("cannot represent %s relocation in this object file format"),
		      type);
	return NULL;
      }
    }

  if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_ARM_GOTPC;
      reloc->addend = fixp->fx_offset = reloc->address;
    }

  reloc->howto = bfd_reloc_type_lookup (stdoutput, code);

  if (reloc->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent %s relocation in this object file format"),
		    bfd_get_reloc_code_name (code));
      return NULL;
    }

  /* HACK: Since arm ELF uses Rel instead of Rela, encode the
     vtable entry to be used in the relocation's section offset.  */
  if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    reloc->address = fixp->fx_offset;

  return reloc;
}

/* This fix_new is called by cons via TC_CONS_FIX_NEW.  */

void
cons_fix_new_arm (fragS *	frag,
		  int		where,
		  int		size,
		  expressionS * exp,
		  int		pcrel)
{
  bfd_reloc_code_real_type type;

  /* Pick a reloc.
     FIXME: @@ Should look at CPU word size.  */
  switch (size)
    {
    case 1:
      type = BFD_RELOC_8;
      break;
    case 2:
      type = BFD_RELOC_16;
      break;
    case 4:
    default:
      type = BFD_RELOC_32;
      break;
    case 8:
      type = BFD_RELOC_64;
      break;
    }

  if (exp->X_op == O_secrel)
    {
      exp->X_op = O_symbol;
      type = BFD_RELOC_32_SECREL;
    }

  fix_new_exp (frag, where, (int) size, exp, pcrel, type);
}
#if defined OBJ_COFF || defined OBJ_ELF
void
arm_validate_fix (fixS * fixP)
{
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
      && fixP->fx_addsy != NULL
      && S_IS_DEFINED (fixP->fx_addsy)
      && ! THUMB_IS_FUNC (fixP->fx_addsy))
    {
      fixP->fx_addsy = find_real_start (fixP->fx_addsy);
    }
}
#endif

int
arm_force_relocation (struct fix * fixp)
{
#if defined (OBJ_COFF) && defined (TE_PE)
  if (fixp->fx_r_type == BFD_RELOC_RVA)
    return 1;
#endif

  /* Resolve these relocations even if the symbol is extern or weak.  */
  if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12)
    return 0;

  /* Always leave these relocations for the linker.  */
  if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
       && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
      || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
    return 1;

  /* Always generate relocations against function symbols.  */
  if (fixp->fx_r_type == BFD_RELOC_32
      && fixp->fx_addsy
      && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
    return 1;

  return generic_force_reloc (fixp);
}

#if defined (OBJ_ELF) || defined (OBJ_COFF)
/* Relocations against function names must be left unadjusted,
   so that the linker can use this information to generate interworking
   stubs.  The MIPS version of this function
   also prevents relocations that are mips-16 specific, but I do not
   know why it does this.

   There is one other problem that ought to be addressed here, but
   which currently is not:  Taking the address of a label (rather
   than a function) and then later jumping to that address.  Such
   addresses also ought to have their bottom bit set (assuming that
   they reside in Thumb code), but at the moment they will not.  */

bfd_boolean
arm_fix_adjustable (fixS * fixP)
{
  if (fixP->fx_addsy == NULL)
    return 1;

  /* Preserve relocations against symbols with function type.  */
  if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
    return 0;

  if (THUMB_IS_FUNC (fixP->fx_addsy)
      && fixP->fx_subsy == NULL)
    return 0;

  /* We need the symbol name for the VTABLE entries.  */
  if (   fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
      || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    return 0;

  /* Don't allow symbols to be discarded on GOT related relocs.  */
  if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
      || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
      || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
      || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
    return 0;

  /* Similarly for group relocations.  */
  if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
       && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
      || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
    return 0;

  return 1;
}
#endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
#ifdef OBJ_ELF
const char *
elf32_arm_target_format (void)
{
#ifdef TE_SYMBIAN
  return (target_big_endian
	  ? "elf32-bigarm-symbian"
	  : "elf32-littlearm-symbian");
#elif defined (TE_VXWORKS)
  return (target_big_endian
	  ? "elf32-bigarm-vxworks"
	  : "elf32-littlearm-vxworks");
#else
  if (target_big_endian)
    return "elf32-bigarm";
  else
    return "elf32-littlearm";
#endif
}

void
armelf_frob_symbol (symbolS * symp,
		    int *     puntp)
{
  elf_frob_symbol (symp, puntp);
}
#endif

/* MD interface: Finalization.  */

/* A good place to do this, although this was probably not intended
   for this kind of use.  We need to dump the literal pool before
   references are made to a null symbol pointer.  */

void
arm_cleanup (void)
{
  literal_pool * pool;

  for (pool = list_of_pools; pool; pool = pool->next)
    {
      /* Put it at the end of the relevant section.  */
      subseg_set (pool->section, pool->sub_section);
#ifdef OBJ_ELF
      arm_elf_change_section ();
#endif
      s_ltorg (0);
    }
}

/* Adjust the symbol table.  This marks Thumb symbols as distinct from
   normal ones.  */

void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!  */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);
	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char	    bind;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
		BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_TFUNC);
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }
#endif
}
/* MD interface: Initialization.  */

static void
set_constant_flonums (void)
{
  int i;

  for (i = 0; i < NUM_FLOAT_VALS; i++)
    if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
      abort ();
}

/* Auto-select Thumb mode if it's the only available instruction set for the
   given architecture.  */

static void
autoselect_thumb_from_cpu_variant (void)
{
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    opcode_select (16);
}

void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  if (   (arm_ops_hsh = hash_new ()) == NULL
      || (arm_cond_hsh = hash_new ()) == NULL
      || (arm_shift_hsh = hash_new ()) == NULL
      || (arm_psr_hsh = hash_new ()) == NULL
      || (arm_v7m_psr_hsh = hash_new ()) == NULL
      || (arm_reg_hsh = hash_new ()) == NULL
      || (arm_reloc_hsh = hash_new ()) == NULL
      || (arm_barrier_opt_hsh = hash_new ()) == NULL)
    as_fatal (_("virtual memory exhausted"));

  for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
    hash_insert (arm_ops_hsh, insns[i].template, (PTR) (insns + i));
  for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
    hash_insert (arm_cond_hsh, conds[i].template, (PTR) (conds + i));
  for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
    hash_insert (arm_shift_hsh, shift_names[i].name, (PTR) (shift_names + i));
  for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_psr_hsh, psrs[i].template, (PTR) (psrs + i));
  for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template, (PTR) (v7m_psrs + i));
  for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
    hash_insert (arm_reg_hsh, reg_names[i].name, (PTR) (reg_names + i));
  for (i = 0;
       i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
       i++)
    hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template,
		 (PTR) (barrier_opt_names + i));
  for (i = 0; i < sizeof (reloc_names) / sizeof (struct reloc_entry); i++)
    hash_insert (arm_reloc_hsh, reloc_names[i].name, (PTR) (reloc_names + i));

  set_constant_flonums ();

  /* Set the cpu variant based on the command-line options.  We prefer
     -mcpu= over -march= if both are set (as for GCC); and we prefer
     -mfpu= over any other way of setting the floating point unit.
     Use of legacy options with new options is faulted.  */
  if (legacy_cpu)
    {
      if (mcpu_cpu_opt || march_cpu_opt)
	as_bad (_("use of old and new-style options to set CPU type"));

      mcpu_cpu_opt = legacy_cpu;
    }
  else if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (legacy_fpu)
    {
      if (mfpu_opt)
	as_bad (_("use of old and new-style options to set FPU type"));

      mfpu_opt = legacy_fpu;
    }
  else if (!mfpu_opt)
    {
#if !(defined (TE_LINUX) || defined (TE_NetBSD) || defined (TE_VXWORKS))
      /* Some environments specify a default FPU.  If they don't, infer it
	 from the processor.  */
      if (mcpu_fpu_opt)
	mfpu_opt = mcpu_fpu_opt;
      else
	mfpu_opt = march_fpu_opt;
#else
      mfpu_opt = &fpu_default;
#endif
    }

  if (!mfpu_opt)
    {
      if (mcpu_cpu_opt != NULL)
	mfpu_opt = &fpu_default;
      else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
	mfpu_opt = &fpu_arch_vfp_v2;
      else
	mfpu_opt = &fpu_arch_fpa;
    }

#ifdef CPU_DEFAULT
  if (!mcpu_cpu_opt)
    {
      mcpu_cpu_opt = &cpu_default;
      selected_cpu = cpu_default;
    }
#else
  if (mcpu_cpu_opt)
    selected_cpu = *mcpu_cpu_opt;
  else
    mcpu_cpu_opt = &arm_arch_any;
#endif

  ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);

  autoselect_thumb_from_cpu_variant ();

  arm_arch_used = thumb_arch_used = arm_arch_none;

#if defined OBJ_COFF || defined OBJ_ELF
  {
    unsigned int flags = 0;

#if defined OBJ_ELF
    flags = meabi_flags;

    switch (meabi_flags)
      {
      case EF_ARM_EABI_UNKNOWN:
#endif
	/* Set the flags in the private structure.  */
	if (uses_apcs_26)      flags |= F_APCS26;
	if (support_interwork) flags |= F_INTERWORK;
	if (uses_apcs_float)   flags |= F_APCS_FLOAT;
	if (pic_code)	       flags |= F_PIC;
	if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
	  flags |= F_SOFT_FLOAT;

	switch (mfloat_abi_opt)
	  {
	  case ARM_FLOAT_ABI_SOFT:
	  case ARM_FLOAT_ABI_SOFTFP:
	    flags |= F_SOFT_FLOAT;
	    break;

	  case ARM_FLOAT_ABI_HARD:
	    if (flags & F_SOFT_FLOAT)
	      as_bad (_("hard-float conflicts with specified fpu"));
	    break;
	  }

	/* Using pure-endian doubles (even if soft-float).  */
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	  flags |= F_VFP_FLOAT;

#if defined OBJ_ELF
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
	  flags |= EF_ARM_MAVERICK_FLOAT;
	break;

      case EF_ARM_EABI_VER4:
      case EF_ARM_EABI_VER5:
	/* No additional flags to set.  */
	break;

      default:
	abort ();
      }
#endif
    bfd_set_private_flags (stdoutput, flags);

    /* We have run out of flags in the COFF header to encode the
       status of ATPCS support, so instead we create a dummy,
       empty, debug section called .arm.atpcs.  */
    if (atpcs)
      {
	asection * sec;

	sec = bfd_make_section (stdoutput, ".arm.atpcs");

	if (sec != NULL)
	  {
	    bfd_set_section_flags
	      (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
	    bfd_set_section_size (stdoutput, sec, 0);
	    bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
	  }
      }
  }
#endif

  /* Record the CPU type as well.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
    mach = bfd_mach_arm_iWMMXt2;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
    mach = bfd_mach_arm_iWMMXt;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
    mach = bfd_mach_arm_XScale;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
    mach = bfd_mach_arm_ep9312;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
    mach = bfd_mach_arm_5TE;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_5T;
      else
	mach = bfd_mach_arm_5;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_4T;
      else
	mach = bfd_mach_arm_4;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
    mach = bfd_mach_arm_3M;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
    mach = bfd_mach_arm_3;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
    mach = bfd_mach_arm_2a;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
    mach = bfd_mach_arm_2;
  else
    mach = bfd_mach_arm_unknown;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
/* Command line processing.  */

/* md_parse_option
      Invocation line includes a switch not recognized by the base assembler.
      See if it's a processor-specific option.

      This routine is somewhat complicated by the need for backwards
      compatibility (since older releases of gcc can't be changed).
      The new options try to make the interface as compatible as
      possible with GCC.

      New options (supported) are:

	      -mcpu=<cpu name>		 Assemble for selected processor
	      -march=<architecture name> Assemble for selected architecture
	      -mfpu=<fpu architecture>	 Assemble for selected FPU.
	      -EB/-mbig-endian		 Big-endian
	      -EL/-mlittle-endian	 Little-endian
	      -k			 Generate PIC code
	      -mthumb			 Start in Thumb mode
	      -mthumb-interwork		 Code supports ARM/Thumb interworking

      For now we will also provide support for:

	      -mapcs-32			 32-bit Program counter
	      -mapcs-26			 26-bit Program counter
	      -mapcs-float		 Floats passed in FP registers
	      -mapcs-reentrant		 Reentrant code

      (sometime these will probably be replaced with -mapcs=<list of options>
      and -matpcs=<list of options>)

      The remaining options are only supported for back-wards compatibility.
      Cpu variants, the arm part is optional:
	      -m[arm]1		      Currently not supported.
	      -m[arm]2, -m[arm]250    Arm 2 and Arm 250 processor
	      -m[arm]3		      Arm 3 processor
	      -m[arm]6[xx],	      Arm 6 processors
	      -m[arm]7[xx][t][[d]m]   Arm 7 processors
	      -m[arm]8[10]	      Arm 8 processors
	      -m[arm]9[20][tdmi]      Arm 9 processors
	      -mstrongarm[110[0]]     StrongARM processors
	      -mxscale		      XScale processors
	      -m[arm]v[2345[t[e]]]    Arm architectures
	      -mall		      All (except the ARM1)
      FP variants:
	      -mfpa10, -mfpa11	      FPA10 and 11 co-processor instructions
	      -mfpe-old		      (No float load/store multiples)
	      -mvfpxd		      VFP Single precision
	      -mno-fpu		      Disable all floating point instructions

      The following CPU names are recognized:
	      arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
	      arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
	      arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
	      arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
	      arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
	      arm10t arm10e, arm1020t, arm1020e, arm10200e,
	      strongarm, strongarm110, strongarm1100, strongarm1110, xscale.  */
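
/* Illustrative example, not part of the original source: a typical new-style
   invocation using the options documented above might look like

       as -mcpu=arm7tdmi -mthumb-interwork -EL -o test.o test.s

   while the equivalent legacy spelling -m7tdmi is still accepted but is
   reported as deprecated.  */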
const char * md_shortopts = "m:k";

#ifdef ARM_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);

struct arm_option_table
{
  char *option;		/* Option name to match.  */
  char *help;		/* Help information.  */
  int  *var;		/* Variable to change.  */
  int	value;		/* What to change it to.  */
  char *deprecated;	/* If non-null, print this message.  */
};

struct arm_option_table arm_opts[] =
{
  {"k",	     N_("generate PIC code"),	   &pic_code,	 1, NULL},
  {"mthumb", N_("assemble Thumb code"),	   &thumb_mode,	 1, NULL},
  {"mthumb-interwork", N_("support ARM/Thumb interworking"),
   &support_interwork, 1, NULL},
  {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
  {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
  {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
   1, NULL},
  {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
  {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},

  /* These are recognized by the assembler, but have no effect on code.  */
  {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
  {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
struct arm_legacy_option_table
{
  char *option;				/* Option name to match.  */
  const arm_feature_set	**var;		/* Variable to change.  */
  const arm_feature_set	value;		/* What to change it to.  */
  char *deprecated;			/* If non-null, print this message.  */
};

const struct arm_legacy_option_table arm_legacy_opts[] =
{
  /* DON'T add any new processors to this list -- we want the whole list
     to go away...  Add them to the processors table instead.  */
  {"marm1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"m1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"marm2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"m2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"marm250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"m250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"marm3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"m3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"marm6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"m6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"marm600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"m600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"marm610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"m610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"marm620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"m620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"marm7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"m7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"marm70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"m70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"marm700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"m700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"marm700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"m700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"marm710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"m710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"marm710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"m710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"marm720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"m720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"marm7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"m7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"marm7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"m7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"marm7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"m7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"marm7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"m7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"marm7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"m7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"marm7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"m7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"marm7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"m7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"m7500fe",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"marm7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"m710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"marm720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"m720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"marm740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"m740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"marm8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"m8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"marm810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"m810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"marm9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"m9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"marm9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"m9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"marm920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"m920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"marm940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"m940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=strongarm")},
  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm110")},
  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1100")},
  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1110")},
  {"mxscale",	 &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
  {"miwmmxt",	 &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
  {"mall",	 &legacy_cpu, ARM_ANY,	    N_("use -mcpu=all")},

  /* Architecture variants -- don't add any more to this list either.  */
  {"mv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"marmv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"mv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"marmv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"mv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"marmv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"mv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"marmv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"mv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"marmv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"mv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"marmv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"mv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"marmv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"mv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"marmv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"mv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
  {"marmv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},

  /* Floating point variants -- don't add any more to this list either.  */
  {"mfpe-old",	 &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
  {"mfpa10",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
  {"mfpa11",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
  {"mno-fpu",	 &legacy_fpu, ARM_ARCH_NONE,
   N_("use either -mfpu=softfpa or -mfpu=softvfp")},

  {NULL, NULL, ARM_ARCH_NONE, NULL}
};
struct arm_cpu_option_table
{
  char *name;
  const arm_feature_set	value;
  /* For some CPUs we assume an FPU unless the user explicitly sets
     -mfpu=...  */
  const arm_feature_set	default_fpu;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};

/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
static const struct arm_cpu_option_table arm_cpus[] =
{
  {"all",		ARM_ANY,	 FPU_ARCH_FPA,	  NULL},
  {"arm1",		ARM_ARCH_V1,	 FPU_ARCH_FPA,	  NULL},
  {"arm2",		ARM_ARCH_V2,	 FPU_ARCH_FPA,	  NULL},
  {"arm250",		ARM_ARCH_V2S,	 FPU_ARCH_FPA,	  NULL},
  {"arm3",		ARM_ARCH_V2S,	 FPU_ARCH_FPA,	  NULL},
  {"arm6",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm60",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm600",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm610",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm620",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7m",		ARM_ARCH_V3M,	 FPU_ARCH_FPA,	  NULL},
  {"arm7d",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7dm",		ARM_ARCH_V3M,	 FPU_ARCH_FPA,	  NULL},
  {"arm7di",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7dmi",		ARM_ARCH_V3M,	 FPU_ARCH_FPA,	  NULL},
  {"arm70",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm700",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm700i",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm710",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm710t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm720",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm720t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm740t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm710c",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7100",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7500",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7500fe",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm7tdmi",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm7tdmi-s",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm8",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"arm810",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"strongarm",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"strongarm1",	ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"strongarm110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"strongarm1100",	ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"strongarm1110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"arm9",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm920",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  "ARM920T"},
  {"arm920t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm922t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm940t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm9tdmi",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  /* For V5 or later processors we default to using VFP; but the user
     should really set the FPU type explicitly.  */
  {"arm9e-r0",		ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
  {"arm9e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm926ej",		ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"},
  {"arm926ejs",		ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"},
  {"arm926ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL},
  {"arm946e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
  {"arm946e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM946E-S"},
  {"arm946e-s",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm966e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
  {"arm966e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM966E-S"},
  {"arm966e-s",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm968e-s",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm10t",		ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL},
  {"arm10tdmi",		ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL},
  {"arm10e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm1020",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM1020E"},
  {"arm1020t",		ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL},
  {"arm1020e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm1022e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm1026ejs",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM1026EJ-S"},
  {"arm1026ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL},
  {"arm1136js",		ARM_ARCH_V6,	 FPU_NONE,	  "ARM1136J-S"},
  {"arm1136j-s",	ARM_ARCH_V6,	 FPU_NONE,	  NULL},
  {"arm1136jfs",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2, "ARM1136JF-S"},
  {"arm1136jf-s",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2, NULL},
  {"mpcore",		ARM_ARCH_V6K,	 FPU_ARCH_VFP_V2, NULL},
  {"mpcorenovfp",	ARM_ARCH_V6K,	 FPU_NONE,	  NULL},
  {"arm1156t2-s",	ARM_ARCH_V6T2,	 FPU_NONE,	  NULL},
  {"arm1156t2f-s",	ARM_ARCH_V6T2,	 FPU_ARCH_VFP_V2, NULL},
  {"arm1176jz-s",	ARM_ARCH_V6ZK,	 FPU_NONE,	  NULL},
  {"arm1176jzf-s",	ARM_ARCH_V6ZK,	 FPU_ARCH_VFP_V2, NULL},
  {"cortex-a8",		ARM_ARCH_V7A,	 ARM_FEATURE(0, FPU_VFP_V3
						     | FPU_NEON_EXT_V1),
							  NULL},
  {"cortex-r4",		ARM_ARCH_V7R,	 FPU_NONE,	  NULL},
  {"cortex-m3",		ARM_ARCH_V7M,	 FPU_NONE,	  NULL},
  /* ??? XSCALE is really an architecture.  */
  {"xscale",		ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
  /* ??? iwmmxt is not a processor.  */
  {"iwmmxt",		ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL},
  {"iwmmxt2",		ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL},
  {"i80200",		ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
  /* Maverick */
  {"ep9312",	ARM_FEATURE(ARM_AEXT_V4T, ARM_CEXT_MAVERICK), FPU_ARCH_MAVERICK, "ARM920T"},
  {NULL,		ARM_ARCH_NONE,	 ARM_ARCH_NONE,	  NULL}
};
struct arm_arch_option_table
{
  char *name;
  const arm_feature_set	value;
  const arm_feature_set	default_fpu;
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
static const struct arm_arch_option_table arm_archs[] =
{
  {"all",		ARM_ANY,	 FPU_ARCH_FPA},
  {"armv1",		ARM_ARCH_V1,	 FPU_ARCH_FPA},
  {"armv2",		ARM_ARCH_V2,	 FPU_ARCH_FPA},
  {"armv2a",		ARM_ARCH_V2S,	 FPU_ARCH_FPA},
  {"armv2s",		ARM_ARCH_V2S,	 FPU_ARCH_FPA},
  {"armv3",		ARM_ARCH_V3,	 FPU_ARCH_FPA},
  {"armv3m",		ARM_ARCH_V3M,	 FPU_ARCH_FPA},
  {"armv4",		ARM_ARCH_V4,	 FPU_ARCH_FPA},
  {"armv4xm",		ARM_ARCH_V4xM,	 FPU_ARCH_FPA},
  {"armv4t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA},
  {"armv4txm",		ARM_ARCH_V4TxM,	 FPU_ARCH_FPA},
  {"armv5",		ARM_ARCH_V5,	 FPU_ARCH_VFP},
  {"armv5t",		ARM_ARCH_V5T,	 FPU_ARCH_VFP},
  {"armv5txm",		ARM_ARCH_V5TxM,	 FPU_ARCH_VFP},
  {"armv5te",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP},
  {"armv5texp",		ARM_ARCH_V5TExP, FPU_ARCH_VFP},
  {"armv5tej",		ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP},
  {"armv6",		ARM_ARCH_V6,	 FPU_ARCH_VFP},
  {"armv6j",		ARM_ARCH_V6,	 FPU_ARCH_VFP},
  {"armv6k",		ARM_ARCH_V6K,	 FPU_ARCH_VFP},
  {"armv6z",		ARM_ARCH_V6Z,	 FPU_ARCH_VFP},
  {"armv6zk",		ARM_ARCH_V6ZK,	 FPU_ARCH_VFP},
  {"armv6t2",		ARM_ARCH_V6T2,	 FPU_ARCH_VFP},
  {"armv6kt2",		ARM_ARCH_V6KT2,	 FPU_ARCH_VFP},
  {"armv6zt2",		ARM_ARCH_V6ZT2,	 FPU_ARCH_VFP},
  {"armv6zkt2",		ARM_ARCH_V6ZKT2, FPU_ARCH_VFP},
  {"armv7",		ARM_ARCH_V7,	 FPU_ARCH_VFP},
  /* The official spelling of the ARMv7 profile variants is the dashed form.
     Accept the non-dashed form for compatibility with old toolchains.  */
  {"armv7a",		ARM_ARCH_V7A,	 FPU_ARCH_VFP},
  {"armv7r",		ARM_ARCH_V7R,	 FPU_ARCH_VFP},
  {"armv7m",		ARM_ARCH_V7M,	 FPU_ARCH_VFP},
  {"armv7-a",		ARM_ARCH_V7A,	 FPU_ARCH_VFP},
  {"armv7-r",		ARM_ARCH_V7R,	 FPU_ARCH_VFP},
  {"armv7-m",		ARM_ARCH_V7M,	 FPU_ARCH_VFP},
  {"xscale",		ARM_ARCH_XSCALE, FPU_ARCH_VFP},
  {"iwmmxt",		ARM_ARCH_IWMMXT, FPU_ARCH_VFP},
  {"iwmmxt2",		ARM_ARCH_IWMMXT2,FPU_ARCH_VFP},
  {NULL,		ARM_ARCH_NONE,	 ARM_ARCH_NONE}
};
/* ISA extensions in the co-processor space.  */
struct arm_option_cpu_value_table
{
  char *name;
  const arm_feature_set value;
};

static const struct arm_option_cpu_value_table arm_extensions[] =
{
  {"maverick",		ARM_FEATURE (0, ARM_CEXT_MAVERICK)},
  {"xscale",		ARM_FEATURE (0, ARM_CEXT_XSCALE)},
  {"iwmmxt",		ARM_FEATURE (0, ARM_CEXT_IWMMXT)},
  {"iwmmxt2",		ARM_FEATURE (0, ARM_CEXT_IWMMXT2)},
  {NULL,		ARM_ARCH_NONE}
};

/* This list should, at a minimum, contain all the fpu names
   recognized by GCC.  */
static const struct arm_option_cpu_value_table arm_fpus[] =
{
  {"softfpa",		FPU_NONE},
  {"fpe",		FPU_ARCH_FPE},
  {"fpe2",		FPU_ARCH_FPE},
  {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
  {"fpa",		FPU_ARCH_FPA},
  {"fpa10",		FPU_ARCH_FPA},
  {"fpa11",		FPU_ARCH_FPA},
  {"arm7500fe",		FPU_ARCH_FPA},
  {"softvfp",		FPU_ARCH_VFP},
  {"softvfp+vfp",	FPU_ARCH_VFP_V2},
  {"vfp",		FPU_ARCH_VFP_V2},
  {"vfp9",		FPU_ARCH_VFP_V2},
  {"vfp3",		FPU_ARCH_VFP_V3},
  {"vfp10",		FPU_ARCH_VFP_V2},
  {"vfp10-r0",		FPU_ARCH_VFP_V1},
  {"vfpxd",		FPU_ARCH_VFP_V1xD},
  {"arm1020t",		FPU_ARCH_VFP_V1},
  {"arm1020e",		FPU_ARCH_VFP_V2},
  {"arm1136jfs",	FPU_ARCH_VFP_V2},
  {"arm1136jf-s",	FPU_ARCH_VFP_V2},
  {"maverick",		FPU_ARCH_MAVERICK},
  {"neon",		FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {NULL,		ARM_ARCH_NONE}
};

struct arm_option_value_table
{
  char *name;
  long value;
};

static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard",	ARM_FLOAT_ABI_HARD},
  {"softfp",	ARM_FLOAT_ABI_SOFTFP},
  {"soft",	ARM_FLOAT_ABI_SOFT},
  {NULL,	0}
};

#ifdef OBJ_ELF
/* We only know how to output GNU and ver 4/5 (AAELF) formats.  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu",	EF_ARM_EABI_UNKNOWN},
  {"4",		EF_ARM_EABI_VER4},
  {"5",		EF_ARM_EABI_VER5},
  {NULL,	0}
};
#endif
struct arm_long_option_table
{
  char * option;		/* Substring to match.  */
  char * help;			/* Help information.  */
  int (* func) (char * subopt);	/* Function to decode sub-option.  */
  char * deprecated;		/* If non-null, print this message.  */
};

static int
arm_parse_extension (char * str, const arm_feature_set **opt_p)
{
  arm_feature_set *ext_set = xmalloc (sizeof (arm_feature_set));

  /* Copy the feature set, so that we can modify it.  */
  *ext_set = **opt_p;
  *opt_p = ext_set;

  while (str != NULL && *str != 0)
    {
      const struct arm_option_cpu_value_table * opt;
      char * ext;
      int optlen;

      if (*str != '+')
	{
	  as_bad (_("invalid architectural extension"));
	  return 0;
	}

      str++;
      ext = strchr (str, '+');

      if (ext != NULL)
	optlen = ext - str;
      else
	optlen = strlen (str);

      if (optlen == 0)
	{
	  as_bad (_("missing architectural extension"));
	  return 0;
	}

      for (opt = arm_extensions; opt->name != NULL; opt++)
	if (strncmp (opt->name, str, optlen) == 0)
	  {
	    ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
	    break;
	  }

      if (opt->name == NULL)
	{
	  as_bad (_("unknown architectural extension `%s'"), str);
	  return 0;
	}

      str = ext;
    }

  return 1;
}

static int
arm_parse_cpu (char * str)
{
  const struct arm_cpu_option_table * opt;
  char * ext = strchr (str, '+');
  int optlen;

  if (ext != NULL)
    optlen = ext - str;
  else
    optlen = strlen (str);

  if (optlen == 0)
    {
      as_bad (_("missing cpu name `%s'"), str);
      return 0;
    }

  for (opt = arm_cpus; opt->name != NULL; opt++)
    if (strncmp (opt->name, str, optlen) == 0)
      {
	mcpu_cpu_opt = &opt->value;
	mcpu_fpu_opt = &opt->default_fpu;
	if (opt->canonical_name)
	  strcpy(selected_cpu_name, opt->canonical_name);
	else
	  {
	    int i;
	    for (i = 0; i < optlen; i++)
	      selected_cpu_name[i] = TOUPPER (opt->name[i]);
	    selected_cpu_name[i] = 0;
	  }

	if (ext != NULL)
	  return arm_parse_extension (ext, &mcpu_cpu_opt);

	return 1;
      }

  as_bad (_("unknown cpu `%s'"), str);
  return 0;
}

static int
arm_parse_arch (char * str)
{
  const struct arm_arch_option_table *opt;
  char *ext = strchr (str, '+');
  int optlen;

  if (ext != NULL)
    optlen = ext - str;
  else
    optlen = strlen (str);

  if (optlen == 0)
    {
      as_bad (_("missing architecture name `%s'"), str);
      return 0;
    }

  for (opt = arm_archs; opt->name != NULL; opt++)
    if (streq (opt->name, str))
      {
	march_cpu_opt = &opt->value;
	march_fpu_opt = &opt->default_fpu;
	strcpy(selected_cpu_name, opt->name);

	if (ext != NULL)
	  return arm_parse_extension (ext, &march_cpu_opt);

	return 1;
      }

  as_bad (_("unknown architecture `%s'\n"), str);
  return 0;
}

static int
arm_parse_fpu (char * str)
{
  const struct arm_option_cpu_value_table * opt;

  for (opt = arm_fpus; opt->name != NULL; opt++)
    if (streq (opt->name, str))
      {
	mfpu_opt = &opt->value;
	return 1;
      }

  as_bad (_("unknown floating point format `%s'\n"), str);
  return 0;
}

static int
arm_parse_float_abi (char * str)
{
  const struct arm_option_value_table * opt;

  for (opt = arm_float_abis; opt->name != NULL; opt++)
    if (streq (opt->name, str))
      {
	mfloat_abi_opt = opt->value;
	return 1;
      }

  as_bad (_("unknown floating point abi `%s'\n"), str);
  return 0;
}

#ifdef OBJ_ELF
static int
arm_parse_eabi (char * str)
{
  const struct arm_option_value_table *opt;

  for (opt = arm_eabis; opt->name != NULL; opt++)
    if (streq (opt->name, str))
      {
	meabi_flags = opt->value;
	return 1;
      }
  as_bad (_("unknown EABI `%s'\n"), str);
  return 0;
}
#endif

struct arm_long_option_table arm_long_opts[] =
{
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   arm_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   arm_parse_arch, NULL},
  {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
   arm_parse_fpu, NULL},
  {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
   arm_parse_float_abi, NULL},
#ifdef OBJ_ELF
  {"meabi=", N_("<ver>\t  assemble for eabi version <ver>"),
   arm_parse_eabi, NULL},
#endif
  {NULL, NULL, 0, NULL}
};
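
/* Illustrative sketch, not part of the original source: how a combined CPU
   plus extension option is consumed.  "-mcpu=xscale+iwmmxt" reaches
   arm_parse_cpu as "xscale+iwmmxt"; the part before the first '+' is
   matched against arm_cpus, and the "+iwmmxt" tail is handed to
   arm_parse_extension, which merges ARM_CEXT_IWMMXT into a copy of the
   selected feature set.  The function name below is hypothetical and the
   block is disabled.  */
#if 0
static void
option_parsing_example (void)
{
  /* Selects the xscale entry and then folds in the iwmmxt extension.  */
  arm_parse_cpu ("xscale+iwmmxt");
}
#endif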
int
md_parse_option (int c, char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      for (opt = arm_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
#if WARN_DEPRECATED
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));
#endif

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
	{
	  if (c == fopt->option[0]
	      && ((arg == NULL && fopt->option[1] == 0)
		  || streq (arg, fopt->option + 1)))
	    {
#if WARN_DEPRECATED
	      /* If the option is deprecated, tell the user.  */
	      if (fopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(fopt->deprecated));
#endif

	      if (fopt->var != NULL)
		*fopt->var = &fopt->value;

	      return 1;
	    }
	}

      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
#if WARN_DEPRECATED
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));
#endif

	      /* Call the sub-option parser.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}

void
md_show_usage (FILE * fp)
{
  struct arm_option_table *opt;
  struct arm_long_option_table *lopt;

  fprintf (fp, _(" ARM-specific assembler options:\n"));

  for (opt = arm_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));

  for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
  -EB                     assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
  -EL                     assemble code for a little-endian cpu\n"));
#endif
}
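
/* Illustrative note, not part of the original source: a long option such as
   -mcpu=arm7tdmi arrives in md_parse_option with c == 'm' and
   arg == "cpu=arm7tdmi".  It matches the "mcpu=" entry of arm_long_opts,
   and lopt->func (arg + strlen ("mcpu=") - 1) passes just "arm7tdmi" on to
   arm_parse_cpu; the "- 1" accounts for the leading 'm' already consumed
   as the short-option character.  */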
#ifdef OBJ_ELF
typedef struct
{
  int val;
  arm_feature_set flags;
} cpu_arch_ver_table;

/* Mapping from CPU features to EABI CPU arch values.  Table must be sorted
   least features first.  */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
    {1, ARM_ARCH_V4},
    {2, ARM_ARCH_V4T},
    {3, ARM_ARCH_V5},
    {4, ARM_ARCH_V5TE},
    {5, ARM_ARCH_V5TEJ},
    {6, ARM_ARCH_V6},
    {7, ARM_ARCH_V6Z},
    {8, ARM_ARCH_V6K},
    {9, ARM_ARCH_V6T2},
    {10, ARM_ARCH_V7A},
    {10, ARM_ARCH_V7R},
    {10, ARM_ARCH_V7M},
    {0, ARM_ARCH_NONE}
};

/* Set the public EABI object attributes.  */
static void
aeabi_set_public_attributes (void)
{
  int arch;
  arm_feature_set flags;
  arm_feature_set tmp;
  const cpu_arch_ver_table *p;

  /* Choose the architecture based on the capabilities of the requested cpu
     (if any) and/or the instructions actually used.  */
  ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
  ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
  ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
  /* Allow the user to override the reported architecture.  */
  if (object_arch)
    {
      ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
      ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
    }

  tmp = flags;
  arch = 0;
  for (p = cpu_arch_ver; p->val; p++)
    {
      if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
	{
	  arch = p->val;
	  ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
	}
    }

  /* Tag_CPU_name.  */
  if (selected_cpu_name[0])
    {
      char *p;

      p = selected_cpu_name;
      if (strncmp(p, "armv", 4) == 0)
	{
	  int i;

	  p += 4;
	  for (i = 0; p[i]; i++)
	    p[i] = TOUPPER (p[i]);
	}
      bfd_elf_add_proc_attr_string (stdoutput, 5, p);
    }
  /* Tag_CPU_arch.  */
  bfd_elf_add_proc_attr_int (stdoutput, 6, arch);
  /* Tag_CPU_arch_profile.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
    bfd_elf_add_proc_attr_int (stdoutput, 7, 'A');
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
    bfd_elf_add_proc_attr_int (stdoutput, 7, 'R');
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m))
    bfd_elf_add_proc_attr_int (stdoutput, 7, 'M');
  /* Tag_ARM_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_full))
    bfd_elf_add_proc_attr_int (stdoutput, 8, 1);
  /* Tag_THUMB_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_full))
    bfd_elf_add_proc_attr_int (stdoutput, 9,
	ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2) ? 2 : 1);
  /* Tag_VFP_arch.  */
  if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v3)
      || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v3))
    bfd_elf_add_proc_attr_int (stdoutput, 10, 3);
  else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v2)
	   || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v2))
    bfd_elf_add_proc_attr_int (stdoutput, 10, 2);
  else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1xd)
	   || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1xd))
    bfd_elf_add_proc_attr_int (stdoutput, 10, 1);
  /* Tag_WMMX_arch.  */
  if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_cext_iwmmxt)
      || ARM_CPU_HAS_FEATURE (arm_arch_used, arm_cext_iwmmxt))
    bfd_elf_add_proc_attr_int (stdoutput, 11, 1);
  /* Tag_NEON_arch.  */
  if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_neon_ext_v1)
      || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_neon_ext_v1))
    bfd_elf_add_proc_attr_int (stdoutput, 12, 1);
}

/* Add the default contents for the .ARM.attributes section.  */
void
arm_md_end (void)
{
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
    return;

  aeabi_set_public_attributes ();
}
#endif /* OBJ_ELF */
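
/* Illustrative note, not part of the original source: for an object built
   with -mcpu=cortex-a8, an EABI version of 4 or later, and code that
   actually exercised ARM code, Thumb-2 code and the default VFPv3/Neon FPU,
   the attribute code above would record approximately

       Tag_CPU_name (5)           "CORTEX-A8"
       Tag_CPU_arch (6)           10  (v7)
       Tag_CPU_arch_profile (7)   'A'
       Tag_ARM_ISA_use (8)        1
       Tag_THUMB_ISA_use (9)      2   (Thumb-2)
       Tag_VFP_arch (10)          3   (VFPv3)
       Tag_NEON_arch (12)         1

   The exact set depends on which instruction sets were really used.  */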
/* Parse a .cpu directive.  */

static void
s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_cpu_option_table *opt;
  char *name;
  char saved_char;

  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE(*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  /* Skip the first "all" entry.  */
  for (opt = arm_cpus + 1; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	mcpu_cpu_opt = &opt->value;
	selected_cpu = opt->value;
	if (opt->canonical_name)
	  strcpy(selected_cpu_name, opt->canonical_name);
	else
	  {
	    int i;
	    for (i = 0; opt->name[i]; i++)
	      selected_cpu_name[i] = TOUPPER (opt->name[i]);
	    selected_cpu_name[i] = 0;
	  }
	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }
  as_bad (_("unknown cpu `%s'"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}

/* Parse a .arch directive.  */

static void
s_arm_arch (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_arch_option_table *opt;
  char saved_char;
  char *name;

  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE(*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  /* Skip the first "all" entry.  */
  for (opt = arm_archs + 1; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	mcpu_cpu_opt = &opt->value;
	selected_cpu = opt->value;
	strcpy(selected_cpu_name, opt->name);
	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }

  as_bad (_("unknown architecture `%s'\n"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}

/* Parse a .object_arch directive.  */

static void
s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_arch_option_table *opt;
  char saved_char;
  char *name;

  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE(*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  /* Skip the first "all" entry.  */
  for (opt = arm_archs + 1; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	object_arch = &opt->value;
	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }

  as_bad (_("unknown architecture `%s'\n"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}

/* Parse a .fpu directive.  */

static void
s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_option_cpu_value_table *opt;
  char saved_char;
  char *name;

  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE(*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  for (opt = arm_fpus; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	mfpu_opt = &opt->value;
	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }

  as_bad (_("unknown floating point format `%s'\n"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}

/* Copy symbol information.  */

void
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src