/* tc-arm.c -- Assemble for the ARM
   Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
   2004, 2005, 2006, 2007, 2008
   Free Software Foundation, Inc.
   Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
	Modified by David Taylor (dtaylor@armltd.co.uk)
	Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
	Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
	Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)

   This file is part of GAS, the GNU Assembler.

   GAS is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GAS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GAS; see the file COPYING.  If not, write to the Free
   Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
   02110-1301, USA.  */

#include "safe-ctype.h"
#include "opcode/arm.h"
#include "dw2gencfi.h"
#include "dwarf2dbg.h"

#define WARN_DEPRECATED 1

/* Must be at least the size of the largest unwind opcode (currently two).  */
#define ARM_OPCODE_CHUNK_SIZE 8
/* This structure holds the unwinding state.  */

static struct
{
  symbolS *       table_entry;
  symbolS *       personality_routine;
  int             personality_index;
  /* The segment containing the function.  */
  segT            saved_seg;
  subsegT         saved_subseg;
  /* Opcodes generated from this function.  */
  unsigned char * opcodes;
  /* The number of bytes pushed to the stack.  */
  offsetT         frame_size;
  /* We don't add stack adjustment opcodes immediately so that we can merge
     multiple adjustments.  We can also omit the final adjustment
     when using a frame pointer.  */
  offsetT         pending_offset;
  /* These two fields are set by both unwind_movsp and unwind_setfp.  They
     hold the reg+offset to use when restoring sp from a frame pointer.  */
  offsetT         fp_offset;
  int             fp_reg;
  /* Nonzero if an unwind_setfp directive has been seen.  */
  unsigned        fp_used:1;
  /* Nonzero if the last opcode restores sp from fp_reg.  */
  unsigned        sp_restored:1;
} unwind;

/* Bit N indicates that an R_ARM_NONE relocation has been output for
   __aeabi_unwind_cpp_prN already if set.  This enables dependencies to be
   emitted only once per section, to save unnecessary bloat.  */
static unsigned int marked_pr_dependency = 0;
/* Results from operand parsing worker functions.  */

typedef enum
{
  PARSE_OPERAND_SUCCESS,
  PARSE_OPERAND_FAIL,
  PARSE_OPERAND_FAIL_NO_BACKTRACK
} parse_operand_result;

enum arm_float_abi
{
  ARM_FLOAT_ABI_HARD,
  ARM_FLOAT_ABI_SOFTFP,
  ARM_FLOAT_ABI_SOFT
};
/* Types of processor to assemble for.  */
#ifndef CPU_DEFAULT
#if defined __XSCALE__
#define CPU_DEFAULT     ARM_ARCH_XSCALE
#else
#if defined __thumb__
#define CPU_DEFAULT     ARM_ARCH_V5T
#endif
#endif
#endif

#ifndef FPU_DEFAULT
# ifdef TE_LINUX
#  define FPU_DEFAULT FPU_ARCH_FPA
# elif defined (TE_NetBSD)
#  ifdef OBJ_ELF
#   define FPU_DEFAULT FPU_ARCH_VFP     /* Soft-float, but VFP order.  */
#  else
    /* Legacy a.out format.  */
#   define FPU_DEFAULT FPU_ARCH_FPA     /* Soft-float, but FPA order.  */
#  endif
# elif defined (TE_VXWORKS)
#  define FPU_DEFAULT FPU_ARCH_VFP      /* Soft-float, VFP order.  */
# else
   /* For backwards compatibility, default to FPA.  */
#  define FPU_DEFAULT FPU_ARCH_FPA
# endif
#endif /* ifndef FPU_DEFAULT */
#define streq(a, b)           (strcmp (a, b) == 0)

static arm_feature_set cpu_variant;
static arm_feature_set arm_arch_used;
static arm_feature_set thumb_arch_used;

/* Flags stored in private area of BFD structure.  */
static int uses_apcs_26      = FALSE;
static int atpcs             = FALSE;
static int support_interwork = FALSE;
static int uses_apcs_float   = FALSE;
static int pic_code          = FALSE;
static int fix_v4bx          = FALSE;
/* Variables that we set while parsing command-line options.  Once all
   options have been read we re-process these values to set the real
   assembly flags.  */
static const arm_feature_set *legacy_cpu = NULL;
static const arm_feature_set *legacy_fpu = NULL;

static const arm_feature_set *mcpu_cpu_opt = NULL;
static const arm_feature_set *mcpu_fpu_opt = NULL;
static const arm_feature_set *march_cpu_opt = NULL;
static const arm_feature_set *march_fpu_opt = NULL;
static const arm_feature_set *mfpu_opt = NULL;
static const arm_feature_set *object_arch = NULL;
/* Constants for known architecture features.  */
static const arm_feature_set fpu_default = FPU_DEFAULT;
static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;

static const arm_feature_set cpu_default = CPU_DEFAULT;
static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0);
static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V2, 0);
static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0);
static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0);
static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0);
static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0);
static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0);
static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0);
static const arm_feature_set arm_ext_v4t_5 =
  ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0);
static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0);
static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0);
static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0);
static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0);
static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0);
static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0);
static const arm_feature_set arm_ext_v6z = ARM_FEATURE (ARM_EXT_V6Z, 0);
static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0);
static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0);
static const arm_feature_set arm_ext_barrier = ARM_FEATURE (ARM_EXT_BARRIER, 0);
static const arm_feature_set arm_ext_msr = ARM_FEATURE (ARM_EXT_THUMB_MSR, 0);
static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0);
static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0);
static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0);
static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0);
static const arm_feature_set arm_ext_m =
  ARM_FEATURE (ARM_EXT_V6M | ARM_EXT_V7M, 0);

static const arm_feature_set arm_arch_any = ARM_ANY;
static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1);
static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
static const arm_feature_set arm_cext_iwmmxt2 =
  ARM_FEATURE (0, ARM_CEXT_IWMMXT2);
static const arm_feature_set arm_cext_iwmmxt =
  ARM_FEATURE (0, ARM_CEXT_IWMMXT);
static const arm_feature_set arm_cext_xscale =
  ARM_FEATURE (0, ARM_CEXT_XSCALE);
static const arm_feature_set arm_cext_maverick =
  ARM_FEATURE (0, ARM_CEXT_MAVERICK);
static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1);
static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2);
static const arm_feature_set fpu_vfp_ext_v1xd =
  ARM_FEATURE (0, FPU_VFP_EXT_V1xD);
static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1);
static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2);
static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3);
static const arm_feature_set fpu_vfp_ext_d32 =
  ARM_FEATURE (0, FPU_VFP_EXT_D32);
static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1);
static const arm_feature_set fpu_vfp_v3_or_neon_ext =
  ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
static int mfloat_abi_opt = -1;
/* Record user cpu selection for object attributes.  */
static arm_feature_set selected_cpu = ARM_ARCH_NONE;
/* Must be long enough to hold any of the names in arm_cpus.  */
static char selected_cpu_name[16];

#ifdef EABI_DEFAULT
static int meabi_flags = EABI_DEFAULT;
#else
static int meabi_flags = EF_ARM_EABI_UNKNOWN;
#endif

bfd_boolean
arm_is_eabi (void)
{
  return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
}
/* Pre-defined "_GLOBAL_OFFSET_TABLE_"  */
symbolS * GOT_symbol;

/* 0: assemble for ARM,
   1: assemble for Thumb,
   2: assemble for Thumb even though target CPU does not support thumb
      instructions.  */
static int thumb_mode = 0;
/* If unified_syntax is true, we are processing the new unified
   ARM/Thumb syntax.  Important differences from the old ARM mode:

     - Immediate operands do not require a # prefix.
     - Conditional affixes always appear at the end of the
       instruction.  (For backward compatibility, those instructions
       that formerly had them in the middle, continue to accept them
       there.)
     - The IT instruction may appear, and if it does is validated
       against subsequent conditional affixes.  It does not generate
       machine code.

   Important differences from the old Thumb mode:

     - Immediate operands do not require a # prefix.
     - Most of the V6T2 instructions are only available in unified mode.
     - The .N and .W suffixes are recognized and honored (it is an error
       if they cannot be honored).
     - All instructions set the flags if and only if they have an 's' affix.
     - Conditional affixes may be used.  They are validated against
       preceding IT instructions.  Unlike ARM mode, you cannot use a
       conditional affix except in the scope of an IT instruction.  */

static bfd_boolean unified_syntax = FALSE;
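
/* For example (illustrative): under ".syntax unified" both of the following
   are accepted, with the '#' optional and the condition written as a suffix:

       add   r0, r1, #1
       addeq r0, r1, 1    @ condition at the end, '#' may be omitted.  */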
struct neon_type_el
{
  enum neon_el_type type;
  unsigned size;
};

#define NEON_MAX_TYPE_ELS 4

struct neon_type
{
  struct neon_type_el el[NEON_MAX_TYPE_ELS];
  unsigned elems;
};

struct arm_it
{
  const char *  error;
  unsigned long instruction;
  /* "uncond_value" is set to the value in place of the conditional field in
     unconditional versions of the instruction, or -1 if nothing is
     appropriate.  */
  int           uncond_value;
  struct neon_type vectype;
  /* Set to the opcode if the instruction needs relaxation.
     Zero if the instruction is not relaxed.  */
  unsigned long relax;

  struct
  {
    bfd_reloc_code_real_type type;
    expressionS              exp;
    int                      pc_rel;
  } reloc;

  struct
  {
    unsigned reg;
    signed int imm;
    struct neon_type_el vectype;
    unsigned present    : 1;  /* Operand present.  */
    unsigned isreg      : 1;  /* Operand was a register.  */
    unsigned immisreg   : 1;  /* .imm field is a second register.  */
    unsigned isscalar   : 1;  /* Operand is a (Neon) scalar.  */
    unsigned immisalign : 1;  /* Immediate is an alignment specifier.  */
    unsigned immisfloat : 1;  /* Immediate was parsed as a float.  */
    /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
       instructions.  This allows us to disambiguate ARM <-> vector insns.  */
    unsigned regisimm   : 1;  /* 64-bit immediate, reg forms high 32 bits.  */
    unsigned isvec      : 1;  /* Is a single, double or quad VFP/Neon reg.  */
    unsigned isquad     : 1;  /* Operand is Neon quad-precision register.  */
    unsigned issingle   : 1;  /* Operand is VFP single-precision register.  */
    unsigned hasreloc   : 1;  /* Operand has relocation suffix.  */
    unsigned writeback  : 1;  /* Operand has trailing !  */
    unsigned preind     : 1;  /* Preindexed address.  */
    unsigned postind    : 1;  /* Postindexed address.  */
    unsigned negative   : 1;  /* Index register was negated.  */
    unsigned shifted    : 1;  /* Shift applied to operation.  */
    unsigned shift_kind : 3;  /* Shift operation (enum shift_kind).  */
  } operands[6];
};

static struct arm_it inst;

#define NUM_FLOAT_VALS 8

const char * fp_const[] =
{
  "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
};

/* Number of littlenums required to hold an extended precision number.  */
#define MAX_LITTLENUMS 6

LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
#define CP_T_X   0x00008000
#define CP_T_Y   0x00400000

#define CONDS_BIT        0x00100000
#define LOAD_BIT         0x00100000

#define DOUBLE_LOAD_FLAG 0x00000001

struct asm_cond
{
  const char *  template;
  unsigned long value;
};

#define COND_ALWAYS 0xE

struct asm_psr
{
  const char *template;
  unsigned long field;
};

struct asm_barrier_opt
{
  const char *template;
  unsigned long value;
};

/* The bit that distinguishes CPSR and SPSR.  */
#define SPSR_BIT   (1 << 22)

/* The individual PSR flag bits.  */
#define PSR_c   (1 << 16)
#define PSR_x   (1 << 17)
#define PSR_s   (1 << 18)
#define PSR_f   (1 << 19)
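
/* These field bits are combined to build the mask written by MSR; for
   example (illustrative), "msr CPSR_fc, r0" selects PSR_f | PSR_c, and the
   SPSR forms additionally set SPSR_BIT.  */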
struct reloc_entry
{
  char *                   name;
  bfd_reloc_code_real_type reloc;
};

enum vfp_reg_pos
{
  VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
  VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
};

enum vfp_ldstm_type
{
  VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
};

/* Bits for DEFINED field in neon_typed_alias.  */
#define NTA_HASTYPE  1
#define NTA_HASINDEX 2

struct neon_typed_alias
{
  unsigned char       defined;
  unsigned char       index;
  struct neon_type_el eltype;
};
/* ARM register categories.  This includes coprocessor numbers and various
   architecture extensions' registers.  */

/* Structure for a hash table entry for a register.
   If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
   information which states whether a vector type or index is specified (for a
   register alias created with .dn or .qn).  Otherwise NEON should be NULL.  */
struct reg_entry
{
  const char              *name;
  unsigned char            number;
  unsigned char            type;
  unsigned char            builtin;
  struct neon_typed_alias *neon;
};

/* Diagnostics used when we don't get a register of the expected type.  */
const char *const reg_expected_msgs[] =
{
  N_("ARM register expected"),
  N_("bad or missing co-processor number"),
  N_("co-processor register expected"),
  N_("FPA register expected"),
  N_("VFP single precision register expected"),
  N_("VFP/Neon double precision register expected"),
  N_("Neon quad precision register expected"),
  N_("VFP single or double precision register expected"),
  N_("Neon double or quad precision register expected"),
  N_("VFP single, double or Neon quad precision register expected"),
  N_("VFP system register expected"),
  N_("Maverick MVF register expected"),
  N_("Maverick MVD register expected"),
  N_("Maverick MVFX register expected"),
  N_("Maverick MVDX register expected"),
  N_("Maverick MVAX register expected"),
  N_("Maverick DSPSC register expected"),
  N_("iWMMXt data register expected"),
  N_("iWMMXt control register expected"),
  N_("iWMMXt scalar register expected"),
  N_("XScale accumulator register expected"),
};
/* Some well known registers that we refer to directly elsewhere.  */

/* ARM instructions take 4 bytes in the object file, Thumb instructions
   take 2.  */

struct asm_opcode
{
  /* Basic string to match.  */
  const char *template;

  /* Parameters to instruction.  */
  unsigned char operands[8];

  /* Conditional tag - see opcode_lookup.  */
  unsigned int tag : 4;

  /* Basic instruction code.  */
  unsigned int avalue : 28;

  /* Thumb-format instruction code.  */
  unsigned int tvalue;

  /* Which architecture variant provides this instruction.  */
  const arm_feature_set *avariant;
  const arm_feature_set *tvariant;

  /* Function to call to encode instruction in ARM format.  */
  void (* aencode) (void);

  /* Function to call to encode instruction in Thumb format.  */
  void (* tencode) (void);
};
/* Defines for various bits that we will want to toggle.  */
#define INST_IMMEDIATE  0x02000000
#define OFFSET_REG      0x02000000
#define HWOFFSET_IMM    0x00400000
#define SHIFT_BY_REG    0x00000010
#define PRE_INDEX       0x01000000
#define INDEX_UP        0x00800000
#define WRITE_BACK      0x00200000
#define LDM_TYPE_2_OR_3 0x00400000
#define CPSI_MMOD       0x00020000

#define LITERAL_MASK    0xf000f000
#define OPCODE_MASK     0xfe1fffff
#define V4_STR_BIT      0x00000020

#define T2_SUBS_PC_LR   0xf3de8f00

#define DATA_OP_SHIFT   21

#define T2_OPCODE_MASK  0xfe1fffff
#define T2_DATA_OP_SHIFT 21

/* Codes to distinguish the arithmetic instructions.  */
#define OPCODE_CMP      10
#define OPCODE_CMN      11
#define OPCODE_ORR      12
#define OPCODE_MOV      13
#define OPCODE_BIC      14
#define OPCODE_MVN      15

#define T2_OPCODE_AND   0
#define T2_OPCODE_BIC   1
#define T2_OPCODE_ORR   2
#define T2_OPCODE_ORN   3
#define T2_OPCODE_EOR   4
#define T2_OPCODE_ADD   8
#define T2_OPCODE_ADC   10
#define T2_OPCODE_SBC   11
#define T2_OPCODE_SUB   13
#define T2_OPCODE_RSB   14

#define T_OPCODE_MUL 0x4340
#define T_OPCODE_TST 0x4200
#define T_OPCODE_CMN 0x42c0
#define T_OPCODE_NEG 0x4240
#define T_OPCODE_MVN 0x43c0

#define T_OPCODE_ADD_R3 0x1800
#define T_OPCODE_SUB_R3 0x1a00
#define T_OPCODE_ADD_HI 0x4400
#define T_OPCODE_ADD_ST 0xb000
#define T_OPCODE_SUB_ST 0xb080
#define T_OPCODE_ADD_SP 0xa800
#define T_OPCODE_ADD_PC 0xa000
#define T_OPCODE_ADD_I8 0x3000
#define T_OPCODE_SUB_I8 0x3800
#define T_OPCODE_ADD_I3 0x1c00
#define T_OPCODE_SUB_I3 0x1e00

#define T_OPCODE_ASR_R  0x4100
#define T_OPCODE_LSL_R  0x4080
#define T_OPCODE_LSR_R  0x40c0
#define T_OPCODE_ROR_R  0x41c0
#define T_OPCODE_ASR_I  0x1000
#define T_OPCODE_LSL_I  0x0000
#define T_OPCODE_LSR_I  0x0800

#define T_OPCODE_MOV_I8 0x2000
#define T_OPCODE_CMP_I8 0x2800
#define T_OPCODE_CMP_LR 0x4280
#define T_OPCODE_MOV_HR 0x4600
#define T_OPCODE_CMP_HR 0x4500

#define T_OPCODE_LDR_PC 0x4800
#define T_OPCODE_LDR_SP 0x9800
#define T_OPCODE_STR_SP 0x9000
#define T_OPCODE_LDR_IW 0x6800
#define T_OPCODE_STR_IW 0x6000
#define T_OPCODE_LDR_IH 0x8800
#define T_OPCODE_STR_IH 0x8000
#define T_OPCODE_LDR_IB 0x7800
#define T_OPCODE_STR_IB 0x7000
#define T_OPCODE_LDR_RW 0x5800
#define T_OPCODE_STR_RW 0x5000
#define T_OPCODE_LDR_RH 0x5a00
#define T_OPCODE_STR_RH 0x5200
#define T_OPCODE_LDR_RB 0x5c00
#define T_OPCODE_STR_RB 0x5400

#define T_OPCODE_PUSH   0xb400
#define T_OPCODE_POP    0xbc00

#define T_OPCODE_BRANCH 0xe000

#define THUMB_SIZE      2       /* Size of thumb instruction.  */
#define THUMB_PP_PC_LR 0x0100
#define THUMB_LOAD_BIT 0x0800
#define THUMB2_LOAD_BIT 0x00100000
#define BAD_ARGS        _("bad arguments to instruction")
#define BAD_PC          _("r15 not allowed here")
#define BAD_COND        _("instruction cannot be conditional")
#define BAD_OVERLAP     _("registers may not be the same")
#define BAD_HIREG       _("lo register required")
#define BAD_THUMB32     _("instruction not supported in Thumb16 mode")
#define BAD_ADDR_MODE   _("instruction does not accept this addressing mode")
#define BAD_BRANCH      _("branch must be last instruction in IT block")
#define BAD_NOT_IT      _("instruction not allowed in IT block")
#define BAD_FPU         _("selected FPU does not support instruction")
static struct hash_control * arm_ops_hsh;
static struct hash_control * arm_cond_hsh;
static struct hash_control * arm_shift_hsh;
static struct hash_control * arm_psr_hsh;
static struct hash_control * arm_v7m_psr_hsh;
static struct hash_control * arm_reg_hsh;
static struct hash_control * arm_reloc_hsh;
static struct hash_control * arm_barrier_opt_hsh;

/* Stuff needed to resolve the label ambiguity.  */

symbolS *  last_label_seen;
static int label_is_thumb_function_name = FALSE;
/* Literal pool structure.  Held on a per-section
   and per-sub-section basis.  */

#define MAX_LITERAL_POOL_SIZE 1024
typedef struct literal_pool
{
  expressionS    literals [MAX_LITERAL_POOL_SIZE];
  unsigned int   next_free_entry;
  unsigned int   id;
  symbolS *      symbol;
  segT           section;
  subsegT        sub_section;
  struct literal_pool * next;
} literal_pool;

/* Pointer to a linked list of literal pools.  */
literal_pool * list_of_pools = NULL;

/* State variables for IT block handling.  */
static bfd_boolean current_it_mask = 0;
static int current_cc;
/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.  */
const char comment_chars[] = "@";

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.  */
/* Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.  */
/* Also note that comments like this one will always work.  */
const char line_comment_chars[] = "#";

const char line_separator_chars[] = ";";

/* Chars that can be used to separate mant
   from exp in floating point numbers.  */
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant.  */
const char FLT_CHARS[] = "rRsSfFdDxXeEpP";

/* Prefix characters that indicate the start of an immediate
   value.  */
#define is_immediate_prefix(C)  ((C) == '#' || (C) == '$')

/* Separator character handling.  */

#define skip_whitespace(str)  do { if (*(str) == ' ') ++(str); } while (0)
static int
skip_past_char (char ** str, char c)
{
  if (**str == c)
    {
      (*str)++;
      return SUCCESS;
    }
  else
    return FAIL;
}
#define skip_past_comma(str) skip_past_char (str, ',')
/* Arithmetic expressions (possibly involving symbols).  */

/* Return TRUE if anything in the expression is a bignum.  */

static int
walk_no_bignums (symbolS * sp)
{
  if (symbol_get_value_expression (sp)->X_op == O_big)
    return 1;

  if (symbol_get_value_expression (sp)->X_add_symbol)
    {
      return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
              || (symbol_get_value_expression (sp)->X_op_symbol
                  && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
    }

  return 0;
}

static int in_my_get_expression = 0;

/* Third argument to my_get_expression.  */
#define GE_NO_PREFIX 0
#define GE_IMM_PREFIX 1
#define GE_OPT_PREFIX 2
/* This is a bit of a hack.  Use an optional prefix, and also allow big (64-bit)
   immediates, as can be used in Neon VMVN and VMOV immediate instructions.  */
#define GE_OPT_PREFIX_BIG 3
static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  char * save_in;
  segT   seg;

  /* In unified syntax, all prefixes are optional.  */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
                  : GE_OPT_PREFIX;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
        {
          inst.error = _("immediate expression requires a # prefix");
          return FAIL;
        }
      (*str)++;
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
        (*str)++;
      break;
    default: abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = 1;
  seg = expression (ep);
  in_my_get_expression = 0;

  if (ep->X_op == O_illegal)
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
        inst.error = _("bad expression");
      return 1;
    }

  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      inst.error = _("bad segment");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.  Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
          || (ep->X_add_symbol
              && (walk_no_bignums (ep->X_add_symbol)
                  || (ep->X_op_symbol
                      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return 0;
}
/* Turn a string in input_line_pointer into a floating point constant
   of type TYPE, and store the appropriate bytes in *LITP.  The number
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   returned, or NULL on OK.

   Note that fp constants aren't represented in the normal way on the ARM.
   In big endian mode, things are as expected.  However, in little endian
   mode fp constants are big-endian word-wise, and little-endian byte-wise
   within the words.  For example, (double) 1.1 in big endian mode is
   the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
   the byte sequence 99 99 f1 3f 9a 99 99 99.

   ??? The format of 12 byte floats is uncertain according to gcc's arm.h.  */
char *
md_atof (int type, char * litP, int * sizeP)
{
  int prec;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *t;
  int i;

  switch (type)
    {
    case 'f': case 'F': case 's': case 'S':
      prec = 2;
      break;

    case 'd': case 'D': case 'r': case 'R':
      prec = 4;
      break;

    case 'x': case 'X': case 'p': case 'P':
      prec = 5;
      break;

    default:
      *sizeP = 0;
      return _("Unrecognized or unsupported floating point constant");
    }

  t = atof_ieee (input_line_pointer, type, words);
  if (t)
    input_line_pointer = t;
  *sizeP = prec * sizeof (LITTLENUM_TYPE);

  if (target_big_endian)
    {
      for (i = 0; i < prec; i++)
        {
          md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
          litP += sizeof (LITTLENUM_TYPE);
        }
    }
  else
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
        for (i = prec - 1; i >= 0; i--)
          {
            md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
            litP += sizeof (LITTLENUM_TYPE);
          }
      else
        /* For a 4 byte float the order of elements in `words' is 1 0.
           For an 8 byte float the order is 1 0 3 2.  */
        for (i = 0; i < prec; i += 2)
          {
            md_number_to_chars (litP, (valueT) words[i + 1],
                                sizeof (LITTLENUM_TYPE));
            md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
                                (valueT) words[i], sizeof (LITTLENUM_TYPE));
            litP += 2 * sizeof (LITTLENUM_TYPE);
          }
    }

  return NULL;
}
/* We handle all bad expressions here, so that we can report the faulty
   instruction in the error message.  */
void
md_operand (expressionS * expr)
{
  if (in_my_get_expression)
    expr->X_op = O_illegal;
}
/* Immediate values.  */

/* Generic immediate-value read function for use in directives.
   Accepts anything that 'expression' can fold to a constant.
   *val receives the number.  */

static int
immediate_for_directive (int *val)
{
  expressionS exp;

  exp.X_op = O_illegal;

  if (is_immediate_prefix (*input_line_pointer))
    {
      input_line_pointer++;
      expression (&exp);
    }

  if (exp.X_op != O_constant)
    {
      as_bad (_("expected #constant"));
      ignore_rest_of_line ();
      return FAIL;
    }
  *val = exp.X_add_number;
  return SUCCESS;
}
/* Register parsing.  */

/* Generic register parser.  CCP points to what should be the
   beginning of a register name.  If it is indeed a valid register
   name, advance CCP over it and return the reg_entry structure;
   otherwise return NULL.  Does not issue diagnostics.  */

static struct reg_entry *
arm_reg_parse_multi (char **ccp)
{
  char *start = *ccp;
  char *p;
  struct reg_entry *reg;

#ifdef REGISTER_PREFIX
  if (*start != REGISTER_PREFIX)
    return NULL;
  start++;
#endif
#ifdef OPTIONAL_REGISTER_PREFIX
  if (*start == OPTIONAL_REGISTER_PREFIX)
    start++;
#endif

  p = start;
  if (!ISALPHA (*p) || !is_name_beginner (*p))
    return NULL;

  do
    p++;
  while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');

  reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);

  if (!reg)
    return NULL;

  *ccp = p;
  return reg;
}
static int
arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
                    enum arm_reg_type type)
{
  /* Alternative syntaxes are accepted for a few register classes.  */
  switch (type)
    {
    case REG_TYPE_MVF:
    case REG_TYPE_MVD:
    case REG_TYPE_MVFX:
    case REG_TYPE_MVDX:
      /* Generic coprocessor register names are allowed for these.  */
      if (reg && reg->type == REG_TYPE_CN)
        return reg->number;
      break;

    case REG_TYPE_CP:
      /* For backward compatibility, a bare number is valid here.  */
      {
        unsigned long processor = strtoul (start, ccp, 10);
        if (*ccp != start && processor <= 15)
          return processor;
      }
      break;

    case REG_TYPE_MMXWC:
      /* WC includes WCG.  ??? I'm not sure this is true for all
         instructions that take WC registers.  */
      if (reg && reg->type == REG_TYPE_MMXWCG)
        return reg->number;
      break;

    default:
      break;
    }

  return FAIL;
}
/* As arm_reg_parse_multi, but the register must be of type TYPE, and the
   return value is the register number or FAIL.  */

static int
arm_reg_parse (char **ccp, enum arm_reg_type type)
{
  char *start = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (ccp);
  int ret;

  /* Do not allow a scalar (reg+index) to parse as a register.  */
  if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
    return FAIL;

  if (reg && reg->type == type)
    return reg->number;

  if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
    return ret;

  *ccp = start;

  return FAIL;
}
/* Parse a Neon type specifier.  *STR should point at the leading '.'
   character.  Does no verification at this stage that the type fits the
   opcode properly; several different forms (see the illustrative examples
   below) can all be legally parsed by this function.

   Fills in neon_type struct pointer with parsed information, and updates STR
   to point after the parsed type specifier.  Returns SUCCESS if this was a legal
   type, FAIL if not.  */
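
/* For example (illustrative): ".i8", ".f32" and ".s32.u16" are all accepted
   by parse_neon_type below; a bare ".f" is treated as ".f32", and element
   sizes are restricted to 8, 16, 32 or 64 bits.  */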
static int
parse_neon_type (struct neon_type *type, char **str)
{
  char *ptr = *str;

  if (type)
    type->elems = 0;

  while (type->elems < NEON_MAX_TYPE_ELS)
    {
      enum neon_el_type thistype = NT_untyped;
      unsigned thissize = -1u;

      if (*ptr != '.')
        break;

      ptr++;

      /* Just a size without an explicit type.  */
      if (ISDIGIT (*ptr))
        goto parsesize;

      switch (TOLOWER (*ptr))
        {
        case 'i': thistype = NT_integer; break;
        case 'f': thistype = NT_float; break;
        case 'p': thistype = NT_poly; break;
        case 's': thistype = NT_signed; break;
        case 'u': thistype = NT_unsigned; break;
        case 'd':
          thistype = NT_float;
          thissize = 64;
          ptr++;
          goto done;
        default:
          as_bad (_("unexpected character `%c' in type specifier"), *ptr);
          return FAIL;
        }

      ptr++;

      /* .f is an abbreviation for .f32.  */
      if (thistype == NT_float && !ISDIGIT (*ptr))
        thissize = 32;
      else
        {
        parsesize:
          thissize = strtoul (ptr, &ptr, 10);

          if (thissize != 8 && thissize != 16 && thissize != 32
              && thissize != 64)
            {
              as_bad (_("bad size %d in type specifier"), thissize);
              return FAIL;
            }
        }

    done:
      if (type)
        {
          type->el[type->elems].type = thistype;
          type->el[type->elems].size = thissize;
          type->elems++;
        }
    }

  /* Empty/missing type is not a successful parse.  */
  if (type->elems == 0)
    return FAIL;

  *str = ptr;

  return SUCCESS;
}
/* Errors may be set multiple times during parsing or bit encoding
   (particularly in the Neon bits), but usually the earliest error which is set
   will be the most meaningful.  Avoid overwriting it with later (cascading)
   errors by calling this function.  */

static void
first_error (const char *err)
{
  if (!inst.error)
    inst.error = err;
}
/* Parse a single type, e.g. ".s32", leading period included.  */

static int
parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
{
  char *str = *ccp;
  struct neon_type optype;

  if (*str != '.')
    return FAIL;

  if (parse_neon_type (&optype, &str) == SUCCESS)
    {
      if (optype.elems == 1)
        *vectype = optype.el[0];
      else
        {
          first_error (_("only one type should be specified for operand"));
          return FAIL;
        }
    }
  else
    {
      first_error (_("vector type expected"));
      return FAIL;
    }

  *ccp = str;

  return SUCCESS;
}
/* Special meanings for indices (which have a range of 0-7), which will fit
   into a 4-bit integer.  */

#define NEON_ALL_LANES          15
#define NEON_INTERLEAVE_LANES   14
/* Parse either a register or a scalar, with an optional type.  Return the
   register number, and optionally fill in the actual type of the register
   when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
   type/index information in *TYPEINFO.  */

static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
                           enum arm_reg_type *rtype,
                           struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register.  Note these are mutually
     exclusive with the Neon syntax extensions.  */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      if (altreg != FAIL)
        *ccp = str;
      if (typeinfo)
        *typeinfo = atype;
      return altreg;
    }

  /* Undo polymorphism when a set of register types may be accepted.  */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
          && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
          && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
              || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_MMXWC
          && (reg->type == REG_TYPE_MMXWCG)))
    type = reg->type;

  if (type != reg->type)
    return FAIL;

  if (reg->neon)
    atype = *reg->neon;

  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
        {
          first_error (_("can't redefine type for operand"));
          return FAIL;
        }
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  if (skip_past_char (&str, '[') == SUCCESS)
    {
      if (type != REG_TYPE_VFD)
        {
          first_error (_("only D registers may be indexed"));
          return FAIL;
        }

      if ((atype.defined & NTA_HASINDEX) != 0)
        {
          first_error (_("can't change index for operand"));
          return FAIL;
        }

      atype.defined |= NTA_HASINDEX;

      if (skip_past_char (&str, ']') == SUCCESS)
        atype.index = NEON_ALL_LANES;
      else
        {
          expressionS exp;

          my_get_expression (&exp, &str, GE_NO_PREFIX);

          if (exp.X_op != O_constant)
            {
              first_error (_("constant expression required"));
              return FAIL;
            }

          if (skip_past_char (&str, ']') == FAIL)
            return FAIL;

          atype.index = exp.X_add_number;
        }
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
/* Like arm_reg_parse, but also allow the following extra features:
    - If RTYPE is non-zero, return the (possibly restricted) type of the
      register (e.g. Neon double or quad reg when either has been requested).
    - If this is a Neon vector type with additional type information, fill
      in the struct pointed to by VECTYPE (if non-NULL).
   This function will fault on encountering a scalar.  */

static int
arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
                     enum arm_reg_type *rtype, struct neon_type_el *vectype)
{
  struct neon_typed_alias atype;
  char *str = *ccp;
  int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);

  if (reg == FAIL)
    return FAIL;

  /* Do not allow a scalar (reg+index) to parse as a register.  */
  if ((atype.defined & NTA_HASINDEX) != 0)
    {
      first_error (_("register operand expected, but got scalar"));
      return FAIL;
    }

  if (vectype)
    *vectype = atype.eltype;

  *ccp = str;

  return reg;
}
#define NEON_SCALAR_REG(X)      ((X) >> 4)
#define NEON_SCALAR_INDEX(X)    ((X) & 15)
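
/* For example (illustrative): the scalar d5[2] is encoded as (5 << 4) | 2,
   so NEON_SCALAR_REG yields 5 and NEON_SCALAR_INDEX yields 2.  */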
/* Parse a Neon scalar.  Most of the time when we're parsing a scalar, we don't
   have enough information to be able to do a good job bounds-checking.  So, we
   just do easy checks here, and do further checks later.  */

static int
parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
{
  int reg;
  char *str = *ccp;
  struct neon_typed_alias atype;

  reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);

  if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
    return FAIL;

  if (atype.index == NEON_ALL_LANES)
    {
      first_error (_("scalar must have an index"));
      return FAIL;
    }
  else if (atype.index >= 64 / elsize)
    {
      first_error (_("scalar index out of range"));
      return FAIL;
    }

  if (type)
    *type = atype.eltype;

  *ccp = str;

  return reg * 16 + atype.index;
}
1426 parse_reg_list (char ** strp
)
1428 char * str
= * strp
;
1432 /* We come back here if we get ranges concatenated by '+' or '|'. */
1447 if ((reg
= arm_reg_parse (&str
, REG_TYPE_RN
)) == FAIL
)
1449 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
1459 first_error (_("bad range in register list"));
1463 for (i
= cur_reg
+ 1; i
< reg
; i
++)
1465 if (range
& (1 << i
))
1467 (_("Warning: duplicated register (r%d) in register list"),
1475 if (range
& (1 << reg
))
1476 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1478 else if (reg
<= cur_reg
)
1479 as_tsktsk (_("Warning: register range not in ascending order"));
1484 while (skip_past_comma (&str
) != FAIL
1485 || (in_range
= 1, *str
++ == '-'));
1490 first_error (_("missing `}'"));
1498 if (my_get_expression (&expr
, &str
, GE_NO_PREFIX
))
1501 if (expr
.X_op
== O_constant
)
1503 if (expr
.X_add_number
1504 != (expr
.X_add_number
& 0x0000ffff))
1506 inst
.error
= _("invalid register mask");
1510 if ((range
& expr
.X_add_number
) != 0)
1512 int regno
= range
& expr
.X_add_number
;
1515 regno
= (1 << regno
) - 1;
1517 (_("Warning: duplicated register (r%d) in register list"),
1521 range
|= expr
.X_add_number
;
1525 if (inst
.reloc
.type
!= 0)
1527 inst
.error
= _("expression too complex");
1531 memcpy (&inst
.reloc
.exp
, &expr
, sizeof (expressionS
));
1532 inst
.reloc
.type
= BFD_RELOC_ARM_MULTI
;
1533 inst
.reloc
.pc_rel
= 0;
1537 if (*str
== '|' || *str
== '+')
1543 while (another_range
);
/* Types of registers in a list.  */

enum reg_list_els
{
  REGLIST_VFP_S,
  REGLIST_VFP_D,
  REGLIST_NEON_D
};

/* Parse a VFP register list.  If the string is invalid return FAIL.
   Otherwise return the number of registers, and set PBASE to the first
   register.  Parses registers of type ETYPE.
   If REGLIST_NEON_D is used, several syntax enhancements are enabled:
     - Q registers can be used to specify pairs of D registers
     - { } can be omitted from around a singleton register list
         FIXME: This is not implemented, as it would require backtracking in
         some cases.
         This could be done (the meaning isn't really ambiguous), but doesn't
         fit in well with the current parsing framework.
     - 32 D registers may be used (also true for VFPv3).
   FIXME: Types are ignored in these register lists, which is probably a
   bug.  */
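
/* For example (illustrative): "{d0-d3}" parses as four registers with *PBASE
   set to 0, and "{s4, s5, s6}" as three registers with *PBASE set to 4.  */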
static int
parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
{
  char *str = *ccp;
  int base_reg;
  int new_base;
  enum arm_reg_type regtype = 0;
  int max_regs = 0;
  int count = 0;
  int warned = 0;
  unsigned long mask = 0;
  int i;

  if (*str != '{')
    {
      inst.error = _("expecting {");
      return FAIL;
    }

  str++;

  switch (etype)
    {
    case REGLIST_VFP_S:
      regtype = REG_TYPE_VFS;
      max_regs = 32;
      break;

    case REGLIST_VFP_D:
      regtype = REG_TYPE_VFD;
      break;

    case REGLIST_NEON_D:
      regtype = REG_TYPE_NDQ;
      break;
    }

  if (etype != REGLIST_VFP_S)
    {
      /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
        {
          max_regs = 32;
          if (thumb_mode)
            ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
                                    fpu_vfp_ext_d32);
          else
            ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
                                    fpu_vfp_ext_d32);
        }
      else
        max_regs = 16;
    }

  base_reg = max_regs;

  do
    {
      int setmask = 1, addregs = 1;

      new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);

      if (new_base == FAIL)
        {
          first_error (_(reg_expected_msgs[regtype]));
          return FAIL;
        }

      if (new_base >= max_regs)
        {
          first_error (_("register out of range in list"));
          return FAIL;
        }

      /* Note: a value of 2 * n is returned for the register Q<n>.  */
      if (regtype == REG_TYPE_NQ)
        {
          setmask = 3;
          addregs = 2;
        }

      if (new_base < base_reg)
        base_reg = new_base;

      if (mask & (setmask << new_base))
        {
          first_error (_("invalid register list"));
          return FAIL;
        }

      if ((mask >> new_base) != 0 && ! warned)
        {
          as_tsktsk (_("register list not in ascending order"));
          warned = 1;
        }

      mask |= setmask << new_base;
      count += addregs;

      if (*str == '-') /* We have the start of a range expression */
        {
          int high_range;

          str++;

          if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
              == FAIL)
            {
              inst.error = gettext (reg_expected_msgs[regtype]);
              return FAIL;
            }

          if (high_range >= max_regs)
            {
              first_error (_("register out of range in list"));
              return FAIL;
            }

          if (regtype == REG_TYPE_NQ)
            high_range = high_range + 1;

          if (high_range <= new_base)
            {
              inst.error = _("register range not in ascending order");
              return FAIL;
            }

          for (new_base += addregs; new_base <= high_range; new_base += addregs)
            {
              if (mask & (setmask << new_base))
                {
                  inst.error = _("invalid register list");
                  return FAIL;
                }

              mask |= setmask << new_base;
              count += addregs;
            }
        }
    }
  while (skip_past_comma (&str) != FAIL);

  str++;

  /* Sanity check -- should have raised a parse error above.  */
  if (count == 0 || count > max_regs)
    return FAIL;

  *pbase = base_reg;

  /* Final test -- the registers must be consecutive.  */
  mask >>= base_reg;
  for (i = 0; i < count; i++)
    {
      if ((mask & (1u << i)) == 0)
        {
          inst.error = _("non-contiguous register range");
          return FAIL;
        }
    }

  *ccp = str;

  return count;
}
1742 neon_alias_types_same (struct neon_typed_alias
*a
, struct neon_typed_alias
*b
)
1750 if (a
->defined
!= b
->defined
)
1753 if ((a
->defined
& NTA_HASTYPE
) != 0
1754 && (a
->eltype
.type
!= b
->eltype
.type
1755 || a
->eltype
.size
!= b
->eltype
.size
))
1758 if ((a
->defined
& NTA_HASINDEX
) != 0
1759 && (a
->index
!= b
->index
))
/* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
   The base register is put in *PBASE.
   The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
   the return value.
   The register stride (minus one) is put in bit 4 of the return value.
   Bits [6:5] encode the list length (minus one).
   The type of the list elements is put in *ELTYPE, if non-NULL.  */

#define NEON_LANE(X)            ((X) & 0xf)
#define NEON_REG_STRIDE(X)      ((((X) >> 4) & 1) + 1)
#define NEON_REGLIST_LENGTH(X)  ((((X) >> 5) & 3) + 1)
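
/* For example (illustrative): "{d0[1], d2[1]}" yields lane 1, register
   stride 2 and list length 2, i.e. NEON_LANE == 1, NEON_REG_STRIDE == 2 and
   NEON_REGLIST_LENGTH == 2.  */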
static int
parse_neon_el_struct_list (char **str, unsigned *pbase,
                           struct neon_type_el *eltype)
{
  char *ptr = *str;
  int base_reg = -1;
  int reg_incr = -1;
  int count = 0;
  int lane = -1;
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  const char *const incr_error = "register stride must be 1 or 2";
  const char *const type_error = "mismatched element/structure types in list";
  struct neon_typed_alias firsttype;

  if (skip_past_char (&ptr, '{') == SUCCESS)
    leading_brace = 1;

  do
    {
      struct neon_typed_alias atype;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

      if (getreg == FAIL)
        {
          first_error (_(reg_expected_msgs[rtype]));
          return FAIL;
        }

      if (base_reg == -1)
        {
          base_reg = getreg;
          if (rtype == REG_TYPE_NQ)
            reg_incr = 1;
          firsttype = atype;
        }
      else if (reg_incr == -1)
        {
          reg_incr = getreg - base_reg;
          if (reg_incr < 1 || reg_incr > 2)
            {
              first_error (_(incr_error));
              return FAIL;
            }
        }
      else if (getreg != base_reg + reg_incr * count)
        {
          first_error (_(incr_error));
          return FAIL;
        }

      if (!neon_alias_types_same (&atype, &firsttype))
        {
          first_error (_(type_error));
          return FAIL;
        }

      /* Handle Dn-Dm or Qn-Qm syntax.  Can only be used with non-indexed list
         modes.  */
      if (ptr[0] == '-')
        {
          struct neon_typed_alias htype;
          int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;

          if (lane == -1)
            lane = NEON_INTERLEAVE_LANES;
          else if (lane != NEON_INTERLEAVE_LANES)
            {
              first_error (_(type_error));
              return FAIL;
            }
          if (reg_incr == -1)
            reg_incr = 1;
          else if (reg_incr != 1)
            {
              first_error (_("don't use Rn-Rm syntax with non-unit stride"));
              return FAIL;
            }
          ptr++;
          hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
          if (hireg == FAIL)
            {
              first_error (_(reg_expected_msgs[rtype]));
              return FAIL;
            }
          if (!neon_alias_types_same (&htype, &firsttype))
            {
              first_error (_(type_error));
              return FAIL;
            }
          count += hireg + dregs - getreg;
          continue;
        }

      /* If we're using Q registers, we can't use [] or [n] syntax.  */
      if (rtype == REG_TYPE_NQ)
        {
          count += 2;
          continue;
        }

      if ((atype.defined & NTA_HASINDEX) != 0)
        {
          if (lane == -1)
            lane = atype.index;
          else if (lane != atype.index)
            {
              first_error (_(type_error));
              return FAIL;
            }
        }
      else if (lane == -1)
        lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
        {
          first_error (_(type_error));
          return FAIL;
        }

      count++;
    }
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x].  We must be interleaving structures.  */
  if (lane == -1)
    lane = NEON_INTERLEAVE_LANES;

  if (lane == -1 || base_reg == -1 || count < 1 || count > 4
      || (count > 1 && reg_incr == -1))
    {
      first_error (_("error parsing element/structure list"));
      return FAIL;
    }

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
    {
      first_error (_("expected }"));
      return FAIL;
    }

  if (reg_incr == -1)
    reg_incr = 1;

  if (eltype)
    *eltype = firsttype.eltype;

  *pbase = base_reg;
  *str = ptr;

  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
}
1933 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
1934 arm_reloc_hsh contains no entries, so this function can only
1935 succeed if there is no () after the word. Returns -1 on error,
1936 BFD_RELOC_UNUSED if there wasn't any suffix. */
1938 parse_reloc (char **str
)
1940 struct reloc_entry
*r
;
1944 return BFD_RELOC_UNUSED
;
1949 while (*q
&& *q
!= ')' && *q
!= ',')
1954 if ((r
= hash_find_n (arm_reloc_hsh
, p
, q
- p
)) == NULL
)
1961 /* Directives: register aliases. */
1963 static struct reg_entry
*
1964 insert_reg_alias (char *str
, int number
, int type
)
1966 struct reg_entry
*new;
1969 if ((new = hash_find (arm_reg_hsh
, str
)) != 0)
1972 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str
);
1974 /* Only warn about a redefinition if it's not defined as the
1976 else if (new->number
!= number
|| new->type
!= type
)
1977 as_warn (_("ignoring redefinition of register alias '%s'"), str
);
1982 name
= xstrdup (str
);
1983 new = xmalloc (sizeof (struct reg_entry
));
1986 new->number
= number
;
1988 new->builtin
= FALSE
;
1991 if (hash_insert (arm_reg_hsh
, name
, (void *) new))
1998 insert_neon_reg_alias (char *str
, int number
, int type
,
1999 struct neon_typed_alias
*atype
)
2001 struct reg_entry
*reg
= insert_reg_alias (str
, number
, type
);
2005 first_error (_("attempt to redefine typed alias"));
2011 reg
->neon
= xmalloc (sizeof (struct neon_typed_alias
));
2012 *reg
->neon
= *atype
;
/* Look for the .req directive.  This is of the form:

        new_register_name .req existing_register_name

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return TRUE.  Otherwise return FALSE.  */
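
/* For example (illustrative):

        acc .req r4

   makes "acc" an alias for r4; all-uppercase and all-lowercase variants of
   the alias are created as well, as done below.  */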
static bfd_boolean
create_register_alias (char * newname, char *p)
{
  struct reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  old = hash_find (arm_reg_hsh, oldname);
  if (old == NULL)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = alloca (nlen + 1);
  memcpy (nbuf, newname, nlen);
  nbuf[nlen] = '\0';

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
        *p = TOUPPER (*p);

      if (strncmp (nbuf, newname, nlen))
        {
          /* If this attempt to create an additional alias fails, do not bother
             trying to create the all-lower case alias.  We will fail and issue
             a second, duplicate error message.  This situation arises when the
             programmer does something like:
               foo .req r0
               Foo .req r1
             The second .req creates the "Foo" alias but then fails to create
             the artificial FOO alias because it has already been created by the
             first .req.  */
          if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
            return TRUE;
        }

      for (p = nbuf; *p; p++)
        *p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
        insert_reg_alias (nbuf, old->number, old->type);
    }

  return TRUE;
}
/* Create a Neon typed/indexed register alias using directives, e.g. the
   illustrative forms shown below.  These typed registers can be used instead
   of the types specified after the Neon mnemonic, so long as all operands
   given have types.  Types can also be specified directly, e.g.:
     vadd d0.s32, d1.s32, d2.s32  */
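
/* Illustrative examples (not taken verbatim from the original comment):

        x .dn d2.f32
        y .qn q1.s16
        z .dn d3[1]

   i.e. a D or Q register, an optional element type, and an optional scalar
   index, as parsed below.  */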
static bfd_boolean
create_neon_reg_alias (char *newname, char *p)
{
  enum arm_reg_type basetype;
  struct reg_entry *basereg;
  struct reg_entry mybasereg;
  struct neon_type ntype;
  struct neon_typed_alias typeinfo;
  char *namebuf, *nameend;
  int namelen;

  typeinfo.defined = 0;
  typeinfo.eltype.type = NT_invtype;
  typeinfo.eltype.size = -1;
  typeinfo.index = -1;

  nameend = p;

  if (strncmp (p, " .dn ", 5) == 0)
    basetype = REG_TYPE_VFD;
  else if (strncmp (p, " .qn ", 5) == 0)
    basetype = REG_TYPE_NQ;
  else
    return FALSE;

  p += 5;

  if (*p == '\0')
    return FALSE;

  basereg = arm_reg_parse_multi (&p);

  if (basereg && basereg->type != basetype)
    {
      as_bad (_("bad type for register"));
      return FALSE;
    }

  if (basereg == NULL)
    {
      expressionS exp;
      /* Try parsing as an integer.  */
      my_get_expression (&exp, &p, GE_NO_PREFIX);
      if (exp.X_op != O_constant)
        {
          as_bad (_("expression must be constant"));
          return FALSE;
        }
      basereg = &mybasereg;
      basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
                                                  : exp.X_add_number;
      basereg->neon = NULL;
    }

  if (basereg->neon)
    typeinfo = *basereg->neon;

  if (parse_neon_type (&ntype, &p) == SUCCESS)
    {
      /* We got a type.  */
      if (typeinfo.defined & NTA_HASTYPE)
        {
          as_bad (_("can't redefine the type of a register alias"));
          return FALSE;
        }

      typeinfo.defined |= NTA_HASTYPE;
      if (ntype.elems != 1)
        {
          as_bad (_("you must specify a single type only"));
          return FALSE;
        }
      typeinfo.eltype = ntype.el[0];
    }

  if (skip_past_char (&p, '[') == SUCCESS)
    {
      expressionS exp;
      /* We got a scalar index.  */

      if (typeinfo.defined & NTA_HASINDEX)
        {
          as_bad (_("can't redefine the index of a scalar alias"));
          return FALSE;
        }

      my_get_expression (&exp, &p, GE_NO_PREFIX);

      if (exp.X_op != O_constant)
        {
          as_bad (_("scalar index must be constant"));
          return FALSE;
        }

      typeinfo.defined |= NTA_HASINDEX;
      typeinfo.index = exp.X_add_number;

      if (skip_past_char (&p, ']') == FAIL)
        {
          as_bad (_("expecting ]"));
          return FALSE;
        }
    }

  namelen = nameend - newname;
  namebuf = alloca (namelen + 1);
  strncpy (namebuf, newname, namelen);
  namebuf[namelen] = '\0';

  insert_neon_reg_alias (namebuf, basereg->number, basetype,
                         typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all uppercase.  */
  for (p = namebuf; *p; p++)
    *p = TOUPPER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
                           typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all lowercase.  */
  for (p = namebuf; *p; p++)
    *p = TOLOWER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
                           typeinfo.defined != 0 ? &typeinfo : NULL);

  return TRUE;
}
/* Should never be called, as .req goes between the alias and the
   register name, not at the beginning of the line.  */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}

static void
s_dn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .dn directive"));
}

static void
s_qn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .qn directive"));
}
/* The .unreq directive deletes an alias which was previously defined
   by .req.  For example:

       foo .req r0
       .unreq foo
  */

static void
s_unreq (int a ATTRIBUTE_UNUSED)
{
  char * name;
  char saved_char;

  name = input_line_pointer;

  while (*input_line_pointer != 0
         && *input_line_pointer != ' '
         && *input_line_pointer != '\n')
    ++input_line_pointer;

  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  if (!*name)
    as_bad (_("invalid syntax for .unreq directive"));
  else
    {
      struct reg_entry *reg = hash_find (arm_reg_hsh, name);

      if (reg == NULL)
        as_bad (_("unknown register alias '%s'"), name);
      else if (reg->builtin)
        as_warn (_("ignoring attempt to undefine built-in register '%s'"),
                 name);
      else
        {
          char *p;
          char *nbuf;

          hash_delete (arm_reg_hsh, name, FALSE);
          free ((char *) reg->name);

          /* Also locate the all upper case and all lower case versions.
             Do not complain if we cannot find one or the other as it
             was probably deleted above.  */

          nbuf = strdup (name);
          for (p = nbuf; *p; p++)
            *p = TOUPPER (*p);
          reg = hash_find (arm_reg_hsh, nbuf);
          if (reg)
            {
              hash_delete (arm_reg_hsh, nbuf, FALSE);
              free ((char *) reg->name);
            }

          for (p = nbuf; *p; p++)
            *p = TOLOWER (*p);
          reg = hash_find (arm_reg_hsh, nbuf);
          if (reg)
            {
              hash_delete (arm_reg_hsh, nbuf, FALSE);
              free ((char *) reg->name);
            }
        }
    }

  *input_line_pointer = saved_char;
  demand_empty_rest_of_line ();
}
/* Directives: Instruction set selection.  */

/* This code is to handle mapping symbols as defined in the ARM ELF spec.
   (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
   Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
   and $d had type STT_OBJECT (BSF_OBJECT flag).  Now all three are untyped.  */
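
/* As a reminder (illustrative): "$a" marks the start of a run of ARM code,
   "$t" a run of Thumb code, and "$d" a run of data within a code section.  */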
#ifdef OBJ_ELF
static enum mstate mapstate = MAP_UNDEFINED;

void
mapping_state (enum mstate state)
{
  symbolS * symbolP;
  const char * symname;
  int type;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_ARM:
      symname = "$a";
      type = BSF_NO_FLAGS;
      break;
    case MAP_THUMB:
      symname = "$t";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  seg_info (now_seg)->tc_segment_info_data.mapstate = state;

  symbolP = symbol_new (symname, now_seg, (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (symbolP);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  switch (state)
    {
    case MAP_ARM:
      THUMB_SET_FUNC (symbolP, 0);
      ARM_SET_THUMB (symbolP, 0);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_THUMB:
      THUMB_SET_FUNC (symbolP, 1);
      ARM_SET_THUMB (symbolP, 1);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    default:
      break;
    }
}
#else
#define mapping_state(x) /* nothing */
#endif
/* Find the real, Thumb encoded start of a Thumb function.  */

static symbolS *
find_real_start (symbolS * symbolP)
{
  char *       real_start;
  const char * name = S_GET_NAME (symbolP);
  symbolS *    new_target;

  /* This definition must agree with the one in gcc/config/arm/thumb.c.  */
#define STUB_NAME ".real_start_of"

  if (name == NULL)
    abort ();

  /* The compiler may generate BL instructions to local labels because
     it needs to perform a branch to a far away location.  These labels
     do not have a corresponding ".real_start_of" label.  We check
     both for S_IS_LOCAL and for a leading dot, to give a way to bypass
     the ".real_start_of" convention for nonlocal branches.  */
  if (S_IS_LOCAL (symbolP) || name[0] == '.')
    return symbolP;

  real_start = ACONCAT ((STUB_NAME, name, NULL));
  new_target = symbol_find (real_start);

  if (new_target == NULL)
    {
      as_warn (_("Failed to find real start of function: %s\n"), name);
      new_target = symbolP;
    }

  return new_target;
}
static void
opcode_select (int width)
{
  switch (width)
    {
    case 16:
      if (! thumb_mode)
        {
          if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
            as_bad (_("selected processor does not support THUMB opcodes"));

          thumb_mode = 1;
          /* No need to force the alignment, since we will have been
             coming from ARM mode, which is word-aligned.  */
          record_alignment (now_seg, 1);
        }
      mapping_state (MAP_THUMB);
      break;

    case 32:
      if (thumb_mode)
        {
          if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
            as_bad (_("selected processor does not support ARM opcodes"));

          thumb_mode = 0;

          if (!need_pass_2)
            frag_align (2, 0, 0);

          record_alignment (now_seg, 1);
        }
      mapping_state (MAP_ARM);
      break;

    default:
      as_bad (_("invalid instruction size selected (%d)"), width);
    }
}
static void
s_arm (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (32);
  demand_empty_rest_of_line ();
}

static void
s_thumb (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (16);
  demand_empty_rest_of_line ();
}

static void
s_code (int unused ATTRIBUTE_UNUSED)
{
  int temp;

  temp = get_absolute_expression ();
  switch (temp)
    {
    case 16:
    case 32:
      opcode_select (temp);
      break;

    default:
      as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
    }
}

static void
s_force_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* If we are not already in thumb mode go into it, EVEN if
     the target processor does not support thumb instructions.
     This is used by gcc/config/arm/lib1funcs.asm for example
     to compile interworking support functions even if the
     target processor should not support interworking.  */
  if (! thumb_mode)
    {
      thumb_mode = 2;
      record_alignment (now_seg, 1);
    }

  demand_empty_rest_of_line ();
}

static void
s_thumb_func (int ignore ATTRIBUTE_UNUSED)
{
  s_thumb (0);

  /* The following label is the name/address of the start of a Thumb function.
     We need to know this for the interworking support.  */
  label_is_thumb_function_name = TRUE;
}
/* Perform a .set directive, but also mark the alias as
   being a thumb function.  */

static void
s_thumb_set (int equiv)
{
  /* XXX the following is a duplicate of the code for s_set() in read.c
     We cannot just call that code as we need to get at the symbol that
     is created.  */
  char *    name;
  char      delim;
  char *    end_name;
  symbolS * symbolP;

  /* Especial apologies for the random logic:
     This just grew, and could be parsed much more simply!  */
  name      = input_line_pointer;
  delim     = get_symbol_end ();
  end_name  = input_line_pointer;
  *end_name = delim;

  if (*input_line_pointer != ',')
    {
      *end_name = 0;
      as_bad (_("expected comma after name \"%s\""), name);
      *end_name = delim;
      ignore_rest_of_line ();
      return;
    }

  input_line_pointer++;
  *end_name = 0;

  if (name[0] == '.' && name[1] == '\0')
    {
      /* XXX - this should not happen to .thumb_set.  */
      abort ();
    }

  if ((symbolP = symbol_find (name)) == NULL
      && (symbolP = md_undefined_symbol (name)) == NULL)
    {
      /* When doing symbol listings, play games with dummy fragments living
         outside the normal fragment chain to record the file and line info
         for this symbol.  */
      if (listing & LISTING_SYMBOLS)
        {
          extern struct list_info_struct * listing_tail;
          fragS * dummy_frag = xmalloc (sizeof (fragS));

          memset (dummy_frag, 0, sizeof (fragS));
          dummy_frag->fr_type = rs_fill;
          dummy_frag->line = listing_tail;
          symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
          dummy_frag->fr_symbol = symbolP;
        }
      else
        symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);

#ifdef OBJ_COFF
      /* "set" symbols are local unless otherwise specified.  */
      SF_SET_LOCAL (symbolP);
#endif /* OBJ_COFF */
    }                           /* Make a new symbol.  */

  symbol_table_insert (symbolP);

  * end_name = delim;

  if (equiv
      && S_IS_DEFINED (symbolP)
      && S_GET_SEGMENT (symbolP) != reg_section)
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));

  pseudo_set (symbolP);

  demand_empty_rest_of_line ();

  /* XXX Now we come to the Thumb specific bit of code.  */

  THUMB_SET_FUNC (symbolP, 1);
  ARM_SET_THUMB (symbolP, 1);
#if defined OBJ_ELF || defined OBJ_COFF
  ARM_SET_INTERWORK (symbolP, support_interwork);
#endif
}
/* Directives: Mode selection.  */

/* .syntax [unified|divided] - choose the new unified syntax
   (same for Arm and Thumb encoding, modulo slight differences in what
   can be represented) or the old divergent syntax for each mode.  */
static void
s_syntax (int unused ATTRIBUTE_UNUSED)
{
  char *name, delim;

  name = input_line_pointer;
  delim = get_symbol_end ();

  if (!strcasecmp (name, "unified"))
    unified_syntax = TRUE;
  else if (!strcasecmp (name, "divided"))
    unified_syntax = FALSE;
  else
    {
      as_bad (_("unrecognized syntax mode \"%s\""), name);
      return;
    }
  *input_line_pointer = delim;
  demand_empty_rest_of_line ();
}
/* Directives: sectioning and alignment.  */

/* Same as s_align_ptwo but align 0 => align 2.  */

static void
s_align (int unused ATTRIBUTE_UNUSED)
{
  int temp;
  bfd_boolean fill_p;
  long temp_fill;
  long max_alignment = 15;

  temp = get_absolute_expression ();
  if (temp > max_alignment)
    as_bad (_("alignment too large: %d assumed"), temp = max_alignment);
  else if (temp < 0)
    {
      as_bad (_("alignment negative. 0 assumed."));
      temp = 0;
    }

  if (*input_line_pointer == ',')
    {
      input_line_pointer++;
      temp_fill = get_absolute_expression ();
      fill_p = TRUE;
    }
  else
    {
      fill_p = FALSE;
      temp_fill = 0;
    }

  if (!temp)
    temp = 2;

  /* Only make a frag if we HAVE to.  */
  if (temp && !need_pass_2)
    {
      if (!fill_p && subseg_text_p (now_seg))
	frag_align_code (temp, 0);
      else
	frag_align (temp, (int) temp_fill, 0);
    }
  demand_empty_rest_of_line ();

  record_alignment (now_seg, temp);
}

static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  mapping_state (MAP_DATA);
}

static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
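
/* Illustrative sketch only, not referenced by the assembler: the byte
   alignment requested by ".align N" under the rule above, where N = 0 is
   promoted to 2 (four-byte alignment).  The helper name is invented for
   this example.  */
static int ATTRIBUTE_UNUSED
example_align_bytes (int n)
{
  if (n == 0)
    n = 2;		/* .align 0 => .align 2.  */
  return 1 << n;	/* Power-of-two alignment, in bytes.  */
}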
/* Directives: Literal pools.  */

static literal_pool *
find_literal_pool (void)
{
  literal_pool * pool;

  for (pool = list_of_pools; pool != NULL; pool = pool->next)
    {
      if (pool->section == now_seg
	  && pool->sub_section == now_subseg)
	break;
    }

  return pool;
}

static literal_pool *
find_or_make_literal_pool (void)
{
  /* Next literal pool ID number.  */
  static unsigned int latest_pool_num = 1;
  literal_pool * pool;

  pool = find_literal_pool ();

  if (pool == NULL)
    {
      /* Create a new pool.  */
      pool = xmalloc (sizeof (* pool));
      if (! pool)
	return NULL;

      pool->next_free_entry = 0;
      pool->section         = now_seg;
      pool->sub_section     = now_subseg;
      pool->next            = list_of_pools;
      pool->symbol          = NULL;

      /* Add it to the list.  */
      list_of_pools = pool;
    }

  /* New pools, and emptied pools, will have a NULL symbol.  */
  if (pool->symbol == NULL)
    {
      pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
				    (valueT) 0, &zero_address_frag);
      pool->id = latest_pool_num ++;
    }

  return pool;
}
/* Add the literal in the global 'inst'
   structure to the relevant literal pool.  */

static int
add_to_lit_pool (void)
{
  literal_pool * pool;
  unsigned int entry;

  pool = find_or_make_literal_pool ();

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
      if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	  && (inst.reloc.exp.X_op == O_constant)
	  && (pool->literals[entry].X_add_number
	      == inst.reloc.exp.X_add_number)
	  && (pool->literals[entry].X_unsigned
	      == inst.reloc.exp.X_unsigned))
	break;

      if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	  && (inst.reloc.exp.X_op == O_symbol)
	  && (pool->literals[entry].X_add_number
	      == inst.reloc.exp.X_add_number)
	  && (pool->literals[entry].X_add_symbol
	      == inst.reloc.exp.X_add_symbol)
	  && (pool->literals[entry].X_op_symbol
	      == inst.reloc.exp.X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  inst.error = _("literal pool overflow");
	  return FAIL;
	}

      pool->literals[entry] = inst.reloc.exp;
      pool->next_free_entry += 1;
    }

  inst.reloc.exp.X_op         = O_symbol;
  inst.reloc.exp.X_add_number = ((int) entry) * 4;
  inst.reloc.exp.X_add_symbol = pool->symbol;

  return SUCCESS;
}
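
/* Illustrative sketch only, not referenced by the assembler: the essence
   of the O_constant case in add_to_lit_pool above -- reuse an existing
   entry with the same value, otherwise append one -- with the returned
   index later scaled by four to form the offset from the pool's symbol.
   The helper name and the flat integer array are made up for this
   example.  */
static int ATTRIBUTE_UNUSED
example_pool_index (int *vals, int *nvals, int value)
{
  int idx;

  for (idx = 0; idx < *nvals; idx++)
    if (vals[idx] == value)
      return idx;		/* Duplicate literal: share the slot.  */

  vals[idx] = value;		/* New literal: append a fresh word.  */
  *nvals += 1;
  return idx;
}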
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.  */

static void
symbol_locate (symbolS *    symbolP,
	       const char * name,	/* It is copied, the caller can modify.  */
	       segT         segment,	/* Segment identifier (SEG_<something>).  */
	       valueT       valu,	/* Symbol value.  */
	       fragS *      frag)	/* Associated fragment.  */
{
  unsigned int name_length;
  char * preserved_copy_of_name;

  name_length = strlen (name) + 1;   /* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool * pool;
  char sym_name[20];

  pool = find_literal_pool ();
  if (pool == NULL
      || pool->symbol == NULL
      || pool->next_free_entry == 0)
    return;

  mapping_state (MAP_DATA);

  /* Align the pool for word access.
     Only make a frag if we have to.  */
  if (!need_pass_2)
    frag_align (2, 0, 0);

  record_alignment (now_seg, 2);

  sprintf (sym_name, "$$lit_\002%x", pool->id);

  symbol_locate (pool->symbol, sym_name, now_seg,
		 (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (pool->symbol);

  ARM_SET_THUMB (pool->symbol, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (pool->symbol, support_interwork);
#endif

  for (entry = 0; entry < pool->next_free_entry; entry ++)
    /* First output the expression in the instruction to the pool.  */
    emit_expr (&(pool->literals[entry]), 4); /* .word  */

  /* Mark the pool as empty.  */
  pool->next_free_entry = 0;
  pool->symbol = NULL;
}
/* Forward declarations for functions below, in the MD interface
   section.  */
static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
static valueT create_unwind_entry (int);
static void start_unwind_section (const segT, int);
static void add_unwind_opcode (valueT, int);
static void flush_pending_unwind (void);
2942 /* Directives: Data. */
2945 s_arm_elf_cons (int nbytes
)
2949 #ifdef md_flush_pending_output
2950 md_flush_pending_output ();
2953 if (is_it_end_of_statement ())
2955 demand_empty_rest_of_line ();
2959 #ifdef md_cons_align
2960 md_cons_align (nbytes
);
2963 mapping_state (MAP_DATA
);
2967 char *base
= input_line_pointer
;
2971 if (exp
.X_op
!= O_symbol
)
2972 emit_expr (&exp
, (unsigned int) nbytes
);
2975 char *before_reloc
= input_line_pointer
;
2976 reloc
= parse_reloc (&input_line_pointer
);
2979 as_bad (_("unrecognized relocation suffix"));
2980 ignore_rest_of_line ();
2983 else if (reloc
== BFD_RELOC_UNUSED
)
2984 emit_expr (&exp
, (unsigned int) nbytes
);
2987 reloc_howto_type
*howto
= bfd_reloc_type_lookup (stdoutput
, reloc
);
2988 int size
= bfd_get_reloc_size (howto
);
2990 if (reloc
== BFD_RELOC_ARM_PLT32
)
2992 as_bad (_("(plt) is only valid on branch targets"));
2993 reloc
= BFD_RELOC_UNUSED
;
2998 as_bad (_("%s relocations do not fit in %d bytes"),
2999 howto
->name
, nbytes
);
3002 /* We've parsed an expression stopping at O_symbol.
3003 But there may be more expression left now that we
3004 have parsed the relocation marker. Parse it again.
3005 XXX Surely there is a cleaner way to do this. */
3006 char *p
= input_line_pointer
;
3008 char *save_buf
= alloca (input_line_pointer
- base
);
3009 memcpy (save_buf
, base
, input_line_pointer
- base
);
3010 memmove (base
+ (input_line_pointer
- before_reloc
),
3011 base
, before_reloc
- base
);
3013 input_line_pointer
= base
+ (input_line_pointer
-before_reloc
);
3015 memcpy (base
, save_buf
, p
- base
);
3017 offset
= nbytes
- size
;
3018 p
= frag_more ((int) nbytes
);
3019 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
+ offset
,
3020 size
, &exp
, 0, reloc
);
3025 while (*input_line_pointer
++ == ',');
3027 /* Put terminator back into stream. */
3028 input_line_pointer
--;
3029 demand_empty_rest_of_line ();
3033 /* Parse a .rel31 directive. */
3036 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED
)
3043 if (*input_line_pointer
== '1')
3044 highbit
= 0x80000000;
3045 else if (*input_line_pointer
!= '0')
3046 as_bad (_("expected 0 or 1"));
3048 input_line_pointer
++;
3049 if (*input_line_pointer
!= ',')
3050 as_bad (_("missing comma"));
3051 input_line_pointer
++;
3053 #ifdef md_flush_pending_output
3054 md_flush_pending_output ();
3057 #ifdef md_cons_align
3061 mapping_state (MAP_DATA
);
3066 md_number_to_chars (p
, highbit
, 4);
3067 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 1,
3068 BFD_RELOC_ARM_PREL31
);
3070 demand_empty_rest_of_line ();
3073 /* Directives: AEABI stack-unwind tables. */
3075 /* Parse an unwind_fnstart directive. Simply records the current location. */
3078 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED
)
3080 demand_empty_rest_of_line ();
3081 /* Mark the start of the function. */
3082 unwind
.proc_start
= expr_build_dot ();
3084 /* Reset the rest of the unwind info. */
3085 unwind
.opcode_count
= 0;
3086 unwind
.table_entry
= NULL
;
3087 unwind
.personality_routine
= NULL
;
3088 unwind
.personality_index
= -1;
3089 unwind
.frame_size
= 0;
3090 unwind
.fp_offset
= 0;
3093 unwind
.sp_restored
= 0;
3097 /* Parse a handlerdata directive. Creates the exception handling table entry
3098 for the function. */
3101 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED
)
3103 demand_empty_rest_of_line ();
3104 if (unwind
.table_entry
)
3105 as_bad (_("duplicate .handlerdata directive"));
3107 create_unwind_entry (1);
/* Parse an unwind_fnend directive.  Generates the index table entry.  */

static void
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
{
  long where;
  char *ptr;
  valueT val;

  demand_empty_rest_of_line ();

  /* Add eh table entry.  */
  if (unwind.table_entry == NULL)
    val = create_unwind_entry (0);
  else
    val = 0;

  /* Add index table entry.  This is two words.  */
  start_unwind_section (unwind.saved_seg, 1);
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);

  ptr = frag_more (8);
  where = frag_now_fix () - 8;

  /* Self relative offset of the function start.  */
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
	   BFD_RELOC_ARM_PREL31);

  /* Indicate dependency on EHABI-defined personality routines to the
     linker, if it hasn't been done already.  */
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
    {
      static const char *const name[] =
	{
	  "__aeabi_unwind_cpp_pr0",
	  "__aeabi_unwind_cpp_pr1",
	  "__aeabi_unwind_cpp_pr2"
	};
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
      marked_pr_dependency |= 1 << unwind.personality_index;
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
	= marked_pr_dependency;
    }

  if (val)
    /* Inline exception table entry.  */
    md_number_to_chars (ptr + 4, val, 4);
  else
    /* Self relative offset of the table entry.  */
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
	     BFD_RELOC_ARM_PREL31);

  /* Restore the original section.  */
  subseg_set (unwind.saved_seg, unwind.saved_subseg);
}
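
/* Illustrative sketch only: the shape of the two-word index table entry
   emitted by s_arm_unwind_fnend above.  The first word always carries an
   R_ARM_PREL31 relocation against the function start; the second holds
   either the inline (compact) data returned by create_unwind_entry or a
   PREL31 reference to the separate table entry.  The struct name, and
   the use of a 32-bit unsigned int, are assumptions for this sketch.  */
struct example_exidx_entry
{
  unsigned int fn_offset;	/* PREL31 to the start of the function.  */
  unsigned int content;		/* Inline unwind data, or PREL31 to it.  */
};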
3170 /* Parse an unwind_cantunwind directive. */
3173 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED
)
3175 demand_empty_rest_of_line ();
3176 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3177 as_bad (_("personality routine specified for cantunwind frame"));
3179 unwind
.personality_index
= -2;
3183 /* Parse a personalityindex directive. */
3186 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED
)
3190 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3191 as_bad (_("duplicate .personalityindex directive"));
3195 if (exp
.X_op
!= O_constant
3196 || exp
.X_add_number
< 0 || exp
.X_add_number
> 15)
3198 as_bad (_("bad personality routine number"));
3199 ignore_rest_of_line ();
3203 unwind
.personality_index
= exp
.X_add_number
;
3205 demand_empty_rest_of_line ();
3209 /* Parse a personality directive. */
3212 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED
)
3216 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3217 as_bad (_("duplicate .personality directive"));
3219 name
= input_line_pointer
;
3220 c
= get_symbol_end ();
3221 p
= input_line_pointer
;
3222 unwind
.personality_routine
= symbol_find_or_make (name
);
3224 demand_empty_rest_of_line ();
/* Parse a directive saving core registers.  */

static void
s_arm_unwind_save_core (void)
{
  valueT op;
  long range;
  int n;

  range = parse_reg_list (&input_line_pointer);
  if (range == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
     ip because it is clobbered by calls.  */
  if (unwind.sp_restored && unwind.fp_reg == 12
      && (range & 0x3000) == 0x1000)
    {
      unwind.opcode_count--;
      unwind.sp_restored = 0;
      range = (range | 0x2000) & ~0x1000;
      unwind.pending_offset = 0;
    }

  /* Pop r4-r15.  */
  if (range & 0xfff0)
    {
      /* See if we can use the short opcodes.  These pop a block of up to 8
	 registers starting with r4, plus maybe r14.  */
      for (n = 0; n < 8; n++)
	{
	  /* Break at the first non-saved register.  */
	  if ((range & (1 << (n + 4))) == 0)
	    break;
	}
      /* See if there are any other bits set.  */
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
	{
	  /* Use the long form.  */
	  op = 0x8000 | ((range >> 4) & 0xfff);
	  add_unwind_opcode (op, 2);
	}
      else
	{
	  /* Use the short form.  */
	  if (range & 0x4000)
	    op = 0xa8; /* Pop r14.  */
	  else
	    op = 0xa0; /* Do not pop r14.  */
	  op |= (n - 1);
	  add_unwind_opcode (op, 1);
	}
    }

  /* Pop r0-r3.  */
  if (range & 0xf)
    {
      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);
    }

  /* Record the number of bytes pushed.  */
  for (n = 0; n < 16; n++)
    {
      if (range & (1 << n))
	unwind.frame_size += 4;
    }
}
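
/* Worked example for the short form used above, assuming the usual EHABI
   encoding where the number of consecutive registers (minus one) ends up
   in the low bits of the opcode: saving r4-r7 plus r14 gives
   0xa8 | (4 - 1) = 0xab, and adds 5 * 4 = 20 bytes to frame_size.  The
   helper name is invented for this illustration.  */
static int ATTRIBUTE_UNUSED
example_core_save_opcode (int nregs_from_r4, int save_r14)
{
  /* Valid for 1..8 consecutive registers starting at r4.  */
  return (save_r14 ? 0xa8 : 0xa0) | (nregs_from_r4 - 1);
}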
3305 /* Parse a directive saving FPA registers. */
3308 s_arm_unwind_save_fpa (int reg
)
3314 /* Get Number of registers to transfer. */
3315 if (skip_past_comma (&input_line_pointer
) != FAIL
)
3318 exp
.X_op
= O_illegal
;
3320 if (exp
.X_op
!= O_constant
)
3322 as_bad (_("expected , <constant>"));
3323 ignore_rest_of_line ();
3327 num_regs
= exp
.X_add_number
;
3329 if (num_regs
< 1 || num_regs
> 4)
3331 as_bad (_("number of registers must be in the range [1:4]"));
3332 ignore_rest_of_line ();
3336 demand_empty_rest_of_line ();
3341 op
= 0xb4 | (num_regs
- 1);
3342 add_unwind_opcode (op
, 1);
3347 op
= 0xc800 | (reg
<< 4) | (num_regs
- 1);
3348 add_unwind_opcode (op
, 2);
3350 unwind
.frame_size
+= num_regs
* 12;
/* Parse a directive saving VFP registers for ARMv6 and above.  */

static void
s_arm_unwind_save_vfp_armv6 (void)
{
  int count;
  unsigned int start;
  valueT op;
  int num_vfpv3_regs = 0;
  int num_regs_below_16;

  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
     than FSTMX/FLDMX-style ones).  */

  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
  if (start >= 16)
    num_vfpv3_regs = count;
  else if (start + count > 16)
    num_vfpv3_regs = start + count - 16;

  if (num_vfpv3_regs > 0)
    {
      int start_offset = start > 16 ? start - 16 : 0;
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
      add_unwind_opcode (op, 2);
    }

  /* Generate opcode for registers numbered in the range 0 .. 15.  */
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
  assert (num_regs_below_16 + num_vfpv3_regs == count);
  if (num_regs_below_16 > 0)
    {
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
      add_unwind_opcode (op, 2);
    }

  unwind.frame_size += count * 8;
}
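
/* Worked example for the 0xc900 form above: ".vsave {d8-d11}" has
   start = 8 and count = 4 with no VFPv3 registers, so the opcode is
   0xc900 | (8 << 4) | (4 - 1) = 0xc983 and frame_size grows by
   4 * 8 = 32 bytes.  Hypothetical helper, for illustration only.  */
static int ATTRIBUTE_UNUSED
example_vfp_d0_15_save_opcode (int first_d, int count)
{
  return 0xc900 | (first_d << 4) | (count - 1);
}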
3404 /* Parse a directive saving VFP registers for pre-ARMv6. */
3407 s_arm_unwind_save_vfp (void)
3413 count
= parse_vfp_reg_list (&input_line_pointer
, ®
, REGLIST_VFP_D
);
3416 as_bad (_("expected register list"));
3417 ignore_rest_of_line ();
3421 demand_empty_rest_of_line ();
3426 op
= 0xb8 | (count
- 1);
3427 add_unwind_opcode (op
, 1);
3432 op
= 0xb300 | (reg
<< 4) | (count
- 1);
3433 add_unwind_opcode (op
, 2);
3435 unwind
.frame_size
+= count
* 8 + 4;
3439 /* Parse a directive saving iWMMXt data registers. */
3442 s_arm_unwind_save_mmxwr (void)
3450 if (*input_line_pointer
== '{')
3451 input_line_pointer
++;
3455 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
3459 as_bad (_(reg_expected_msgs
[REG_TYPE_MMXWR
]));
3464 as_tsktsk (_("register list not in ascending order"));
3467 if (*input_line_pointer
== '-')
3469 input_line_pointer
++;
3470 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
3473 as_bad (_(reg_expected_msgs
[REG_TYPE_MMXWR
]));
3476 else if (reg
>= hi_reg
)
3478 as_bad (_("bad register range"));
3481 for (; reg
< hi_reg
; reg
++)
3485 while (skip_past_comma (&input_line_pointer
) != FAIL
);
3487 if (*input_line_pointer
== '}')
3488 input_line_pointer
++;
3490 demand_empty_rest_of_line ();
3492 /* Generate any deferred opcodes because we're going to be looking at
3494 flush_pending_unwind ();
3496 for (i
= 0; i
< 16; i
++)
3498 if (mask
& (1 << i
))
3499 unwind
.frame_size
+= 8;
3502 /* Attempt to combine with a previous opcode. We do this because gcc
3503 likes to output separate unwind directives for a single block of
3505 if (unwind
.opcode_count
> 0)
3507 i
= unwind
.opcodes
[unwind
.opcode_count
- 1];
3508 if ((i
& 0xf8) == 0xc0)
3511 /* Only merge if the blocks are contiguous. */
3514 if ((mask
& 0xfe00) == (1 << 9))
3516 mask
|= ((1 << (i
+ 11)) - 1) & 0xfc00;
3517 unwind
.opcode_count
--;
3520 else if (i
== 6 && unwind
.opcode_count
>= 2)
3522 i
= unwind
.opcodes
[unwind
.opcode_count
- 2];
3526 op
= 0xffff << (reg
- 1);
3528 && ((mask
& op
) == (1u << (reg
- 1))))
3530 op
= (1 << (reg
+ i
+ 1)) - 1;
3531 op
&= ~((1 << reg
) - 1);
3533 unwind
.opcode_count
-= 2;
3540 /* We want to generate opcodes in the order the registers have been
3541 saved, ie. descending order. */
3542 for (reg
= 15; reg
>= -1; reg
--)
3544 /* Save registers in blocks. */
3546 || !(mask
& (1 << reg
)))
3548 /* We found an unsaved reg. Generate opcodes to save the
3555 op
= 0xc0 | (hi_reg
- 10);
3556 add_unwind_opcode (op
, 1);
3561 op
= 0xc600 | ((reg
+ 1) << 4) | ((hi_reg
- reg
) - 1);
3562 add_unwind_opcode (op
, 2);
3571 ignore_rest_of_line ();
3575 s_arm_unwind_save_mmxwcg (void)
3582 if (*input_line_pointer
== '{')
3583 input_line_pointer
++;
3587 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
3591 as_bad (_(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
3597 as_tsktsk (_("register list not in ascending order"));
3600 if (*input_line_pointer
== '-')
3602 input_line_pointer
++;
3603 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
3606 as_bad (_(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
3609 else if (reg
>= hi_reg
)
3611 as_bad (_("bad register range"));
3614 for (; reg
< hi_reg
; reg
++)
3618 while (skip_past_comma (&input_line_pointer
) != FAIL
);
3620 if (*input_line_pointer
== '}')
3621 input_line_pointer
++;
3623 demand_empty_rest_of_line ();
3625 /* Generate any deferred opcodes because we're going to be looking at
3627 flush_pending_unwind ();
3629 for (reg
= 0; reg
< 16; reg
++)
3631 if (mask
& (1 << reg
))
3632 unwind
.frame_size
+= 4;
3635 add_unwind_opcode (op
, 2);
3638 ignore_rest_of_line ();
3642 /* Parse an unwind_save directive.
3643 If the argument is non-zero, this is a .vsave directive. */
3646 s_arm_unwind_save (int arch_v6
)
3649 struct reg_entry
*reg
;
3650 bfd_boolean had_brace
= FALSE
;
3652 /* Figure out what sort of save we have. */
3653 peek
= input_line_pointer
;
3661 reg
= arm_reg_parse_multi (&peek
);
3665 as_bad (_("register expected"));
3666 ignore_rest_of_line ();
3675 as_bad (_("FPA .unwind_save does not take a register list"));
3676 ignore_rest_of_line ();
3679 input_line_pointer
= peek
;
3680 s_arm_unwind_save_fpa (reg
->number
);
3683 case REG_TYPE_RN
: s_arm_unwind_save_core (); return;
3686 s_arm_unwind_save_vfp_armv6 ();
3688 s_arm_unwind_save_vfp ();
3690 case REG_TYPE_MMXWR
: s_arm_unwind_save_mmxwr (); return;
3691 case REG_TYPE_MMXWCG
: s_arm_unwind_save_mmxwcg (); return;
3694 as_bad (_(".unwind_save does not support this kind of register"));
3695 ignore_rest_of_line ();
3700 /* Parse an unwind_movsp directive. */
3703 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED
)
3709 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
3712 as_bad (_(reg_expected_msgs
[REG_TYPE_RN
]));
3713 ignore_rest_of_line ();
3717 /* Optional constant. */
3718 if (skip_past_comma (&input_line_pointer
) != FAIL
)
3720 if (immediate_for_directive (&offset
) == FAIL
)
3726 demand_empty_rest_of_line ();
3728 if (reg
== REG_SP
|| reg
== REG_PC
)
3730 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
3734 if (unwind
.fp_reg
!= REG_SP
)
3735 as_bad (_("unexpected .unwind_movsp directive"));
3737 /* Generate opcode to restore the value. */
3739 add_unwind_opcode (op
, 1);
3741 /* Record the information for later. */
3742 unwind
.fp_reg
= reg
;
3743 unwind
.fp_offset
= unwind
.frame_size
- offset
;
3744 unwind
.sp_restored
= 1;
3747 /* Parse an unwind_pad directive. */
3750 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED
)
3754 if (immediate_for_directive (&offset
) == FAIL
)
3759 as_bad (_("stack increment must be multiple of 4"));
3760 ignore_rest_of_line ();
3764 /* Don't generate any opcodes, just record the details for later. */
3765 unwind
.frame_size
+= offset
;
3766 unwind
.pending_offset
+= offset
;
3768 demand_empty_rest_of_line ();
/* Parse an unwind_setfp directive.  */

static void
s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
{
  int sp_reg;
  int fp_reg;
  int offset;

  fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
  if (skip_past_comma (&input_line_pointer) == FAIL)
    sp_reg = FAIL;
  else
    sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);

  if (fp_reg == FAIL || sp_reg == FAIL)
    {
      as_bad (_("expected <reg>, <reg>"));
      ignore_rest_of_line ();
      return;
    }

  /* Optional constant.  */
  if (skip_past_comma (&input_line_pointer) != FAIL)
    {
      if (immediate_for_directive (&offset) == FAIL)
	return;
    }
  else
    offset = 0;

  demand_empty_rest_of_line ();

  if (sp_reg != 13 && sp_reg != unwind.fp_reg)
    {
      as_bad (_("register must be either sp or set by a previous "
		"unwind_movsp directive"));
      return;
    }

  /* Don't generate any opcodes, just record the information for later.  */
  unwind.fp_reg = fp_reg;
  unwind.fp_used = 1;
  if (sp_reg == 13)
    unwind.fp_offset = unwind.frame_size - offset;
  else
    unwind.fp_offset -= offset;
}
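
/* A small sketch of the arithmetic recorded above, as I read it:
   ".setfp fp, sp, #offset" states that fp = sp + offset at a point where
   frame_size bytes have been pushed, so the stack pointer at function
   entry can later be recovered as fp + (frame_size - offset), which is
   the value stored in unwind.fp_offset.  Hypothetical helper, for
   illustration only.  */
static offsetT ATTRIBUTE_UNUSED
example_fp_offset (offsetT frame_size, offsetT setfp_offset)
{
  return frame_size - setfp_offset;
}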
3820 /* Parse an unwind_raw directive. */
3823 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED
)
3826 /* This is an arbitrary limit. */
3827 unsigned char op
[16];
3831 if (exp
.X_op
== O_constant
3832 && skip_past_comma (&input_line_pointer
) != FAIL
)
3834 unwind
.frame_size
+= exp
.X_add_number
;
3838 exp
.X_op
= O_illegal
;
3840 if (exp
.X_op
!= O_constant
)
3842 as_bad (_("expected <offset>, <opcode>"));
3843 ignore_rest_of_line ();
3849 /* Parse the opcode. */
3854 as_bad (_("unwind opcode too long"));
3855 ignore_rest_of_line ();
3857 if (exp
.X_op
!= O_constant
|| exp
.X_add_number
& ~0xff)
3859 as_bad (_("invalid unwind opcode"));
3860 ignore_rest_of_line ();
3863 op
[count
++] = exp
.X_add_number
;
3865 /* Parse the next byte. */
3866 if (skip_past_comma (&input_line_pointer
) == FAIL
)
3872 /* Add the opcode bytes in reverse order. */
3874 add_unwind_opcode (op
[count
], 1);
3876 demand_empty_rest_of_line ();
3880 /* Parse a .eabi_attribute directive. */
3883 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED
)
3885 s_vendor_attribute (OBJ_ATTR_PROC
);
3887 #endif /* OBJ_ELF */
3889 static void s_arm_arch (int);
3890 static void s_arm_object_arch (int);
3891 static void s_arm_cpu (int);
3892 static void s_arm_fpu (int);
3897 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
3904 if (exp
.X_op
== O_symbol
)
3905 exp
.X_op
= O_secrel
;
3907 emit_expr (&exp
, 4);
3909 while (*input_line_pointer
++ == ',');
3911 input_line_pointer
--;
3912 demand_empty_rest_of_line ();
3916 /* This table describes all the machine specific pseudo-ops the assembler
3917 has to support. The fields are:
3918 pseudo-op name without dot
3919 function to call to execute this pseudo-op
3920 Integer arg to pass to the function. */
3922 const pseudo_typeS md_pseudo_table
[] =
3924 /* Never called because '.req' does not start a line. */
3925 { "req", s_req
, 0 },
3926 /* Following two are likewise never called. */
3929 { "unreq", s_unreq
, 0 },
3930 { "bss", s_bss
, 0 },
3931 { "align", s_align
, 0 },
3932 { "arm", s_arm
, 0 },
3933 { "thumb", s_thumb
, 0 },
3934 { "code", s_code
, 0 },
3935 { "force_thumb", s_force_thumb
, 0 },
3936 { "thumb_func", s_thumb_func
, 0 },
3937 { "thumb_set", s_thumb_set
, 0 },
3938 { "even", s_even
, 0 },
3939 { "ltorg", s_ltorg
, 0 },
3940 { "pool", s_ltorg
, 0 },
3941 { "syntax", s_syntax
, 0 },
3942 { "cpu", s_arm_cpu
, 0 },
3943 { "arch", s_arm_arch
, 0 },
3944 { "object_arch", s_arm_object_arch
, 0 },
3945 { "fpu", s_arm_fpu
, 0 },
3947 { "word", s_arm_elf_cons
, 4 },
3948 { "long", s_arm_elf_cons
, 4 },
3949 { "rel31", s_arm_rel31
, 0 },
3950 { "fnstart", s_arm_unwind_fnstart
, 0 },
3951 { "fnend", s_arm_unwind_fnend
, 0 },
3952 { "cantunwind", s_arm_unwind_cantunwind
, 0 },
3953 { "personality", s_arm_unwind_personality
, 0 },
3954 { "personalityindex", s_arm_unwind_personalityindex
, 0 },
3955 { "handlerdata", s_arm_unwind_handlerdata
, 0 },
3956 { "save", s_arm_unwind_save
, 0 },
3957 { "vsave", s_arm_unwind_save
, 1 },
3958 { "movsp", s_arm_unwind_movsp
, 0 },
3959 { "pad", s_arm_unwind_pad
, 0 },
3960 { "setfp", s_arm_unwind_setfp
, 0 },
3961 { "unwind_raw", s_arm_unwind_raw
, 0 },
3962 { "eabi_attribute", s_arm_eabi_attribute
, 0 },
3966 /* These are used for dwarf. */
3970 /* These are used for dwarf2. */
3971 { "file", (void (*) (int)) dwarf2_directive_file
, 0 },
3972 { "loc", dwarf2_directive_loc
, 0 },
3973 { "loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0 },
3975 { "extend", float_cons
, 'x' },
3976 { "ldouble", float_cons
, 'x' },
3977 { "packed", float_cons
, 'p' },
3979 {"secrel32", pe_directive_secrel
, 0},
/* Parser functions used exclusively in instruction operands.  */

/* Generic immediate-value read function for use in insn parsing.
   STR points to the beginning of the immediate (the leading #);
   VAL receives the value; if the value is outside [MIN, MAX]
   issue an error.  PREFIX_OPT is true if the immediate prefix is
   optional.  */

static int
parse_immediate (char **str, int *val, int min, int max,
		 bfd_boolean prefix_opt)
{
  expressionS exp;

  my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
  if (exp.X_op != O_constant)
    {
      inst.error = _("constant expression required");
      return FAIL;
    }

  if (exp.X_add_number < min || exp.X_add_number > max)
    {
      inst.error = _("immediate value out of range");
      return FAIL;
    }

  *val = exp.X_add_number;
  return SUCCESS;
}
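
/* Hypothetical usage sketch, for illustration only -- this is not how
   the real shift parser later in this file works: a caller wanting an
   immediate in the range 0..31 with an optional '#' prefix would invoke
   parse_immediate like this.  */
static int ATTRIBUTE_UNUSED
example_parse_small_immediate (char **str, int *amount)
{
  return parse_immediate (str, amount, 0, 31, TRUE);
}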
/* Less-generic immediate-value read function with the possibility of loading a
   big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
   instructions.  Puts the result directly in inst.operands[i].  */

static int
parse_big_immediate (char **str, int i)
{
  expressionS exp;
  char *ptr = *str;

  my_get_expression (&exp, &ptr, GE_OPT_PREFIX_BIG);

  if (exp.X_op == O_constant)
    {
      inst.operands[i].imm = exp.X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp.X_add_number & ~0xffffffffl) != 0)
	{
	  /* X >> 32 is illegal if sizeof (exp.X_add_number) == 4.  */
	  inst.operands[i].reg = ((exp.X_add_number >> 16) >> 16) & 0xffffffff;
	  inst.operands[i].regisimm = 1;
	}
    }
  else if (exp.X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 32
	   && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number <= 64)
    {
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;
      /* Bignums have their least significant bits in
	 generic_bignum[0].  Make sure we put 32 bits in imm and
	 32 bits in reg, in a (hopefully) portable way.  */
      assert (parts != 0);
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].regisimm = 1;
    }
  else
    return FAIL;

  *str = ptr;

  return SUCCESS;
}
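
/* Illustrative sketch of the split performed above: the low 32 bits of a
   64-bit immediate go into .imm and the high 32 bits into .reg (with
   .regisimm set).  The helper name and the use of unsigned long long are
   assumptions made for this example only; the double 16-bit shift
   mirrors the source above, which must also cope with a 32-bit
   X_add_number.  */
static void ATTRIBUTE_UNUSED
example_split_imm64 (unsigned long long v, unsigned int *lo, unsigned int *hi)
{
  *lo = (unsigned int) (v & 0xffffffff);
  *hi = (unsigned int) (((v >> 16) >> 16) & 0xffffffff);
}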
4066 /* Returns the pseudo-register number of an FPA immediate constant,
4067 or FAIL if there isn't a valid constant here. */
4070 parse_fpa_immediate (char ** str
)
4072 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
4078 /* First try and match exact strings, this is to guarantee
4079 that some formats will work even for cross assembly. */
4081 for (i
= 0; fp_const
[i
]; i
++)
4083 if (strncmp (*str
, fp_const
[i
], strlen (fp_const
[i
])) == 0)
4087 *str
+= strlen (fp_const
[i
]);
4088 if (is_end_of_line
[(unsigned char) **str
])
4094 /* Just because we didn't get a match doesn't mean that the constant
4095 isn't valid, just that it is in a format that we don't
4096 automatically recognize. Try parsing it with the standard
4097 expression routines. */
4099 memset (words
, 0, MAX_LITTLENUMS
* sizeof (LITTLENUM_TYPE
));
4101 /* Look for a raw floating point number. */
4102 if ((save_in
= atof_ieee (*str
, 'x', words
)) != NULL
4103 && is_end_of_line
[(unsigned char) *save_in
])
4105 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
4107 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
4109 if (words
[j
] != fp_values
[i
][j
])
4113 if (j
== MAX_LITTLENUMS
)
4121 /* Try and parse a more complex expression, this will probably fail
4122 unless the code uses a floating point prefix (eg "0f"). */
4123 save_in
= input_line_pointer
;
4124 input_line_pointer
= *str
;
4125 if (expression (&exp
) == absolute_section
4126 && exp
.X_op
== O_big
4127 && exp
.X_add_number
< 0)
4129 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4131 if (gen_to_words (words
, 5, (long) 15) == 0)
4133 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
4135 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
4137 if (words
[j
] != fp_values
[i
][j
])
4141 if (j
== MAX_LITTLENUMS
)
4143 *str
= input_line_pointer
;
4144 input_line_pointer
= save_in
;
4151 *str
= input_line_pointer
;
4152 input_line_pointer
= save_in
;
4153 inst
.error
= _("invalid FPA immediate expression");
/* Returns 1 if a number has "quarter-precision" float format
   0baBbbbbbc defgh000 00000000 00000000.  */

static int
is_quarter_float (unsigned imm)
{
  int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
  return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
}
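
/* Worked examples for the predicate above, using IEEE single-precision
   bit patterns; hypothetical helper, for illustration only.  */
static int ATTRIBUTE_UNUSED
example_quarter_float_checks (void)
{
  return is_quarter_float (0x3f800000)		/* 1.0f: accepted.  */
	 && is_quarter_float (0x41700000)	/* 15.0f: accepted.  */
	 && ! is_quarter_float (0x3e4ccccd);	/* 0.2f: rejected, the low
						   mantissa bits are set.  */
}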
4167 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4168 0baBbbbbbc defgh000 00000000 00000000.
4169 The zero and minus-zero cases need special handling, since they can't be
4170 encoded in the "quarter-precision" float format, but can nonetheless be
4171 loaded as integer constants. */
4174 parse_qfloat_immediate (char **ccp
, int *immed
)
4178 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
4179 int found_fpchar
= 0;
4181 skip_past_char (&str
, '#');
4183 /* We must not accidentally parse an integer as a floating-point number. Make
4184 sure that the value we parse is not an integer by checking for special
4185 characters '.' or 'e'.
4186 FIXME: This is a horrible hack, but doing better is tricky because type
4187 information isn't in a very usable state at parse time. */
4189 skip_whitespace (fpnum
);
4191 if (strncmp (fpnum
, "0x", 2) == 0)
4195 for (; *fpnum
!= '\0' && *fpnum
!= ' ' && *fpnum
!= '\n'; fpnum
++)
4196 if (*fpnum
== '.' || *fpnum
== 'e' || *fpnum
== 'E')
4206 if ((str
= atof_ieee (str
, 's', words
)) != NULL
)
4208 unsigned fpword
= 0;
4211 /* Our FP word must be 32 bits (single-precision FP). */
4212 for (i
= 0; i
< 32 / LITTLENUM_NUMBER_OF_BITS
; i
++)
4214 fpword
<<= LITTLENUM_NUMBER_OF_BITS
;
4218 if (is_quarter_float (fpword
) || (fpword
& 0x7fffffff) == 0)
4231 /* Shift operands. */
4234 SHIFT_LSL
, SHIFT_LSR
, SHIFT_ASR
, SHIFT_ROR
, SHIFT_RRX
4237 struct asm_shift_name
4240 enum shift_kind kind
;
4243 /* Third argument to parse_shift. */
4244 enum parse_shift_mode
4246 NO_SHIFT_RESTRICT
, /* Any kind of shift is accepted. */
4247 SHIFT_IMMEDIATE
, /* Shift operand must be an immediate. */
4248 SHIFT_LSL_OR_ASR_IMMEDIATE
, /* Shift must be LSL or ASR immediate. */
4249 SHIFT_ASR_IMMEDIATE
, /* Shift must be ASR immediate. */
4250 SHIFT_LSL_IMMEDIATE
, /* Shift must be LSL immediate. */
4253 /* Parse a <shift> specifier on an ARM data processing instruction.
4254 This has three forms:
4256 (LSL|LSR|ASL|ASR|ROR) Rs
4257 (LSL|LSR|ASL|ASR|ROR) #imm
4260 Note that ASL is assimilated to LSL in the instruction encoding, and
4261 RRX to ROR #0 (which cannot be written as such). */
4264 parse_shift (char **str
, int i
, enum parse_shift_mode mode
)
4266 const struct asm_shift_name
*shift_name
;
4267 enum shift_kind shift
;
4272 for (p
= *str
; ISALPHA (*p
); p
++)
4277 inst
.error
= _("shift expression expected");
4281 shift_name
= hash_find_n (arm_shift_hsh
, *str
, p
- *str
);
4283 if (shift_name
== NULL
)
4285 inst
.error
= _("shift expression expected");
4289 shift
= shift_name
->kind
;
4293 case NO_SHIFT_RESTRICT
:
4294 case SHIFT_IMMEDIATE
: break;
4296 case SHIFT_LSL_OR_ASR_IMMEDIATE
:
4297 if (shift
!= SHIFT_LSL
&& shift
!= SHIFT_ASR
)
4299 inst
.error
= _("'LSL' or 'ASR' required");
4304 case SHIFT_LSL_IMMEDIATE
:
4305 if (shift
!= SHIFT_LSL
)
4307 inst
.error
= _("'LSL' required");
4312 case SHIFT_ASR_IMMEDIATE
:
4313 if (shift
!= SHIFT_ASR
)
4315 inst
.error
= _("'ASR' required");
4323 if (shift
!= SHIFT_RRX
)
4325 /* Whitespace can appear here if the next thing is a bare digit. */
4326 skip_whitespace (p
);
4328 if (mode
== NO_SHIFT_RESTRICT
4329 && (reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
4331 inst
.operands
[i
].imm
= reg
;
4332 inst
.operands
[i
].immisreg
= 1;
4334 else if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
4337 inst
.operands
[i
].shift_kind
= shift
;
4338 inst
.operands
[i
].shifted
= 1;
4343 /* Parse a <shifter_operand> for an ARM data processing instruction:
4346 #<immediate>, <rotate>
4350 where <shift> is defined by parse_shift above, and <rotate> is a
4351 multiple of 2 between 0 and 30. Validation of immediate operands
4352 is deferred to md_apply_fix. */
4355 parse_shifter_operand (char **str
, int i
)
4360 if ((value
= arm_reg_parse (str
, REG_TYPE_RN
)) != FAIL
)
4362 inst
.operands
[i
].reg
= value
;
4363 inst
.operands
[i
].isreg
= 1;
4365 /* parse_shift will override this if appropriate */
4366 inst
.reloc
.exp
.X_op
= O_constant
;
4367 inst
.reloc
.exp
.X_add_number
= 0;
4369 if (skip_past_comma (str
) == FAIL
)
4372 /* Shift operation on register. */
4373 return parse_shift (str
, i
, NO_SHIFT_RESTRICT
);
4376 if (my_get_expression (&inst
.reloc
.exp
, str
, GE_IMM_PREFIX
))
4379 if (skip_past_comma (str
) == SUCCESS
)
    {
      /* #x, y -- i.e. explicit rotation by Y.  */
      if (my_get_expression (&expr, str, GE_NO_PREFIX))
	return FAIL;

      if (expr.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}

      value = expr.X_add_number;
      if (value < 0 || value > 30 || value % 2 != 0)
	{
	  inst.error = _("invalid rotation");
	  return FAIL;
	}

      if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
	{
	  inst.error = _("invalid constant");
	  return FAIL;
	}

      /* Convert to decoded value.  md_apply_fix will put it back.  */
      inst.reloc.exp.X_add_number
	= (((inst.reloc.exp.X_add_number << (32 - value))
	    | (inst.reloc.exp.X_add_number >> value)) & 0xffffffff);
    }

  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 0;
  return SUCCESS;
}
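
/* Illustrative sketch of the "#x, y" form handled above: the pair denotes
   x rotated right by y (an even amount between 0 and 30), so for instance
   "#0xff, 8" stands for the constant 0xff000000.  The helper name is
   invented for this example; the rot == 0 guard is only here to keep the
   sketch well defined.  */
static unsigned int ATTRIBUTE_UNUSED
example_decode_rotated_imm (unsigned int x, unsigned int rot)
{
  if (rot == 0)
    return x;
  return ((x << (32 - rot)) | (x >> rot)) & 0xffffffff;
}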
4414 /* Group relocation information. Each entry in the table contains the
4415 textual name of the relocation as may appear in assembler source
4416 and must end with a colon.
4417 Along with this textual name are the relocation codes to be used if
4418 the corresponding instruction is an ALU instruction (ADD or SUB only),
4419 an LDR, an LDRS, or an LDC. */
4421 struct group_reloc_table_entry
4432 /* Varieties of non-ALU group relocation. */
4439 static struct group_reloc_table_entry group_reloc_table
[] =
4440 { /* Program counter relative: */
4442 BFD_RELOC_ARM_ALU_PC_G0_NC
, /* ALU */
4447 BFD_RELOC_ARM_ALU_PC_G0
, /* ALU */
4448 BFD_RELOC_ARM_LDR_PC_G0
, /* LDR */
4449 BFD_RELOC_ARM_LDRS_PC_G0
, /* LDRS */
4450 BFD_RELOC_ARM_LDC_PC_G0
}, /* LDC */
4452 BFD_RELOC_ARM_ALU_PC_G1_NC
, /* ALU */
4457 BFD_RELOC_ARM_ALU_PC_G1
, /* ALU */
4458 BFD_RELOC_ARM_LDR_PC_G1
, /* LDR */
4459 BFD_RELOC_ARM_LDRS_PC_G1
, /* LDRS */
4460 BFD_RELOC_ARM_LDC_PC_G1
}, /* LDC */
4462 BFD_RELOC_ARM_ALU_PC_G2
, /* ALU */
4463 BFD_RELOC_ARM_LDR_PC_G2
, /* LDR */
4464 BFD_RELOC_ARM_LDRS_PC_G2
, /* LDRS */
4465 BFD_RELOC_ARM_LDC_PC_G2
}, /* LDC */
4466 /* Section base relative */
4468 BFD_RELOC_ARM_ALU_SB_G0_NC
, /* ALU */
4473 BFD_RELOC_ARM_ALU_SB_G0
, /* ALU */
4474 BFD_RELOC_ARM_LDR_SB_G0
, /* LDR */
4475 BFD_RELOC_ARM_LDRS_SB_G0
, /* LDRS */
4476 BFD_RELOC_ARM_LDC_SB_G0
}, /* LDC */
4478 BFD_RELOC_ARM_ALU_SB_G1_NC
, /* ALU */
4483 BFD_RELOC_ARM_ALU_SB_G1
, /* ALU */
4484 BFD_RELOC_ARM_LDR_SB_G1
, /* LDR */
4485 BFD_RELOC_ARM_LDRS_SB_G1
, /* LDRS */
4486 BFD_RELOC_ARM_LDC_SB_G1
}, /* LDC */
4488 BFD_RELOC_ARM_ALU_SB_G2
, /* ALU */
4489 BFD_RELOC_ARM_LDR_SB_G2
, /* LDR */
4490 BFD_RELOC_ARM_LDRS_SB_G2
, /* LDRS */
4491 BFD_RELOC_ARM_LDC_SB_G2
} }; /* LDC */
/* Given the address of a pointer pointing to the textual name of a group
   relocation as may appear in assembler source, attempt to find its details
   in group_reloc_table.  The pointer will be updated to the character after
   the trailing colon.  On failure, FAIL will be returned; SUCCESS
   otherwise.  On success, *entry will be updated to point at the relevant
   group_reloc_table entry.  */

static int
find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
{
  unsigned int i;

  for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
    {
      int length = strlen (group_reloc_table[i].name);

      if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
	  && (*str)[length] == ':')
	{
	  *out = &group_reloc_table[i];
	  *str += (length + 1);
	  return SUCCESS;
	}
    }

  return FAIL;
}
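
/* Hypothetical usage sketch for the lookup above, for illustration only.
   The caller leaves *str at the textual name (any '#' and leading ':'
   already consumed); on SUCCESS *str is advanced past the trailing colon.
   "pc_g0" is assumed here to be one of the names in group_reloc_table;
   the buffer contents are invented.  */
static int ATTRIBUTE_UNUSED
example_group_reloc_lookup (void)
{
  char buf[] = "pc_g0:some_symbol";
  char *p = buf;
  struct group_reloc_table_entry *entry;

  if (find_group_reloc_table_entry (&p, &entry) == FAIL)
    return FAIL;
  /* p now points at "some_symbol"; entry->alu_code (or the LDR/LDRS/LDC
     variants) selects the relocation to record.  */
  return SUCCESS;
}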
4520 /* Parse a <shifter_operand> for an ARM data processing instruction
4521 (as for parse_shifter_operand) where group relocations are allowed:
4524 #<immediate>, <rotate>
4525 #:<group_reloc>:<expression>
4529 where <group_reloc> is one of the strings defined in group_reloc_table.
4530 The hashes are optional.
4532 Everything else is as for parse_shifter_operand. */
4534 static parse_operand_result
4535 parse_shifter_operand_group_reloc (char **str
, int i
)
4537 /* Determine if we have the sequence of characters #: or just :
4538 coming next. If we do, then we check for a group relocation.
4539 If we don't, punt the whole lot to parse_shifter_operand. */
4541 if (((*str
)[0] == '#' && (*str
)[1] == ':')
4542 || (*str
)[0] == ':')
4544 struct group_reloc_table_entry
*entry
;
4546 if ((*str
)[0] == '#')
4551 /* Try to parse a group relocation. Anything else is an error. */
4552 if (find_group_reloc_table_entry (str
, &entry
) == FAIL
)
4554 inst
.error
= _("unknown group relocation");
4555 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
4558 /* We now have the group relocation table entry corresponding to
4559 the name in the assembler source. Next, we parse the expression. */
4560 if (my_get_expression (&inst
.reloc
.exp
, str
, GE_NO_PREFIX
))
4561 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
4563 /* Record the relocation type (always the ALU variant here). */
4564 inst
.reloc
.type
= entry
->alu_code
;
4565 assert (inst
.reloc
.type
!= 0);
4567 return PARSE_OPERAND_SUCCESS
;
4570 return parse_shifter_operand (str
, i
) == SUCCESS
4571 ? PARSE_OPERAND_SUCCESS
: PARSE_OPERAND_FAIL
;
4573 /* Never reached. */
4576 /* Parse all forms of an ARM address expression. Information is written
4577 to inst.operands[i] and/or inst.reloc.
4579 Preindexed addressing (.preind=1):
4581 [Rn, #offset] .reg=Rn .reloc.exp=offset
4582 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4583 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4584 .shift_kind=shift .reloc.exp=shift_imm
4586 These three may have a trailing ! which causes .writeback to be set also.
4588 Postindexed addressing (.postind=1, .writeback=1):
4590 [Rn], #offset .reg=Rn .reloc.exp=offset
4591 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4592 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4593 .shift_kind=shift .reloc.exp=shift_imm
4595 Unindexed addressing (.preind=0, .postind=0):
4597 [Rn], {option} .reg=Rn .imm=option .immisreg=0
4601 [Rn]{!} shorthand for [Rn,#0]{!}
4602 =immediate .isreg=0 .reloc.exp=immediate
4603 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
4605 It is the caller's responsibility to check for addressing modes not
4606 supported by the instruction, and to set inst.reloc.type. */
4608 static parse_operand_result
4609 parse_address_main (char **str
, int i
, int group_relocations
,
4610 group_reloc_type group_type
)
4615 if (skip_past_char (&p
, '[') == FAIL
)
4617 if (skip_past_char (&p
, '=') == FAIL
)
4619 /* bare address - translate to PC-relative offset */
4620 inst
.reloc
.pc_rel
= 1;
4621 inst
.operands
[i
].reg
= REG_PC
;
4622 inst
.operands
[i
].isreg
= 1;
4623 inst
.operands
[i
].preind
= 1;
4625 /* else a load-constant pseudo op, no special treatment needed here */
4627 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
4628 return PARSE_OPERAND_FAIL
;
4631 return PARSE_OPERAND_SUCCESS
;
4634 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
4636 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
4637 return PARSE_OPERAND_FAIL
;
4639 inst
.operands
[i
].reg
= reg
;
4640 inst
.operands
[i
].isreg
= 1;
4642 if (skip_past_comma (&p
) == SUCCESS
)
4644 inst
.operands
[i
].preind
= 1;
4647 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
4649 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
4651 inst
.operands
[i
].imm
= reg
;
4652 inst
.operands
[i
].immisreg
= 1;
4654 if (skip_past_comma (&p
) == SUCCESS
)
4655 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
4656 return PARSE_OPERAND_FAIL
;
4658 else if (skip_past_char (&p
, ':') == SUCCESS
)
4660 /* FIXME: '@' should be used here, but it's filtered out by generic
4661 code before we get to see it here. This may be subject to
4664 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
4665 if (exp
.X_op
!= O_constant
)
4667 inst
.error
= _("alignment must be constant");
4668 return PARSE_OPERAND_FAIL
;
4670 inst
.operands
[i
].imm
= exp
.X_add_number
<< 8;
4671 inst
.operands
[i
].immisalign
= 1;
4672 /* Alignments are not pre-indexes. */
4673 inst
.operands
[i
].preind
= 0;
4677 if (inst
.operands
[i
].negative
)
4679 inst
.operands
[i
].negative
= 0;
4683 if (group_relocations
4684 && ((*p
== '#' && *(p
+ 1) == ':') || *p
== ':'))
4686 struct group_reloc_table_entry
*entry
;
4688 /* Skip over the #: or : sequence. */
4694 /* Try to parse a group relocation. Anything else is an
4696 if (find_group_reloc_table_entry (&p
, &entry
) == FAIL
)
4698 inst
.error
= _("unknown group relocation");
4699 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
4702 /* We now have the group relocation table entry corresponding to
4703 the name in the assembler source. Next, we parse the
4705 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
4706 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
4708 /* Record the relocation type. */
4712 inst
.reloc
.type
= entry
->ldr_code
;
4716 inst
.reloc
.type
= entry
->ldrs_code
;
4720 inst
.reloc
.type
= entry
->ldc_code
;
4727 if (inst
.reloc
.type
== 0)
4729 inst
.error
= _("this group relocation is not allowed on this instruction");
4730 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
4734 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
4735 return PARSE_OPERAND_FAIL
;
4739 if (skip_past_char (&p
, ']') == FAIL
)
4741 inst
.error
= _("']' expected");
4742 return PARSE_OPERAND_FAIL
;
4745 if (skip_past_char (&p
, '!') == SUCCESS
)
4746 inst
.operands
[i
].writeback
= 1;
4748 else if (skip_past_comma (&p
) == SUCCESS
)
4750 if (skip_past_char (&p
, '{') == SUCCESS
)
4752 /* [Rn], {expr} - unindexed, with option */
4753 if (parse_immediate (&p
, &inst
.operands
[i
].imm
,
4754 0, 255, TRUE
) == FAIL
)
4755 return PARSE_OPERAND_FAIL
;
4757 if (skip_past_char (&p
, '}') == FAIL
)
4759 inst
.error
= _("'}' expected at end of 'option' field");
4760 return PARSE_OPERAND_FAIL
;
4762 if (inst
.operands
[i
].preind
)
4764 inst
.error
= _("cannot combine index with option");
4765 return PARSE_OPERAND_FAIL
;
4768 return PARSE_OPERAND_SUCCESS
;
4772 inst
.operands
[i
].postind
= 1;
4773 inst
.operands
[i
].writeback
= 1;
4775 if (inst
.operands
[i
].preind
)
4777 inst
.error
= _("cannot combine pre- and post-indexing");
4778 return PARSE_OPERAND_FAIL
;
4782 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
4784 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
4786 /* We might be using the immediate for alignment already. If we
4787 are, OR the register number into the low-order bits. */
4788 if (inst
.operands
[i
].immisalign
)
4789 inst
.operands
[i
].imm
|= reg
;
4791 inst
.operands
[i
].imm
= reg
;
4792 inst
.operands
[i
].immisreg
= 1;
4794 if (skip_past_comma (&p
) == SUCCESS
)
4795 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
4796 return PARSE_OPERAND_FAIL
;
4800 if (inst
.operands
[i
].negative
)
4802 inst
.operands
[i
].negative
= 0;
4805 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
4806 return PARSE_OPERAND_FAIL
;
4811 /* If at this point neither .preind nor .postind is set, we have a
4812 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
4813 if (inst
.operands
[i
].preind
== 0 && inst
.operands
[i
].postind
== 0)
4815 inst
.operands
[i
].preind
= 1;
4816 inst
.reloc
.exp
.X_op
= O_constant
;
4817 inst
.reloc
.exp
.X_add_number
= 0;
4820 return PARSE_OPERAND_SUCCESS
;
4824 parse_address (char **str
, int i
)
4826 return parse_address_main (str
, i
, 0, 0) == PARSE_OPERAND_SUCCESS
4830 static parse_operand_result
4831 parse_address_group_reloc (char **str
, int i
, group_reloc_type type
)
4833 return parse_address_main (str
, i
, 1, type
);
/* Parse an operand for a MOVW or MOVT instruction.  */

static int
parse_half (char **str)
{
  char * p;

  p = *str;
  skip_past_char (&p, '#');
  if (strncasecmp (p, ":lower16:", 9) == 0)
    inst.reloc.type = BFD_RELOC_ARM_MOVW;
  else if (strncasecmp (p, ":upper16:", 9) == 0)
    inst.reloc.type = BFD_RELOC_ARM_MOVT;

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      p += 9;
      skip_whitespace (p);
    }

  if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
    return FAIL;

  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      if (inst.reloc.exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}
      if (inst.reloc.exp.X_add_number < 0
	  || inst.reloc.exp.X_add_number > 0xffff)
	{
	  inst.error = _("immediate value out of range");
	  return FAIL;
	}
    }
  *str = p;
  return SUCCESS;
}
4876 /* Miscellaneous. */
4878 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
4879 or a bitmask suitable to be or-ed into the ARM msr instruction. */
4881 parse_psr (char **str
)
4884 unsigned long psr_field
;
4885 const struct asm_psr
*psr
;
4888 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
4889 feature for ease of use and backwards compatibility. */
4891 if (strncasecmp (p
, "SPSR", 4) == 0)
4892 psr_field
= SPSR_BIT
;
4893 else if (strncasecmp (p
, "CPSR", 4) == 0)
4900 while (ISALNUM (*p
) || *p
== '_');
4902 psr
= hash_find_n (arm_v7m_psr_hsh
, start
, p
- start
);
4913 /* A suffix follows. */
4919 while (ISALNUM (*p
) || *p
== '_');
4921 psr
= hash_find_n (arm_psr_hsh
, start
, p
- start
);
4925 psr_field
|= psr
->field
;
4930 goto error
; /* Garbage after "[CS]PSR". */
4932 psr_field
|= (PSR_c
| PSR_f
);
4938 inst
.error
= _("flag for {c}psr instruction expected");
4942 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
4943 value suitable for splatting into the AIF field of the instruction. */
4946 parse_cps_flags (char **str
)
4955 case '\0': case ',':
4958 case 'a': case 'A': saw_a_flag
= 1; val
|= 0x4; break;
4959 case 'i': case 'I': saw_a_flag
= 1; val
|= 0x2; break;
4960 case 'f': case 'F': saw_a_flag
= 1; val
|= 0x1; break;
4963 inst
.error
= _("unrecognized CPS flag");
4968 if (saw_a_flag
== 0)
4970 inst
.error
= _("missing CPS flags");
4978 /* Parse an endian specifier ("BE" or "LE", case insensitive);
4979 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
4982 parse_endian_specifier (char **str
)
4987 if (strncasecmp (s
, "BE", 2))
4989 else if (strncasecmp (s
, "LE", 2))
4993 inst
.error
= _("valid endian specifiers are be or le");
4997 if (ISALNUM (s
[2]) || s
[2] == '_')
4999 inst
.error
= _("valid endian specifiers are be or le");
5004 return little_endian
;
5007 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
5008 value suitable for poking into the rotate field of an sxt or sxta
5009 instruction, or FAIL on error. */
5012 parse_ror (char **str
)
5017 if (strncasecmp (s
, "ROR", 3) == 0)
5021 inst
.error
= _("missing rotation field after comma");
5025 if (parse_immediate (&s
, &rot
, 0, 24, FALSE
) == FAIL
)
5030 case 0: *str
= s
; return 0x0;
5031 case 8: *str
= s
; return 0x1;
5032 case 16: *str
= s
; return 0x2;
5033 case 24: *str
= s
; return 0x3;
5036 inst
.error
= _("rotation can only be 0, 8, 16, or 24");
5041 /* Parse a conditional code (from conds[] below). The value returned is in the
5042 range 0 .. 14, or FAIL. */
5044 parse_cond (char **str
)
5047 const struct asm_cond
*c
;
5049 /* Condition codes are always 2 characters, so matching up to
5050 3 characters is sufficient. */
5055 while (ISALPHA (*q
) && n
< 3)
5057 cond
[n
] = TOLOWER(*q
);
5062 c
= hash_find_n (arm_cond_hsh
, cond
, n
);
5065 inst
.error
= _("condition required");
5073 /* Parse an option for a barrier instruction. Returns the encoding for the
5076 parse_barrier (char **str
)
5079 const struct asm_barrier_opt
*o
;
5082 while (ISALPHA (*q
))
5085 o
= hash_find_n (arm_barrier_opt_hsh
, p
, q
- p
);
5093 /* Parse the operands of a table branch instruction. Similar to a memory
5096 parse_tb (char **str
)
5101 if (skip_past_char (&p
, '[') == FAIL
)
5103 inst
.error
= _("'[' expected");
5107 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
5109 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
5112 inst
.operands
[0].reg
= reg
;
5114 if (skip_past_comma (&p
) == FAIL
)
5116 inst
.error
= _("',' expected");
5120 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
5122 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
5125 inst
.operands
[0].imm
= reg
;
5127 if (skip_past_comma (&p
) == SUCCESS
)
5129 if (parse_shift (&p
, 0, SHIFT_LSL_IMMEDIATE
) == FAIL
)
5131 if (inst
.reloc
.exp
.X_add_number
!= 1)
5133 inst
.error
= _("invalid shift");
5136 inst
.operands
[0].shifted
= 1;
5139 if (skip_past_char (&p
, ']') == FAIL
)
5141 inst
.error
= _("']' expected");
5148 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
5149 information on the types the operands can take and how they are encoded.
5150 Up to four operands may be read; this function handles setting the
5151 ".present" field for each read operand itself.
5152 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
5153 else returns FAIL. */
5156 parse_neon_mov (char **str
, int *which_operand
)
5158 int i
= *which_operand
, val
;
5159 enum arm_reg_type rtype
;
5161 struct neon_type_el optype
;
5163 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
5165 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
5166 inst
.operands
[i
].reg
= val
;
5167 inst
.operands
[i
].isscalar
= 1;
5168 inst
.operands
[i
].vectype
= optype
;
5169 inst
.operands
[i
++].present
= 1;
5171 if (skip_past_comma (&ptr
) == FAIL
)
5174 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
5177 inst
.operands
[i
].reg
= val
;
5178 inst
.operands
[i
].isreg
= 1;
5179 inst
.operands
[i
].present
= 1;
5181 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
, &optype
))
5184 /* Cases 0, 1, 2, 3, 5 (D only). */
5185 if (skip_past_comma (&ptr
) == FAIL
)
5188 inst
.operands
[i
].reg
= val
;
5189 inst
.operands
[i
].isreg
= 1;
5190 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
5191 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
5192 inst
.operands
[i
].isvec
= 1;
5193 inst
      .operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
        /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
           Case 13: VMOV <Sd>, <Rm>  */
        inst.operands[i].reg = val;
        inst.operands[i].isreg = 1;
        inst.operands[i].present = 1;

        if (rtype == REG_TYPE_NQ)
          first_error (_("can't use Neon quad register here"));
        else if (rtype != REG_TYPE_VFS)
          if (skip_past_comma (&ptr) == FAIL)
          if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
          inst.operands[i].reg = val;
          inst.operands[i].isreg = 1;
          inst.operands[i].present = 1;

      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
        /* Case 0: VMOV<c><q> <Qd>, <Qm>
           Case 1: VMOV<c><q> <Dd>, <Dm>
           Case 8: VMOV.F32 <Sd>, <Sm>
           Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */
        inst.operands[i].reg = val;
        inst.operands[i].isreg = 1;
        inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
        inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
        inst.operands[i].isvec = 1;
        inst.operands[i].vectype = optype;
        inst.operands[i].present = 1;

        if (skip_past_comma (&ptr) == SUCCESS)
          if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
          inst.operands[i].reg = val;
          inst.operands[i].isreg = 1;
          inst.operands[i++].present = 1;

          if (skip_past_comma (&ptr) == FAIL)
          if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
          inst.operands[i].reg = val;
          inst.operands[i].isreg = 1;
          inst.operands[i++].present = 1;

      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
        /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
           Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
           Case 10: VMOV.F32 <Sd>, #<imm>
           Case 11: VMOV.F64 <Dd>, #<imm>  */
        inst.operands[i].immisfloat = 1;
      else if (parse_big_immediate (&ptr, i) == SUCCESS)
        /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
           Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */

      first_error (_("expected <Rm> or <Dm> or <Qm> operand"));

  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)

      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
        /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
        inst.operands[i].reg = val;
        inst.operands[i].isscalar = 1;
        inst.operands[i].present = 1;
        inst.operands[i].vectype = optype;
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
        /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
        inst.operands[i].reg = val;
        inst.operands[i].isreg = 1;
        inst.operands[i++].present = 1;

        if (skip_past_comma (&ptr) == FAIL)

        if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
          first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));

        inst.operands[i].reg = val;
        inst.operands[i].isreg = 1;
        inst.operands[i].isvec = 1;
        inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
        inst.operands[i].vectype = optype;
        inst.operands[i].present = 1;

        if (rtype == REG_TYPE_VFS)
          if (skip_past_comma (&ptr) == FAIL)
          if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
            first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
          inst.operands[i].reg = val;
          inst.operands[i].isreg = 1;
          inst.operands[i].isvec = 1;
          inst.operands[i].issingle = 1;
          inst.operands[i].vectype = optype;
          inst.operands[i].present = 1;
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
        inst.operands[i].reg = val;
        inst.operands[i].isreg = 1;
        inst.operands[i].isvec = 1;
        inst.operands[i].issingle = 1;
        inst.operands[i].vectype = optype;
        inst.operands[i++].present = 1;

  first_error (_("parse error"));

  /* Successfully parsed the operands.  Update args.  */

  first_error (_("expected comma"));

  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
/* Matcher codes for parse_operands.  */
enum operand_parse_code
{
  OP_stop,        /* end of line */

  OP_RR,          /* ARM register */
  OP_RRnpc,       /* ARM register, not r15 */
  OP_RRnpcb,      /* ARM register, not r15, in square brackets */
  OP_RRw,         /* ARM register, not r15, optional trailing ! */
  OP_RCP,         /* Coprocessor number */
  OP_RCN,         /* Coprocessor register */
  OP_RF,          /* FPA register */
  OP_RVS,         /* VFP single precision register */
  OP_RVD,         /* VFP double precision register (0..15) */
  OP_RND,         /* Neon double precision register (0..31) */
  OP_RNQ,         /* Neon quad precision register */
  OP_RVSD,        /* VFP single or double precision register */
  OP_RNDQ,        /* Neon double or quad precision register */
  OP_RNSDQ,       /* Neon single, double or quad precision register */
  OP_RNSC,        /* Neon scalar D[X] */
  OP_RVC,         /* VFP control register */
  OP_RMF,         /* Maverick F register */
  OP_RMD,         /* Maverick D register */
  OP_RMFX,        /* Maverick FX register */
  OP_RMDX,        /* Maverick DX register */
  OP_RMAX,        /* Maverick AX register */
  OP_RMDS,        /* Maverick DSPSC register */
  OP_RIWR,        /* iWMMXt wR register */
  OP_RIWC,        /* iWMMXt wC register */
  OP_RIWG,        /* iWMMXt wCG register */
  OP_RXA,         /* XScale accumulator register */

  OP_REGLST,      /* ARM register list */
  OP_VRSLST,      /* VFP single-precision register list */
  OP_VRDLST,      /* VFP double-precision register list */
  OP_VRSDLST,     /* VFP single or double-precision register list (& quad) */
  OP_NRDLST,      /* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,     /* Neon element/structure list */

  OP_NILO,        /* Neon immediate/logic operands 2 or 2+3. (VBIC, VORR...)  */
  OP_RNDQ_I0,     /* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,     /* VFP S or D reg, or immediate zero.  */
  OP_RR_RNSC,     /* ARM reg or Neon scalar.  */
  OP_RNSDQ_RNSC,  /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC,   /* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,    /* Neon D reg, or Neon scalar.  */
  OP_VMOV,        /* Neon VMOV operands.  */
  OP_RNDQ_IMVNb,  /* Neon D or Q reg, or immediate good for VMVN.  */
  OP_RNDQ_I63b,   /* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z,   /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */

  OP_I0,          /* immediate zero */
  OP_I7,          /* immediate value 0 .. 7 */
  OP_I15,         /*                 0 .. 15 */
  OP_I16,         /*                 1 .. 16 */
  OP_I16z,        /*                 0 .. 16 */
  OP_I31,         /*                 0 .. 31 */
  OP_I31w,        /*                 0 .. 31, optional trailing ! */
  OP_I32,         /*                 1 .. 32 */
  OP_I32z,        /*                 0 .. 32 */
  OP_I63,         /*                 0 .. 63 */
  OP_I63s,        /*               -64 .. 63 */
  OP_I64,         /*                 1 .. 64 */
  OP_I64z,        /*                 0 .. 64 */
  OP_I255,        /*                 0 .. 255 */

  OP_I4b,         /* immediate, prefix optional, 1 .. 4 */
  OP_I7b,         /*                             0 .. 7 */
  OP_I15b,        /*                             0 .. 15 */
  OP_I31b,        /*                             0 .. 31 */

  OP_SH,          /* shifter operand */
  OP_SHG,         /* shifter operand with possible group relocation */
  OP_ADDR,        /* Memory address expression (any mode) */
  OP_ADDRGLDR,    /* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS,   /* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,    /* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,         /* arbitrary expression */
  OP_EXPi,        /* same, with optional immediate prefix */
  OP_EXPr,        /* same, with optional relocation suffix */
  OP_HALF,        /* 0 .. 65535 or low/high reloc.  */

  OP_CPSF,        /* CPS flags */
  OP_ENDI,        /* Endianness specifier */
  OP_PSR,         /* CPSR/SPSR mask for msr */
  OP_COND,        /* conditional code */
  OP_TB,          /* Table branch.  */

  OP_RVC_PSR,     /* CPSR/SPSR mask for msr, or VFP control register.  */
  OP_APSR_RR,     /* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,    /* ARM register or literal 0 */
  OP_RR_EXr,      /* ARM register or expression with opt. reloc suff. */
  OP_RR_EXi,      /* ARM register or expression with imm prefix */
  OP_RF_IF,       /* FPA register or immediate */
  OP_RIWR_RIWC,   /* iWMMXt R or C reg */
  OP_RIWC_RIWG,   /* iWMMXt wC or wCG reg */

  /* Optional operands.  */
  OP_oI7b,        /* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,       /*                             0 .. 31 */
  OP_oI32b,       /*                             1 .. 32 */
  OP_oIffffb,     /*                             0 .. 65535 */
  OP_oI255c,      /*     curly-brace enclosed,   0 .. 255 */

  OP_oRR,         /* ARM register */
  OP_oRRnpc,      /* ARM register, not the PC */
  OP_oRRw,        /* ARM register, not r15, optional trailing ! */
  OP_oRND,        /* Optional Neon double precision register */
  OP_oRNQ,        /* Optional Neon quad precision register */
  OP_oRNDQ,       /* Optional Neon double or quad precision register */
  OP_oRNSDQ,      /* Optional single, double or quad precision vector register */
  OP_oSHll,       /* LSL immediate */
  OP_oSHar,       /* ASR immediate */
  OP_oSHllar,     /* LSL or ASR immediate */
  OP_oROR,        /* ROR 0/8/16/24 */
  OP_oBARRIER,    /* Option argument for a barrier instruction.  */

  OP_FIRST_OPTIONAL = OP_oI7b
};
/* Generic instruction operand parser.  This does no encoding and no
   semantic validation; it merely squirrels values away in the inst
   structure.  Returns SUCCESS or FAIL depending on whether the
   specified grammar matched.  */

parse_operands (char *str, const unsigned char *pattern)
  unsigned const char *upat = pattern;
  char *backtrack_pos = 0;
  const char *backtrack_error = 0;
  int i, val, backtrack_index = 0;
  enum arm_reg_type rtype;
  parse_operand_result result;

#define po_char_or_fail(chr) do {               \
  if (skip_past_char (&str, chr) == FAIL)       \

#define po_reg_or_fail(regtype) do {                            \
  val = arm_typed_reg_parse (&str, regtype, &rtype,             \
                             &inst.operands[i].vectype);        \
      first_error (_(reg_expected_msgs[regtype]));              \
  inst.operands[i].reg = val;                                   \
  inst.operands[i].isreg = 1;                                   \
  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);             \
  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);          \
  inst.operands[i].isvec = (rtype == REG_TYPE_VFS               \
                            || rtype == REG_TYPE_VFD            \
                            || rtype == REG_TYPE_NQ);           \

#define po_reg_or_goto(regtype, label) do {                     \
  val = arm_typed_reg_parse (&str, regtype, &rtype,             \
                             &inst.operands[i].vectype);        \
  inst.operands[i].reg = val;                                   \
  inst.operands[i].isreg = 1;                                   \
  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);             \
  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);          \
  inst.operands[i].isvec = (rtype == REG_TYPE_VFS               \
                            || rtype == REG_TYPE_VFD            \
                            || rtype == REG_TYPE_NQ);           \

#define po_imm_or_fail(min, max, popt) do {                     \
  if (parse_immediate (&str, &val, min, max, popt) == FAIL)     \
  inst.operands[i].imm = val;                                   \

#define po_scalar_or_goto(elsz, label) do {                     \
  val = parse_scalar (&str, elsz, &inst.operands[i].vectype);   \
  inst.operands[i].reg = val;                                   \
  inst.operands[i].isscalar = 1;                                \

#define po_misc_or_fail(expr) do {              \

#define po_misc_or_fail_no_backtrack(expr) do {         \
  if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)        \
    backtrack_pos = 0;                                  \
  if (result != PARSE_OPERAND_SUCCESS)                  \

  skip_whitespace (str);

  for (i = 0; upat[i] != OP_stop; i++)
      if (upat[i] >= OP_FIRST_OPTIONAL)
          /* Remember where we are in case we need to backtrack.  */
          assert (!backtrack_pos);
          backtrack_pos = str;
          backtrack_error = inst.error;
          backtrack_index = i;

      if (i > 0 && (i > 1 || inst.operands[0].present))
        po_char_or_fail (',');
        case OP_RR:    po_reg_or_fail (REG_TYPE_RN);      break;
        case OP_RCP:   po_reg_or_fail (REG_TYPE_CP);      break;
        case OP_RCN:   po_reg_or_fail (REG_TYPE_CN);      break;
        case OP_RF:    po_reg_or_fail (REG_TYPE_FN);      break;
        case OP_RVS:   po_reg_or_fail (REG_TYPE_VFS);     break;
        case OP_RVD:   po_reg_or_fail (REG_TYPE_VFD);     break;

        case OP_RND:   po_reg_or_fail (REG_TYPE_VFD);     break;

          po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
          /* Also accept generic coprocessor regs for unknown registers.  */
          po_reg_or_fail (REG_TYPE_CN);

        case OP_RMF:   po_reg_or_fail (REG_TYPE_MVF);     break;
        case OP_RMD:   po_reg_or_fail (REG_TYPE_MVD);     break;
        case OP_RMFX:  po_reg_or_fail (REG_TYPE_MVFX);    break;
        case OP_RMDX:  po_reg_or_fail (REG_TYPE_MVDX);    break;
        case OP_RMAX:  po_reg_or_fail (REG_TYPE_MVAX);    break;
        case OP_RMDS:  po_reg_or_fail (REG_TYPE_DSPSC);   break;
        case OP_RIWR:  po_reg_or_fail (REG_TYPE_MMXWR);   break;
        case OP_RIWC:  po_reg_or_fail (REG_TYPE_MMXWC);   break;
        case OP_RIWG:  po_reg_or_fail (REG_TYPE_MMXWCG);  break;
        case OP_RXA:   po_reg_or_fail (REG_TYPE_XSCALE);  break;

        case OP_RNQ:   po_reg_or_fail (REG_TYPE_NQ);      break;

        case OP_RNDQ:  po_reg_or_fail (REG_TYPE_NDQ);     break;
        case OP_RVSD:  po_reg_or_fail (REG_TYPE_VFSD);    break;

        case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ);    break;

        /* Neon scalar. Using an element size of 8 means that some invalid
           scalars are accepted here, so deal with those in later code.  */
        case OP_RNSC:  po_scalar_or_goto (8, failure);    break;

        /* WARNING: We can expand to two operands here. This has the potential
           to totally confuse the backtracking mechanism! It will be OK at
           least as long as we don't try to use optional args as well,  */
            po_reg_or_goto (REG_TYPE_NDQ, try_imm);
            inst.operands[i].present = 1;

            skip_past_comma (&str);
            po_reg_or_goto (REG_TYPE_NDQ, one_reg_only);

            /* Optional register operand was omitted. Unfortunately, it's in
               operands[i-1] and we need it to be in inst.operands[i]. Fix that
               here (this is a bit grotty).  */
            inst.operands[i] = inst.operands[i-1];
            inst.operands[i-1].present = 0;

            /* There's a possibility of getting a 64-bit immediate here, so
               we need special handling.  */
            if (parse_big_immediate (&str, i) == FAIL)
                inst.error = _("immediate value is out of range");

          po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
          po_imm_or_fail (0, 0, TRUE);

          po_reg_or_goto (REG_TYPE_VFSD, try_imm0);

          po_scalar_or_goto (8, try_rr);
          po_reg_or_fail (REG_TYPE_RN);

            po_scalar_or_goto (8, try_nsdq);
            po_reg_or_fail (REG_TYPE_NSDQ);

            po_scalar_or_goto (8, try_ndq);
            po_reg_or_fail (REG_TYPE_NDQ);

            po_scalar_or_goto (8, try_vfd);
            po_reg_or_fail (REG_TYPE_VFD);

          /* WARNING: parse_neon_mov can move the operand counter, i. If we're
             not careful then bad things might happen.  */
          po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);

          po_reg_or_goto (REG_TYPE_NDQ, try_mvnimm);
            /* There's a possibility of getting a 64-bit immediate here, so
               we need special handling.  */
            if (parse_big_immediate (&str, i) == FAIL)
                inst.error = _("immediate value is out of range");

          po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
          po_imm_or_fail (0, 63, TRUE);

          po_char_or_fail ('[');
          po_reg_or_fail (REG_TYPE_RN);
          po_char_or_fail (']');

          po_reg_or_fail (REG_TYPE_RN);
          if (skip_past_char (&str, '!') == SUCCESS)
            inst.operands[i].writeback = 1;

        case OP_I7:      po_imm_or_fail (  0,      7, FALSE);   break;
        case OP_I15:     po_imm_or_fail (  0,     15, FALSE);   break;
        case OP_I16:     po_imm_or_fail (  1,     16, FALSE);   break;
        case OP_I16z:    po_imm_or_fail (  0,     16, FALSE);   break;
        case OP_I31:     po_imm_or_fail (  0,     31, FALSE);   break;
        case OP_I32:     po_imm_or_fail (  1,     32, FALSE);   break;
        case OP_I32z:    po_imm_or_fail (  0,     32, FALSE);   break;
        case OP_I63s:    po_imm_or_fail (-64,     63, FALSE);   break;
        case OP_I63:     po_imm_or_fail (  0,     63, FALSE);   break;
        case OP_I64:     po_imm_or_fail (  1,     64, FALSE);   break;
        case OP_I64z:    po_imm_or_fail (  0,     64, FALSE);   break;
        case OP_I255:    po_imm_or_fail (  0,    255, FALSE);   break;

        case OP_I4b:     po_imm_or_fail (  1,      4, TRUE);    break;
        case OP_I7b:     po_imm_or_fail (  0,      7, TRUE);    break;
        case OP_I15b:    po_imm_or_fail (  0,     15, TRUE);    break;
        case OP_I31b:    po_imm_or_fail (  0,     31, TRUE);    break;
        case OP_oI32b:   po_imm_or_fail (  1,     32, TRUE);    break;
        case OP_oIffffb: po_imm_or_fail (  0, 0xffff, TRUE);    break;

        /* Immediate variants */
          po_char_or_fail ('{');
          po_imm_or_fail (0, 255, TRUE);
          po_char_or_fail ('}');

            /* The expression parser chokes on a trailing !, so we have
               to find it first and zap it.  */
            while (*s && *s != ',')
                inst.operands[i].writeback = 1;
            po_imm_or_fail (0, 31, TRUE);
          po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,

          po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,

          po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
          if (inst.reloc.exp.X_op == O_symbol)
              val = parse_reloc (&str);
                  inst.error = _("unrecognized relocation suffix");
              else if (val != BFD_RELOC_UNUSED)
                  inst.operands[i].imm = val;
                  inst.operands[i].hasreloc = 1;

          /* Operand for MOVW or MOVT.  */
          po_misc_or_fail (parse_half (&str));

          /* Register or expression */
        case OP_RR_EXr:   po_reg_or_goto (REG_TYPE_RN, EXPr); break;
        case OP_RR_EXi:   po_reg_or_goto (REG_TYPE_RN, EXPi); break;

          /* Register or immediate */
        case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0);   break;
        I0:               po_imm_or_fail (0, 0, FALSE);       break;

        case OP_RF_IF:    po_reg_or_goto (REG_TYPE_FN, IF);   break;
          if (!is_immediate_prefix (*str))
          val = parse_fpa_immediate (&str);
          /* FPA immediates are encoded as registers 8-15.
             parse_fpa_immediate has already applied the offset.  */
          inst.operands[i].reg = val;
          inst.operands[i].isreg = 1;

        case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
        I32z:              po_imm_or_fail (0, 32, FALSE);         break;

          /* Two kinds of register */
            struct reg_entry *rege = arm_reg_parse_multi (&str);
                || (rege->type != REG_TYPE_MMXWR
                    && rege->type != REG_TYPE_MMXWC
                    && rege->type != REG_TYPE_MMXWCG))
                inst.error = _("iWMMXt data or control register expected");
            inst.operands[i].reg = rege->number;
            inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);

            struct reg_entry *rege = arm_reg_parse_multi (&str);
                || (rege->type != REG_TYPE_MMXWC
                    && rege->type != REG_TYPE_MMXWCG))
                inst.error = _("iWMMXt control register expected");
            inst.operands[i].reg = rege->number;
            inst.operands[i].isreg = 1;

          /* Misc */
        case OP_CPSF:    val = parse_cps_flags (&str);          break;
        case OP_ENDI:    val = parse_endian_specifier (&str);   break;
        case OP_oROR:    val = parse_ror (&str);                break;
        case OP_PSR:     val = parse_psr (&str);                break;
        case OP_COND:    val = parse_cond (&str);               break;
        case OP_oBARRIER:val = parse_barrier (&str);            break;

          po_reg_or_goto (REG_TYPE_VFC, try_psr);
          inst.operands[i].isvec = 1;  /* Mark VFP control reg as vector.  */
          val = parse_psr (&str);

          po_reg_or_goto (REG_TYPE_RN, try_apsr);
          /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS  */
          if (strncasecmp (str, "APSR_", 5) == 0)
                  case 'c': found = (found & 1) ? 16 : found | 1; break;
                  case 'n': found = (found & 2) ? 16 : found | 2; break;
                  case 'z': found = (found & 4) ? 16 : found | 4; break;
                  case 'v': found = (found & 8) ? 16 : found | 8; break;
                  default: found = 16;
              inst.operands[i].isvec = 1;

          po_misc_or_fail (parse_tb (&str));
          /* Register lists */
          val = parse_reg_list (&str);
              inst.operands[1].writeback = 1;

          val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);

          val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);

          /* Allow Q registers too.  */
          val = parse_vfp_reg_list (&str, &inst.operands[i].reg,

          val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
          inst.operands[i].issingle = 1;

          val = parse_vfp_reg_list (&str, &inst.operands[i].reg,

          val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
                                           &inst.operands[i].vectype);

          /* Addressing modes */
          po_misc_or_fail (parse_address (&str, i));

          po_misc_or_fail_no_backtrack (
            parse_address_group_reloc (&str, i, GROUP_LDR));

          po_misc_or_fail_no_backtrack (
            parse_address_group_reloc (&str, i, GROUP_LDRS));

          po_misc_or_fail_no_backtrack (
            parse_address_group_reloc (&str, i, GROUP_LDC));

          po_misc_or_fail (parse_shifter_operand (&str, i));

          po_misc_or_fail_no_backtrack (
            parse_shifter_operand_group_reloc (&str, i));

          po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));

          po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));

          po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));

          as_fatal (_("unhandled operand code %d"), upat[i]);

      /* Various value-based sanity checks and shared operations.  We
         do not signal immediate failures for the register constraints;
         this allows a syntax error to take precedence.  */
          if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
            inst.error = BAD_PC;

          inst.operands[i].imm = val;

      /* If we get here, this operand was successfully parsed.  */
      inst.operands[i].present = 1;

      inst.error = BAD_ARGS;

      /* The parse routine should already have set inst.error, but set a
         default here just in case.  */
        inst.error = _("syntax error");

      /* Do not backtrack over a trailing optional argument that
         absorbed some text.  We will only fail again, with the
         'garbage following instruction' error message, which is
         probably less helpful than the current one.  */
      if (backtrack_index == i && backtrack_pos != str
          && upat[i+1] == OP_stop)
            inst.error = _("syntax error");

      /* Try again, skipping the optional argument at backtrack_pos.  */
      str = backtrack_pos;
      inst.error = backtrack_error;
      inst.operands[backtrack_index].present = 0;
      i = backtrack_index;
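  /* Illustration of the backtracking above (added worked example): with a
     pattern whose middle operand is optional, such as ADC's
     "<Rd>, {<Rn>,} <shifter>", the input "adc r0, r1" first lets the
     optional <Rn> slot greedily consume "r1"; the mandatory <shifter>
     then fails, so str and inst.error are rewound to backtrack_pos and
     backtrack_error, the optional operand is marked absent, and "r1" is
     re-parsed as the <shifter> operand.  The encoding function later
     defaults the omitted <Rn> to <Rd>.  */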
  /* Check that we have parsed all the arguments.  */
  if (*str != '\0' && !inst.error)
    inst.error = _("garbage following instruction");

  return inst.error ? FAIL : SUCCESS;

#undef po_char_or_fail
#undef po_reg_or_fail
#undef po_reg_or_goto
#undef po_imm_or_fail
#undef po_scalar_or_fail

/* Shorthand macro for instruction encoding functions issuing errors.  */
#define constraint(expr, err) do {              \

/* Functions for operand encoding.  ARM, then Thumb.  */

#define rotate_left(v, n) (v << n | v >> (32 - n))

/* If VAL can be encoded in the immediate field of an ARM instruction,
   return the encoded form.  Otherwise, return FAIL.  */

encode_arm_immediate (unsigned int val)
  for (i = 0; i < 32; i += 2)
    if ((a = rotate_left (val, i)) <= 0xff)
      return a | (i << 7); /* 12-bit pack: [shift-cnt,const].  */
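/* Worked example: 0x0000FF00 is representable, since
   rotate_left (0x0000FF00, 24) == 0xFF; the function returns
   0xFF | (24 << 7) == 0xCFF, i.e. an 8-bit constant of 0xFF with a
   rotate field of 12 (rotate right by 24 reproduces 0x0000FF00).
   A value such as 0x00000101, whose set bits are 8 apart, cannot be
   brought into 8 bits by any even rotation and yields FAIL.  */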
/* If VAL can be encoded in the immediate field of a Thumb32 instruction,
   return the encoded form.  Otherwise, return FAIL.  */

encode_thumb32_immediate (unsigned int val)
  for (i = 1; i <= 24; i++)
      if ((val & ~(0xff << i)) == 0)
        return ((val >> i) & 0x7f) | ((32 - i) << 7);

  if (val == ((a << 16) | a))

  if (val == ((a << 24) | (a << 16) | (a << 8) | a))

  if (val == ((a << 16) | a))
    return 0x200 | (a >> 8);
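/* Worked examples of the Thumb-2 special immediate forms tested above:
   0x00AB00AB matches the first (a << 16) | a test with a == 0xAB and is
   encoded as 0x100 | 0xAB; 0xABABABAB matches the replicated-byte test
   and encodes as 0x300 | 0xAB; 0xAB00AB00 matches the final test, where
   a == val & 0xff00, and encodes as 0x200 | (0xAB00 >> 8).  Anything
   else must fit the shifted-8-bit pattern tried in the loop above, or
   the function fails.  */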
/* Encode a VFP SP or DP register number into inst.instruction.  */

encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
  if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
            ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
            ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
          first_error (_("D register out of range for selected VFP version"));

      inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);

      inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);

      inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);

      inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);

      inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);

      inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
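/* Worked example: placing D17 (reg == 17) in the Dd position ORs in
   ((17 & 15) << 12) | ((17 >> 4) << 22), i.e. the low four bits land in
   bits 15-12 and the fifth bit becomes the D bit (bit 22); this split
   encoding of D16-D31 is what requires the D-register range check
   above.  */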
/* Encode a <shift> in an ARM-format instruction.  The immediate,
   if any, is handled by md_apply_fix.  */

encode_arm_shift (int i)
  if (inst.operands[i].shift_kind == SHIFT_RRX)
    inst.instruction |= SHIFT_ROR << 5;
      inst.instruction |= inst.operands[i].shift_kind << 5;
      if (inst.operands[i].immisreg)
          inst.instruction |= SHIFT_BY_REG;
          inst.instruction |= inst.operands[i].imm << 8;
        inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;

encode_arm_shifter_operand (int i)
  if (inst.operands[i].isreg)
      inst.instruction |= inst.operands[i].reg;
      encode_arm_shift (i);
    inst.instruction |= INST_IMMEDIATE;

/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.  */

encode_arm_addr_mode_common (int i, bfd_boolean is_t)
  assert (inst.operands[i].isreg);
  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
          inst.error = _("instruction does not accept preindexed addressing");
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
        inst.instruction |= WRITE_BACK;

  else if (inst.operands[i].postind)
      assert (inst.operands[i].writeback);
        inst.instruction |= WRITE_BACK;

  else /* unindexed - only for coprocessor */
      inst.error = _("instruction does not accept unindexed addressing");

  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
          == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
             ? _("destination register same as write-back base")
             : _("source register same as write-back base"));
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 2 load or store instruction.  If is_t is true,
   reject forms that cannot be used with a T instruction (i.e. not  */

encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
      inst.instruction |= INST_IMMEDIATE;  /* yes, this is backwards */
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
        inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
          if (inst.operands[i].shift_kind == SHIFT_RRX)
            inst.instruction |= SHIFT_ROR << 5;
              inst.instruction |= inst.operands[i].shift_kind << 5;
              inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
  else /* immediate offset in inst.reloc */
      if (inst.reloc.type == BFD_RELOC_UNUSED)
        inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;

/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 3 load or store instruction.  Reject forms that
   cannot be used with such instructions.  If is_t is true, reject
   forms that cannot be used with a T instruction (i.e. not  */

encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
      inst.error = _("instruction does not accept scaled register index");

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
        inst.instruction |= INDEX_UP;
  else /* immediate offset in inst.reloc */
      inst.instruction |= HWOFFSET_IMM;
      if (inst.reloc.type == BFD_RELOC_UNUSED)
        inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;

/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format instruction.  Reject all forms which cannot be encoded
   into a coprocessor load/store instruction.  If wb_ok is false,
   reject use of writeback; if unind_ok is false, reject use of
   unindexed addressing.  If reloc_override is not 0, use it instead
   of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
   (in which case it is preserved).  */

encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
  inst.instruction |= inst.operands[i].reg << 16;

  assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
      assert (!inst.operands[i].writeback);
          inst.error = _("instruction does not support unindexed addressing");
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
      if (inst.operands[i].reg == REG_PC)
          inst.error = _("pc may not be used with write-back");
          inst.error = _("instruction does not support writeback");
      inst.instruction |= WRITE_BACK;

    inst.reloc.type = reloc_override;
  else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
            || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
           && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
        inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
        inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
/* inst.reloc.exp describes an "=expr" load pseudo-operation.
   Determine whether it can be performed with a move instruction; if
   it can, convert inst.instruction to that move instruction and
   return 1; if it can't, convert inst.instruction to a literal-pool
   load and return 0.  If this is not a valid thing to do in the
   current context, set inst.error and return 1.

   inst.operands[i] describes the destination register.  */

move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3)
  tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;

  if ((inst.instruction & tbit) == 0)
      inst.error = _("invalid pseudo operation");
  if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol)
      inst.error = _("constant expression expected");
  if (inst.reloc.exp.X_op == O_constant)
          if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
              /* This can be done with a mov(1) instruction.  */
              inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
              inst.instruction |= inst.reloc.exp.X_add_number;

          int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
              /* This can be done with a mov instruction.  */
              inst.instruction &= LITERAL_MASK;
              inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
              inst.instruction |= value & 0xfff;

          value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
              /* This can be done with a mvn instruction.  */
              inst.instruction &= LITERAL_MASK;
              inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
              inst.instruction |= value & 0xfff;

  if (add_to_lit_pool () == FAIL)
      inst.error = _("literal pool insertion failed");

  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.reloc.pc_rel = 1;
  inst.reloc.type = (thumb_p
                     ? BFD_RELOC_ARM_THUMB_OFFSET
                        ? BFD_RELOC_ARM_HWLITERAL
                        : BFD_RELOC_ARM_LITERAL));
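/* Examples of the conversion above: "ldr r0, =0xff0000" can be encoded
   as "mov r0, #0xff0000" (encode_arm_immediate succeeds), and
   "ldr r0, =0xffff00ff" as "mvn r0, #0xff00" using the inverted value.
   A constant such as 0x12345678 fits neither form, so it is added to
   the literal pool and the instruction becomes a PC-relative load with
   a literal-pool relocation as selected above.  */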
/* Functions for instruction encoding, sorted by sub-architecture.
   First some generics; their names are taken from the conventional
   bit positions for register arguments in ARM format instructions.  */

  inst.instruction |= inst.operands[0].reg << 12;

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 12;

  unsigned Rn = inst.operands[2].reg;
  /* Enforce restrictions on SWP instruction.  */
  if ((inst.instruction & 0x0fbfffff) == 0x01000090)
    constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
                _("Rn must not overlap other operands"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= Rn << 16;

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;

  inst.instruction |= inst.operands[0].imm;

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);

/* ARM instructions, in alphabetical order by function name (except
   that wrapper functions appear immediately after the function they  */

/* This is a pseudo-op of the form "adr rd, label" to be converted
   into a relative address of the form "add rd, pc, #label-.-8".  */

  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 1;
  inst.reloc.exp.X_add_number -= 8;

/* This is a pseudo-op of the form "adrl rd, label" to be converted
   into a relative address of the form:
   add rd, pc, #low(label-.-8)"
   add rd, rd, #high(label-.-8)"  */

  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
  inst.reloc.pc_rel = 1;
  inst.size = INSN_SIZE * 2;
  inst.reloc.exp.X_add_number -= 8;
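/* For instance, "adrl r0, label" reserves two words (INSN_SIZE * 2)
   here and is later fixed up as a pair of the form
       add r0, pc, #<low part of label-.-8>
       add r0, r0, #<high part of label-.-8>
   (or sub, if the offset turns out negative), which is why it uses
   BFD_RELOC_ARM_ADRL_IMMEDIATE rather than the single-instruction
   BFD_RELOC_ARM_IMMEDIATE used by adr above.  */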
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_shifter_operand (2);

  if (inst.operands[0].present)
      constraint ((inst.instruction & 0xf0) != 0x40
                  && inst.operands[0].imm != 0xf,
                  _("bad barrier type"));
      inst.instruction |= inst.operands[0].imm;
    inst.instruction |= 0xf;

  unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 7;
  inst.instruction |= (msb - 1) << 16;

  /* #0 in second position is alternative syntax for bfc, which is
     the same instruction but with REG_PC in the Rm field.  */
  if (!inst.operands[1].isreg)
    inst.operands[1].reg = REG_PC;

  msb = inst.operands[2].imm + inst.operands[3].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 7;
  inst.instruction |= (msb - 1) << 16;
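/* Worked example: "bfi r0, r1, #8, #4" has lsb == 8 and width == 4, so
   msb == 12; the lsb goes to bits 11-7 and msb - 1 == 11 to bits 20-16.
   "bfi r0, #0, #8, #4" is rewritten as the equivalent bfc by forcing
   Rm to the PC, as the comment above notes.  */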
  constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
              _("bit-field extends past end of register"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 7;
  inst.instruction |= (inst.operands[3].imm - 1) << 16;

/* ARM V5 breakpoint instruction (argument parse)
     BKPT <16 bit unsigned immediate>
     Instruction is not conditional.
        The bit pattern given in insns[] has the COND_ALWAYS condition,
        and it is an error if the caller tried to override that.  */

  /* Top 12 of 16 bits to bits 19:8.  */
  inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;

  /* Bottom 4 of 16 bits to bits 3:0.  */
  inst.instruction |= inst.operands[0].imm & 0xf;

encode_branch (int default_reloc)
  if (inst.operands[0].hasreloc)
      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32,
                  _("the only suffix valid here is '(plt)'"));
      inst.reloc.type = BFD_RELOC_ARM_PLT32;
    inst.reloc.type = default_reloc;
  inst.reloc.pc_rel = 1;

  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);

  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
      if (inst.cond == COND_ALWAYS)
        encode_branch (BFD_RELOC_ARM_PCREL_CALL);
        encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);

/* ARM V5 branch-link-exchange instruction (argument parse)
     BLX <target_addr>          ie BLX(1)
     BLX{<condition>} <Rm>      ie BLX(2)
   Unfortunately, there are two different opcodes for this mnemonic.
   So, the insns[].value is not used, and the code here zaps values
        into inst.instruction.
   Also, the <target_addr> can be 25 bits, hence has its own reloc.  */

  if (inst.operands[0].isreg)
      /* Arg is a register; the opcode provided by insns[] is correct.
         It is not illegal to do "blx pc", just useless.  */
      if (inst.operands[0].reg == REG_PC)
        as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));

      inst.instruction |= inst.operands[0].reg;

      /* Arg is an address; this instruction cannot be executed
         conditionally, and the opcode must be adjusted.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction = 0xfa000000;
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
        encode_branch (BFD_RELOC_ARM_PCREL_CALL);
        encode_branch (BFD_RELOC_ARM_PCREL_BLX);

  bfd_boolean want_reloc;

  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;
  /* Output R_ARM_V4BX relocations if is an EABI object that looks like
     it is for ARMv4t or earlier.  */
  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
  if (object_arch && !ARM_CPU_HAS_FEATURE (*object_arch, arm_ext_v5))
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
    inst.reloc.type = BFD_RELOC_ARM_V4BX;
/* ARM v5TEJ.  Jump to Jazelle code.  */

  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bxj is not really useful"));

  inst.instruction |= inst.operands[0].reg;

/* Co-processor data operation:
      CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
      CDP2      <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}  */

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 20;
  inst.instruction |= inst.operands[2].reg << 12;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[4].reg;
  inst.instruction |= inst.operands[5].imm << 5;

  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_shifter_operand (1);

/* Transfer between coprocessor and ARM registers.
      MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
   No special properties.  */

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 21;
  inst.instruction |= inst.operands[2].reg << 12;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[4].reg;
  inst.instruction |= inst.operands[5].imm << 5;

/* Transfer between coprocessor register and pair of ARM registers.
      MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
   Two XScale instructions are special cases of these:
     MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
     MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
   Result unpredictable if Rd or Rn is R15.  */

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 4;
  inst.instruction |= inst.operands[2].reg << 12;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[4].reg;

  inst.instruction |= inst.operands[0].imm << 6;
  if (inst.operands[1].present)
      inst.instruction |= CPSI_MMOD;
      inst.instruction |= inst.operands[1].imm;

  inst.instruction |= inst.operands[0].imm;
/* There is no IT instruction in ARM mode.  We
   process it but do not generate code for it.  */

  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback.  */
      if (inst.instruction & LOAD_BIT)
          /* Not allowed in LDM type 2.  */
          if ((inst.instruction & LDM_TYPE_2_OR_3)
              && ((range & (1 << REG_PC)) == 0))
            as_warn (_("writeback of base register is UNPREDICTABLE"));
          /* Only allowed if base reg not in list for other types.  */
          else if (range & (1 << base_reg))
            as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));

          /* Not allowed for type 2.  */
          if (inst.instruction & LDM_TYPE_2_OR_3)
            as_warn (_("writeback of base register is UNPREDICTABLE"));
          /* Only allowed if base reg not in list, or first in list.  */
          else if ((range & (1 << base_reg))
                   && (range & ((1 << base_reg) - 1)))
            as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
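/* For example, "ldmia r0!, {r0, r1}" loads the base register while also
   writing it back, so the "writeback of base register when in register
   list" warning above fires; for a store, "stmia r1!, {r0, r1}" warns
   because the written-back base r1 is in the list but is not the lowest
   register in it.  */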
/* ARMv5TE load-consecutive (argument parse)  */

  constraint (inst.operands[0].reg % 2 != 0,
              _("first destination register must be even"));
  constraint (inst.operands[1].present
              && inst.operands[1].reg != inst.operands[0].reg + 1,
              _("can only load two consecutive registers"));
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  if (inst.instruction & LOAD_BIT)
      /* encode_arm_addr_mode_3 will diagnose overlap between the base
         register and the first register written; we have to diagnose
         overlap between the base and the second register written here.  */
      if (inst.operands[2].reg == inst.operands[1].reg
          && (inst.operands[2].writeback || inst.operands[2].postind))
        as_warn (_("base register written back, and overlaps "
                   "second destination register"));

      /* For an index-register load, the index register must not overlap the
         destination (even if not write-back).  */
      else if (inst.operands[2].immisreg
               && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
                   || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
        as_warn (_("index register overlaps destination register"));

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
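/* For example, "ldrd r0, [r2]" is accepted and treated as
   "ldrd r0, r1, [r2]": the first destination must be even and not r14,
   the second (defaulted above to the next register) must be
   consecutive, and warnings are issued when a written-back base or an
   index register overlaps either destination.  */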
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
              || inst.operands[1].postind || inst.operands[1].writeback
              || inst.operands[1].immisreg || inst.operands[1].shifted
              || inst.operands[1].negative
              /* This can arise if the programmer has written
                 or if they have mistakenly used a register name as the last
                 It is very difficult to distinguish between these two cases
                 because "rX" might actually be a label. ie the register
                 name has been occluded by a symbol of the same name. So we
                 just generate a general 'bad addressing mode' type error
                 message and leave it up to the programmer to discover the
                 true cause and fix their mistake.  */
              || (inst.operands[1].reg == REG_PC),

  constraint (inst.reloc.exp.X_op != O_constant
              || inst.reloc.exp.X_add_number != 0,
              _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.reloc.type = BFD_RELOC_UNUSED;

  constraint (inst.operands[0].reg % 2 != 0,
              _("even register required"));
  constraint (inst.operands[1].present
              && inst.operands[1].reg != inst.operands[0].reg + 1,
              _("can only load two consecutive registers"));
  /* If op 1 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;

  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/FALSE))
  encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);

  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and  */
  if (inst.operands[1].preind)
      constraint (inst.reloc.exp.X_op != O_constant
                  || inst.reloc.exp.X_add_number != 0,
                  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);

/* Halfword and signed-byte load/store operations.  */

  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/TRUE))
  encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);

  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and  */
  if (inst.operands[1].preind)
      constraint (inst.reloc.exp.X_op != O_constant
                  || inst.reloc.exp.X_add_number != 0,
                  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
/* Co-processor register load/store.
   Format: <LDC|STC>{cond}[L] CP#,CRd,<address>  */

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 12;
  encode_arm_cp_address (2, TRUE, TRUE, 0);

  /* This restriction does not apply to mls (nor to mla in v6 or later).  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
      && !(inst.instruction & 0x00400000))
    as_tsktsk (_("Rd and Rm should be different in mla"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 12;

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_shifter_operand (1);

/* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>.  */

  top = (inst.instruction & 0x00400000) != 0;
  constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
              _(":lower16: not allowed this instruction"));
  constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
              _(":upper16: not allowed instruction"));
  inst.instruction |= inst.operands[0].reg << 12;
  if (inst.reloc.type == BFD_RELOC_UNUSED)
      imm = inst.reloc.exp.X_add_number;
      /* The value is in two pieces: 0:11, 16:19.  */
      inst.instruction |= (imm & 0x00000fff);
      inst.instruction |= (imm & 0x0000f000) << 4;
static void do_vfp_nsyn_opcode (const char *);

do_vfp_nsyn_mrs (void)
  if (inst.operands[0].isvec)
      if (inst.operands[1].reg != 1)
        first_error (_("operand 1 must be FPSCR"));
      memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
      memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
      do_vfp_nsyn_opcode ("fmstat");
  else if (inst.operands[1].isvec)
    do_vfp_nsyn_opcode ("fmrx");

do_vfp_nsyn_msr (void)
  if (inst.operands[0].isvec)
    do_vfp_nsyn_opcode ("fmxr");

  if (do_vfp_nsyn_mrs () == SUCCESS)

  /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
  constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
              _("'CPSR' or 'SPSR' expected"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= (inst.operands[1].imm & SPSR_BIT);

/* Two possible forms:
      "{C|S}PSR_<field>, Rm",
      "{C|S}PSR_f, #expression".  */

  if (do_vfp_nsyn_msr () == SUCCESS)

  inst.instruction |= inst.operands[0].imm;
  if (inst.operands[1].isreg)
    inst.instruction |= inst.operands[1].reg;
      inst.instruction |= INST_IMMEDIATE;
      inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
      inst.reloc.pc_rel = 0;

  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;

  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("Rd and Rm should be different in mul"));
/* Long Multiply Parser
   UMULL RdLo, RdHi, Rm, Rs
   SMULL RdLo, RdHi, Rm, Rs
   UMLAL RdLo, RdHi, Rm, Rs
   SMLAL RdLo, RdHi, Rm, Rs.  */

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  /* rdhi and rdlo must be different.  */
  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));

  /* rdhi, rdlo and rm must all be different before armv6.  */
  if ((inst.operands[0].reg == inst.operands[2].reg
       || inst.operands[1].reg == inst.operands[2].reg)
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("rdhi, rdlo and rm must all be different"));

  if (inst.operands[0].present)
      /* Architectural NOP hints are CPSR sets with no bits selected.  */
      inst.instruction &= 0xf0000000;
      inst.instruction |= 0x0320f000 + inst.operands[0].imm;

/* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
   PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
   Condition defaults to COND_ALWAYS.
   Error if Rd, Rn or Rm are R15.  */

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  if (inst.operands[3].present)
    encode_arm_shift (3);

/* ARM V6 PKHTB (Argument Parse).  */

  if (!inst.operands[3].present)
      /* If the shift specifier is omitted, turn the instruction
         into pkhbt rd, rm, rn.  */
      inst.instruction &= 0xfff00010;
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg;
      inst.instruction |= inst.operands[2].reg << 16;

      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[2].reg;
      encode_arm_shift (3);

/* ARMv5TE: Preload-Cache
   Syntactically, like LDR with B=1, W=0, L=1.  */

  constraint (!inst.operands[0].isreg,
              _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
              _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
              _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
              _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);

/* ARMv7: PLI <addr_mode>  */

  constraint (!inst.operands[0].isreg,
              _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
              _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
              _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
              _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
  inst.instruction &= ~PRE_INDEX;

  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].reg = REG_SP;

/* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
   word at the specified address and the following word
   Unconditionally executed.
   Error if Rn is R15.  */

  inst.instruction |= inst.operands[0].reg << 16;
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;

/* ARM V6 ssat (argument parse).  */

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= (inst.operands[1].imm - 1) << 16;
  inst.instruction |= inst.operands[2].reg;

  if (inst.operands[3].present)
    encode_arm_shift (3);
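/* Worked example: "ssat r0, #16, r1" stores 16 - 1 == 15 in the sat_imm
   field (bits 20-16), since SSAT encodes the saturation position as
   sat_imm + 1; USAT below stores its immediate unmodified.  */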
/* ARM V6 usat (argument parse).  */

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 16;
  inst.instruction |= inst.operands[2].reg;

  if (inst.operands[3].present)
    encode_arm_shift (3);

/* ARM V6 ssat16 (argument parse).  */

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= ((inst.operands[1].imm - 1) << 16);
  inst.instruction |= inst.operands[2].reg;

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 16;
  inst.instruction |= inst.operands[2].reg;

/* ARM V6 SETEND (argument parse).  Sets the E bit in the CPSR while
   preserving the other bits.
   setend <endian_specifier>, where <endian_specifier> is either  */

  if (inst.operands[0].imm)
    inst.instruction |= 0x200;

  unsigned int Rm = (inst.operands[1].present
                     ? inst.operands[1].reg
                     : inst.operands[0].reg);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= Rm;
  if (inst.operands[2].isreg)  /* Rd, {Rm,} Rs */
      inst.instruction |= inst.operands[2].reg << 8;
      inst.instruction |= SHIFT_BY_REG;
      inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;

  inst.reloc.type = BFD_RELOC_ARM_SMC;
  inst.reloc.pc_rel = 0;

  inst.reloc.type = BFD_RELOC_ARM_SWI;
  inst.reloc.pc_rel = 0;
/* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
   SMLAxy{cond} Rd,Rm,Rs,Rn
   SMLAWy{cond} Rd,Rm,Rs,Rn
   Error if any register is R15.  */

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 12;

/* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
   SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
   Error if any register is R15.
   Warning if Rdlo == Rdhi.  */

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));

/* ARM V5E (El Segundo) signed-multiply (argument parse)
   SMULxy{cond} Rd,Rm,Rs
   Error if any register is R15.  */

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;

/* ARM V6 srs (argument parse).  The variable fields in the encoding are
   the same for both ARM and Thumb-2.  */

  if (inst.operands[0].present)
      reg = inst.operands[0].reg;
      constraint (reg != 13, _("SRS base register must be r13"));

  inst.instruction |= reg << 16;
  inst.instruction |= inst.operands[1].imm;
  if (inst.operands[0].writeback || inst.operands[1].writeback)
    inst.instruction |= WRITE_BACK;

/* ARM V6 strex (argument parse).  */

  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
              || inst.operands[2].postind || inst.operands[2].writeback
              || inst.operands[2].immisreg || inst.operands[2].shifted
              || inst.operands[2].negative
              /* See comment in do_ldrex().  */
              || (inst.operands[2].reg == REG_PC),

  constraint (inst.operands[0].reg == inst.operands[1].reg
              || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  constraint (inst.reloc.exp.X_op != O_constant
              || inst.reloc.exp.X_add_number != 0,
              _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.reloc.type = BFD_RELOC_UNUSED;

  constraint (inst.operands[1].reg % 2 != 0,
              _("even register required"));
  constraint (inst.operands[2].present
              && inst.operands[2].reg != inst.operands[1].reg + 1,
              _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  constraint (inst.operands[0].reg == inst.operands[1].reg
              || inst.operands[0].reg == inst.operands[1].reg + 1
              || inst.operands[0].reg == inst.operands[3].reg,

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[3].reg << 16;

/* ARM V6 SXTAH extracts a 16-bit value from a register, sign
   extends it to 32-bits, and adds the result to a value in another
   register.  You can specify a rotation by 0, 8, 16, or 24 bits
   before extracting the 16-bit value.
   SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
   Condition defaults to COND_ALWAYS.
   Error if any register uses R15.  */

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 10;

/* SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
   Condition defaults to COND_ALWAYS.
   Error if any register uses R15.  */

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 10;
7590 /* VFP instructions. In a logical order: SP variant first, monad
7591 before dyad, arithmetic then move then load/store. */
7594 do_vfp_sp_monadic (void)
7596 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7597 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
7601 do_vfp_sp_dyadic (void)
7603 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7604 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
7605 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
7609 do_vfp_sp_compare_z (void)
7611 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7615 do_vfp_dp_sp_cvt (void)
7617 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7618 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
7622 do_vfp_sp_dp_cvt (void)
7624 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7625 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
7629 do_vfp_reg_from_sp (void)
7631 inst.instruction |= inst.operands[0].reg << 12;
7632 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
7636 do_vfp_reg2_from_sp2 (void)
7638 constraint (inst.operands[2].imm != 2,
7639 _("only two consecutive VFP SP registers allowed here"));
7640 inst.instruction |= inst.operands[0].reg << 12;
7641 inst.instruction |= inst.operands[1].reg << 16;
7642 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
7646 do_vfp_sp_from_reg (void)
7648 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
7649 inst.instruction |= inst.operands[1].reg << 12;
7653 do_vfp_sp2_from_reg2 (void)
7655 constraint (inst.operands[0].imm != 2,
7656 _("only two consecutive VFP SP registers allowed here"));
7657 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
7658 inst.instruction |= inst.operands[1].reg << 12;
7659 inst.instruction |= inst.operands[2].reg << 16;
7663 do_vfp_sp_ldst (void)
7665 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7666 encode_arm_cp_address (1, FALSE, TRUE, 0);
7670 do_vfp_dp_ldst (void)
7672 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7673 encode_arm_cp_address (1, FALSE, TRUE, 0);
7678 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
7680 if (inst.operands[0].writeback)
7681 inst.instruction |= WRITE_BACK;
7683 constraint (ldstm_type != VFP_LDSTMIA,
7684 _("this addressing mode requires base-register writeback"));
7685 inst.instruction |= inst.operands[0].reg << 16;
7686 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
7687 inst.instruction |= inst.operands[1].imm;
7691 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
7695 if (inst.operands[0].writeback)
7696 inst.instruction |= WRITE_BACK;
7698 constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
7699 _("this addressing mode requires base-register writeback"));
7701 inst.instruction |= inst.operands[0].reg << 16;
7702 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7704 count = inst.operands[1].imm << 1;
7705 if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
7708 inst.instruction |= count;
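/* The X forms (FLDMX/FSTMX) transfer an odd number of words, 2 * n + 1,
   which is why the doubled register count gets the extra VFP_LDSTMIAX /
   VFP_LDSTMDBX adjustment above before being written to the offset field.  */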
7712 do_vfp_sp_ldstmia (void)
7714 vfp_sp_ldstm (VFP_LDSTMIA);
7718 do_vfp_sp_ldstmdb (void)
7720 vfp_sp_ldstm (VFP_LDSTMDB);
7724 do_vfp_dp_ldstmia (void)
7726 vfp_dp_ldstm (VFP_LDSTMIA);
7730 do_vfp_dp_ldstmdb (void)
7732 vfp_dp_ldstm (VFP_LDSTMDB);
7736 do_vfp_xp_ldstmia (void)
7738 vfp_dp_ldstm (VFP_LDSTMIAX);
7742 do_vfp_xp_ldstmdb (void)
7744 vfp_dp_ldstm (VFP_LDSTMDBX);
7748 do_vfp_dp_rd_rm (void)
7750 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7751 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
7755 do_vfp_dp_rn_rd (void)
7757 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
7758 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7762 do_vfp_dp_rd_rn (void)
7764 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7765 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
7769 do_vfp_dp_rd_rn_rm (void)
7771 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7772 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
7773 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
7779 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7783 do_vfp_dp_rm_rd_rn (void)
7785 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
7786 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7787 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
7790 /* VFPv3 instructions. */
7792 do_vfp_sp_const (void)
7794 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7795 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
7796 inst.instruction |= (inst.operands[1].imm & 0x0f);
7800 do_vfp_dp_const (void)
7802 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7803 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
7804 inst.instruction |= (inst.operands[1].imm & 0x0f);
7808 vfp_conv (int srcsize)
7810 unsigned immbits = srcsize - inst.operands[1].imm;
7811 inst.instruction |= (immbits & 1) << 5;
7812 inst.instruction |= (immbits >> 1);
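/* vfp_conv encodes the fixed-point conversion immediate as srcsize minus
   the number of fraction bits: the low bit of that value lands in bit 5
   and the remaining bits in bits 0-3 of the instruction.  */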
7816 do_vfp_sp_conv_16 (void)
7818 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7823 do_vfp_dp_conv_16 (void)
7825 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7830 do_vfp_sp_conv_32 (void)
7832 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7837 do_vfp_dp_conv_32 (void)
7839 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7843 /* FPA instructions. Also in a logical order. */
7848 inst.instruction |= inst.operands[0].reg << 16;
7849 inst.instruction |= inst.operands[1].reg;
7853 do_fpa_ldmstm (void)
7855 inst.instruction |= inst.operands[0].reg << 12;
7856 switch (inst.operands[1].imm)
7858 case 1: inst.instruction |= CP_T_X; break;
7859 case 2: inst.instruction |= CP_T_Y; break;
7860 case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
7865 if (inst.instruction & (PRE_INDEX | INDEX_UP))
7867 /* The instruction specified "ea" or "fd", so we can only accept
7868 [Rn]{!}. The instruction does not really support stacking or
7869 unstacking, so we have to emulate these by setting appropriate
7870 bits and offsets. */
7871 constraint (inst.reloc.exp.X_op != O_constant
7872 || inst.reloc.exp.X_add_number != 0,
7873 _("this instruction does not support indexing"));
7875 if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
7876 inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;
7878 if (!(inst.instruction & INDEX_UP))
7879 inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;
7881 if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
7883 inst.operands[2].preind = 0;
7884 inst.operands[2].postind = 1;
7888 encode_arm_cp_address (2, TRUE, TRUE, 0);
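/* Each FPA register occupies three words in memory, hence the emulated
   stacking forms above use an offset of 12 bytes per transferred register.  */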
7891 /* iWMMXt instructions: strictly in alphabetical order. */
7894 do_iwmmxt_tandorc (void)
7896 constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
7900 do_iwmmxt_textrc (void)
7902 inst.instruction |= inst.operands[0].reg << 12;
7903 inst.instruction |= inst.operands[1].imm;
7907 do_iwmmxt_textrm (void)
7909 inst.instruction |= inst.operands[0].reg << 12;
7910 inst.instruction |= inst.operands[1].reg << 16;
7911 inst.instruction |= inst.operands[2].imm;
7915 do_iwmmxt_tinsr (void)
7917 inst.instruction |= inst.operands[0].reg << 16;
7918 inst.instruction |= inst.operands[1].reg << 12;
7919 inst.instruction |= inst.operands[2].imm;
7923 do_iwmmxt_tmia (void)
7925 inst.instruction |= inst.operands[0].reg << 5;
7926 inst.instruction |= inst.operands[1].reg;
7927 inst.instruction |= inst.operands[2].reg << 12;
7931 do_iwmmxt_waligni (void)
7933 inst.instruction |= inst.operands[0].reg << 12;
7934 inst.instruction |= inst.operands[1].reg << 16;
7935 inst.instruction |= inst.operands[2].reg;
7936 inst.instruction |= inst.operands[3].imm << 20;
7940 do_iwmmxt_wmerge (void)
7942 inst.instruction |= inst.operands[0].reg << 12;
7943 inst.instruction |= inst.operands[1].reg << 16;
7944 inst.instruction |= inst.operands[2].reg;
7945 inst.instruction |= inst.operands[3].imm << 21;
7949 do_iwmmxt_wmov (void)
7951 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
7952 inst.instruction |= inst.operands[0].reg << 12;
7953 inst.instruction |= inst.operands[1].reg << 16;
7954 inst.instruction |= inst.operands[1].reg;
7958 do_iwmmxt_wldstbh (void)
7961 inst.instruction |= inst.operands[0].reg << 12;
7963 reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
7965 reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
7966 encode_arm_cp_address (1, TRUE, FALSE, reloc);
7970 do_iwmmxt_wldstw (void)
7972 /* RIWR_RIWC clears .isreg for a control register. */
7973 if (!inst.operands[0].isreg)
7975 constraint (inst.cond != COND_ALWAYS, BAD_COND);
7976 inst.instruction |= 0xf0000000;
7979 inst.instruction |= inst.operands[0].reg << 12;
7980 encode_arm_cp_address (1, TRUE, TRUE, 0);
7984 do_iwmmxt_wldstd (void)
7986 inst.instruction |= inst.operands[0].reg << 12;
7987 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
7988 && inst.operands[1].immisreg)
7990 inst.instruction &= ~0x1a000ff;
7991 inst.instruction |= (0xf << 28);
7992 if (inst.operands[1].preind)
7993 inst.instruction |= PRE_INDEX;
7994 if (!inst.operands[1].negative)
7995 inst.instruction |= INDEX_UP;
7996 if (inst.operands[1].writeback)
7997 inst.instruction |= WRITE_BACK;
7998 inst.instruction |= inst.operands[1].reg << 16;
7999 inst.instruction |= inst.reloc.exp.X_add_number << 4;
8000 inst.instruction |= inst.operands[1].imm;
8003 encode_arm_cp_address (1, TRUE, FALSE, 0);
8007 do_iwmmxt_wshufh (void)
8009 inst.instruction |= inst.operands[0].reg << 12;
8010 inst.instruction |= inst.operands[1].reg << 16;
8011 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
8012 inst.instruction |= (inst.operands[2].imm & 0x0f);
8016 do_iwmmxt_wzero (void)
8018 /* WZERO reg is an alias for WANDN reg, reg, reg. */
8019 inst.instruction |= inst.operands[0].reg;
8020 inst.instruction |= inst.operands[0].reg << 12;
8021 inst.instruction |= inst.operands[0].reg << 16;
8025 do_iwmmxt_wrwrwr_or_imm5 (void)
8027 if (inst.operands[2].isreg)
8030 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
8031 _("immediate operand requires iWMMXt2"));
8033 if (inst.operands[2].imm == 0)
8035 switch ((inst.instruction >> 20) & 0xf)
8041 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
8042 inst.operands[2].imm = 16;
8043 inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
8049 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
8050 inst.operands[2].imm = 32;
8051 inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
8058 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
8060 wrn = (inst.instruction >> 16) & 0xf;
8061 inst.instruction &= 0xff0fff0f;
8062 inst.instruction |= wrn;
8063 /* Bail out here; the instruction is now assembled. */
8068 /* Map 32 -> 0, etc. */
8069 inst.operands[2].imm &= 0x1f;
8070 inst.instruction |= (0xf << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
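/* For the iWMMXt2 immediate form above the condition field is forced to
   0xF and the 5-bit shift amount is split: bit 4 of the immediate moves to
   bit 8 of the instruction, bits 0-3 stay in bits 0-3.  */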
8074 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
8075 operations first, then control, shift, and load/store. */
8077 /* Insns like "foo X,Y,Z". */
8080 do_mav_triple (void)
8082 inst.instruction |= inst.operands[0].reg << 16;
8083 inst.instruction |= inst.operands[1].reg;
8084 inst.instruction |= inst.operands[2].reg << 12;
8087 /* Insns like "foo W,X,Y,Z".
8088 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
8093 inst.instruction |= inst.operands[0].reg << 5;
8094 inst.instruction |= inst.operands[1].reg << 12;
8095 inst.instruction |= inst.operands[2].reg << 16;
8096 inst.instruction |= inst.operands[3].reg;
8099 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
8103 inst.instruction |= inst.operands[1].reg << 12;
8106 /* Maverick shift immediate instructions.
8107 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
8108 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
8113 int imm = inst.operands[2].imm;
8115 inst.instruction |= inst.operands[0].reg << 12;
8116 inst.instruction |= inst.operands[1].reg << 16;
8118 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
8119 Bits 5-7 of the insn should have bits 4-6 of the immediate.
8120 Bit 4 should be 0. */
8121 imm = (imm & 0xf) | ((imm & 0x70) << 1);
8123 inst.instruction |= imm;
8126 /* XScale instructions. Also sorted arithmetic before move. */
8128 /* Xscale multiply-accumulate (argument parse)
8131 MIAxycc acc0,Rm,Rs. */
8136 inst.instruction |= inst.operands[1].reg;
8137 inst.instruction |= inst.operands[2].reg << 12;
8140 /* Xscale move-accumulator-register (argument parse)
8142 MARcc acc0,RdLo,RdHi. */
8147 inst.instruction |= inst.operands[1].reg << 12;
8148 inst.instruction |= inst.operands[2].reg << 16;
8151 /* Xscale move-register-accumulator (argument parse)
8153 MRAcc RdLo,RdHi,acc0. */
8158 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
8159 inst.instruction |= inst.operands[0].reg << 12;
8160 inst.instruction |= inst.operands[1].reg << 16;
8163 /* Encoding functions relevant only to Thumb. */
8165 /* inst.operands[i] is a shifted-register operand; encode
8166 it into inst.instruction in the format used by Thumb32. */
8169 encode_thumb32_shifted_operand (int i)
8171 unsigned int value = inst.reloc.exp.X_add_number;
8172 unsigned int shift = inst.operands[i].shift_kind;
8174 constraint (inst.operands[i].immisreg,
8175 _("shift by register not allowed in thumb mode"));
8176 inst.instruction |= inst.operands[i].reg;
8177 if (shift == SHIFT_RRX)
8178 inst.instruction |= SHIFT_ROR << 4;
8181 constraint (inst.reloc.exp.X_op != O_constant,
8182 _("expression too complex"));
8184 constraint (value > 32
8185 || (value == 32 && (shift == SHIFT_LSL
8186 || shift == SHIFT_ROR)),
8187 _("shift expression is too large"));
8191 else if (value == 32)
8194 inst.instruction |= shift << 4;
8195 inst.instruction |= (value & 0x1c) << 10;
8196 inst.instruction |= (value & 0x03) << 6;
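/* Layout used above for a Thumb-2 shifted-register operand: the shift type
   occupies bits 4-5, and the shift amount is split into imm3 (bits 12-14)
   and imm2 (bits 6-7).  */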
8201 /* inst.operands[i] was set up by parse_address. Encode it into a
8202 Thumb32 format load or store instruction. Reject forms that cannot
8203 be used with such instructions. If is_t is true, reject forms that
8204 cannot be used with a T instruction; if is_d is true, reject forms
8205 that cannot be used with a D instruction. */
8208 encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
8210 bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
8212 constraint (!inst.operands[i].isreg,
8213 _("Instruction does not support =N addresses"));
8215 inst.instruction |= inst.operands[i].reg << 16;
8216 if (inst.operands[i].immisreg)
8218 constraint (is_pc, _("cannot use register index with PC-relative addressing"));
8219 constraint (is_t || is_d, _("cannot use register index with this instruction"));
8220 constraint (inst.operands[i].negative,
8221 _("Thumb does not support negative register indexing"));
8222 constraint (inst.operands[i].postind,
8223 _("Thumb does not support register post-indexing"));
8224 constraint (inst.operands[i].writeback,
8225 _("Thumb does not support register indexing with writeback"));
8226 constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
8227 _("Thumb supports only LSL in shifted register indexing"));
8229 inst.instruction |= inst.operands[i].imm;
8230 if (inst.operands[i].shifted)
8232 constraint (inst.reloc.exp.X_op != O_constant,
8233 _("expression too complex"));
8234 constraint (inst.reloc.exp.X_add_number < 0
8235 || inst.reloc.exp.X_add_number > 3,
8236 _("shift out of range"));
8237 inst.instruction |= inst.reloc.exp.X_add_number << 4;
8239 inst.reloc.type = BFD_RELOC_UNUSED;
8241 else if (inst.operands[i].preind)
8243 constraint (is_pc && inst.operands[i].writeback,
8244 _("cannot use writeback with PC-relative addressing"));
8245 constraint (is_t && inst.operands[i].writeback,
8246 _("cannot use writeback with this instruction"));
8250 inst.instruction |= 0x01000000;
8251 if (inst.operands[i].writeback)
8252 inst.instruction |= 0x00200000;
8256 inst.instruction |= 0x00000c00;
8257 if (inst.operands[i].writeback)
8258 inst.instruction |= 0x00000100;
8260 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
8262 else if (inst.operands[i].postind)
8264 assert (inst.operands[i].writeback);
8265 constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
8266 constraint (is_t, _("cannot use post-indexing with this instruction"));
8269 inst.instruction |= 0x00200000;
8271 inst.instruction |= 0x00000900;
8272 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
8274 else /* unindexed - only for coprocessor */
8275 inst.error = _("instruction does not accept unindexed addressing");
8278 /* Table of Thumb instructions which exist in both 16- and 32-bit
8279 encodings (the latter only in post-V6T2 cores). The index is the
8280 value used in the insns table below. When there is more than one
8281 possible 16-bit encoding for the instruction, this table always
8283 Also contains several pseudo-instructions used during relaxation. */
8284 #define T16_32_TAB \
8285 X(adc, 4140, eb400000), \
8286 X(adcs, 4140, eb500000), \
8287 X(add, 1c00, eb000000), \
8288 X(adds, 1c00, eb100000), \
8289 X(addi, 0000, f1000000), \
8290 X(addis, 0000, f1100000), \
8291 X(add_pc,000f, f20f0000), \
8292 X(add_sp,000d, f10d0000), \
8293 X(adr, 000f, f20f0000), \
8294 X(and, 4000, ea000000), \
8295 X(ands, 4000, ea100000), \
8296 X(asr, 1000, fa40f000), \
8297 X(asrs, 1000, fa50f000), \
8298 X(b, e000, f000b000), \
8299 X(bcond, d000, f0008000), \
8300 X(bic, 4380, ea200000), \
8301 X(bics, 4380, ea300000), \
8302 X(cmn, 42c0, eb100f00), \
8303 X(cmp, 2800, ebb00f00), \
8304 X(cpsie, b660, f3af8400), \
8305 X(cpsid, b670, f3af8600), \
8306 X(cpy, 4600, ea4f0000), \
8307 X(dec_sp,80dd, f1ad0d00), \
8308 X(eor, 4040, ea800000), \
8309 X(eors, 4040, ea900000), \
8310 X(inc_sp,00dd, f10d0d00), \
8311 X(ldmia, c800, e8900000), \
8312 X(ldr, 6800, f8500000), \
8313 X(ldrb, 7800, f8100000), \
8314 X(ldrh, 8800, f8300000), \
8315 X(ldrsb, 5600, f9100000), \
8316 X(ldrsh, 5e00, f9300000), \
8317 X(ldr_pc,4800, f85f0000), \
8318 X(ldr_pc2,4800, f85f0000), \
8319 X(ldr_sp,9800, f85d0000), \
8320 X(lsl, 0000, fa00f000), \
8321 X(lsls, 0000, fa10f000), \
8322 X(lsr, 0800, fa20f000), \
8323 X(lsrs, 0800, fa30f000), \
8324 X(mov, 2000, ea4f0000), \
8325 X(movs, 2000, ea5f0000), \
8326 X(mul, 4340, fb00f000), \
8327 X(muls, 4340, ffffffff), /* no 32b muls */ \
8328 X(mvn, 43c0, ea6f0000), \
8329 X(mvns, 43c0, ea7f0000), \
8330 X(neg, 4240, f1c00000), /* rsb #0 */ \
8331 X(negs, 4240, f1d00000), /* rsbs #0 */ \
8332 X(orr, 4300, ea400000), \
8333 X(orrs, 4300, ea500000), \
8334 X(pop, bc00, e8bd0000), /* ldmia sp!,... */ \
8335 X(push, b400, e92d0000), /* stmdb sp!,... */ \
8336 X(rev, ba00, fa90f080), \
8337 X(rev16, ba40, fa90f090), \
8338 X(revsh, bac0, fa90f0b0), \
8339 X(ror, 41c0, fa60f000), \
8340 X(rors, 41c0, fa70f000), \
8341 X(sbc, 4180, eb600000), \
8342 X(sbcs, 4180, eb700000), \
8343 X(stmia, c000, e8800000), \
8344 X(str, 6000, f8400000), \
8345 X(strb, 7000, f8000000), \
8346 X(strh, 8000, f8200000), \
8347 X(str_sp,9000, f84d0000), \
8348 X(sub, 1e00, eba00000), \
8349 X(subs, 1e00, ebb00000), \
8350 X(subi, 8000, f1a00000), \
8351 X(subis, 8000, f1b00000), \
8352 X(sxtb, b240, fa4ff080), \
8353 X(sxth, b200, fa0ff080), \
8354 X(tst, 4200, ea100f00), \
8355 X(uxtb, b2c0, fa5ff080), \
8356 X(uxth, b280, fa1ff080), \
8357 X(nop, bf00, f3af8000), \
8358 X(yield, bf10, f3af8001), \
8359 X(wfe, bf20, f3af8002), \
8360 X(wfi, bf30, f3af8003), \
8361 X(sev, bf40, f3af9004), /* typo, 8004? */
8363 /* To catch errors in encoding functions, the codes are all offset by
8364 0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
8365 as 16-bit instructions. */
8366 #define X(a,b,c) T_MNEM_##a
8367 enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
8370 #define X(a,b,c) 0x##b
8371 static const unsigned short thumb_op16[] = { T16_32_TAB };
8372 #define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
8375 #define X(a,b,c) 0x##c
8376 static const unsigned int thumb_op32[] = { T16_32_TAB };
8377 #define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
8378 #define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
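/* The table is expanded three times by redefining X.  For example,
   X(adc, 4140, eb400000) contributes T_MNEM_adc to the enum, 0x4140 to
   thumb_op16[] and 0xeb400000 to thumb_op32[], so THUMB_OP16 (T_MNEM_adc)
   is 0x4140 and THUMB_SETS_FLAGS (T_MNEM_adcs) is nonzero.  */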
8382 /* Thumb instruction encoders, in alphabetical order. */
8386 do_t_add_sub_w (void)
8390 Rd = inst.operands[0].reg;
8391 Rn = inst.operands[1].reg;
8393 constraint (Rd == 15, _("PC not allowed as destination"));
8394 inst.instruction |= (Rn << 16) | (Rd << 8);
8395 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
8398 /* Parse an add or subtract instruction. We get here with inst.instruction
8399 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
8406 Rd = inst.operands[0].reg;
8407 Rs = (inst.operands[1].present
8408 ? inst.operands[1].reg /* Rd, Rs, foo */
8409 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
8417 flags = (inst.instruction == T_MNEM_adds
8418 || inst.instruction == T_MNEM_subs);
8420 narrow = (current_it_mask == 0);
8422 narrow = (current_it_mask != 0);
8423 if (!inst.operands[2].isreg)
8427 add = (inst.instruction == T_MNEM_add
8428 || inst.instruction == T_MNEM_adds);
8430 if (inst.size_req != 4)
8432 /* Attempt to use a narrow opcode, with relaxation if
8434 if (Rd == REG_SP && Rs == REG_SP && !flags)
8435 opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
8436 else if (Rd <= 7 && Rs == REG_SP && add && !flags)
8437 opcode = T_MNEM_add_sp;
8438 else if (Rd <= 7 && Rs == REG_PC && add && !flags)
8439 opcode = T_MNEM_add_pc;
8440 else if (Rd <= 7 && Rs <= 7 && narrow)
8443 opcode = add ? T_MNEM_addis : T_MNEM_subis;
8445 opcode = add ? T_MNEM_addi : T_MNEM_subi;
8449 inst.instruction = THUMB_OP16(opcode);
8450 inst.instruction |= (Rd << 4) | Rs;
8451 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
8452 if (inst.size_req != 2)
8453 inst.relax = opcode;
8456 constraint (inst.size_req == 2, BAD_HIREG);
8458 if (inst.size_req == 4
8459 || (inst.size_req != 2 && !opcode))
8463 constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
8464 _("only SUBS PC, LR, #const allowed"));
8465 constraint (inst.reloc.exp.X_op != O_constant,
8466 _("expression too complex"));
8467 constraint (inst.reloc.exp.X_add_number < 0
8468 || inst.reloc.exp.X_add_number > 0xff,
8469 _("immediate value out of range"));
8470 inst.instruction = T2_SUBS_PC_LR
8471 | inst.reloc.exp.X_add_number;
8472 inst.reloc.type = BFD_RELOC_UNUSED;
8475 else if (Rs == REG_PC)
8477 /* Always use addw/subw. */
8478 inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
8479 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
8483 inst.instruction = THUMB_OP32 (inst.instruction);
8484 inst.instruction = (inst.instruction & 0xe1ffffff)
8487 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8489 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
8491 inst.instruction |= Rd << 8;
8492 inst.instruction |= Rs << 16;
8497 Rn = inst.operands[2].reg;
8498 /* See if we can do this with a 16-bit instruction. */
8499 if (!inst.operands[2].shifted && inst.size_req != 4)
8501 if (Rd > 7 || Rs > 7 || Rn > 7)
8506 inst.instruction = ((inst.instruction == T_MNEM_adds
8507 || inst.instruction == T_MNEM_add)
8510 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
8514 if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
8516 /* Thumb-1 cores (except v6-M) require at least one high
8517 register in a narrow non flag setting add. */
8518 if (Rd > 7 || Rn > 7
8519 || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
8520 || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
8527 inst.instruction = T_OPCODE_ADD_HI;
8528 inst.instruction |= (Rd & 8) << 4;
8529 inst.instruction |= (Rd & 7);
8530 inst.instruction |= Rn << 3;
8535 /* If we get here, it can't be done in 16 bits. */
8536 constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
8537 _("shift must be constant"));
8538 inst.instruction = THUMB_OP32 (inst.instruction);
8539 inst.instruction |= Rd << 8;
8540 inst.instruction |= Rs << 16;
8541 encode_thumb32_shifted_operand (2);
8546 constraint (inst.instruction == T_MNEM_adds
8547 || inst.instruction == T_MNEM_subs,
8550 if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
8552 constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
8553 || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
8556 inst.instruction = (inst.instruction == T_MNEM_add
8558 inst.instruction |= (Rd << 4) | Rs;
8559 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
8563 Rn = inst.operands[2].reg;
8564 constraint (inst.operands[2].shifted, _("unshifted register required"));
8566 /* We now have Rd, Rs, and Rn set to registers. */
8567 if (Rd > 7 || Rs > 7 || Rn > 7)
8569 /* Can't do this for SUB. */
8570 constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
8571 inst.instruction = T_OPCODE_ADD_HI;
8572 inst.instruction |= (Rd & 8) << 4;
8573 inst.instruction |= (Rd & 7);
8575 inst.instruction |= Rn << 3;
8577 inst.instruction |= Rs << 3;
8579 constraint (1, _("dest must overlap one source register"));
8583 inst.instruction = (inst.instruction == T_MNEM_add
8584 ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
8585 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
8593 if (unified_syntax && inst.size_req == 0 && inst.operands[0].reg <= 7)
8595 /* Defer to section relaxation. */
8596 inst.relax = inst.instruction;
8597 inst.instruction = THUMB_OP16 (inst.instruction);
8598 inst.instruction |= inst.operands[0].reg << 4;
8600 else if (unified_syntax && inst.size_req != 2)
8602 /* Generate a 32-bit opcode. */
8603 inst.instruction = THUMB_OP32 (inst.instruction);
8604 inst.instruction |= inst.operands[0].reg << 8;
8605 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
8606 inst.reloc.pc_rel = 1;
8610 /* Generate a 16-bit opcode. */
8611 inst.instruction = THUMB_OP16 (inst.instruction);
8612 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
8613 inst.reloc.exp.X_add_number -= 4; /* PC relative adjust. */
8614 inst.reloc.pc_rel = 1;
8616 inst.instruction |= inst.operands[0].reg << 4;
8620 /* Arithmetic instructions for which there is just one 16-bit
8621 instruction encoding, and it allows only two low registers.
8622 For maximal compatibility with ARM syntax, we allow three register
8623 operands even when Thumb-32 instructions are not available, as long
8624 as the first two are identical. For instance, both "sbc r0,r1" and
8625 "sbc r0,r0,r1" are allowed. */
8631 Rd = inst.operands[0].reg;
8632 Rs = (inst.operands[1].present
8633 ? inst.operands[1].reg /* Rd, Rs, foo */
8634 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
8635 Rn = inst.operands[2].reg;
8639 if (!inst.operands[2].isreg)
8641 /* For an immediate, we always generate a 32-bit opcode;
8642 section relaxation will shrink it later if possible. */
8643 inst.instruction = THUMB_OP32 (inst.instruction);
8644 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
8645 inst.instruction |= Rd << 8;
8646 inst.instruction |= Rs << 16;
8647 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8653 /* See if we can do this with a 16-bit instruction. */
8654 if (THUMB_SETS_FLAGS (inst.instruction))
8655 narrow = current_it_mask == 0;
8657 narrow = current_it_mask != 0;
8659 if (Rd > 7 || Rn > 7 || Rs > 7)
8661 if (inst.operands[2].shifted)
8663 if (inst.size_req == 4)
8669 inst.instruction = THUMB_OP16 (inst.instruction);
8670 inst.instruction |= Rd;
8671 inst.instruction |= Rn << 3;
8675 /* If we get here, it can't be done in 16 bits. */
8676 constraint (inst.operands[2].shifted
8677 && inst.operands[2].immisreg,
8678 _("shift must be constant"));
8679 inst.instruction = THUMB_OP32 (inst.instruction);
8680 inst.instruction |= Rd << 8;
8681 inst.instruction |= Rs << 16;
8682 encode_thumb32_shifted_operand (2);
8687 /* On its face this is a lie - the instruction does set the
8688 flags. However, the only supported mnemonic in this mode
8690 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
8692 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
8693 _("unshifted register required"));
8694 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
8695 constraint (Rd != Rs,
8696 _("dest and source1 must be the same register"));
8698 inst.instruction = THUMB_OP16 (inst.instruction);
8699 inst.instruction |= Rd;
8700 inst.instruction |= Rn << 3;
8704 /* Similarly, but for instructions where the arithmetic operation is
8705 commutative, so we can allow either of them to be different from
8706 the destination operand in a 16-bit instruction. For instance, all
8707 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
8714 Rd = inst.operands[0].reg;
8715 Rs = (inst.operands[1].present
8716 ? inst.operands[1].reg /* Rd, Rs, foo */
8717 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
8718 Rn = inst.operands[2].reg;
8722 if (!inst.operands[2].isreg)
8724 /* For an immediate, we always generate a 32-bit opcode;
8725 section relaxation will shrink it later if possible. */
8726 inst.instruction = THUMB_OP32 (inst.instruction);
8727 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
8728 inst.instruction |= Rd << 8;
8729 inst.instruction |= Rs << 16;
8730 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8736 /* See if we can do this with a 16-bit instruction. */
8737 if (THUMB_SETS_FLAGS (inst.instruction))
8738 narrow = current_it_mask == 0;
8740 narrow = current_it_mask != 0;
8742 if (Rd > 7 || Rn > 7 || Rs > 7)
8744 if (inst.operands[2].shifted)
8746 if (inst.size_req == 4)
8753 inst.instruction = THUMB_OP16 (inst.instruction);
8754 inst.instruction |= Rd;
8755 inst.instruction |= Rn << 3;
8760 inst.instruction = THUMB_OP16 (inst.instruction);
8761 inst.instruction |= Rd;
8762 inst.instruction |= Rs << 3;
8767 /* If we get here, it can't be done in 16 bits. */
8768 constraint (inst.operands[2].shifted
8769 && inst.operands[2].immisreg,
8770 _("shift must be constant"));
8771 inst.instruction = THUMB_OP32 (inst.instruction);
8772 inst.instruction |= Rd << 8;
8773 inst.instruction |= Rs << 16;
8774 encode_thumb32_shifted_operand (2);
8779 /* On its face this is a lie - the instruction does set the
8780 flags. However, the only supported mnemonic in this mode
8782 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
8784 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
8785 _("unshifted register required"));
8786 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
8788 inst.instruction = THUMB_OP16 (inst.instruction);
8789 inst.instruction |= Rd;
8792 inst.instruction |= Rn << 3;
8794 inst.instruction |= Rs << 3;
8796 constraint (1, _("dest must overlap one source register"));
8803 if (inst.operands[0].present)
8805 constraint ((inst.instruction & 0xf0) != 0x40
8806 && inst.operands[0].imm != 0xf,
8807 _("bad barrier type"));
8808 inst.instruction |= inst.operands[0].imm;
8811 inst.instruction |= 0xf;
8817 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
8818 constraint (msb > 32, _("bit-field extends past end of register"));
8819 /* The instruction encoding stores the LSB and MSB,
8820 not the LSB and width. */
8821 inst.instruction |= inst.operands[0].reg << 8;
8822 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
8823 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
8824 inst.instruction |= msb - 1;
8832 /* #0 in second position is alternative syntax for bfc, which is
8833 the same instruction but with REG_PC in the Rm field. */
8834 if (!inst.operands[1].isreg)
8835 inst.operands[1].reg = REG_PC;
8837 msb = inst.operands[2].imm + inst.operands[3].imm;
8838 constraint (msb > 32, _("bit-field extends past end of register"));
8839 /* The instruction encoding stores the LSB and MSB,
8840 not the LSB and width. */
8841 inst.instruction |= inst.operands[0].reg << 8;
8842 inst.instruction |= inst.operands[1].reg << 16;
8843 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
8844 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
8845 inst.instruction |= msb - 1;
8851 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
8852 _("bit-field extends past end of register"));
8853 inst.instruction |= inst.operands[0].reg << 8;
8854 inst.instruction |= inst.operands[1].reg << 16;
8855 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
8856 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
8857 inst.instruction |= inst.operands[3].imm - 1;
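/* All the bitfield encodings above split the LSB the same way: bits 2-4 of
   the value go to imm3 (bits 12-14) and bits 0-1 to imm2 (bits 6-7); the
   final five-bit field holds either msb - 1 or width - 1, as computed.  */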
8860 /* ARM V5 Thumb BLX (argument parse)
8861 BLX <target_addr> which is BLX(1)
8862 BLX <Rm> which is BLX(2)
8863 Unfortunately, there are two different opcodes for this mnemonic.
8864 So, the insns[].value is not used, and the code here zaps values
8865 into inst.instruction.
8867 ??? How to take advantage of the additional two bits of displacement
8868 available in Thumb32 mode? Need new relocation? */
8873 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8874 if (inst.operands[0].isreg)
8875 /* We have a register, so this is BLX(2). */
8876 inst.instruction |= inst.operands[0].reg << 3;
8879 /* No register. This must be BLX(1). */
8880 inst.instruction = 0xf000e800;
8882 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
8883 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
8886 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BLX;
8887 inst.reloc.pc_rel = 1;
8897 if (current_it_mask)
8899 /* Conditional branches inside IT blocks are encoded as unconditional
8902 /* A branch must be the last instruction in an IT block. */
8903 constraint (current_it_mask != 0x10, BAD_BRANCH);
8908 if (cond != COND_ALWAYS)
8909 opcode = T_MNEM_bcond;
8911 opcode = inst.instruction;
8913 if (unified_syntax && inst.size_req == 4)
8915 inst.instruction = THUMB_OP32(opcode);
8916 if (cond == COND_ALWAYS)
8917 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH25;
8920 assert (cond != 0xF);
8921 inst.instruction |= cond << 22;
8922 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH20;
8927 inst.instruction = THUMB_OP16(opcode);
8928 if (cond == COND_ALWAYS)
8929 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH12;
8932 inst.instruction |= cond << 8;
8933 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH9;
8935 /* Allow section relaxation. */
8936 if (unified_syntax && inst.size_req != 2)
8937 inst.relax = opcode;
8940 inst.reloc.pc_rel = 1;
8946 constraint (inst.cond != COND_ALWAYS,
8947 _("instruction is always unconditional"));
8948 if (inst.operands[0].present)
8950 constraint (inst.operands[0].imm > 255,
8951 _("immediate value out of range"));
8952 inst.instruction |= inst.operands[0].imm;
8957 do_t_branch23 (void)
8959 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8960 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
8961 inst.reloc.pc_rel = 1;
8963 /* If the destination of the branch is a defined symbol which does not have
8964 the THUMB_FUNC attribute, then we must be calling a function which has
8965 the (interfacearm) attribute. We look for the Thumb entry point to that
8966 function and change the branch to refer to that function instead. */
8967 if ( inst.reloc.exp.X_op == O_symbol
8968 && inst.reloc.exp.X_add_symbol != NULL
8969 && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
8970 && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
8971 inst.reloc.exp.X_add_symbol =
8972 find_real_start (inst.reloc.exp.X_add_symbol);
8978 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8979 inst.instruction |= inst.operands[0].reg << 3;
8980 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
8981 should cause the alignment to be checked once it is known. This is
8982 because BX PC only works if the instruction is word aligned. */
8988 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8989 if (inst.operands[0].reg == REG_PC)
8990 as_tsktsk (_("use of r15 in bxj is not really useful"));
8992 inst.instruction |= inst.operands[0].reg << 16;
8998 inst.instruction |= inst.operands[0].reg << 8;
8999 inst.instruction |= inst.operands[1].reg << 16;
9000 inst.instruction |= inst.operands[1].reg;
9006 constraint (current_it_mask, BAD_NOT_IT);
9007 inst.instruction |= inst.operands[0].imm;
9013 constraint (current_it_mask, BAD_NOT_IT);
9015 && (inst.operands[1].present || inst.size_req == 4)
9016 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
9018 unsigned int imod = (inst.instruction & 0x0030) >> 4;
9019 inst.instruction = 0xf3af8000;
9020 inst.instruction |= imod << 9;
9021 inst.instruction |= inst.operands[0].imm << 5;
9022 if (inst.operands[1].present)
9023 inst.instruction |= 0x100 | inst.operands[1].imm;
9027 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
9028 && (inst.operands[0].imm & 4),
9029 _("selected processor does not support 'A' form "
9030 "of this instruction"));
9031 constraint (inst.operands[1].present || inst.size_req == 4,
9032 _("Thumb does not support the 2-argument "
9033 "form of this instruction"));
9034 inst.instruction |= inst.operands[0].imm;
9038 /* THUMB CPY instruction (argument parse). */
9043 if (inst.size_req == 4)
9045 inst.instruction = THUMB_OP32 (T_MNEM_mov);
9046 inst.instruction |= inst.operands[0].reg << 8;
9047 inst.instruction |= inst.operands[1].reg;
9051 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
9052 inst.instruction |= (inst.operands[0].reg & 0x7);
9053 inst.instruction |= inst.operands[1].reg << 3;
9060 constraint (current_it_mask, BAD_NOT_IT);
9061 constraint (inst.operands[0].reg > 7, BAD_HIREG);
9062 inst.instruction |= inst.operands[0].reg;
9063 inst.reloc.pc_rel = 1;
9064 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
9070 inst.instruction |= inst.operands[0].imm;
9076 if (!inst.operands[1].present)
9077 inst.operands[1].reg = inst.operands[0].reg;
9078 inst.instruction |= inst.operands[0].reg << 8;
9079 inst.instruction |= inst.operands[1].reg << 16;
9080 inst.instruction |= inst.operands[2].reg;
9086 if (unified_syntax && inst.size_req == 4)
9087 inst.instruction = THUMB_OP32 (inst.instruction);
9089 inst.instruction = THUMB_OP16 (inst.instruction);
9095 unsigned int cond = inst.operands[0].imm;
9097 constraint (current_it_mask, BAD_NOT_IT);
9098 current_it_mask = (inst.instruction & 0xf) | 0x10;
9101 /* If the condition is a negative condition, invert the mask. */
9102 if ((cond & 0x1) == 0x0)
9104 unsigned int mask = inst.instruction & 0x000f;
9106 if ((mask & 0x7) == 0)
9107 /* no conversion needed */;
9108 else if ((mask & 0x3) == 0)
9110 else if ((mask & 0x1) == 0)
9115 inst.instruction &= 0xfff0;
9116 inst.instruction |= mask;
9119 inst.instruction |= cond << 4;
9122 /* Helper function used for both push/pop and ldm/stm. */
9124 encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
9128 load = (inst.instruction & (1 << 20)) != 0;
9130 if (mask & (1 << 13))
9131 inst.error = _("SP not allowed in register list");
9134 if (mask & (1 << 14)
9135 && mask & (1 << 15))
9136 inst.error = _("LR and PC should not both be in register list");
9138 if ((mask & (1 << base)) != 0
9140 as_warn (_("base register should not be in register list "
9141 "when written back"));
9145 if (mask & (1 << 15))
9146 inst.error = _("PC not allowed in register list");
9148 if (mask & (1 << base))
9149 as_warn (_("value stored for r%d is UNPREDICTABLE"), base);
9152 if ((mask & (mask - 1)) == 0)
9154 /* Single register transfers implemented as str/ldr. */
9157 if (inst.instruction & (1 << 23))
9158 inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
9160 inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
9164 if (inst.instruction & (1 << 23))
9165 inst.instruction = 0x00800000; /* ia -> [base] */
9167 inst.instruction = 0x00000c04; /* db -> [base, #-4] */
9170 inst.instruction |= 0xf8400000;
9172 inst.instruction |= 0x00100000;
9174 mask = ffs (mask) - 1;
9178 inst.instruction |= WRITE_BACK;
9180 inst.instruction |= mask;
9181 inst.instruction |= base << 16;
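/* A one-register list degenerates into a plain LDR/STR: the code above
   synthesizes the equivalent immediate addressing mode ([base], #4 or
   [base, #-4]{!}) instead of emitting an LDM/STM encoding.  */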
9187 /* This really doesn't seem worth it. */
9188 constraint (inst
.reloc
.type
!= BFD_RELOC_UNUSED
,
9189 _("expression too complex"));
9190 constraint (inst
.operands
[1].writeback
,
9191 _("Thumb load/store multiple does not support {reglist}^"));
9199 /* See if we can use a 16-bit instruction. */
9200 if (inst
.instruction
< 0xffff /* not ldmdb/stmdb */
9201 && inst
.size_req
!= 4
9202 && !(inst
.operands
[1].imm
& ~0xff))
9204 mask
= 1 << inst
.operands
[0].reg
;
9206 if (inst
.operands
[0].reg
<= 7
9207 && (inst
.instruction
== T_MNEM_stmia
9208 ? inst
.operands
[0].writeback
9209 : (inst
.operands
[0].writeback
9210 == !(inst
.operands
[1].imm
& mask
))))
9212 if (inst
.instruction
== T_MNEM_stmia
9213 && (inst
.operands
[1].imm
& mask
)
9214 && (inst
.operands
[1].imm
& (mask
- 1)))
9215 as_warn (_("value stored for r%d is UNPREDICTABLE"),
9216 inst
.operands
[0].reg
);
9218 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9219 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9220 inst
.instruction
|= inst
.operands
[1].imm
;
9223 else if (inst
.operands
[0] .reg
== REG_SP
9224 && inst
.operands
[0].writeback
)
9226 inst
.instruction
= THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
9227 ? T_MNEM_push
: T_MNEM_pop
);
9228 inst
.instruction
|= inst
.operands
[1].imm
;
9235 if (inst
.instruction
< 0xffff)
9236 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9238 encode_thumb2_ldmstm (inst
.operands
[0].reg
, inst
.operands
[1].imm
,
9239 inst
.operands
[0].writeback
);
9244 constraint (inst
.operands
[0].reg
> 7
9245 || (inst
.operands
[1].imm
& ~0xff), BAD_HIREG
);
9246 constraint (inst
.instruction
!= T_MNEM_ldmia
9247 && inst
.instruction
!= T_MNEM_stmia
,
9248 _("Thumb-2 instruction only valid in unified syntax"));
9249 if (inst
.instruction
== T_MNEM_stmia
)
9251 if (!inst
.operands
[0].writeback
)
9252 as_warn (_("this instruction will write back the base register"));
9253 if ((inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
))
9254 && (inst
.operands
[1].imm
& ((1 << inst
.operands
[0].reg
) - 1)))
9255 as_warn (_("value stored for r%d is UNPREDICTABLE"),
9256 inst
.operands
[0].reg
);
9260 if (!inst
.operands
[0].writeback
9261 && !(inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
9262 as_warn (_("this instruction will write back the base register"));
9263 else if (inst
.operands
[0].writeback
9264 && (inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
9265 as_warn (_("this instruction will not write back the base register"));
9268 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9269 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9270 inst
.instruction
|= inst
.operands
[1].imm
;
9277 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
9278 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
9279 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
9280 || inst
.operands
[1].negative
,
9283 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9284 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9285 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
9291 if (!inst
.operands
[1].present
)
9293 constraint (inst
.operands
[0].reg
== REG_LR
,
9294 _("r14 not allowed as first register "
9295 "when second register is omitted"));
9296 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
9298 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
,
9301 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9302 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
9303 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9309 unsigned long opcode
;
9312 opcode
= inst
.instruction
;
9315 if (!inst
.operands
[1].isreg
)
9317 if (opcode
<= 0xffff)
9318 inst
.instruction
= THUMB_OP32 (opcode
);
9319 if (move_or_literal_pool (0, /*thumb_p=*/TRUE
, /*mode_3=*/FALSE
))
9322 if (inst
.operands
[1].isreg
9323 && !inst
.operands
[1].writeback
9324 && !inst
.operands
[1].shifted
&& !inst
.operands
[1].postind
9325 && !inst
.operands
[1].negative
&& inst
.operands
[0].reg
<= 7
9327 && inst
.size_req
!= 4)
9329 /* Insn may have a 16-bit form. */
9330 Rn
= inst
.operands
[1].reg
;
9331 if (inst
.operands
[1].immisreg
)
9333 inst
.instruction
= THUMB_OP16 (opcode
);
9335 if (Rn
<= 7 && inst
.operands
[1].imm
<= 7)
9338 else if ((Rn
<= 7 && opcode
!= T_MNEM_ldrsh
9339 && opcode
!= T_MNEM_ldrsb
)
9340 || ((Rn
== REG_PC
|| Rn
== REG_SP
) && opcode
== T_MNEM_ldr
)
9341 || (Rn
== REG_SP
&& opcode
== T_MNEM_str
))
9348 if (inst
.reloc
.pc_rel
)
9349 opcode
= T_MNEM_ldr_pc2
;
9351 opcode
= T_MNEM_ldr_pc
;
9355 if (opcode
== T_MNEM_ldr
)
9356 opcode
= T_MNEM_ldr_sp
;
9358 opcode
= T_MNEM_str_sp
;
9360 inst
.instruction
= inst
.operands
[0].reg
<< 8;
9364 inst
.instruction
= inst
.operands
[0].reg
;
9365 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9367 inst
.instruction
|= THUMB_OP16 (opcode
);
9368 if (inst
.size_req
== 2)
9369 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
9371 inst
.relax
= opcode
;
9375 /* Definitely a 32-bit variant. */
9376 inst
.instruction
= THUMB_OP32 (opcode
);
9377 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9378 encode_thumb32_addr_mode (1, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
9382 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
9384 if (inst
.instruction
== T_MNEM_ldrsh
|| inst
.instruction
== T_MNEM_ldrsb
)
9386 /* Only [Rn,Rm] is acceptable. */
9387 constraint (inst
.operands
[1].reg
> 7 || inst
.operands
[1].imm
> 7, BAD_HIREG
);
9388 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].immisreg
9389 || inst
.operands
[1].postind
|| inst
.operands
[1].shifted
9390 || inst
.operands
[1].negative
,
9391 _("Thumb does not support this addressing mode"));
9392 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9396 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9397 if (!inst
.operands
[1].isreg
)
9398 if (move_or_literal_pool (0, /*thumb_p=*/TRUE
, /*mode_3=*/FALSE
))
9401 constraint (!inst
.operands
[1].preind
9402 || inst
.operands
[1].shifted
9403 || inst
.operands
[1].writeback
,
9404 _("Thumb does not support this addressing mode"));
9405 if (inst
.operands
[1].reg
== REG_PC
|| inst
.operands
[1].reg
== REG_SP
)
9407 constraint (inst
.instruction
& 0x0600,
9408 _("byte or halfword not valid for base register"));
9409 constraint (inst
.operands
[1].reg
== REG_PC
9410 && !(inst
.instruction
& THUMB_LOAD_BIT
),
9411 _("r15 based store not allowed"));
9412 constraint (inst
.operands
[1].immisreg
,
9413 _("invalid base register for register offset"));
9415 if (inst
.operands
[1].reg
== REG_PC
)
9416 inst
.instruction
= T_OPCODE_LDR_PC
;
9417 else if (inst
.instruction
& THUMB_LOAD_BIT
)
9418 inst
.instruction
= T_OPCODE_LDR_SP
;
9420 inst
.instruction
= T_OPCODE_STR_SP
;
9422 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9423 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
9427 constraint (inst
.operands
[1].reg
> 7, BAD_HIREG
);
9428 if (!inst
.operands
[1].immisreg
)
9430 /* Immediate offset. */
9431 inst
.instruction
|= inst
.operands
[0].reg
;
9432 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9433 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
9437 /* Register offset. */
9438 constraint (inst
.operands
[1].imm
> 7, BAD_HIREG
);
9439 constraint (inst
.operands
[1].negative
,
9440 _("Thumb does not support this addressing mode"));
9443 switch (inst
.instruction
)
9445 case T_OPCODE_STR_IW
: inst
.instruction
= T_OPCODE_STR_RW
; break;
9446 case T_OPCODE_STR_IH
: inst
.instruction
= T_OPCODE_STR_RH
; break;
9447 case T_OPCODE_STR_IB
: inst
.instruction
= T_OPCODE_STR_RB
; break;
9448 case T_OPCODE_LDR_IW
: inst
.instruction
= T_OPCODE_LDR_RW
; break;
9449 case T_OPCODE_LDR_IH
: inst
.instruction
= T_OPCODE_LDR_RH
; break;
9450 case T_OPCODE_LDR_IB
: inst
.instruction
= T_OPCODE_LDR_RB
; break;
9451 case 0x5600 /* ldrsb */:
9452 case 0x5e00 /* ldrsh */: break;
9456 inst
.instruction
|= inst
.operands
[0].reg
;
9457 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9458 inst
.instruction
|= inst
.operands
[1].imm
<< 6;
9464 if (!inst
.operands
[1].present
)
9466 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
9467 constraint (inst
.operands
[0].reg
== REG_LR
,
9468 _("r14 not allowed here"));
9470 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9471 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
9472 encode_thumb32_addr_mode (2, /*is_t=*/FALSE
, /*is_d=*/TRUE
);
9478 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9479 encode_thumb32_addr_mode (1, /*is_t=*/TRUE
, /*is_d=*/FALSE
);
9485 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9486 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9487 inst
.instruction
|= inst
.operands
[2].reg
;
9488 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
9494 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9495 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
9496 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9497 inst
.instruction
|= inst
.operands
[3].reg
;
9505 int r0off
= (inst
.instruction
== T_MNEM_mov
9506 || inst
.instruction
== T_MNEM_movs
) ? 8 : 16;
9507 unsigned long opcode
;
9509 bfd_boolean low_regs
;
9511 low_regs
= (inst
.operands
[0].reg
<= 7 && inst
.operands
[1].reg
<= 7);
9512 opcode
= inst
.instruction
;
9513 if (current_it_mask
)
9514 narrow
= opcode
!= T_MNEM_movs
;
9516 narrow
= opcode
!= T_MNEM_movs
|| low_regs
;
9517 if (inst
.size_req
== 4
9518 || inst
.operands
[1].shifted
)
9521 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
9522 if (opcode
== T_MNEM_movs
&& inst
.operands
[1].isreg
9523 && !inst
.operands
[1].shifted
9524 && inst
.operands
[0].reg
== REG_PC
9525 && inst
.operands
[1].reg
== REG_LR
)
9527 inst
.instruction
= T2_SUBS_PC_LR
;
9531 if (!inst
.operands
[1].isreg
)
9533 /* Immediate operand. */
9534 if (current_it_mask
== 0 && opcode
== T_MNEM_mov
)
9536 if (low_regs
&& narrow
)
9538 inst
.instruction
= THUMB_OP16 (opcode
);
9539 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9540 if (inst
.size_req
== 2)
9541 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_IMM
;
9543 inst
.relax
= opcode
;
9547 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9548 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
9549 inst
.instruction
|= inst
.operands
[0].reg
<< r0off
;
9550 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
9553 else if (inst
.operands
[1].shifted
&& inst
.operands
[1].immisreg
9554 && (inst
.instruction
== T_MNEM_mov
9555 || inst
.instruction
== T_MNEM_movs
))
9557 /* Register shifts are encoded as separate shift instructions. */
9558 bfd_boolean flags
= (inst
.instruction
== T_MNEM_movs
);
9560 if (current_it_mask
)
9565 if (inst
.size_req
== 4)
9568 if (!low_regs
|| inst
.operands
[1].imm
> 7)
9571 if (inst
.operands
[0].reg
!= inst
.operands
[1].reg
)
9574 switch (inst
.operands
[1].shift_kind
)
9577 opcode
= narrow
? T_OPCODE_LSL_R
: THUMB_OP32 (T_MNEM_lsl
);
9580 opcode
= narrow
? T_OPCODE_ASR_R
: THUMB_OP32 (T_MNEM_asr
);
9583 opcode
= narrow
? T_OPCODE_LSR_R
: THUMB_OP32 (T_MNEM_lsr
);
9586 opcode
= narrow
? T_OPCODE_ROR_R
: THUMB_OP32 (T_MNEM_ror
);
9592 inst
.instruction
= opcode
;
9595 inst
.instruction
|= inst
.operands
[0].reg
;
9596 inst
.instruction
|= inst
.operands
[1].imm
<< 3;
9601 inst
.instruction
|= CONDS_BIT
;
9603 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9604 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9605 inst
.instruction
|= inst
.operands
[1].imm
;
9610 /* Some mov with immediate shift have narrow variants.
9611 Register shifts are handled above. */
9612 if (low_regs
&& inst
.operands
[1].shifted
9613 && (inst
.instruction
== T_MNEM_mov
9614 || inst
.instruction
== T_MNEM_movs
))
9616 if (current_it_mask
)
9617 narrow
= (inst
.instruction
== T_MNEM_mov
);
9619 narrow
= (inst
.instruction
== T_MNEM_movs
);
9624 switch (inst
.operands
[1].shift_kind
)
9626 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
9627 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
9628 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
9629 default: narrow
= FALSE
; break;
9635 inst
.instruction
|= inst
.operands
[0].reg
;
9636 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9637 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
9641 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9642 inst
.instruction
|= inst
.operands
[0].reg
<< r0off
;
9643 encode_thumb32_shifted_operand (1);
9647 switch (inst
.instruction
)
9650 inst
.instruction
= T_OPCODE_MOV_HR
;
9651 inst
.instruction
|= (inst
.operands
[0].reg
& 0x8) << 4;
9652 inst
.instruction
|= (inst
.operands
[0].reg
& 0x7);
9653 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9657 /* We know we have low registers at this point.
9658 Generate ADD Rd, Rs, #0. */
9659 inst
.instruction
= T_OPCODE_ADD_I3
;
9660 inst
.instruction
|= inst
.operands
[0].reg
;
9661 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9667 inst
.instruction
= T_OPCODE_CMP_LR
;
9668 inst
.instruction
|= inst
.operands
[0].reg
;
9669 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9673 inst
.instruction
= T_OPCODE_CMP_HR
;
9674 inst
.instruction
|= (inst
.operands
[0].reg
& 0x8) << 4;
9675 inst
.instruction
|= (inst
.operands
[0].reg
& 0x7);
9676 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9683 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9684 if (inst
.operands
[1].isreg
)
9686 if (inst
.operands
[0].reg
< 8 && inst
.operands
[1].reg
< 8)
9688 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
9689 since a MOV instruction produces unpredictable results. */
9690 if (inst
.instruction
== T_OPCODE_MOV_I8
)
9691 inst
.instruction
= T_OPCODE_ADD_I3
;
9693 inst
.instruction
= T_OPCODE_CMP_LR
;
9695 inst
.instruction
|= inst
.operands
[0].reg
;
9696 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9700 if (inst
.instruction
== T_OPCODE_MOV_I8
)
9701 inst
.instruction
= T_OPCODE_MOV_HR
;
9703 inst
.instruction
= T_OPCODE_CMP_HR
;
9709 constraint (inst
.operands
[0].reg
> 7,
9710 _("only lo regs allowed with immediate"));
9711 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9712 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_IMM
;
9722 top = (inst.instruction & 0x00800000) != 0;
9723 if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
9725 constraint (top, _(":lower16: not allowed this instruction"));
9726 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
9728 else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
9730 constraint (!top, _(":upper16: not allowed this instruction"));
9731 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
9734 inst.instruction |= inst.operands[0].reg << 8;
9735 if (inst.reloc.type == BFD_RELOC_UNUSED)
9737 imm = inst.reloc.exp.X_add_number;
9738 inst.instruction |= (imm & 0xf000) << 4;
9739 inst.instruction |= (imm & 0x0800) << 15;
9740 inst.instruction |= (imm & 0x0700) << 4;
9741 inst.instruction |= (imm & 0x00ff);
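/* Thumb-2 MOVW/MOVT immediate layout produced above: imm4 goes to bits
   16-19, i to bit 26, imm3 to bits 12-14 and imm8 to bits 0-7.  */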
int r0off = (inst.instruction == T_MNEM_mvn
             || inst.instruction == T_MNEM_mvns) ? 8 : 16;
if (inst.size_req == 4
    || inst.instruction > 0xffff
    || inst.operands[1].shifted
    || inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
else if (inst.instruction == T_MNEM_cmn)
else if (THUMB_SETS_FLAGS (inst.instruction))
narrow = (current_it_mask == 0);
narrow = (current_it_mask != 0);
if (!inst.operands[1].isreg)
/* For an immediate, we always generate a 32-bit opcode;
   section relaxation will shrink it later if possible.  */
if (inst.instruction < 0xffff)
inst.instruction = THUMB_OP32 (inst.instruction);
inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
inst.instruction |= inst.operands[0].reg << r0off;
inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
/* See if we can do this with a 16-bit instruction.  */
inst.instruction = THUMB_OP16 (inst.instruction);
inst.instruction |= inst.operands[0].reg;
inst.instruction |= inst.operands[1].reg << 3;
constraint (inst.operands[1].shifted
            && inst.operands[1].immisreg,
            _("shift must be constant"));
if (inst.instruction < 0xffff)
inst.instruction = THUMB_OP32 (inst.instruction);
inst.instruction |= inst.operands[0].reg << r0off;
encode_thumb32_shifted_operand (1);

constraint (inst.instruction > 0xffff
            || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
            _("unshifted register required"));
constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
            BAD_HIREG);
inst.instruction = THUMB_OP16 (inst.instruction);
inst.instruction |= inst.operands[0].reg;
inst.instruction |= inst.operands[1].reg << 3;
if (do_vfp_nsyn_mrs () == SUCCESS)
flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m),
            _("selected processor does not support "
              "requested special purpose register"));
constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
            _("selected processor does not support "
              "requested special purpose register"));
/* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
            _("'CPSR' or 'SPSR' expected"));
inst.instruction |= inst.operands[0].reg << 8;
inst.instruction |= (flags & SPSR_BIT) >> 2;
inst.instruction |= inst.operands[1].imm & 0xff;

if (do_vfp_nsyn_msr () == SUCCESS)
constraint (!inst.operands[1].isreg,
            _("Thumb encoding does not support an immediate here"));
flags = inst.operands[0].imm;
constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
            _("selected processor does not support "
              "requested special purpose register"));
constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m),
            _("selected processor does not support "
              "requested special purpose register"));
inst.instruction |= (flags & SPSR_BIT) >> 2;
inst.instruction |= (flags & ~SPSR_BIT) >> 8;
inst.instruction |= (flags & 0xff);
inst.instruction |= inst.operands[1].reg << 16;
if (!inst.operands[2].present)
inst.operands[2].reg = inst.operands[0].reg;

/* There is no 32-bit MULS and no 16-bit MUL.  */
if (unified_syntax && inst.instruction == T_MNEM_mul)
inst.instruction = THUMB_OP32 (inst.instruction);
inst.instruction |= inst.operands[0].reg << 8;
inst.instruction |= inst.operands[1].reg << 16;
inst.instruction |= inst.operands[2].reg << 0;
constraint (!unified_syntax
            && inst.instruction == T_MNEM_muls, BAD_THUMB32);
constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
            BAD_HIREG);
inst.instruction = THUMB_OP16 (inst.instruction);
inst.instruction |= inst.operands[0].reg;
if (inst.operands[0].reg == inst.operands[1].reg)
inst.instruction |= inst.operands[2].reg << 3;
else if (inst.operands[0].reg == inst.operands[2].reg)
inst.instruction |= inst.operands[1].reg << 3;
constraint (1, _("dest must overlap one source register"));

inst.instruction |= inst.operands[0].reg << 12;
inst.instruction |= inst.operands[1].reg << 8;
inst.instruction |= inst.operands[2].reg << 16;
inst.instruction |= inst.operands[3].reg;
if (inst.operands[0].reg == inst.operands[1].reg)
as_tsktsk (_("rdhi and rdlo must be different"));
if (inst.size_req == 4 || inst.operands[0].imm > 15)
inst.instruction = THUMB_OP32 (inst.instruction);
inst.instruction |= inst.operands[0].imm;
inst.instruction = THUMB_OP16 (inst.instruction);
inst.instruction |= inst.operands[0].imm << 4;

constraint (inst.operands[0].present,
            _("Thumb does not support NOP with hints"));
inst.instruction = 0x46c0;

if (THUMB_SETS_FLAGS (inst.instruction))
narrow = (current_it_mask == 0);
narrow = (current_it_mask != 0);
if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
if (inst.size_req == 4)
inst.instruction = THUMB_OP32 (inst.instruction);
inst.instruction |= inst.operands[0].reg << 8;
inst.instruction |= inst.operands[1].reg << 16;
inst.instruction = THUMB_OP16 (inst.instruction);
inst.instruction |= inst.operands[0].reg;
inst.instruction |= inst.operands[1].reg << 3;

constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
            BAD_HIREG);
constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
inst.instruction = THUMB_OP16 (inst.instruction);
inst.instruction |= inst.operands[0].reg;
inst.instruction |= inst.operands[1].reg << 3;

inst.instruction |= inst.operands[0].reg << 8;
inst.instruction |= inst.operands[1].reg << 16;
inst.instruction |= inst.operands[2].reg;
if (inst.operands[3].present)
unsigned int val = inst.reloc.exp.X_add_number;
constraint (inst.reloc.exp.X_op != O_constant,
            _("expression too complex"));
inst.instruction |= (val & 0x1c) << 10;
inst.instruction |= (val & 0x03) << 6;
if (!inst.operands[3].present)
inst.instruction &= ~0x00000020;

encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
do_t_push_pop (void)
constraint (inst.operands[0].writeback,
            _("push/pop do not support {reglist}^"));
constraint (inst.reloc.type != BFD_RELOC_UNUSED,
            _("expression too complex"));

mask = inst.operands[0].imm;
if ((mask & ~0xff) == 0)
inst.instruction = THUMB_OP16 (inst.instruction) | mask;
else if ((inst.instruction == T_MNEM_push
          && (mask & ~0xff) == 1 << REG_LR)
         || (inst.instruction == T_MNEM_pop
             && (mask & ~0xff) == 1 << REG_PC))
inst.instruction = THUMB_OP16 (inst.instruction);
inst.instruction |= THUMB_PP_PC_LR;
inst.instruction |= mask & 0xff;
else if (unified_syntax)
inst.instruction = THUMB_OP32 (inst.instruction);
encode_thumb2_ldmstm (13, mask, TRUE);
inst.error = _("invalid register list to push/pop instruction");
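/* Illustrative sketch (not part of the original source): the mask checks
   above decide between the 16-bit and 32-bit push/pop encodings.  A
   hypothetical predicate capturing the same rule might read as follows;
   REG_LR and REG_PC are the usual 14 and 15.  */
static int
push_pop_fits_16bit_example (unsigned long mask, int is_push)
{
  /* Low registers only: always narrow.  */
  if ((mask & ~0xff) == 0)
    return 1;
  /* Low registers plus LR (push) or PC (pop): still narrow.  */
  if (is_push && (mask & ~0xff) == 1 << 14)
    return 1;
  if (!is_push && (mask & ~0xff) == 1 << 15)
    return 1;
  /* Anything else needs the 32-bit LDM/STM form with Rn = SP (13).  */
  return 0;
}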
inst.instruction |= inst.operands[0].reg << 8;
inst.instruction |= inst.operands[1].reg << 16;
inst.instruction |= inst.operands[1].reg;

if (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7
    && inst.size_req != 4)
inst.instruction = THUMB_OP16 (inst.instruction);
inst.instruction |= inst.operands[0].reg;
inst.instruction |= inst.operands[1].reg << 3;
else if (unified_syntax)
inst.instruction = THUMB_OP32 (inst.instruction);
inst.instruction |= inst.operands[0].reg << 8;
inst.instruction |= inst.operands[1].reg << 16;
inst.instruction |= inst.operands[1].reg;
inst.error = BAD_HIREG;

Rd = inst.operands[0].reg;
Rs = (inst.operands[1].present
      ? inst.operands[1].reg    /* Rd, Rs, foo */
      : inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

inst.instruction |= Rd << 8;
inst.instruction |= Rs << 16;
if (!inst.operands[2].isreg)
bfd_boolean narrow;

if ((inst.instruction & 0x00100000) != 0)
narrow = (current_it_mask == 0);
narrow = (current_it_mask != 0);
if (Rd > 7 || Rs > 7)
if (inst.size_req == 4 || !unified_syntax)
if (inst.reloc.exp.X_op != O_constant
    || inst.reloc.exp.X_add_number != 0)

/* Turn rsb #0 into 16-bit neg.  We should probably do this via
   relaxation, but it doesn't seem worth the hassle.  */
inst.reloc.type = BFD_RELOC_UNUSED;
inst.instruction = THUMB_OP16 (T_MNEM_negs);
inst.instruction |= Rs << 3;
inst.instruction |= Rd;
inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
encode_thumb32_shifted_operand (2);
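/* Illustrative sketch (not part of the original source): the special case
   above rewrites "rsb Rd, Rs, #0" into the 16-bit "negs Rd, Rs" encoding
   when the operands permit it.  A hypothetical predicate for that
   shortcut, assuming the expression has already been folded to a
   constant, might be: */
static int
rsb_zero_can_narrow_example (int rd, int rs, int op_is_constant,
                             long value, int in_it_block)
{
  return rd <= 7 && rs <= 7             /* low registers only */
         && op_is_constant && value == 0 /* immediate is exactly #0 */
         && !in_it_block;               /* NEGS sets the flags */
}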
constraint (current_it_mask, BAD_NOT_IT);
if (inst.operands[0].imm)
inst.instruction |= 0x8;

if (!inst.operands[1].present)
inst.operands[1].reg = inst.operands[0].reg;

if (unified_syntax)
bfd_boolean narrow;

switch (inst.instruction)
case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
case T_MNEM_rors: shift_kind = SHIFT_ROR; break;

if (THUMB_SETS_FLAGS (inst.instruction))
narrow = (current_it_mask == 0);
narrow = (current_it_mask != 0);
if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
if (inst.operands[2].isreg
    && (inst.operands[1].reg != inst.operands[0].reg
        || inst.operands[2].reg > 7))
if (inst.size_req == 4)

if (inst.operands[2].isreg)
inst.instruction = THUMB_OP32 (inst.instruction);
inst.instruction |= inst.operands[0].reg << 8;
inst.instruction |= inst.operands[1].reg << 16;
inst.instruction |= inst.operands[2].reg;
inst.operands[1].shifted = 1;
inst.operands[1].shift_kind = shift_kind;
inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
                               ? T_MNEM_movs : T_MNEM_mov);
inst.instruction |= inst.operands[0].reg << 8;
encode_thumb32_shifted_operand (1);
/* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
inst.reloc.type = BFD_RELOC_UNUSED;

if (inst.operands[2].isreg)
switch (shift_kind)
case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
inst.instruction |= inst.operands[0].reg;
inst.instruction |= inst.operands[2].reg << 3;
switch (shift_kind)
case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
inst.instruction |= inst.operands[0].reg;
inst.instruction |= inst.operands[1].reg << 3;

constraint (inst.operands[0].reg > 7
            || inst.operands[1].reg > 7, BAD_HIREG);
constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
constraint (inst.operands[2].reg > 7, BAD_HIREG);
constraint (inst.operands[0].reg != inst.operands[1].reg,
            _("source1 and dest must be same register"));

switch (inst.instruction)
case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
inst.instruction |= inst.operands[0].reg;
inst.instruction |= inst.operands[2].reg << 3;
switch (inst.instruction)
case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
inst.instruction |= inst.operands[0].reg;
inst.instruction |= inst.operands[1].reg << 3;
inst.instruction |= inst.operands[0].reg << 8;
inst.instruction |= inst.operands[1].reg << 16;
inst.instruction |= inst.operands[2].reg;

unsigned int value = inst.reloc.exp.X_add_number;
constraint (inst.reloc.exp.X_op != O_constant,
            _("expression too complex"));
inst.reloc.type = BFD_RELOC_UNUSED;
inst.instruction |= (value & 0xf000) >> 12;
inst.instruction |= (value & 0x0ff0);
inst.instruction |= (value & 0x000f) << 16;

inst.instruction |= inst.operands[0].reg << 8;
inst.instruction |= inst.operands[1].imm - 1;
inst.instruction |= inst.operands[2].reg << 16;

if (inst.operands[3].present)
constraint (inst.reloc.exp.X_op != O_constant,
            _("expression too complex"));

if (inst.reloc.exp.X_add_number != 0)
if (inst.operands[3].shift_kind == SHIFT_ASR)
inst.instruction |= 0x00200000;  /* sh bit */
inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
inst.reloc.type = BFD_RELOC_UNUSED;

inst.instruction |= inst.operands[0].reg << 8;
inst.instruction |= inst.operands[1].imm - 1;
inst.instruction |= inst.operands[2].reg << 16;

constraint (!inst.operands[2].isreg || !inst.operands[2].preind
            || inst.operands[2].postind || inst.operands[2].writeback
            || inst.operands[2].immisreg || inst.operands[2].shifted
            || inst.operands[2].negative,
inst.instruction |= inst.operands[0].reg << 8;
inst.instruction |= inst.operands[1].reg << 12;
inst.instruction |= inst.operands[2].reg << 16;
inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;

if (!inst.operands[2].present)
inst.operands[2].reg = inst.operands[1].reg + 1;

constraint (inst.operands[0].reg == inst.operands[1].reg
            || inst.operands[0].reg == inst.operands[2].reg
            || inst.operands[0].reg == inst.operands[3].reg
            || inst.operands[1].reg == inst.operands[2].reg,
inst.instruction |= inst.operands[0].reg;
inst.instruction |= inst.operands[1].reg << 12;
inst.instruction |= inst.operands[2].reg << 8;
inst.instruction |= inst.operands[3].reg << 16;

inst.instruction |= inst.operands[0].reg << 8;
inst.instruction |= inst.operands[1].reg << 16;
inst.instruction |= inst.operands[2].reg;
inst.instruction |= inst.operands[3].imm << 4;

if (inst.instruction <= 0xffff && inst.size_req != 4
    && inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7
    && (!inst.operands[2].present || inst.operands[2].imm == 0))
inst.instruction = THUMB_OP16 (inst.instruction);
inst.instruction |= inst.operands[0].reg;
inst.instruction |= inst.operands[1].reg << 3;
else if (unified_syntax)
if (inst.instruction <= 0xffff)
inst.instruction = THUMB_OP32 (inst.instruction);
inst.instruction |= inst.operands[0].reg << 8;
inst.instruction |= inst.operands[1].reg;
inst.instruction |= inst.operands[2].imm << 4;
constraint (inst.operands[2].present && inst.operands[2].imm != 0,
            _("Thumb encoding does not support rotation"));
constraint (1, BAD_HIREG);

inst.reloc.type = BFD_RELOC_ARM_SWI;
half = (inst.instruction & 0x10) != 0;
constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
constraint (inst.operands[0].immisreg,
            _("instruction requires register index"));
constraint (inst.operands[0].imm == 15,
            _("PC is not a valid index register"));
constraint (!half && inst.operands[0].shifted,
            _("instruction does not allow shifted index"));
inst.instruction |= (inst.operands[0].reg << 16) | inst.operands[0].imm;

inst.instruction |= inst.operands[0].reg << 8;
inst.instruction |= inst.operands[1].imm;
inst.instruction |= inst.operands[2].reg << 16;

if (inst.operands[3].present)
constraint (inst.reloc.exp.X_op != O_constant,
            _("expression too complex"));
if (inst.reloc.exp.X_add_number != 0)
if (inst.operands[3].shift_kind == SHIFT_ASR)
inst.instruction |= 0x00200000;  /* sh bit */
inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
inst.reloc.type = BFD_RELOC_UNUSED;

inst.instruction |= inst.operands[0].reg << 8;
inst.instruction |= inst.operands[1].imm;
inst.instruction |= inst.operands[2].reg << 16;
/* Neon instruction encoder helpers.  */

/* Encodings for the different types for various Neon opcodes.  */

/* An "invalid" code for the following tables.  */

struct neon_tab_entry
unsigned float_or_poly;
unsigned scalar_or_imm;

/* Map overloaded Neon opcodes to their respective encodings.  */
#define NEON_ENC_TAB \
  X(vabd, 0x0000700, 0x1200d00, N_INV), \
  X(vmax, 0x0000600, 0x0000f00, N_INV), \
  X(vmin, 0x0000610, 0x0200f00, N_INV), \
  X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
  X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
  X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
  X(vadd, 0x0000800, 0x0000d00, N_INV), \
  X(vsub, 0x1000800, 0x0200d00, N_INV), \
  X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
  X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
  X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
  /* Register variants of the following two instructions are encoded as
     vcge / vcgt with the operands reversed.  */ \
  X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
  X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
  X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
  X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
  X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
  X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
  X(vmlal, 0x0800800, N_INV, 0x0800240), \
  X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
  X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
  X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
  X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
  X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
  X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
  X(vshl, 0x0000400, N_INV, 0x0800510), \
  X(vqshl, 0x0000410, N_INV, 0x0800710), \
  X(vand, 0x0000110, N_INV, 0x0800030), \
  X(vbic, 0x0100110, N_INV, 0x0800030), \
  X(veor, 0x1000110, N_INV, N_INV), \
  X(vorn, 0x0300110, N_INV, 0x0800010), \
  X(vorr, 0x0200110, N_INV, 0x0800010), \
  X(vmvn, 0x1b00580, N_INV, 0x0800030), \
  X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate.  */ \
  X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point.  */ \
  X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar.  */ \
  X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
  X(vst1, 0x0000000, 0x0800000, N_INV), \
  X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
  X(vst2, 0x0000100, 0x0800100, N_INV), \
  X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
  X(vst3, 0x0000200, 0x0800200, N_INV), \
  X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
  X(vst4, 0x0000300, 0x0800300, N_INV), \
  X(vmovn, 0x1b20200, N_INV, N_INV), \
  X(vtrn, 0x1b20080, N_INV, N_INV), \
  X(vqmovn, 0x1b20200, N_INV, N_INV), \
  X(vqmovun, 0x1b20240, N_INV, N_INV), \
  X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
  X(vnmla, 0xe000a40, 0xe000b40, N_INV), \
  X(vnmls, 0xe100a40, 0xe100b40, N_INV), \
  X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
  X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
  X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
  X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV)

#define X(OPC,I,F,S) N_MNEM_##OPC

static const struct neon_tab_entry neon_enc_tab[] =
#define X(OPC,I,F,S) { (I), (F), (S) }

#define NEON_ENC_INTEGER(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_SINGLE(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
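/* Illustrative note (not part of the original source): the encoders below
   overwrite inst.instruction with one of the three per-mnemonic encodings
   from neon_enc_tab, e.g. roughly

     if (et.type == NT_float)
       inst.instruction = NEON_ENC_FLOAT (inst.instruction);
     else
       inst.instruction = NEON_ENC_INTEGER (inst.instruction);

   For the vadd row above that selects 0x0000d00 (float) or 0x0000800
   (integer); NEON_ENC_SINGLE/NEON_ENC_DOUBLE additionally preserve the
   top nibble, which carries the condition/Thumb marker.  */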
/* Define shapes for instruction operands.  The following mnemonic characters
   are used in this table:

     F - VFP S<n> register
     D - Neon D<n> register
     Q - Neon Q<n> register
     L - D<n> register list

   This table is used to generate various data:
   - enumerations of the form NS_DDR to be used as arguments to
   - a table classifying shapes into single, double, quad, mixed.
   - a table used to drive neon_select_shape.  */

#define NEON_SHAPE_DEF \
  X(3, (D, D, D), DOUBLE), \
  X(3, (Q, Q, Q), QUAD), \
  X(3, (D, D, I), DOUBLE), \
  X(3, (Q, Q, I), QUAD), \
  X(3, (D, D, S), DOUBLE), \
  X(3, (Q, Q, S), QUAD), \
  X(2, (D, D), DOUBLE), \
  X(2, (Q, Q), QUAD), \
  X(2, (D, S), DOUBLE), \
  X(2, (Q, S), QUAD), \
  X(2, (D, R), DOUBLE), \
  X(2, (Q, R), QUAD), \
  X(2, (D, I), DOUBLE), \
  X(2, (Q, I), QUAD), \
  X(3, (D, L, D), DOUBLE), \
  X(2, (D, Q), MIXED), \
  X(2, (Q, D), MIXED), \
  X(3, (D, Q, I), MIXED), \
  X(3, (Q, D, I), MIXED), \
  X(3, (Q, D, D), MIXED), \
  X(3, (D, Q, Q), MIXED), \
  X(3, (Q, Q, D), MIXED), \
  X(3, (Q, D, S), MIXED), \
  X(3, (D, Q, S), MIXED), \
  X(4, (D, D, D, I), DOUBLE), \
  X(4, (Q, Q, Q, I), QUAD), \
  X(2, (F, F), SINGLE), \
  X(3, (F, F, F), SINGLE), \
  X(2, (F, I), SINGLE), \
  X(2, (F, D), MIXED), \
  X(2, (D, F), MIXED), \
  X(3, (F, F, I), MIXED), \
  X(4, (R, R, F, F), SINGLE), \
  X(4, (F, F, R, R), SINGLE), \
  X(3, (D, R, R), DOUBLE), \
  X(3, (R, R, D), DOUBLE), \
  X(2, (S, R), SINGLE), \
  X(2, (R, S), SINGLE), \
  X(2, (F, R), SINGLE), \
  X(2, (R, F), SINGLE)

#define S2(A,B)     NS_##A##B
#define S3(A,B,C)   NS_##A##B##C
#define S4(A,B,C,D) NS_##A##B##C##D

#define X(N, L, C) S##N L

enum neon_shape_class
#define X(N, L, C) SC_##C

static enum neon_shape_class neon_shape_class[] =

/* Register widths of above.  */
static unsigned neon_shape_el_size[] =

struct neon_shape_info
enum neon_shape_el el[NEON_MAX_TYPE_ELS];

#define S2(A,B)     { SE_##A, SE_##B }
#define S3(A,B,C)   { SE_##A, SE_##B, SE_##C }
#define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }

#define X(N, L, C) { N, S##N L }

static struct neon_shape_info neon_shape_tab[] =

/* Bit masks used in type checking given instructions.
   'N_EQK' means the type must be the same as (or based on in some way) the key
   type, which itself is marked with the 'N_KEY' bit.  If the 'N_EQK' bit is
   set, various other bits can be set as well in order to modify the meaning of
   the type constraint.  */

enum neon_type_mask
  N_KEY = 0x100000, /* key element (main type specifier).  */
  N_EQK = 0x200000, /* given operand has the same type & size as the key.  */
  N_VFP = 0x400000, /* VFP mode: operand size must match register width.  */
  N_DBL = 0x000001, /* if N_EQK, this operand is twice the size.  */
  N_HLF = 0x000002, /* if N_EQK, this operand is half the size.  */
  N_SGN = 0x000004, /* if N_EQK, this operand is forced to be signed.  */
  N_UNS = 0x000008, /* if N_EQK, this operand is forced to be unsigned.  */
  N_INT = 0x000010, /* if N_EQK, this operand is forced to be integer.  */
  N_FLT = 0x000020, /* if N_EQK, this operand is forced to be float.  */
  N_SIZ = 0x000040, /* if N_EQK, this operand is forced to be size-only.  */
  N_MAX_NONSPECIAL = N_F64

#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_SUF_32   (N_SU_32 | N_F32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F32)

/* Pass this as the first type argument to neon_check_type to ignore types
   ...  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
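/* Illustrative note (not part of the original source): these masks are
   simply unions of the single-type bits, so a checker argument such as
   "N_SUF_32 | N_KEY" accepts any of S8/S16/S32, U8/U16/U32 or F32 as the
   key type.  For example, in a later call

     neon_check_type (3, rs, N_EQK, N_EQK, N_SU_32 | N_KEY);

   operands 0 and 1 must match whatever signed/unsigned 8/16/32-bit type
   was written for the key (third) operand.  */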
/* Select a "shape" for the current instruction (describing register types or
   sizes) from a list of alternatives.  Return NS_NULL if the current
   instruction doesn't fit.  For non-polymorphic shapes, checking is usually
   done as a function of operand parsing, so this function doesn't need to be
   called.  Shapes should be listed in order of decreasing length.  */

static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
enum neon_shape first_shape = shape;

/* Fix missing optional operands.  FIXME: we don't know at this point how
   many arguments we should have, so this makes the assumption that we have
   > 1.  This is true of all current Neon opcodes, I think, but may not be
   true in the future.  */
if (!inst.operands[1].present)
inst.operands[1] = inst.operands[0];

va_start (ap, shape);

for (; shape != NS_NULL; shape = va_arg (ap, int))
for (j = 0; j < neon_shape_tab[shape].els; j++)
if (!inst.operands[j].present)
switch (neon_shape_tab[shape].el[j])
if (!(inst.operands[j].isreg
      && inst.operands[j].isvec
      && inst.operands[j].issingle
      && !inst.operands[j].isquad))
if (!(inst.operands[j].isreg
      && inst.operands[j].isvec
      && !inst.operands[j].isquad
      && !inst.operands[j].issingle))
if (!(inst.operands[j].isreg
      && !inst.operands[j].isvec))
if (!(inst.operands[j].isreg
      && inst.operands[j].isvec
      && inst.operands[j].isquad
      && !inst.operands[j].issingle))
if (!(!inst.operands[j].isreg
      && !inst.operands[j].isscalar))
if (!(!inst.operands[j].isreg
      && inst.operands[j].isscalar))

if (shape == NS_NULL && first_shape != NS_NULL)
first_error (_("invalid instruction shape"));
/* True if SHAPE is predominantly a quadword operation (most of the time, this
   means the Q bit should be set).  */

neon_quad (enum neon_shape shape)
return neon_shape_class[shape] == SC_QUAD;

neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
/* Allow modification to be made to types which are constrained to be
   based on the key element, based on bits set alongside N_EQK.  */
if ((typebits & N_EQK) != 0)
if ((typebits & N_HLF) != 0)
else if ((typebits & N_DBL) != 0)
if ((typebits & N_SGN) != 0)
*g_type = NT_signed;
else if ((typebits & N_UNS) != 0)
*g_type = NT_unsigned;
else if ((typebits & N_INT) != 0)
*g_type = NT_integer;
else if ((typebits & N_FLT) != 0)
*g_type = NT_float;
else if ((typebits & N_SIZ) != 0)
*g_type = NT_untyped;

/* Return operand OPNO promoted by bits set in THISARG.  KEY should be the
   "key" operand type, i.e. the single type specified in a Neon instruction
   when it is the only one given.  */

static struct neon_type_el
neon_type_promote (struct neon_type_el *key, unsigned thisarg)
struct neon_type_el dest = *key;

assert ((thisarg & N_EQK) != 0);

neon_modify_type_size (thisarg, &dest.type, &dest.size);
/* Convert Neon type and size into compact bitmask representation.  */

static enum neon_type_mask
type_chk_of_el_type (enum neon_el_type type, unsigned size)
case 8: return N_8;
case 16: return N_16;
case 32: return N_32;
case 64: return N_64;
case 8: return N_I8;
case 16: return N_I16;
case 32: return N_I32;
case 64: return N_I64;
case 32: return N_F32;
case 64: return N_F64;
case 8: return N_P8;
case 16: return N_P16;
case 8: return N_S8;
case 16: return N_S16;
case 32: return N_S32;
case 64: return N_S64;
case 8: return N_U8;
case 16: return N_U16;
case 32: return N_U32;
case 64: return N_U64;

/* Convert compact Neon bitmask type representation to a type and size.  Only
   handles the case where a single bit is set in the mask.  */

el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
                     enum neon_type_mask mask)
if ((mask & N_EQK) != 0)

if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_P16)) != 0)
else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64)) != 0)

if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
*type = NT_unsigned;
else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
*type = NT_integer;
else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
*type = NT_untyped;
else if ((mask & (N_P8 | N_P16)) != 0)
else if ((mask & (N_F32 | N_F64)) != 0)

/* Modify a bitmask of allowed types.  This is only needed for type
   ...  */
modify_types_allowed (unsigned allowed, unsigned mods)
enum neon_el_type type;

for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
if (el_type_of_type_chk (&type, &size, allowed & i) == SUCCESS)
neon_modify_type_size (mods, &type, &size);
destmask |= type_chk_of_el_type (type, size);
/* Check type and return type classification.
   The manual states (paraphrase): If one datatype is given, it indicates the
    - the second operand, if there is one
    - the operand, if there is no second operand
    - the result, if there are no operands.
   This isn't quite good enough though, so we use a concept of a "key" datatype
   which is set on a per-instruction basis, which is the one which matters when
   only one data type is written.
   Note: this function has side-effects (e.g. filling in missing operands).  All
   Neon instructions should call it before performing bit encoding.  */

static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
unsigned i, pass, key_el = 0;
unsigned types[NEON_MAX_TYPE_ELS];
enum neon_el_type k_type = NT_invtype;
unsigned k_size = -1u;
struct neon_type_el badtype = {NT_invtype, -1};
unsigned key_allowed = 0;

/* Optional registers in Neon instructions are always (not) in operand 1.
   Fill in the missing operand here, if it was omitted.  */
if (els > 1 && !inst.operands[1].present)
inst.operands[1] = inst.operands[0];

/* Suck up all the varargs.  */
for (i = 0; i < els; i++)
unsigned thisarg = va_arg (ap, unsigned);
if (thisarg == N_IGNORE_TYPE)
types[i] = thisarg;
if ((thisarg & N_KEY) != 0)

if (inst.vectype.elems > 0)
for (i = 0; i < els; i++)
if (inst.operands[i].vectype.type != NT_invtype)
first_error (_("types specified in both the mnemonic and operands"));

/* Duplicate inst.vectype elements here as necessary.
   FIXME: No idea if this is exactly the same as the ARM assembler,
   particularly when an insn takes one register and one non-register
   ...  */
if (inst.vectype.elems == 1 && els > 1)
inst.vectype.elems = els;
inst.vectype.el[key_el] = inst.vectype.el[0];
for (j = 0; j < els; j++)
inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
else if (inst.vectype.elems == 0 && els > 0)
/* No types were given after the mnemonic, so look for types specified
   after each operand.  We allow some flexibility here; as long as the
   "key" operand has a type, we can infer the others.  */
for (j = 0; j < els; j++)
if (inst.operands[j].vectype.type != NT_invtype)
inst.vectype.el[j] = inst.operands[j].vectype;
if (inst.operands[key_el].vectype.type != NT_invtype)
for (j = 0; j < els; j++)
if (inst.operands[j].vectype.type == NT_invtype)
inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
first_error (_("operand types can't be inferred"));
else if (inst.vectype.elems != els)
first_error (_("type specifier has the wrong number of parts"));

for (pass = 0; pass < 2; pass++)
for (i = 0; i < els; i++)
unsigned thisarg = types[i];
unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
                         ? modify_types_allowed (key_allowed, thisarg) : thisarg;
enum neon_el_type g_type = inst.vectype.el[i].type;
unsigned g_size = inst.vectype.el[i].size;

/* Decay more-specific signed & unsigned types to sign-insensitive
   integer types if sign-specific variants are unavailable.  */
if ((g_type == NT_signed || g_type == NT_unsigned)
    && (types_allowed & N_SU_ALL) == 0)
g_type = NT_integer;

/* If only untyped args are allowed, decay any more specific types to
   them.  Some instructions only care about signs for some element
   sizes, so handle that properly.  */
if ((g_size == 8 && (types_allowed & N_8) != 0)
    || (g_size == 16 && (types_allowed & N_16) != 0)
    || (g_size == 32 && (types_allowed & N_32) != 0)
    || (g_size == 64 && (types_allowed & N_64) != 0))
g_type = NT_untyped;

if ((thisarg & N_KEY) != 0)
key_allowed = thisarg & ~N_KEY;

if ((thisarg & N_VFP) != 0)
enum neon_shape_el regshape = neon_shape_tab[ns].el[i];
unsigned regwidth = neon_shape_el_size[regshape], match;

/* In VFP mode, operands must match register widths.  If we
   have a key operand, use its width, else use the width of
   the current operand.  */
if (regwidth != match)
first_error (_("operand size must match register width"));

if ((thisarg & N_EQK) == 0)
unsigned given_type = type_chk_of_el_type (g_type, g_size);

if ((given_type & types_allowed) == 0)
first_error (_("bad type in Neon instruction"));
enum neon_el_type mod_k_type = k_type;
unsigned mod_k_size = k_size;
neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
if (g_type != mod_k_type || g_size != mod_k_size)
first_error (_("inconsistent types in Neon instruction"));

return inst.vectype.el[key_el];
/* Neon-style VFP instruction forwarding.  */

/* Thumb VFP instructions have 0xE in the condition field.  */
do_vfp_cond_or_thumb (void)
inst.instruction |= 0xe0000000;
inst.instruction |= inst.cond << 28;

/* Look up and encode a simple mnemonic, for use as a helper function for the
   Neon-style VFP syntax.  This avoids duplication of bits of the insns table,
   etc.  It is assumed that operand parsing has already been done, and that the
   operands are in the form expected by the given opcode (this isn't necessarily
   the same as the form in which they were parsed, hence some massaging must
   take place before this function is called).
   Checks current arch version against that in the looked-up opcode.  */
do_vfp_nsyn_opcode (const char *opname)
const struct asm_opcode *opcode;

opcode = hash_find (arm_ops_hsh, opname);

constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
            thumb_mode ? *opcode->tvariant : *opcode->avariant),

inst.instruction = opcode->tvalue;
opcode->tencode ();
inst.instruction = (inst.cond << 28) | opcode->avalue;
opcode->aencode ();

do_vfp_nsyn_add_sub (enum neon_shape rs)
int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
do_vfp_nsyn_opcode ("fadds");
do_vfp_nsyn_opcode ("fsubs");
do_vfp_nsyn_opcode ("faddd");
do_vfp_nsyn_opcode ("fsubd");

/* Check operand types to see if this is a VFP instruction, and if so call
   ...  */
try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
enum neon_shape rs;
struct neon_type_el et;

rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
et = neon_check_type (2, rs,
                      N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
et = neon_check_type (3, rs,
                      N_EQK | N_VFP, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
if (et.type != NT_invtype)

do_vfp_nsyn_mla_mls (enum neon_shape rs)
int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
do_vfp_nsyn_opcode ("fmacs");
do_vfp_nsyn_opcode ("fmscs");
do_vfp_nsyn_opcode ("fmacd");
do_vfp_nsyn_opcode ("fmscd");

do_vfp_nsyn_mul (enum neon_shape rs)
do_vfp_nsyn_opcode ("fmuls");
do_vfp_nsyn_opcode ("fmuld");

do_vfp_nsyn_abs_neg (enum neon_shape rs)
int is_neg = (inst.instruction & 0x80) != 0;
neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_VFP | N_KEY);
do_vfp_nsyn_opcode ("fnegs");
do_vfp_nsyn_opcode ("fabss");
do_vfp_nsyn_opcode ("fnegd");
do_vfp_nsyn_opcode ("fabsd");
/* Encode single-precision (only!) VFP fldm/fstm instructions.  Double
   precision insns belong to Neon, and are handled elsewhere.  */
do_vfp_nsyn_ldm_stm (int is_dbmode)
int is_ldm = (inst.instruction & (1 << 20)) != 0;
do_vfp_nsyn_opcode ("fldmdbs");
do_vfp_nsyn_opcode ("fldmias");
do_vfp_nsyn_opcode ("fstmdbs");
do_vfp_nsyn_opcode ("fstmias");

do_vfp_nsyn_sqrt (void)
enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
do_vfp_nsyn_opcode ("fsqrts");
do_vfp_nsyn_opcode ("fsqrtd");

do_vfp_nsyn_div (void)
enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
                 N_F32 | N_F64 | N_KEY | N_VFP);
do_vfp_nsyn_opcode ("fdivs");
do_vfp_nsyn_opcode ("fdivd");

do_vfp_nsyn_nmul (void)
enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
                 N_F32 | N_F64 | N_KEY | N_VFP);
inst.instruction = NEON_ENC_SINGLE (inst.instruction);
do_vfp_sp_dyadic ();
inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
do_vfp_dp_rd_rn_rm ();
do_vfp_cond_or_thumb ();

do_vfp_nsyn_cmp (void)
if (inst.operands[1].isreg)
enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
inst.instruction = NEON_ENC_SINGLE (inst.instruction);
do_vfp_sp_monadic ();
inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
do_vfp_dp_rd_rm ();
enum neon_shape rs = neon_select_shape (NS_FI, NS_DI, NS_NULL);
neon_check_type (2, rs, N_F32 | N_F64 | N_KEY | N_VFP, N_EQK);
switch (inst.instruction & 0x0fffffff)
inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
inst.instruction = NEON_ENC_SINGLE (inst.instruction);
do_vfp_sp_compare_z ();
inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
do_vfp_cond_or_thumb ();

nsyn_insert_sp (void)
inst.operands[1] = inst.operands[0];
memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
inst.operands[0].reg = 13;
inst.operands[0].isreg = 1;
inst.operands[0].writeback = 1;
inst.operands[0].present = 1;

do_vfp_nsyn_push (void)
if (inst.operands[1].issingle)
do_vfp_nsyn_opcode ("fstmdbs");
do_vfp_nsyn_opcode ("fstmdbd");

do_vfp_nsyn_pop (void)
if (inst.operands[1].issingle)
do_vfp_nsyn_opcode ("fldmias");
do_vfp_nsyn_opcode ("fldmiad");

/* Fix up Neon data-processing instructions, ORing in the correct bits for
   ARM mode or Thumb mode and moving the encoded bit 24 to bit 28.  */
neon_dp_fixup (unsigned i)
/* The U bit is at bit 24 by default.  Move to bit 28 in Thumb mode.  */

/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   ...  */
neon_logbits (unsigned x)
return ffs (x) - 4;

#define LOW4(R) ((R) & 0xf)
#define HI1(R)  (((R) >> 4) & 1)
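/* Illustrative note (not part of the original source): two quick worked
   examples of the helpers above.  neon_logbits maps an element size to the
   two-bit size field value: ffs(8) = 4, so neon_logbits(8) = 0, and
   likewise 16 -> 1, 32 -> 2, 64 -> 3.  LOW4/HI1 split a 5-bit Neon
   register number into its four low bits plus the "top" bit that lands in
   the D/N/M field of the encoding, e.g. for d17 (reg = 17 = 0x11) LOW4
   gives 0x1 and HI1 gives 1.  */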
/* Encode insns with bit pattern:

   |28/24|23|22 |21 20|19 16|15 12|11    8|7|6|5|4|3  0|
   |  U  |x |D  |size | Rn  | Rd  |x x x x|N|Q|M|x| Rm |

   SIZE is passed in bits. -1 means size field isn't changed, in case it has a
   different meaning for some instruction.  */

neon_three_same (int isquad, int ubit, int size)
inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
inst.instruction |= HI1 (inst.operands[0].reg) << 22;
inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
inst.instruction |= HI1 (inst.operands[1].reg) << 7;
inst.instruction |= LOW4 (inst.operands[2].reg);
inst.instruction |= HI1 (inst.operands[2].reg) << 5;
inst.instruction |= (isquad != 0) << 6;
inst.instruction |= (ubit != 0) << 24;

inst.instruction |= neon_logbits (size) << 20;

inst.instruction = neon_dp_fixup (inst.instruction);
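/* Illustrative worked example (not part of the original source): for a
   three-same-register operation on Rd = d1, Rn = d2, Rm = d17 the
   register fields above contribute

     LOW4(1)<<12 | HI1(1)<<22  = 0x00001000
     LOW4(2)<<16 | HI1(2)<<7   = 0x00020000
     LOW4(17)    | HI1(17)<<5  = 0x00000021

   i.e. 0x00021021 in total, with D/N/M carrying the top bit of each
   register number.  */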
/* Encode instructions of the form:

   |28/24|23|22|21 20|19 18|17 16|15 12|11      7|6|5|4|3  0|
   |  U  |x |D |x  x |size |x  x | Rd  |x x x x x|Q|M|x| Rm |

   Don't write size if SIZE == -1.  */

neon_two_same (int qbit, int ubit, int size)
inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
inst.instruction |= HI1 (inst.operands[0].reg) << 22;
inst.instruction |= LOW4 (inst.operands[1].reg);
inst.instruction |= HI1 (inst.operands[1].reg) << 5;
inst.instruction |= (qbit != 0) << 6;
inst.instruction |= (ubit != 0) << 24;

inst.instruction |= neon_logbits (size) << 18;

inst.instruction = neon_dp_fixup (inst.instruction);

/* Neon instruction encoders, in approximate order of appearance.  */

do_neon_dyadic_i_su (void)
enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
struct neon_type_el et = neon_check_type (3, rs,
                                          N_EQK, N_EQK, N_SU_32 | N_KEY);
neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);

do_neon_dyadic_i64_su (void)
enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
struct neon_type_el et = neon_check_type (3, rs,
                                          N_EQK, N_EQK, N_SU_ALL | N_KEY);
neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);

neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
unsigned size = et.size >> 3;
inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
inst.instruction |= HI1 (inst.operands[0].reg) << 22;
inst.instruction |= LOW4 (inst.operands[1].reg);
inst.instruction |= HI1 (inst.operands[1].reg) << 5;
inst.instruction |= (isquad != 0) << 6;
inst.instruction |= immbits << 16;
inst.instruction |= (size >> 3) << 7;
inst.instruction |= (size & 0x7) << 19;
inst.instruction |= (uval != 0) << 24;

inst.instruction = neon_dp_fixup (inst.instruction);

do_neon_shl_imm (void)
if (!inst.operands[2].isreg)
enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
inst.instruction = NEON_ENC_IMMED (inst.instruction);
neon_imm_shift (FALSE, 0, neon_quad (rs), et, inst.operands[2].imm);
enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
struct neon_type_el et = neon_check_type (3, rs,
                                          N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);

/* VSHL/VQSHL 3-register variants have syntax such as:
     ...
   whereas other 3-register operations encoded by neon_three_same have
     ...
   (i.e. with Dn & Dm reversed).  Swap operands[1].reg and operands[2].reg
   ...  */
tmp = inst.operands[2].reg;
inst.operands[2].reg = inst.operands[1].reg;
inst.operands[1].reg = tmp;
inst.instruction = NEON_ENC_INTEGER (inst.instruction);
neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);

do_neon_qshl_imm (void)
if (!inst.operands[2].isreg)
enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);

inst.instruction = NEON_ENC_IMMED (inst.instruction);
neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
                inst.operands[2].imm);
enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
struct neon_type_el et = neon_check_type (3, rs,
                                          N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);

/* See note in do_neon_shl_imm.  */
tmp = inst.operands[2].reg;
inst.operands[2].reg = inst.operands[1].reg;
inst.operands[1].reg = tmp;
inst.instruction = NEON_ENC_INTEGER (inst.instruction);
neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);

do_neon_rshl (void)
enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
struct neon_type_el et = neon_check_type (3, rs,
                                          N_EQK, N_EQK, N_SU_ALL | N_KEY);

tmp = inst.operands[2].reg;
inst.operands[2].reg = inst.operands[1].reg;
inst.operands[1].reg = tmp;
neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
/* Handle .I8 pseudo-instructions.  */
/* Unfortunately, this will make everything apart from zero out-of-range.
   FIXME is this the intended semantics?  There doesn't seem much point in
   accepting .I8 if so.  */
immediate |= immediate << 8;
if (immediate == (immediate & 0x000000ff))
*immbits = immediate;
else if (immediate == (immediate & 0x0000ff00))
*immbits = immediate >> 8;
else if (immediate == (immediate & 0x00ff0000))
*immbits = immediate >> 16;
else if (immediate == (immediate & 0xff000000))
*immbits = immediate >> 24;
if ((immediate & 0xffff) != (immediate >> 16))
goto bad_immediate;
immediate &= 0xffff;
if (immediate == (immediate & 0x000000ff))
*immbits = immediate;
else if (immediate == (immediate & 0x0000ff00))
*immbits = immediate >> 8;
first_error (_("immediate value out of range"));

/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   ...  */
neon_bits_same_in_bytes (unsigned imm)
return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
       && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
       && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
       && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);

/* For immediate of above form, return 0bABCD.  */
neon_squash_bits (unsigned imm)
return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
       | ((imm & 0x01000000) >> 21);

/* Compress quarter-float representation to 0b...000 abcdefgh.  */
neon_qfloat_bits (unsigned imm)
return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
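/* Illustrative worked examples (not part of the original source):
   neon_squash_bits (0x00ff00ff) picks one bit per byte and yields 0b0101
   (bytes 3 and 1 are zero, bytes 2 and 0 are all-ones), while
   neon_qfloat_bits (0x3f800000), i.e. the IEEE single-precision 1.0,
   yields 0x70, the "abcdefgh" byte of the ARM quarter-precision float
   immediate.  */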
/* Returns CMODE.  IMMBITS [7:0] is set to bits suitable for inserting into
   the instruction.  *OP is passed as the initial value of the op field, and
   may be set to a different value depending on the constant (i.e.
   "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
   MVN).  If the immediate looks like a repeated pattern then also
   try smaller element sizes.  */

neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
                         unsigned *immbits, int *op, int size,
                         enum neon_el_type type)
/* Only permit float immediates (including 0.0/-0.0) if the operand type is
   ...  */
if (type == NT_float && !float_p)

if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
if (size != 32 || *op == 1)
*immbits = neon_qfloat_bits (immlo);

if (neon_bits_same_in_bytes (immhi)
    && neon_bits_same_in_bytes (immlo))
*immbits = (neon_squash_bits (immhi) << 4)
           | neon_squash_bits (immlo);

if (immhi != immlo)

if (immlo == (immlo & 0x000000ff))
else if (immlo == (immlo & 0x0000ff00))
*immbits = immlo >> 8;
else if (immlo == (immlo & 0x00ff0000))
*immbits = immlo >> 16;
else if (immlo == (immlo & 0xff000000))
*immbits = immlo >> 24;
else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
*immbits = (immlo >> 8) & 0xff;
else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
*immbits = (immlo >> 16) & 0xff;

if ((immlo & 0xffff) != (immlo >> 16))

if (immlo == (immlo & 0x000000ff))
else if (immlo == (immlo & 0x0000ff00))
*immbits = immlo >> 8;

if ((immlo & 0xff) != (immlo >> 8))

if (immlo == (immlo & 0x000000ff))
/* Don't allow MVN with 8-bit immediate.  */

/* Write immediate bits [7:0] to the following locations:

   |28/24|23     19|18 16|15                    4|3     0|
   |  a  |x x x x x|b c d|x x x x x x x x x x x x|e f g h|

   This function is used by VMOV/VMVN/VORR/VBIC.  */

neon_write_immbits (unsigned immbits)
inst.instruction |= immbits & 0xf;
inst.instruction |= ((immbits >> 4) & 0x7) << 16;
inst.instruction |= ((immbits >> 7) & 0x1) << 24;
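/* Illustrative worked example (not part of the original source): for
   immbits = 0xa5 (0b10100101) the three stores above place e f g h =
   0b0101 in bits 3-0, b c d = 0b010 in bits 18-16, and a = 1 in bit
   28/24, i.e. the instruction word gains 0x01020005.  */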
/* Invert low-order SIZE bits of XHI:XLO.  */

neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
unsigned immlo = xlo ? *xlo : 0;
unsigned immhi = xhi ? *xhi : 0;

immlo = (~immlo) & 0xff;
immlo = (~immlo) & 0xffff;
immhi = (~immhi) & 0xffffffff;
/* fall through.  */
immlo = (~immlo) & 0xffffffff;

do_neon_logic (void)
if (inst.operands[2].present && inst.operands[2].isreg)
enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
neon_check_type (3, rs, N_IGNORE_TYPE);
/* U bit and size field were set as part of the bitmask.  */
inst.instruction = NEON_ENC_INTEGER (inst.instruction);
neon_three_same (neon_quad (rs), 0, -1);
enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
struct neon_type_el et = neon_check_type (2, rs,
                                          N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
enum neon_opc opcode = inst.instruction & 0x0fffffff;

if (et.type == NT_invtype)

inst.instruction = NEON_ENC_IMMED (inst.instruction);

immbits = inst.operands[1].imm;
/* .i64 is a pseudo-op, so the immediate must be a repeating
   ...  */
if (immbits != (inst.operands[1].regisimm ?
                inst.operands[1].reg : 0))
/* Set immbits to an invalid constant.  */
immbits = 0xdeadbeef;
cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
/* Pseudo-instruction for VBIC.  */
neon_invert_size (&immbits, 0, et.size);
cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
/* Pseudo-instruction for VORR.  */
neon_invert_size (&immbits, 0, et.size);
cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);

inst.instruction |= neon_quad (rs) << 6;
inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
inst.instruction |= HI1 (inst.operands[0].reg) << 22;
inst.instruction |= cmode << 8;
neon_write_immbits (immbits);

inst.instruction = neon_dp_fixup (inst.instruction);

do_neon_bitfield (void)
enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
neon_check_type (3, rs, N_IGNORE_TYPE);
neon_three_same (neon_quad (rs), 0, -1);

neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
if (et.type == NT_float)
inst.instruction = NEON_ENC_FLOAT (inst.instruction);
neon_three_same (neon_quad (rs), 0, -1);
inst.instruction = NEON_ENC_INTEGER (inst.instruction);
neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
12048 do_neon_dyadic_if_su (void)
12050 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
12054 do_neon_dyadic_if_su_d (void)
12056 /* This version only allow D registers, but that constraint is enforced during
12057 operand parsing so we don't need to do anything extra here. */
12058 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
12062 do_neon_dyadic_if_i_d (void)
12064 /* The "untyped" case can't happen. Do this to stop the "U" bit being
12065 affected if we specify unsigned args. */
12066 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
12069 enum vfp_or_neon_is_neon_bits
12072 NEON_CHECK_ARCH
= 2
12075 /* Call this function if an instruction which may have belonged to the VFP or
12076 Neon instruction sets, but turned out to be a Neon instruction (due to the
12077 operand types involved, etc.). We have to check and/or fix-up a couple of
12080 - Make sure the user hasn't attempted to make a Neon instruction
12082 - Alter the value in the condition code field if necessary.
12083 - Make sure that the arch supports Neon instructions.
12085 Which of these operations take place depends on bits from enum
12086 vfp_or_neon_is_neon_bits.
12088 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
12089 current instruction's condition is COND_ALWAYS, the condition field is
12090 changed to inst.uncond_value. This is necessary because instructions shared
12091 between VFP and Neon may be conditional for the VFP variants only, and the
12092 unconditional Neon version must have, e.g., 0xF in the condition field. */
12095 vfp_or_neon_is_neon (unsigned check
)
12097 /* Conditions are always legal in Thumb mode (IT blocks). */
12098 if (!thumb_mode
&& (check
& NEON_CHECK_CC
))
12100 if (inst
.cond
!= COND_ALWAYS
)
12102 first_error (_(BAD_COND
));
12105 if (inst
.uncond_value
!= -1)
12106 inst
.instruction
|= inst
.uncond_value
<< 28;
12109 if ((check
& NEON_CHECK_ARCH
)
12110 && !ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
))
12112 first_error (_(BAD_FPU
));
12120 do_neon_addsub_if_i (void)
12122 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub
) == SUCCESS
)
12125 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
12128 /* The "untyped" case can't happen. Do this to stop the "U" bit being
12129 affected if we specify unsigned args. */
12130 neon_dyadic_misc (NT_untyped
, N_IF_32
| N_I64
, 0);
12133 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
12135 V<op> A,B (A is operand 0, B is operand 2)
12140 so handle that case specially. */
12143 neon_exchange_operands (void)
12145 void *scratch
= alloca (sizeof (inst
.operands
[0]));
12146 if (inst
.operands
[1].present
)
12148 /* Swap operands[1] and operands[2]. */
12149 memcpy (scratch
, &inst
.operands
[1], sizeof (inst
.operands
[0]));
12150 inst
.operands
[1] = inst
.operands
[2];
12151 memcpy (&inst
.operands
[2], scratch
, sizeof (inst
.operands
[0]));
12155 inst
.operands
[1] = inst
.operands
[2];
12156 inst
.operands
[2] = inst
.operands
[0];
static void
neon_compare (unsigned regtypes, unsigned immtypes, int invert)
{
  if (inst.operands[2].isreg)
    {
      if (invert)
        neon_exchange_operands ();
      neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
        N_EQK | N_SIZ, immtypes | N_KEY);

      inst.instruction = NEON_ENC_IMMED (inst.instruction);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= (et.type == NT_float) << 10;
      inst.instruction |= neon_logbits (et.size) << 18;

      inst.instruction = neon_dp_fixup (inst.instruction);
    }
}
static void
do_neon_cmp (void)
{
  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
}
static void
do_neon_cmp_inv (void)
{
  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
}
static void
do_neon_ceq (void)
{
  neon_compare (N_IF_32, N_IF_32, FALSE);
}
/* For multiply instructions, we have the possibility of 16-bit or 32-bit
   scalars, which are encoded in 5 bits, M : Rm.
   For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
   M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
   index in M.  */

static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  switch (elsize)
    {
    case 16:
      if (regno > 7 || elno > 3)
        goto bad_scalar;
      return regno | (elno << 3);

    case 32:
      if (regno > 15 || elno > 1)
        goto bad_scalar;
      return regno | (elno << 4);

    default:
    bad_scalar:
      first_error (_("scalar out of range for multiply instruction"));
    }

  return 0;
}
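/* Worked example of the packing above (illustrative; derived from the code
   rather than quoted from any manual): a 16-bit scalar D2[1] has regno == 2
   and elno == 1, so the function returns 2 | (1 << 3) == 0xa, i.e.
   M:Rm == 0b01010.  A 32-bit scalar D5[1] returns 5 | (1 << 4) == 0x15.  */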
/* Encode multiply / multiply-accumulate scalar instructions.  */

static void
neon_mul_mac (struct neon_type_el et, int ubit)
{
  unsigned scalar;

  /* Give a more helpful error message if we have an invalid type.  */
  if (et.type == NT_invtype)
    return;

  scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (scalar);
  inst.instruction |= HI1 (scalar) << 5;
  inst.instruction |= (et.type == NT_float) << 8;
  inst.instruction |= neon_logbits (et.size) << 20;
  inst.instruction |= (ubit != 0) << 24;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
static void
do_neon_mac_maybe_scalar (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
        N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
      inst.instruction = NEON_ENC_SCALAR (inst.instruction);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      /* The "untyped" case can't happen. Do this to stop the "U" bit being
         affected if we specify unsigned args.  */
      neon_dyadic_misc (NT_untyped, N_IF_32, 0);
    }
}
static void
do_neon_tst (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
  neon_three_same (neon_quad (rs), 0, et.size);
}
/* VMUL with 3 registers allows the P8 type.  The scalar version supports the
   same types as the MAC equivalents.  The polynomial type for this instruction
   is encoded the same as the integer type.  */

static void
do_neon_mul (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar ();
  else
    neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
}
static void
do_neon_qdmulh (void)
{
  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
        N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      inst.instruction = NEON_ENC_SCALAR (inst.instruction);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
        N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      /* The U bit (rounding) comes from bit mask.  */
      neon_three_same (neon_quad (rs), 0, et.size);
    }
}
static void
do_neon_fcmp_absolute (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
  /* Size field comes from bit mask.  */
  neon_three_same (neon_quad (rs), 1, -1);
}
static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
static void
do_neon_step (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
  neon_three_same (neon_quad (rs), 0, -1);
}
static void
do_neon_abs_neg (void)
{
  enum neon_shape rs;
  struct neon_type_el et;

  if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= (et.type == NT_float) << 10;
  inst.instruction |= neon_logbits (et.size) << 18;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
static void
do_neon_sli (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
              _("immediate out of range for insert"));
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
static void
do_neon_sri (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 1 || (unsigned)imm > et.size,
              _("immediate out of range for insert"));
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
}
static void
do_neon_qshlu_imm (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
              _("immediate out of range for shift"));
  /* Only encodes the 'U present' variant of the instruction.
     In this case, signed types have OP (bit 8) set to 0.
     Unsigned types have OP set to 1.  */
  inst.instruction |= (et.type == NT_unsigned) << 8;
  /* The rest of the bits are the same as other immediate shifts.  */
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
static void
do_neon_qmovn (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  /* Saturating move where operands can be signed or unsigned, and the
     destination has the same signedness.  */
  inst.instruction = NEON_ENC_INTEGER (inst.instruction);
  if (et.type == NT_unsigned)
    inst.instruction |= 0xc0;
  else
    inst.instruction |= 0x80;
  neon_two_same (0, 1, et.size / 2);
}
static void
do_neon_qmovun (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  /* Saturating move with unsigned results.  Operands must be signed.  */
  inst.instruction = NEON_ENC_INTEGER (inst.instruction);
  neon_two_same (0, 1, et.size / 2);
}
static void
do_neon_rshift_sat_narrow (void)
{
  /* FIXME: Types for narrowing.  If operands are signed, results can be signed
     or unsigned.  If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovn;
      do_neon_qmovn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
              _("immediate out of range"));
  neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
}
static void
do_neon_rshift_sat_narrow_u (void)
{
  /* FIXME: Types for narrowing.  If operands are signed, results can be signed
     or unsigned.  If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVUN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovun;
      do_neon_qmovun ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
              _("immediate out of range"));
  /* FIXME: The manual is kind of unclear about what value U should have in
     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
     must be 1.  */
  neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
}
static void
do_neon_movn (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  inst.instruction = NEON_ENC_INTEGER (inst.instruction);
  neon_two_same (0, 1, et.size / 2);
}
static void
do_neon_rshift_narrow (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* If immediate is zero then we are a pseudo-instruction for
     VMOVN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vmovn;
      do_neon_movn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
              _("immediate out of range for narrowing operation"));
  neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
}
static void
do_neon_shll (void)
{
  /* FIXME: Type checking when lengthening.  */
  struct neon_type_el et = neon_check_type (2, NS_QDI,
    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
  unsigned imm = inst.operands[2].imm;

  if (imm == et.size)
    {
      /* Maximum shift variant.  */
      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_logbits (et.size) << 18;

      inst.instruction = neon_dp_fixup (inst.instruction);
    }
  else
    {
      /* A more-specific type check for non-max versions.  */
      et = neon_check_type (2, NS_QDI,
        N_EQK | N_DBL, N_SU_32 | N_KEY);
      inst.instruction = NEON_ENC_IMMED (inst.instruction);
      neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
    }
}
/* Check the various types for the VCVT instruction, and return which version
   the current instruction is.  */

static int
neon_cvt_flavour (enum neon_shape rs)
{
#define CVT_VAR(C,X,Y)                                                  \
  et = neon_check_type (2, rs, whole_reg | (X), whole_reg | (Y));       \
  if (et.type != NT_invtype)                                            \
    {                                                                   \
      inst.error = NULL;                                                \
      return (C);                                                       \
    }
  struct neon_type_el et;
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
                        || rs == NS_FF) ? N_VFP : 0;
  /* The instruction versions which take an immediate take one register
     argument, which is extended to the width of the full register.  Thus the
     "source" and "destination" registers must have the same width.  Hack that
     here by making the size equal to the key (wider, in this case) operand.  */
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;

  CVT_VAR (0, N_S32, N_F32);
  CVT_VAR (1, N_U32, N_F32);
  CVT_VAR (2, N_F32, N_S32);
  CVT_VAR (3, N_F32, N_U32);

  /* VFP instructions.  */
  CVT_VAR (4, N_F32, N_F64);
  CVT_VAR (5, N_F64, N_F32);
  CVT_VAR (6, N_S32, N_F64 | key);
  CVT_VAR (7, N_U32, N_F64 | key);
  CVT_VAR (8, N_F64 | key, N_S32);
  CVT_VAR (9, N_F64 | key, N_U32);
  /* VFP instructions with bitshift.  */
  CVT_VAR (10, N_F32 | key, N_S16);
  CVT_VAR (11, N_F32 | key, N_U16);
  CVT_VAR (12, N_F64 | key, N_S16);
  CVT_VAR (13, N_F64 | key, N_U16);
  CVT_VAR (14, N_S16, N_F32 | key);
  CVT_VAR (15, N_U16, N_F32 | key);
  CVT_VAR (16, N_S16, N_F64 | key);
  CVT_VAR (17, N_U16, N_F64 | key);

  return -1;
#undef CVT_VAR
}
/* Neon-syntax VFP conversions.  */

static void
do_vfp_nsyn_cvt (enum neon_shape rs, int flavour)
{
  const char *opname = 0;

  if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI)
    {
      /* Conversions with immediate bitshift.  */
      const char *enc[] =
        {
          "ftosls",
          "ftouls",
          "fsltos",
          "fultos",
          NULL,
          NULL,
          "ftosld",
          "ftould",
          "fsltod",
          "fultod",
          "fshtos",
          "fuhtos",
          "fshtod",
          "fuhtod",
          "ftoshs",
          "ftouhs",
          "ftoshd",
          "ftouhd"
        };

      if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
        {
          opname = enc[flavour];
          constraint (inst.operands[0].reg != inst.operands[1].reg,
                      _("operands 0 and 1 must be the same register"));
          inst.operands[1] = inst.operands[2];
          memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
        }
    }
  else
    {
      /* Conversions without bitshift.  */
      const char *enc[] =
        {
          "ftosis",
          "ftouis",
          "fsitos",
          "fuitos",
          "fcvtsd",
          "fcvtds",
          "ftosid",
          "ftouid",
          "fsitod",
          "fuitod"
        };

      if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
        opname = enc[flavour];
    }

  if (opname)
    do_vfp_nsyn_opcode (opname);
}
static void
do_vfp_nsyn_cvtz (void)
{
  enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_NULL);
  int flavour = neon_cvt_flavour (rs);
  const char *enc[] =
    {
      "ftosizs",
      "ftouizs",
      NULL,
      NULL,
      NULL,
      NULL,
      "ftosizd",
      "ftouizd"
    };

  if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
    do_vfp_nsyn_opcode (enc[flavour]);
}
static void
do_neon_cvt (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
    NS_FD, NS_DF, NS_FF, NS_NULL);
  int flavour = neon_cvt_flavour (rs);

  /* VFP rather than Neon conversions.  */
  if (flavour >= 4)
    {
      do_vfp_nsyn_cvt (rs, flavour);
      return;
    }

  switch (rs)
    {
    case NS_DDI:
    case NS_QQI:
      {
        unsigned immbits;
        unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };

        if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
          return;

        /* Fixed-point conversion with #0 immediate is encoded as an
           integer conversion.  */
        if (inst.operands[2].present && inst.operands[2].imm == 0)
          goto int_encode;
        immbits = 32 - inst.operands[2].imm;
        inst.instruction = NEON_ENC_IMMED (inst.instruction);
        if (flavour != -1)
          inst.instruction |= enctab[flavour];
        inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
        inst.instruction |= HI1 (inst.operands[0].reg) << 22;
        inst.instruction |= LOW4 (inst.operands[1].reg);
        inst.instruction |= HI1 (inst.operands[1].reg) << 5;
        inst.instruction |= neon_quad (rs) << 6;
        inst.instruction |= 1 << 21;
        inst.instruction |= immbits << 16;

        inst.instruction = neon_dp_fixup (inst.instruction);
      }
      break;

    case NS_DD:
    case NS_QQ:
    int_encode:
      {
        unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };

        inst.instruction = NEON_ENC_INTEGER (inst.instruction);

        if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
          return;

        if (flavour != -1)
          inst.instruction |= enctab[flavour];

        inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
        inst.instruction |= HI1 (inst.operands[0].reg) << 22;
        inst.instruction |= LOW4 (inst.operands[1].reg);
        inst.instruction |= HI1 (inst.operands[1].reg) << 5;
        inst.instruction |= neon_quad (rs) << 6;
        inst.instruction |= 2 << 18;

        inst.instruction = neon_dp_fixup (inst.instruction);
      }
      break;

    default:
      /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32).  */
      do_vfp_nsyn_cvt (rs, flavour);
    }
}
static void
neon_move_immediate (void)
{
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
  unsigned immlo, immhi = 0, immbits;
  int op, cmode, float_p;

  constraint (et.type == NT_invtype,
              _("operand size must be specified for immediate VMOV"));

  /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
  op = (inst.instruction & (1 << 5)) != 0;

  immlo = inst.operands[1].imm;
  if (inst.operands[1].regisimm)
    immhi = inst.operands[1].reg;

  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
              _("immediate has bits set outside the operand size"));

  float_p = inst.operands[1].immisfloat;

  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
                                        et.size, et.type)) == FAIL)
    {
      /* Invert relevant bits only.  */
      neon_invert_size (&immlo, &immhi, et.size);
      /* Flip from VMOV/VMVN to VMVN/VMOV.  Some immediate types are unavailable
         with one or the other; those cases are caught by
         neon_cmode_for_move_imm.  */
      op = !op;
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
                                            &op, et.size, et.type)) == FAIL)
        {
          first_error (_("immediate out of range"));
          return;
        }
    }

  inst.instruction &= ~(1 << 5);
  inst.instruction |= op << 5;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= cmode << 8;

  neon_write_immbits (immbits);
}
static void
do_neon_mvn (void)
{
  if (inst.operands[1].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);

      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
    }
  else
    {
      inst.instruction = NEON_ENC_IMMED (inst.instruction);
      neon_move_immediate ();
    }

  inst.instruction = neon_dp_fixup (inst.instruction);
}
/* Encode instructions of form:

  |28/24|23|22|21 20|19 16|15 12|11    8|7|6|5|4|3  0|
  |  U  |x |D |size |  Rn |  Rd |x x x x|N|x|M|x| Rm |  */

static void
neon_mixed_length (struct neon_type_el et, unsigned size)
{
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= (et.type == NT_unsigned) << 24;
  inst.instruction |= neon_logbits (size) << 20;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
static void
do_neon_dyadic_long (void)
{
  /* FIXME: Type checking for lengthening op.  */
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
static void
do_neon_abal (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
static void
neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
{
  if (inst.operands[2].isscalar)
    {
      struct neon_type_el et = neon_check_type (3, NS_QDS,
        N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
      inst.instruction = NEON_ENC_SCALAR (inst.instruction);
      neon_mul_mac (et, et.type == NT_unsigned);
    }
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
        N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      neon_mixed_length (et, et.size);
    }
}
static void
do_neon_mac_maybe_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
}
static void
do_neon_dyadic_wide (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QQD,
    N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
static void
do_neon_dyadic_narrow (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
  /* Operand sign is unimportant, and the U bit is part of the opcode,
     so force the operand type to integer.  */
  et.type = NT_integer;
  neon_mixed_length (et, et.size / 2);
}
static void
do_neon_mul_sat_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
}
static void
do_neon_vmull (void)
{
  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar_long ();
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
        N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_KEY);
      if (et.type == NT_poly)
        inst.instruction = NEON_ENC_POLY (inst.instruction);
      else
        inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      /* For polynomial encoding, size field must be 0b00 and the U bit must be
         zero.  Should be OK as-is.  */
      neon_mixed_length (et, et.size);
    }
}
static void
do_neon_ext (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  unsigned imm = (inst.operands[3].imm * et.size) / 8;

  constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
              _("shift out of range"));
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= imm << 8;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
static void
do_neon_rev (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  unsigned op = (inst.instruction >> 7) & 3;
  /* N (width of reversed regions) is encoded as part of the bitmask.  We
     extract it here to check the elements to be reversed are smaller.
     Otherwise we'd get a reserved instruction.  */
  unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
  assert (elsize != 0);
  constraint (et.size >= elsize,
              _("elements must be smaller than reversal region"));
  neon_two_same (neon_quad (rs), 1, et.size);
}
static void
do_neon_dup (void)
{
  if (inst.operands[1].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
        N_EQK, N_8 | N_16 | N_32 | N_KEY);
      unsigned sizebits = et.size >> 3;
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
      int logsize = neon_logbits (et.size);
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;

      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
        return;

      inst.instruction = NEON_ENC_SCALAR (inst.instruction);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (dm);
      inst.instruction |= HI1 (dm) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= x << 17;
      inst.instruction |= sizebits << 16;

      inst.instruction = neon_dp_fixup (inst.instruction);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
        N_8 | N_16 | N_32 | N_KEY, N_EQK);
      /* Duplicate ARM register to lanes of vector.  */
      inst.instruction = NEON_ENC_ARMREG (inst.instruction);
      switch (et.size)
        {
        case 8:  inst.instruction |= 0x400000; break;
        case 16: inst.instruction |= 0x000020; break;
        case 32: inst.instruction |= 0x000000; break;
        default: break;
        }
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
      inst.instruction |= neon_quad (rs) << 21;
      /* The encoding for this instruction is identical for the ARM and Thumb
         variants, except for the condition field.  */
      do_vfp_cond_or_thumb ();
    }
}
/* VMOV has particularly many variations.  It can be one of:
     0. VMOV<c><q> <Qd>, <Qm>
     1. VMOV<c><q> <Dd>, <Dm>
   (Register operations, which are VORR with Rm = Rn.)
     2. VMOV<c><q>.<dt> <Qd>, #<imm>
     3. VMOV<c><q>.<dt> <Dd>, #<imm>
   (Immediate loads.)
     4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
   (ARM register to scalar.)
     5. VMOV<c><q> <Dm>, <Rd>, <Rn>
   (Two ARM registers to vector.)
     6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
   (Scalar to ARM register.)
     7. VMOV<c><q> <Rd>, <Rn>, <Dm>
   (Vector to two ARM registers.)
     8. VMOV.F32 <Sd>, <Sm>
     9. VMOV.F64 <Dd>, <Dm>
   (VFP register moves.)
    10. VMOV.F32 <Sd>, #imm
    11. VMOV.F64 <Dd>, #imm
   (VFP float immediate load.)
    12. VMOV <Rd>, <Sm>
   (VFP single to ARM reg.)
    13. VMOV <Sd>, <Rm>
   (ARM reg to VFP single.)
    14. VMOV <Rd>, <Re>, <Sn>, <Sm>
   (Two ARM regs to two VFP singles.)
    15. VMOV <Sd>, <Se>, <Rn>, <Rm>
   (Two VFP singles to two ARM regs.)

   These cases can be disambiguated using neon_select_shape, except cases 1/9
   and 3/11 which depend on the operand type too.

   All the encoded bits are hardcoded by this function.

   Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
   Cases 5, 7 may be used with VFPv2 and above.

   FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
   can specify a type where it doesn't make sense to, and is ignored).  */
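/* Illustrative mapping (follows from the list above, not an exhaustive spec):
   "vmov.i32 q0, #1" parses with shape NS_QI and is handled as case 2;
   "vmov r0, r1, d2" parses as NS_RRD and is handled as case 7 (fmrrd);
   "vmov s0, r0" parses as NS_FR and is handled as case 13 (fmsr).  */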
static void
do_neon_mov (void)
{
  enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
    NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR, NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
    NS_NULL);
  struct neon_type_el et;
  const char *ldconst = 0;

  switch (rs)
    {
    case NS_DD:  /* case 1/9.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      /* It is not an error here if no type is given.  */
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
        {
          do_vfp_nsyn_opcode ("fcpyd");
          break;
        }
      /* fall through.  */

    case NS_QQ:  /* case 0/1.  */
      {
        if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
          return;
        /* The architecture manual I have doesn't explicitly state which
           value the U bit should have for register->register moves, but
           the equivalent VORR instruction has U = 0, so do that.  */
        inst.instruction = 0x0200110;
        inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
        inst.instruction |= HI1 (inst.operands[0].reg) << 22;
        inst.instruction |= LOW4 (inst.operands[1].reg);
        inst.instruction |= HI1 (inst.operands[1].reg) << 5;
        inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
        inst.instruction |= HI1 (inst.operands[1].reg) << 7;
        inst.instruction |= neon_quad (rs) << 6;

        inst.instruction = neon_dp_fixup (inst.instruction);
      }
      break;

    case NS_DI:  /* case 3/11.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
        {
          /* case 11 (fconstd).  */
          ldconst = "fconstd";
          goto encode_fconstd;
        }
      /* fall through.  */

    case NS_QI:  /* case 2/3.  */
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
        return;
      inst.instruction = 0x0800010;
      neon_move_immediate ();
      inst.instruction = neon_dp_fixup (inst.instruction);
      break;

    case NS_SR:  /* case 4.  */
      {
        unsigned bcdebits = 0;
        struct neon_type_el et = neon_check_type (2, NS_NULL,
          N_8 | N_16 | N_32 | N_KEY, N_EQK);
        int logsize = neon_logbits (et.size);
        unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
        unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);

        constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
                    _(BAD_FPU));
        constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
                    && et.size != 32, _(BAD_FPU));
        constraint (et.type == NT_invtype, _("bad type for scalar"));
        constraint (x >= 64 / et.size, _("scalar index out of range"));

        switch (et.size)
          {
          case 8:  bcdebits = 0x8; break;
          case 16: bcdebits = 0x1; break;
          case 32: bcdebits = 0x0; break;
          default: ;
          }

        bcdebits |= x << logsize;

        inst.instruction = 0xe000b10;
        do_vfp_cond_or_thumb ();
        inst.instruction |= LOW4 (dn) << 16;
        inst.instruction |= HI1 (dn) << 7;
        inst.instruction |= inst.operands[1].reg << 12;
        inst.instruction |= (bcdebits & 3) << 5;
        inst.instruction |= (bcdebits >> 2) << 21;
      }
      break;

    case NS_DRR:  /* case 5 (fmdrr).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
                  _(BAD_FPU));

      inst.instruction = 0xc400b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= LOW4 (inst.operands[0].reg);
      inst.instruction |= HI1 (inst.operands[0].reg) << 5;
      inst.instruction |= inst.operands[1].reg << 12;
      inst.instruction |= inst.operands[2].reg << 16;
      break;

    case NS_RS:  /* case 6.  */
      {
        struct neon_type_el et = neon_check_type (2, NS_NULL,
          N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
        unsigned logsize = neon_logbits (et.size);
        unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
        unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
        unsigned abcdebits = 0;

        constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
                    _(BAD_FPU));
        constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
                    && et.size != 32, _(BAD_FPU));
        constraint (et.type == NT_invtype, _("bad type for scalar"));
        constraint (x >= 64 / et.size, _("scalar index out of range"));

        switch (et.size)
          {
          case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
          case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
          case 32: abcdebits = 0x00; break;
          default: ;
          }

        abcdebits |= x << logsize;
        inst.instruction = 0xe100b10;
        do_vfp_cond_or_thumb ();
        inst.instruction |= LOW4 (dn) << 16;
        inst.instruction |= HI1 (dn) << 7;
        inst.instruction |= inst.operands[0].reg << 12;
        inst.instruction |= (abcdebits & 3) << 5;
        inst.instruction |= (abcdebits >> 2) << 21;
      }
      break;

    case NS_RRD:  /* case 7 (fmrrd).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
                  _(BAD_FPU));

      inst.instruction = 0xc500b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= LOW4 (inst.operands[2].reg);
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      break;

    case NS_FF:  /* case 8 (fcpys).  */
      do_vfp_nsyn_opcode ("fcpys");
      break;

    case NS_FI:  /* case 10 (fconsts).  */
      ldconst = "fconsts";
    encode_fconstd:
      if (is_quarter_float (inst.operands[1].imm))
        {
          inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
          do_vfp_nsyn_opcode (ldconst);
        }
      else
        first_error (_("immediate out of range"));
      break;

    case NS_RF:  /* case 12 (fmrs).  */
      do_vfp_nsyn_opcode ("fmrs");
      break;

    case NS_FR:  /* case 13 (fmsr).  */
      do_vfp_nsyn_opcode ("fmsr");
      break;

    /* The encoders for the fmrrs and fmsrr instructions expect three operands
       (one of which is a list), but we have parsed four.  Do some fiddling to
       make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
       expect.  */
    case NS_RRFF:  /* case 14 (fmrrs).  */
      constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
                  _("VFP registers must be adjacent"));
      inst.operands[2].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmrrs");
      break;

    case NS_FFRR:  /* case 15 (fmsrr).  */
      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
                  _("VFP registers must be adjacent"));
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[3];
      inst.operands[0].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmsrr");
      break;

    default:
      abort ();
    }
}
static void
do_neon_rshift_round_imm (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
  int imm = inst.operands[2].imm;

  /* imm == 0 case is encoded as VMOV for V{R}SHR.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      do_neon_mov ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
              _("immediate out of range for shift"));
  neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
                  et.size - imm);
}
13331 struct neon_type_el et
= neon_check_type (2, NS_QD
,
13332 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
13333 unsigned sizebits
= et
.size
>> 3;
13334 inst
.instruction
|= sizebits
<< 19;
13335 neon_two_same (0, et
.type
== NT_unsigned
, -1);
static void
do_neon_trn (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  inst.instruction = NEON_ENC_INTEGER (inst.instruction);
  neon_two_same (neon_quad (rs), 1, et.size);
}
static void
do_neon_zip_uzp (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  if (rs == NS_DD && et.size == 32)
    {
      /* Special case: encode as VTRN.32 <Dd>, <Dm>.  */
      inst.instruction = N_MNEM_vtrn;
      do_neon_trn ();
      return;
    }
  neon_two_same (neon_quad (rs), 1, et.size);
}
static void
do_neon_sat_abs_neg (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}
static void
do_neon_pair_long (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
  /* Unsigned is encoded in OP field (bit 7) for these instructions.  */
  inst.instruction |= (et.type == NT_unsigned) << 7;
  neon_two_same (neon_quad (rs), 1, et.size);
}
static void
do_neon_recip_est (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
  inst.instruction |= (et.type == NT_float) << 8;
  neon_two_same (neon_quad (rs), 1, et.size);
}
static void
do_neon_cls (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}
static void
do_neon_clz (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}
static void
do_neon_cnt (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_INT, N_8 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}
static void
do_neon_swp (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  neon_two_same (neon_quad (rs), 1, -1);
}
static void
do_neon_tbl_tbx (void)
{
  unsigned listlenbits;
  neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);

  if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
    {
      first_error (_("bad list length for table lookup"));
      return;
    }

  listlenbits = inst.operands[1].imm - 1;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= listlenbits << 8;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
static void
do_neon_ldm_stm (void)
{
  /* P, U and L bits are part of bitmask.  */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  unsigned offsetbits = inst.operands[1].imm * 2;

  if (inst.operands[1].issingle)
    {
      do_vfp_nsyn_ldm_stm (is_dbmode);
      return;
    }

  constraint (is_dbmode && !inst.operands[0].writeback,
              _("writeback (!) must be used for VLDMDB and VSTMDB"));

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
              _("register list must contain at least 1 and at most 16 "
                "registers"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[0].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;

  inst.instruction |= offsetbits;

  do_vfp_cond_or_thumb ();
}
static void
do_neon_ldr_str (void)
{
  int is_ldr = (inst.instruction & (1 << 20)) != 0;

  if (inst.operands[0].issingle)
    {
      if (is_ldr)
        do_vfp_nsyn_opcode ("flds");
      else
        do_vfp_nsyn_opcode ("fsts");
    }
  else
    {
      if (is_ldr)
        do_vfp_nsyn_opcode ("fldd");
      else
        do_vfp_nsyn_opcode ("fstd");
    }
}
/* "interleave" version also handles non-interleaving register VLD1/VST1
   instructions.  */

static void
do_neon_ld_st_interleave (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL,
    N_8 | N_16 | N_32 | N_64);
  unsigned alignbits = 0;
  unsigned idx;
  /* The bits in this table go:
     0: register stride of one (0) or two (1)
     1,2: register list length, minus one (1, 2, 3, 4).
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
     We use -1 for invalid entries.  */
  const int typetable[] =
    {
      0x7,  -1, 0xa,  -1, 0x6,  -1, 0x2,  -1, /* VLD1 / VST1.  */
       -1,  -1, 0x8, 0x9,  -1,  -1, 0x3,  -1, /* VLD2 / VST2.  */
       -1,  -1,  -1,  -1, 0x4, 0x5,  -1,  -1, /* VLD3 / VST3.  */
       -1,  -1,  -1,  -1,  -1,  -1, 0x0, 0x1  /* VLD4 / VST4.  */
    };
  int typebits;

  if (et.type == NT_invtype)
    return;

  if (inst.operands[1].immisalign)
    switch (inst.operands[1].imm >> 8)
      {
      case 64: alignbits = 1; break;
      case 128:
        if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
          goto bad_alignment;
        alignbits = 2;
        break;
      case 256:
        if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
          goto bad_alignment;
        alignbits = 3;
        break;
      default:
      bad_alignment:
        first_error (_("bad alignment"));
        return;
      }

  inst.instruction |= alignbits << 4;
  inst.instruction |= neon_logbits (et.size) << 6;

  /* Bits [4:6] of the immediate in a list specifier encode register stride
     (minus 1) in bit 4, and list length in bits [5:6].  We put the <n> of
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask.  Suck it out here, look
     up the right value for "type" in a table based on this value and the given
     list style, then stick it back.  */
  idx = ((inst.operands[0].imm >> 4) & 7)
        | (((inst.instruction >> 8) & 3) << 3);

  typebits = typetable[idx];

  constraint (typebits == -1, _("bad list type for instruction"));

  inst.instruction &= ~0xf00;
  inst.instruction |= typebits << 8;
}
/* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
   *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
   otherwise.  The variable arguments are a list of pairs of legal (size, align)
   values, terminated with -1.  */

static int
neon_alignment_bit (int size, int align, int *do_align, ...)
{
  va_list ap;
  int result = FAIL, thissize, thisalign;

  if (!inst.operands[1].immisalign)
    {
      *do_align = 0;
      return SUCCESS;
    }

  va_start (ap, do_align);

  do
    {
      thissize = va_arg (ap, int);
      if (thissize == -1)
        break;
      thisalign = va_arg (ap, int);

      if (size == thissize && align == thisalign)
        result = SUCCESS;
    }
  while (result != SUCCESS);

  va_end (ap);

  if (result == SUCCESS)
    *do_align = 1;
  else
    first_error (_("unsupported alignment for instruction"));

  return result;
}
static void
do_neon_ld_st_lane (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_align = 0;
  int logsize = neon_logbits (et.size);
  int align = inst.operands[1].imm >> 8;
  int n = (inst.instruction >> 8) & 3;
  int max_el = 64 / et.size;

  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
              _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
              _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
              && et.size == 8,
              _("stride of 2 unavailable when element size is 8"));

  switch (n)
    {
    case 0:  /* VLD1 / VST1.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
                                       32, 32, -1);
      if (align_good == FAIL)
        return;
      if (do_align)
        {
          unsigned alignbits = 0;
          switch (et.size)
            {
            case 16: alignbits = 0x1; break;
            case 32: alignbits = 0x3; break;
            default: ;
            }
          inst.instruction |= alignbits << 4;
        }
      break;

    case 1:  /* VLD2 / VST2.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
                                       32, 64, -1);
      if (align_good == FAIL)
        return;
      if (do_align)
        inst.instruction |= 1 << 4;
      break;

    case 2:  /* VLD3 / VST3.  */
      constraint (inst.operands[1].immisalign,
                  _("can't use alignment with this instruction"));
      break;

    case 3:  /* VLD4 / VST4.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
                                       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
        return;
      if (do_align)
        {
          unsigned alignbits = 0;
          switch (et.size)
            {
            case 8:  alignbits = 0x1; break;
            case 16: alignbits = 0x1; break;
            case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
            default: ;
            }
          inst.instruction |= alignbits << 4;
        }
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
/* Encode single n-element structure to all lanes VLD<n> instructions.  */

static void
do_neon_ld_dup (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_align = 0;

  if (et.type == NT_invtype)
    return;

  switch ((inst.instruction >> 8) & 3)
    {
    case 0:  /* VLD1.  */
      assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
                                       &do_align, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
        return;
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
        {
        case 1: break;
        case 2: inst.instruction |= 1 << 5; break;
        default: first_error (_("bad list length")); return;
        }
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1:  /* VLD2.  */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
                                       &do_align, 8, 16, 16, 32, 32, 64, -1);
      if (align_good == FAIL)
        return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
                  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
        inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2:  /* VLD3.  */
      constraint (inst.operands[1].immisalign,
                  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
                  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
        inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3:  /* VLD4.  */
      {
        int align = inst.operands[1].imm >> 8;
        align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
                                         16, 64, 32, 64, 32, 128, -1);
        if (align_good == FAIL)
          return;
        constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
                    _("bad list length"));
        if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
          inst.instruction |= 1 << 5;
        if (et.size == 32 && align == 128)
          inst.instruction |= 0x3 << 6;
        else
          inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default: ;
    }

  inst.instruction |= do_align << 4;
}
/* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
   apart from bits [11:4]).  */

static void
do_neon_ldx_stx (void)
{
  switch (NEON_LANE (inst.operands[0].imm))
    {
    case NEON_INTERLEAVE_LANES:
      inst.instruction = NEON_ENC_INTERLV (inst.instruction);
      do_neon_ld_st_interleave ();
      break;

    case NEON_ALL_LANES:
      inst.instruction = NEON_ENC_DUP (inst.instruction);
      do_neon_ld_dup ();
      break;

    default:
      inst.instruction = NEON_ENC_LANE (inst.instruction);
      do_neon_ld_st_lane ();
    }

  /* L bit comes from bit mask.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].reg << 16;

  if (inst.operands[1].postind)
    {
      int postreg = inst.operands[1].imm & 0xf;
      constraint (!inst.operands[1].immisreg,
                  _("post-index must be a register"));
      constraint (postreg == 0xd || postreg == 0xf,
                  _("bad register for post-index"));
      inst.instruction |= postreg;
    }
  else if (inst.operands[1].writeback)
    {
      inst.instruction |= 0xd;
    }
  else
    inst.instruction |= 0xf;

  if (thumb_mode)
    inst.instruction |= 0xf9000000;
  else
    inst.instruction |= 0xf4000000;
}
/* Overall per-instruction processing.  */

/* We need to be able to fix up arbitrary expressions in some statements.
   This is so that we can handle symbols that are an arbitrary distance from
   the pc.  The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
   which returns part of an address in a form which will be valid for
   a data instruction.  We do this by pushing the expression into a symbol
   in the expr_section, and creating a fix for that.  */

static void
fix_new_arm (fragS *       frag,
             int           where,
             short int     size,
             expressionS * exp,
             int           pc_rel,
             int           reloc)
{
  fixS *new_fix;

  switch (exp->X_op)
    {
    case O_constant:
    case O_symbol:
    case O_add:
    case O_subtract:
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
      break;

    default:
      new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
                         pc_rel, reloc);
      break;
    }

  /* Mark whether the fix is to a THUMB instruction, or an ARM
     instruction.  */
  new_fix->tc_fix_data = thumb_mode;
}
/* Create a frag for an instruction requiring relaxation.  */
static void
output_relax_insn (void)
{
  char * to;
  symbolS *sym;
  int offset;

  /* The size of the instruction is unknown, so tie the debug info to the
     start of the instruction.  */
  dwarf2_emit_insn (0);

  switch (inst.reloc.exp.X_op)
    {
    case O_symbol:
      sym = inst.reloc.exp.X_add_symbol;
      offset = inst.reloc.exp.X_add_number;
      break;
    case O_constant:
      sym = NULL;
      offset = inst.reloc.exp.X_add_number;
      break;
    default:
      sym = make_expr_symbol (&inst.reloc.exp);
      offset = 0;
      break;
    }
  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
                 inst.relax, sym, offset, NULL/*offset, opcode*/);
  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
}
/* Write a 32-bit thumb instruction to buf.  */
static void
put_thumb32_insn (char * buf, unsigned long insn)
{
  md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
}
static void
output_inst (const char * str)
{
  char * to = NULL;

  if (inst.error)
    {
      as_bad ("%s -- `%s'", inst.error, str);
      return;
    }
  if (inst.relax)
    {
      output_relax_insn ();
      return;
    }
  if (inst.size == 0)
    return;

  to = frag_more (inst.size);

  if (thumb_mode && (inst.size > THUMB_SIZE))
    {
      assert (inst.size == (2 * THUMB_SIZE));
      put_thumb32_insn (to, inst.instruction);
    }
  else if (inst.size > INSN_SIZE)
    {
      assert (inst.size == (2 * INSN_SIZE));
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
    }
  else
    md_number_to_chars (to, inst.instruction, inst.size);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    fix_new_arm (frag_now, to - frag_now->fr_literal,
                 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
                 inst.reloc.type);

  dwarf2_emit_insn (inst.size);
}
/* Tag values used in struct asm_opcode's tag field.  */
enum opcode_tag
{
  OT_unconditional,      /* Instruction cannot be conditionalized.
                            The ARM condition field is still 0xE.  */
  OT_unconditionalF,     /* Instruction cannot be conditionalized
                            and carries 0xF in its ARM condition field.  */
  OT_csuffix,            /* Instruction takes a conditional suffix.  */
  OT_csuffixF,           /* Some forms of the instruction take a conditional
                            suffix, others place 0xF where the condition field
                            would be.  */
  OT_cinfix3,            /* Instruction takes a conditional infix,
                            beginning at character index 3.  (In
                            unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
                            tsts, cmps, cmns, and teqs.  */
  OT_cinfix3_legacy,     /* Legacy instruction takes a conditional infix at
                            character index 3, even in unified mode.  Used for
                            legacy instructions where suffix and infix forms
                            may be ambiguous.  */
  OT_csuf_or_in3,        /* Instruction takes either a conditional
                            suffix or an infix at character index 3.  */
  OT_odd_infix_unc,      /* This is the unconditional variant of an
                            instruction that takes a conditional infix
                            at an unusual position.  In unified mode,
                            this variant will accept a suffix.  */
  OT_odd_infix_0         /* Values greater than or equal to OT_odd_infix_0
                            are the conditional variants of instructions that
                            take conditional infixes in unusual positions.
                            The infix appears at character index
                            (tag - OT_odd_infix_0).  These are not accepted
                            in unified mode.  */
};
/* Subroutine of md_assemble, responsible for looking up the primary
   opcode from the mnemonic the user wrote.  STR points to the
   beginning of the mnemonic.

   This is not simply a hash table lookup, because of conditional
   variants.  Most instructions have conditional variants, which are
   expressed with a _conditional affix_ to the mnemonic.  If we were
   to encode each conditional variant as a literal string in the opcode
   table, it would have approximately 20,000 entries.

   Most mnemonics take this affix as a suffix, and in unified syntax,
   'most' is upgraded to 'all'.  However, in the divided syntax, some
   instructions take the affix as an infix, notably the s-variants of
   the arithmetic instructions.  Of those instructions, all but six
   have the infix appear after the third character of the mnemonic.

   Accordingly, the algorithm for looking up primary opcodes given
   an identifier is:

   1. Look up the identifier in the opcode table.
      If we find a match, go to step U.

   2. Look up the last two characters of the identifier in the
      conditions table.  If we find a match, look up the first N-2
      characters of the identifier in the opcode table.  If we
      find a match, go to step CE.

   3. Look up the fourth and fifth characters of the identifier in
      the conditions table.  If we find a match, extract those
      characters from the identifier, and look up the remaining
      characters in the opcode table.  If we find a match, go
      to step CM.

   4. Fail.

   U. Examine the tag field of the opcode structure, in case this is
      one of the six instructions with its conditional infix in an
      unusual place.  If it is, the tag tells us where to find the
      infix; look it up in the conditions table and set inst.cond
      accordingly.  Otherwise, this is an unconditional instruction.
      Again set inst.cond accordingly.  Return the opcode structure.

   CE. Examine the tag field to make sure this is an instruction that
      should receive a conditional suffix.  If it is not, fail.
      Otherwise, set inst.cond from the suffix we already looked up,
      and return the opcode structure.

   CM. Examine the tag field to make sure this is an instruction that
      should receive a conditional infix after the third character.
      If it is not, fail.  Otherwise, undo the edits to the current
      line of input and proceed as for case CE.  */
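/* Illustrative walk-through of the steps above (mnemonics chosen only as
   examples, not special cases in the table): for unified-syntax "addseq",
   step 2 finds "eq" in the conditions table and "adds" in the opcode table,
   so we finish via step CE; for divided-syntax "addeqs", steps 1 and 2 fail,
   but step 3 finds "eq" at characters four and five, looks up the remaining
   "adds", and finishes via step CM.  */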
static const struct asm_opcode *
opcode_lookup (char **str)
{
  char *end, *base;
  char *affix;
  const struct asm_opcode *opcode;
  const struct asm_cond *cond;
  char save[2];
  bfd_boolean neon_supported;

  neon_supported = ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1);

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.' (in unified mode, or for Neon instructions), or end of string.  */
  for (base = end = *str; *end != '\0'; end++)
    if (*end == ' ' || ((unified_syntax || neon_supported) && *end == '.'))
      break;

  if (end == base)
    return 0;

  /* Handle a possible width suffix and/or Neon type suffix.  */
  if (end[0] == '.')
    {
      int offset = 2;

      /* The .w and .n suffixes are only valid if the unified syntax is in
         use.  */
      if (unified_syntax && end[1] == 'w')
        inst.size_req = 4;
      else if (unified_syntax && end[1] == 'n')
        inst.size_req = 2;
      else
        offset = 0;

      inst.vectype.elems = 0;

      *str = end + offset;

      if (end[offset] == '.')
        {
          /* See if we have a Neon type suffix (possible in either unified or
             non-unified ARM syntax mode).  */
          if (parse_neon_type (&inst.vectype, str) == FAIL)
            return 0;
        }
      else if (end[offset] != '\0' && end[offset] != ' ')
        return 0;
    }
  else
    *str = end;

  /* Look for unaffixed or special-case affixed mnemonic.  */
  opcode = hash_find_n (arm_ops_hsh, base, end - base);
  if (opcode)
    {
      /* step U */
      if (opcode->tag < OT_odd_infix_0)
        {
          inst.cond = COND_ALWAYS;
          return opcode;
        }

      if (unified_syntax)
        as_warn (_("conditional infixes are deprecated in unified syntax"));
      affix = base + (opcode->tag - OT_odd_infix_0);
      cond = hash_find_n (arm_cond_hsh, affix, 2);
      assert (cond);

      inst.cond = cond->value;
      return opcode;
    }

  /* Cannot have a conditional suffix on a mnemonic of less than two
     characters.  */
  if (end - base < 3)
    return 0;

  /* Look for suffixed mnemonic.  */
  affix = end - 2;
  cond = hash_find_n (arm_cond_hsh, affix, 2);
  opcode = hash_find_n (arm_ops_hsh, base, affix - base);
  if (opcode && cond)
    {
      /* step CE */
      switch (opcode->tag)
        {
        case OT_cinfix3_legacy:
          /* Ignore conditional suffixes matched on infix only mnemonics.  */
          break;

        case OT_cinfix3:
        case OT_cinfix3_deprecated:
        case OT_odd_infix_unc:
          if (!unified_syntax)
            return 0;
          /* else fall through */

        case OT_csuffix:
        case OT_csuffixF:
        case OT_csuf_or_in3:
          inst.cond = cond->value;
          return opcode;

        case OT_unconditional:
        case OT_unconditionalF:
          if (thumb_mode)
            {
              inst.cond = cond->value;
            }
          else
            {
              /* delayed diagnostic */
              inst.error = BAD_COND;
              inst.cond = COND_ALWAYS;
            }
          return opcode;

        default:
          return 0;
        }
    }

  /* Cannot have a usual-position infix on a mnemonic of less than
     six characters (five would be a suffix).  */
  if (end - base < 6)
    return 0;

  /* Look for infixed mnemonic in the usual position.  */
  affix = base + 3;
  cond = hash_find_n (arm_cond_hsh, affix, 2);
  if (!cond)
    return 0;

  memcpy (save, affix, 2);
  memmove (affix, affix + 2, (end - affix) - 2);
  opcode = hash_find_n (arm_ops_hsh, base, (end - base) - 2);
  memmove (affix + 2, affix, (end - affix) - 2);
  memcpy (affix, save, 2);

  if (opcode
      && (opcode->tag == OT_cinfix3
          || opcode->tag == OT_cinfix3_deprecated
          || opcode->tag == OT_csuf_or_in3
          || opcode->tag == OT_cinfix3_legacy))
    {
      /* step CM */
      if (unified_syntax
          && (opcode->tag == OT_cinfix3
              || opcode->tag == OT_cinfix3_deprecated))
        as_warn (_("conditional infixes are deprecated in unified syntax"));

      inst.cond = cond->value;
      return opcode;
    }

  return 0;
}
void
md_assemble (char *str)
{
  char *p = str;
  const struct asm_opcode * opcode;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  memset (&inst, '\0', sizeof (inst));
  inst.reloc.type = BFD_RELOC_UNUSED;

  opcode = opcode_lookup (&p);
  if (!opcode)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg, or a Neon .dn/.qn directive.  */
      if (!create_register_alias (str, p)
	  && !create_neon_reg_alias (str, p))
	as_bad (_("bad instruction `%s'"), str);
      return;
    }

  if (opcode->tag == OT_cinfix3_deprecated)
    as_warn (_("s suffix on comparison instruction is deprecated"));

  /* The value which unconditional instructions should have in place of the
     condition field.  */
  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;

  if (thumb_mode)
    {
      arm_feature_set variant;

      variant = cpu_variant;
      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
	ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
      /* Check that this instruction is supported for this CPU.  */
      if (!opcode->tvariant
	  || (thumb_mode == 1
	      && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
	{
	  as_bad (_("selected processor does not support `%s'"), str);
	  return;
	}
      if (inst.cond != COND_ALWAYS && !unified_syntax
	  && opcode->tencode != do_t_branch)
	{
	  as_bad (_("Thumb does not support conditional execution"));
	  return;
	}

      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2) && !inst.size_req)
	{
	  /* Implicit require narrow instructions on Thumb-1.  This avoids
	     relaxation accidentally introducing Thumb-2 instructions.  */
	  if (opcode->tencode != do_t_blx && opcode->tencode != do_t_branch23
	      && !ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr))
	    inst.size_req = 2;
	}

      /* Check conditional suffixes.  */
      if (current_it_mask)
	{
	  int cond;
	  cond = current_cc ^ ((current_it_mask >> 4) & 1) ^ 1;
	  current_it_mask <<= 1;
	  current_it_mask &= 0x1f;
	  /* The BKPT instruction is unconditional even in an IT block.  */
	  if (!inst.error
	      && cond != inst.cond && opcode->tencode != do_t_bkpt)
	    {
	      as_bad (_("incorrect condition in IT block"));
	      return;
	    }
	}
      else if (inst.cond != COND_ALWAYS && opcode->tencode != do_t_branch)
	{
	  as_bad (_("thumb conditional instruction not in IT block"));
	  return;
	}

      mapping_state (MAP_THUMB);
      inst.instruction = opcode->tvalue;

      if (!parse_operands (p, opcode->operands))
	opcode->tencode ();

      /* Clear current_it_mask at the end of an IT block.  */
      if (current_it_mask == 0x10)
	current_it_mask = 0;

      if (!(inst.error || inst.relax))
	{
	  assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
	  inst.size = (inst.instruction > 0xffff ? 4 : 2);
	  if (inst.size_req && inst.size_req != inst.size)
	    {
	      as_bad (_("cannot honor width suffix -- `%s'"), str);
	      return;
	    }
	}

      /* Something has gone badly wrong if we try to relax a fixed size
	 instruction.  */
      assert (inst.size_req == 0 || !inst.relax);

      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
			      *opcode->tvariant);
      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
	 set those bits when Thumb-2 32-bit instructions are seen.  ie.
	 anything other than bl/blx and v6-M instructions.
	 This is overly pessimistic for relaxable instructions.  */
      if (((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
	   || inst.relax)
	  && !ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr))
	ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				arm_ext_v6t2);
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    {
      bfd_boolean is_bx;

      /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
      is_bx = (opcode->aencode == do_bx);

      /* Check that this instruction is supported for this CPU.  */
      if (!(is_bx && fix_v4bx)
	  && !(opcode->avariant &&
	       ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
	{
	  as_bad (_("selected processor does not support `%s'"), str);
	  return;
	}
      if (inst.size_req)
	{
	  as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
	  return;
	}

      mapping_state (MAP_ARM);
      inst.instruction = opcode->avalue;
      if (opcode->tag == OT_unconditionalF)
	inst.instruction |= 0xF << 28;
      else
	inst.instruction |= inst.cond << 28;
      inst.size = INSN_SIZE;
      if (!parse_operands (p, opcode->operands))
	opcode->aencode ();
      /* Arm mode bx is marked as both v4T and v5 because it's still required
	 on a hypothetical non-thumb v5 core.  */
      if (is_bx)
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
      else
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				*opcode->avariant);
    }
  else
    as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
	      "-- `%s'"), str);
/* Various frobbings of labels and their addresses.  */

void
arm_start_line_hook (void)
{
  last_label_seen = NULL;
}

void
arm_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  /* Note - do not allow local symbols (.Lxxx) to be labelled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.  Also, these labels can be used
     as part of a computed goto or switch statement.  eg gcc
     can generate code that looks like this:

		ldr  r2, [pc, .Laaa]
		lsl  r3, r3, #2
		ldr  r2, [r2, r3]
		mov  pc, r2

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
	 bit of that address should be set.  This will allow
	 interworking between Arm and Thumb functions to work
	 correctly.  */

      THUMB_SET_FUNC (sym, 1);

      label_is_thumb_function_name = FALSE;
    }

  dwarf2_emit_label (sym);
}

int
arm_data_in_code (void)
{
  if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
    {
      *input_line_pointer = '/';
      input_line_pointer += 5;
      *input_line_pointer = 0;
      return 1;
    }

  return 0;
}

char *
arm_canonicalize_symbol_name (char * name)
{
  int len;

  if (thumb_mode && (len = strlen (name)) > 5
      && streq (name + len - 5, "/data"))
    *(name + len - 5) = 0;

  return name;
}
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.  Some registers
   also have mixed-case names.  */

#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
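/* Illustrative note (not an entry in the table below): REGSET(r, RN)
   expands via REGNUM and REGDEF to the sixteen records
     { "r0", 0, REG_TYPE_RN, TRUE, 0 } ... { "r15", 15, REG_TYPE_RN, TRUE, 0 },
   while REGSET2 numbers its entries 0, 2, 4, ... 30, so each Neon quad
   register maps onto the even double-precision register it overlays.  */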
static const struct reg_entry reg_names[] =
{
  /* ARM integer registers.  */
  REGSET(r, RN), REGSET(R, RN),

  /* ATPCS synonyms.  */
  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),

  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),

  /* Well-known aliases.  */
  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),

  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),

  /* Coprocessor numbers.  */
  REGSET(p, CP), REGSET(P, CP),

  /* Coprocessor register numbers.  The "cr" variants are for backward
     compatibility.  */
  REGSET(c, CN), REGSET(C, CN),
  REGSET(cr, CN), REGSET(CR, CN),

  /* FPA registers.  */
  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),

  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),

  /* VFP SP registers.  */
  REGSET(s,VFS),  REGSET(S,VFS),
  REGSETH(s,VFS), REGSETH(S,VFS),

  /* VFP DP Registers.  */
  REGSET(d,VFD),  REGSET(D,VFD),
  /* Extra Neon DP registers.  */
  REGSETH(d,VFD), REGSETH(D,VFD),

  /* Neon QP registers.  */
  REGSET2(q,NQ),  REGSET2(Q,NQ),

  /* VFP control registers.  */
  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
  REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
  REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
  REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
  REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),

  /* Maverick DSP coprocessor registers.  */
  REGSET(mvf,MVF),  REGSET(mvd,MVD),  REGSET(mvfx,MVFX),  REGSET(mvdx,MVDX),
  REGSET(MVF,MVF),  REGSET(MVD,MVD),  REGSET(MVFX,MVFX),  REGSET(MVDX,MVDX),

  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
  REGDEF(dspsc,0,DSPSC),

  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
  REGDEF(DSPSC,0,DSPSC),

  /* iWMMXt data registers - p0, c0-15.  */
  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),

  /* iWMMXt control registers - p1, c0-3.  */
  REGDEF(wcid,  0,MMXWC),  REGDEF(wCID,  0,MMXWC),  REGDEF(WCID,  0,MMXWC),
  REGDEF(wcon,  1,MMXWC),  REGDEF(wCon,  1,MMXWC),  REGDEF(WCON,  1,MMXWC),
  REGDEF(wcssf, 2,MMXWC),  REGDEF(wCSSF, 2,MMXWC),  REGDEF(WCSSF, 2,MMXWC),
  REGDEF(wcasf, 3,MMXWC),  REGDEF(wCASF, 3,MMXWC),  REGDEF(WCASF, 3,MMXWC),

  /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
  REGDEF(wcgr0, 8,MMXWCG),  REGDEF(wCGR0, 8,MMXWCG),  REGDEF(WCGR0, 8,MMXWCG),
  REGDEF(wcgr1, 9,MMXWCG),  REGDEF(wCGR1, 9,MMXWCG),  REGDEF(WCGR1, 9,MMXWCG),
  REGDEF(wcgr2,10,MMXWCG),  REGDEF(wCGR2,10,MMXWCG),  REGDEF(WCGR2,10,MMXWCG),
  REGDEF(wcgr3,11,MMXWCG),  REGDEF(wCGR3,11,MMXWCG),  REGDEF(WCGR3,11,MMXWCG),

  /* XScale accumulator registers.  */
  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
};
/* Table of all PSR suffixes.  Bare "CPSR" and "SPSR" are handled
   within psr_required_here.  */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all",  PSR_c | PSR_f},

  /* Individual flags.  */
  {"f",    PSR_f},
  {"c",    PSR_c},
  {"x",    PSR_x},
  {"s",    PSR_s},
  /* Combinations of flags.  */
  {"fs",   PSR_f | PSR_s},
  {"fx",   PSR_f | PSR_x},
  {"fc",   PSR_f | PSR_c},
  {"sf",   PSR_s | PSR_f},
  {"sx",   PSR_s | PSR_x},
  {"sc",   PSR_s | PSR_c},
  {"xf",   PSR_x | PSR_f},
  {"xs",   PSR_x | PSR_s},
  {"xc",   PSR_x | PSR_c},
  {"cf",   PSR_c | PSR_f},
  {"cs",   PSR_c | PSR_s},
  {"cx",   PSR_c | PSR_x},
  {"fsx",  PSR_f | PSR_s | PSR_x},
  {"fsc",  PSR_f | PSR_s | PSR_c},
  {"fxs",  PSR_f | PSR_x | PSR_s},
  {"fxc",  PSR_f | PSR_x | PSR_c},
  {"fcs",  PSR_f | PSR_c | PSR_s},
  {"fcx",  PSR_f | PSR_c | PSR_x},
  {"sfx",  PSR_s | PSR_f | PSR_x},
  {"sfc",  PSR_s | PSR_f | PSR_c},
  {"sxf",  PSR_s | PSR_x | PSR_f},
  {"sxc",  PSR_s | PSR_x | PSR_c},
  {"scf",  PSR_s | PSR_c | PSR_f},
  {"scx",  PSR_s | PSR_c | PSR_x},
  {"xfs",  PSR_x | PSR_f | PSR_s},
  {"xfc",  PSR_x | PSR_f | PSR_c},
  {"xsf",  PSR_x | PSR_s | PSR_f},
  {"xsc",  PSR_x | PSR_s | PSR_c},
  {"xcf",  PSR_x | PSR_c | PSR_f},
  {"xcs",  PSR_x | PSR_c | PSR_s},
  {"cfs",  PSR_c | PSR_f | PSR_s},
  {"cfx",  PSR_c | PSR_f | PSR_x},
  {"csf",  PSR_c | PSR_s | PSR_f},
  {"csx",  PSR_c | PSR_s | PSR_x},
  {"cxf",  PSR_c | PSR_x | PSR_f},
  {"cxs",  PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};
/* Table of V7M psr names.  */
static const struct asm_psr v7m_psrs[] =
{
  {"apsr", 0 }, {"APSR", 0 },
  {"iapsr", 1 }, {"IAPSR", 1 },
  {"eapsr", 2 }, {"EAPSR", 2 },
  {"psr", 3 }, {"PSR", 3 },
  {"xpsr", 3 }, {"XPSR", 3 }, {"xPSR", 3 },
  {"ipsr", 5 }, {"IPSR", 5 },
  {"epsr", 6 }, {"EPSR", 6 },
  {"iepsr", 7 }, {"IEPSR", 7 },
  {"msp", 8 }, {"MSP", 8 },
  {"psp", 9 }, {"PSP", 9 },
  {"primask", 16}, {"PRIMASK", 16},
  {"basepri", 17}, {"BASEPRI", 17},
  {"basepri_max", 18}, {"BASEPRI_MAX", 18},
  {"faultmask", 19}, {"FAULTMASK", 19},
  {"control", 20}, {"CONTROL", 20}
};
/* Table of all shift-in-operand names.  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL },  { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL },  { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR },  { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR },  { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR },  { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX },  { "RRX", SHIFT_RRX }
};
/* Table of all explicit relocation names.  */
static struct reloc_entry reloc_names[] =
{
  { "got",     BFD_RELOC_ARM_GOT32   }, { "GOT",     BFD_RELOC_ARM_GOT32   },
  { "gotoff",  BFD_RELOC_ARM_GOTOFF  }, { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
  { "plt",     BFD_RELOC_ARM_PLT32   }, { "PLT",     BFD_RELOC_ARM_PLT32   },
  { "target1", BFD_RELOC_ARM_TARGET1 }, { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 }, { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel",   BFD_RELOC_ARM_SBREL32 }, { "SBREL",   BFD_RELOC_ARM_SBREL32 },
  { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},  { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
  { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32},  { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
  { "tpoff",   BFD_RELOC_ARM_TLS_LE32},  { "TPOFF",   BFD_RELOC_ARM_TLS_LE32}
};
/* Table of all conditional affixes.  0xF is not defined as a condition code.  */
static const struct asm_cond conds[] =
{
  {"eq", 0x0},
  {"ne", 0x1},
  {"cs", 0x2}, {"hs", 0x2},
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}
};

static struct asm_barrier_opt barrier_opt_names[] =
/* Table of ARM-format instructions.  */

/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.  */
#define OPS0()            { OP_stop, }
#define OPS1(a)           { OP_##a, }
#define OPS2(a,b)         { OP_##a,OP_##b, }
#define OPS3(a,b,c)       { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)     { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)   { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
       TxCE(mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
       TxCE(mnem, aop, T_MNEM_##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { #mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
       TxC3(mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
       TxC3w(mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
       TxC3(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
       TxC3w(mnem, aop, T_MNEM_##top, nops, ops, ae, te)

/* Mnemonic with a conditional infix in an unusual place.  Each and every variant has to
   appear in the condition table.  */
#define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te) \
  { #m1 #m2 #m3, OPS##nops ops, sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
    0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }

#define TxCM(m1, m2, op, top, nops, ops, ae, te) \
  TxCM_(m1,   , m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, eq, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, ne, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, cs, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, hs, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, cc, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, ul, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, lo, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, mi, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, pl, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, vs, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, vc, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, hi, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, ls, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, ge, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, lt, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, gt, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, le, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, al, m2, op, top, nops, ops, ae, te)

#define TCM(m1,m2, aop, top, nops, ops, ae, te) \
       TxCM(m1,m2, aop, 0x##top, nops, ops, ae, te)
#define tCM(m1,m2, aop, top, nops, ops, ae, te) \
       TxCM(m1,m2, aop, T_MNEM_##top, nops, ops, ae, te)

/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te) \
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* ARM-only variants of all the above.  */
#define CE(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define C3(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  */
#define cCE(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

#define xCM_(m1, m2, m3, op, nops, ops, ae) \
  { #m1 #m2 #m3, OPS##nops ops, \
    sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define CM(m1, m2, op, nops, ops, ae) \
  xCM_(m1,   , m2, op, nops, ops, ae), \
  xCM_(m1, eq, m2, op, nops, ops, ae), \
  xCM_(m1, ne, m2, op, nops, ops, ae), \
  xCM_(m1, cs, m2, op, nops, ops, ae), \
  xCM_(m1, hs, m2, op, nops, ops, ae), \
  xCM_(m1, cc, m2, op, nops, ops, ae), \
  xCM_(m1, ul, m2, op, nops, ops, ae), \
  xCM_(m1, lo, m2, op, nops, ops, ae), \
  xCM_(m1, mi, m2, op, nops, ops, ae), \
  xCM_(m1, pl, m2, op, nops, ops, ae), \
  xCM_(m1, vs, m2, op, nops, ops, ae), \
  xCM_(m1, vc, m2, op, nops, ops, ae), \
  xCM_(m1, hi, m2, op, nops, ops, ae), \
  xCM_(m1, ls, m2, op, nops, ops, ae), \
  xCM_(m1, ge, m2, op, nops, ops, ae), \
  xCM_(m1, lt, m2, op, nops, ops, ae), \
  xCM_(m1, gt, m2, op, nops, ops, ae), \
  xCM_(m1, le, m2, op, nops, ops, ae), \
  xCM_(m1, al, m2, op, nops, ops, ae)

#define UE(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

#define UF(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

/* Neon data-processing. ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM_##op, N_MNEM_##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   types.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag) \
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
    THUMB_VARIANT, do_##enc, do_##enc }

#define NCE(mnem, op, nops, ops, enc) \
  NCE_tag(mnem, op, nops, ops, enc, OT_csuffix)

#define NCEF(mnem, op, nops, ops, enc) \
  NCE_tag(mnem, op, nops, ops, enc, OT_csuffixF)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag) \
  { #mnem, OPS##nops ops, tag, N_MNEM_##op, N_MNEM_##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define nCE(mnem, op, nops, ops, enc) \
  nCE_tag(mnem, op, nops, ops, enc, OT_csuffix)

#define nCEF(mnem, op, nops, ops, enc) \
  nCE_tag(mnem, op, nops, ops, enc, OT_csuffixF)

/* Thumb-only, unconditional.  */
#define UT(mnem, op, nops, ops, te) TUE(mnem, 0, op, nops, ops, 0, te)
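/* Expansion example (added for readability, not a new table entry): with
   the ARM_VARIANT/THUMB_VARIANT settings in force at its line,
     TCE(clz, 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz)
   becomes roughly
     { "clz", { OP_RRnpc, OP_RRnpc, }, OT_csuffix, 0x16f0f10, 0xfab0f080,
       ARM_VARIANT, THUMB_VARIANT, do_rd_rm, do_t_clz },
   i.e. a single asm_opcode record carrying both the ARM and the Thumb
   encodings together with their encoder functions.  */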
static const struct asm_opcode insns[] =
{
#define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions.  */
#define THUMB_VARIANT &arm_ext_v4t
 tCE(and, 0000000, and, 3, (RR, oRR, SH), arit, t_arit3c),
 tC3(ands, 0100000, ands, 3, (RR, oRR, SH), arit, t_arit3c),
 tCE(eor, 0200000, eor, 3, (RR, oRR, SH), arit, t_arit3c),
 tC3(eors, 0300000, eors, 3, (RR, oRR, SH), arit, t_arit3c),
 tCE(sub, 0400000, sub, 3, (RR, oRR, SH), arit, t_add_sub),
 tC3(subs, 0500000, subs, 3, (RR, oRR, SH), arit, t_add_sub),
 tCE(add, 0800000, add, 3, (RR, oRR, SHG), arit, t_add_sub),
 tC3(adds, 0900000, adds, 3, (RR, oRR, SHG), arit, t_add_sub),
 tCE(adc, 0a00000, adc, 3, (RR, oRR, SH), arit, t_arit3c),
 tC3(adcs, 0b00000, adcs, 3, (RR, oRR, SH), arit, t_arit3c),
 tCE(sbc, 0c00000, sbc, 3, (RR, oRR, SH), arit, t_arit3),
 tC3(sbcs, 0d00000, sbcs, 3, (RR, oRR, SH), arit, t_arit3),
 tCE(orr, 1800000, orr, 3, (RR, oRR, SH), arit, t_arit3c),
 tC3(orrs, 1900000, orrs, 3, (RR, oRR, SH), arit, t_arit3c),
 tCE(bic, 1c00000, bic, 3, (RR, oRR, SH), arit, t_arit3),
 tC3(bics, 1d00000, bics, 3, (RR, oRR, SH), arit, t_arit3),

 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
    for setting PSR flag bits.  They are obsolete in V6 and do not
    have Thumb equivalents. */
 tCE(tst, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst),
 tC3w(tsts, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst),
 CL(tstp, 110f000, 2, (RR, SH), cmp),
 tCE(cmp, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp),
 tC3w(cmps, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp),
 CL(cmpp, 150f000, 2, (RR, SH), cmp),
 tCE(cmn, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst),
 tC3w(cmns, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst),
 CL(cmnp, 170f000, 2, (RR, SH), cmp),

 tCE(mov, 1a00000, mov, 2, (RR, SH), mov, t_mov_cmp),
 tC3(movs, 1b00000, movs, 2, (RR, SH), mov, t_mov_cmp),
 tCE(mvn, 1e00000, mvn, 2, (RR, SH), mov, t_mvn_tst),
 tC3(mvns, 1f00000, mvns, 2, (RR, SH), mov, t_mvn_tst),

 tCE(ldr, 4100000, ldr, 2, (RR, ADDRGLDR), ldst, t_ldst),
 tC3(ldrb, 4500000, ldrb, 2, (RR, ADDRGLDR), ldst, t_ldst),
 tCE(str, 4000000, str, 2, (RR, ADDRGLDR), ldst, t_ldst),
 tC3(strb, 4400000, strb, 2, (RR, ADDRGLDR), ldst, t_ldst),

 tCE(stm, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
 tC3(stmia, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
 tC3(stmea, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
 tCE(ldm, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
 tC3(ldmia, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
 tC3(ldmfd, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),

 TCE(swi, f000000, df00, 1, (EXPi), swi, t_swi),
 TCE(svc, f000000, df00, 1, (EXPi), swi, t_swi),
 tCE(b, a000000, b, 1, (EXPr), branch, t_branch),
 TCE(bl, b000000, f000f800, 1, (EXPr), bl, t_branch23),

 tCE(adr, 28f0000, adr, 2, (RR, EXP), adr, t_adr),
 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
 tCE(nop, 1a00000, nop, 1, (oI255c), nop, t_nop),

 /* Thumb-compatibility pseudo ops. */
 tCE(lsl, 1a00000, lsl, 3, (RR, oRR, SH), shift, t_shift),
 tC3(lsls, 1b00000, lsls, 3, (RR, oRR, SH), shift, t_shift),
 tCE(lsr, 1a00020, lsr, 3, (RR, oRR, SH), shift, t_shift),
 tC3(lsrs, 1b00020, lsrs, 3, (RR, oRR, SH), shift, t_shift),
 tCE(asr, 1a00040, asr, 3, (RR, oRR, SH), shift, t_shift),
 tC3(asrs, 1b00040, asrs, 3, (RR, oRR, SH), shift, t_shift),
 tCE(ror, 1a00060, ror, 3, (RR, oRR, SH), shift, t_shift),
 tC3(rors, 1b00060, rors, 3, (RR, oRR, SH), shift, t_shift),
 tCE(neg, 2600000, neg, 2, (RR, RR), rd_rn, t_neg),
 tC3(negs, 2700000, negs, 2, (RR, RR), rd_rn, t_neg),
 tCE(push, 92d0000, push, 1, (REGLST), push_pop, t_push_pop),
 tCE(pop, 8bd0000, pop, 1, (REGLST), push_pop, t_push_pop),

 /* These may simplify to neg. */
 TCE(rsb, 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
 TC3(rsbs, 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),

#undef THUMB_VARIANT
#define THUMB_VARIANT &arm_ext_v6
 TCE(cpy, 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),

 /* V1 instructions with no Thumb analogue prior to V6T2. */
#undef THUMB_VARIANT
#define THUMB_VARIANT &arm_ext_v6t2
 TCE(teq, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
 TC3w(teqs, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
 CL(teqp, 130f000, 2, (RR, SH), cmp),

 TC3(ldrt, 4300000, f8500e00, 2, (RR, ADDR), ldstt, t_ldstt),
 TC3(ldrbt, 4700000, f8100e00, 2, (RR, ADDR), ldstt, t_ldstt),
 TC3(strt, 4200000, f8400e00, 2, (RR, ADDR), ldstt, t_ldstt),
 TC3(strbt, 4600000, f8000e00, 2, (RR, ADDR), ldstt, t_ldstt),

 TC3(stmdb, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
 TC3(stmfd, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),

 TC3(ldmdb, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
 TC3(ldmea, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),

 /* V1 instructions with no Thumb analogue at all. */
 CE(rsc, 0e00000, 3, (RR, oRR, SH), arit),
 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),

 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
15025 #define ARM_VARIANT &arm_ext_v2 /* ARM 2 - multiplies. */
15026 #undef THUMB_VARIANT
15027 #define THUMB_VARIANT &arm_ext_v4t
15028 tCE(mul
, 0000090, mul
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
15029 tC3(muls
, 0100090, muls
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
15031 #undef THUMB_VARIANT
15032 #define THUMB_VARIANT &arm_ext_v6t2
15033 TCE(mla
, 0200090, fb000000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
15034 C3(mlas
, 0300090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
),
15036 /* Generic coprocessor instructions. */
15037 TCE(cdp
, e000000
, ee000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
15038 TCE(ldc
, c100000
, ec100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
15039 TC3(ldcl
, c500000
, ec500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
15040 TCE(stc
, c000000
, ec000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
15041 TC3(stcl
, c400000
, ec400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
15042 TCE(mcr
, e000010
, ee000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
15043 TCE(mrc
, e100010
, ee100010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
15046 #define ARM_VARIANT &arm_ext_v2s /* ARM 3 - swp instructions. */
15047 CE(swp
, 1000090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
15048 C3(swpb
, 1400090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
15051 #define ARM_VARIANT &arm_ext_v3 /* ARM 6 Status register instructions. */
15052 #undef THUMB_VARIANT
15053 #define THUMB_VARIANT &arm_ext_msr
15054 TCE(mrs
, 10f0000
, f3ef8000
, 2, (APSR_RR
, RVC_PSR
), mrs
, t_mrs
),
15055 TCE(msr
, 120f000
, f3808000
, 2, (RVC_PSR
, RR_EXi
), msr
, t_msr
),
15058 #define ARM_VARIANT &arm_ext_v3m /* ARM 7M long multiplies. */
15059 #undef THUMB_VARIANT
15060 #define THUMB_VARIANT &arm_ext_v6t2
15061 TCE(smull
, 0c00090
, fb800000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
15062 CM(smull
,s
, 0d00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
15063 TCE(umull
, 0800090, fba00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
15064 CM(umull
,s
, 0900090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
15065 TCE(smlal
, 0e00090
, fbc00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
15066 CM(smlal
,s
, 0f00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
15067 TCE(umlal
, 0a00090
, fbe00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
15068 CM(umlal
,s
, 0b00090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
15071 #define ARM_VARIANT &arm_ext_v4 /* ARM Architecture 4. */
15072 #undef THUMB_VARIANT
15073 #define THUMB_VARIANT &arm_ext_v4t
15074 tC3(ldrh
, 01000b0
, ldrh
, 2, (RR
, ADDRGLDRS
), ldstv4
, t_ldst
),
15075 tC3(strh
, 00000b0
, strh
, 2, (RR
, ADDRGLDRS
), ldstv4
, t_ldst
),
15076 tC3(ldrsh
, 01000f0
, ldrsh
, 2, (RR
, ADDRGLDRS
), ldstv4
, t_ldst
),
15077 tC3(ldrsb
, 01000d0
, ldrsb
, 2, (RR
, ADDRGLDRS
), ldstv4
, t_ldst
),
15078 tCM(ld
,sh
, 01000f0
, ldrsh
, 2, (RR
, ADDRGLDRS
), ldstv4
, t_ldst
),
15079 tCM(ld
,sb
, 01000d0
, ldrsb
, 2, (RR
, ADDRGLDRS
), ldstv4
, t_ldst
),
15082 #define ARM_VARIANT &arm_ext_v4t_5
15083 /* ARM Architecture 4T. */
15084 /* Note: bx (and blx) are required on V5, even if the processor does
15085 not support Thumb. */
15086 TCE(bx
, 12fff10
, 4700, 1, (RR
), bx
, t_bx
),
15089 #define ARM_VARIANT &arm_ext_v5 /* ARM Architecture 5T. */
15090 #undef THUMB_VARIANT
15091 #define THUMB_VARIANT &arm_ext_v5t
15092 /* Note: blx has 2 variants; the .value coded here is for
15093 BLX(2). Only this variant has conditional execution. */
15094 TCE(blx
, 12fff30
, 4780, 1, (RR_EXr
), blx
, t_blx
),
15095 TUE(bkpt
, 1200070, be00
, 1, (oIffffb
), bkpt
, t_bkpt
),
15097 #undef THUMB_VARIANT
15098 #define THUMB_VARIANT &arm_ext_v6t2
15099 TCE(clz
, 16f0f10
, fab0f080
, 2, (RRnpc
, RRnpc
), rd_rm
, t_clz
),
15100 TUF(ldc2
, c100000
, fc100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
15101 TUF(ldc2l
, c500000
, fc500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
15102 TUF(stc2
, c000000
, fc000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
15103 TUF(stc2l
, c400000
, fc400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
15104 TUF(cdp2
, e000000
, fe000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
15105 TUF(mcr2
, e000010
, fe000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
15106 TUF(mrc2
, e100010
, fe100010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
15109 #define ARM_VARIANT &arm_ext_v5exp /* ARM Architecture 5TExP. */
15110 TCE(smlabb
, 1000080, fb100000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
15111 TCE(smlatb
, 10000a0
, fb100020
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
15112 TCE(smlabt
, 10000c0
, fb100010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
15113 TCE(smlatt
, 10000e0
, fb100030
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
15115 TCE(smlawb
, 1200080, fb300000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
15116 TCE(smlawt
, 12000c0
, fb300010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
15118 TCE(smlalbb
, 1400080, fbc00080
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
15119 TCE(smlaltb
, 14000a0
, fbc000a0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
15120 TCE(smlalbt
, 14000c0
, fbc00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
15121 TCE(smlaltt
, 14000e0
, fbc000b0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
15123 TCE(smulbb
, 1600080, fb10f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
15124 TCE(smultb
, 16000a0
, fb10f020
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
15125 TCE(smulbt
, 16000c0
, fb10f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
15126 TCE(smultt
, 16000e0
, fb10f030
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
15128 TCE(smulwb
, 12000a0
, fb30f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
15129 TCE(smulwt
, 12000e0
, fb30f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
15131 TCE(qadd
, 1000050, fa80f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, rd_rm_rn
),
15132 TCE(qdadd
, 1400050, fa80f090
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, rd_rm_rn
),
15133 TCE(qsub
, 1200050, fa80f0a0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, rd_rm_rn
),
15134 TCE(qdsub
, 1600050, fa80f0b0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, rd_rm_rn
),
15137 #define ARM_VARIANT &arm_ext_v5e /* ARM Architecture 5TE. */
15138 TUF(pld
, 450f000
, f810f000
, 1, (ADDR
), pld
, t_pld
),
15139 TC3(ldrd
, 00000d0
, e8500000
, 3, (RRnpc
, oRRnpc
, ADDRGLDRS
), ldrd
, t_ldstd
),
15140 TC3(strd
, 00000f0
, e8400000
, 3, (RRnpc
, oRRnpc
, ADDRGLDRS
), ldrd
, t_ldstd
),
15142 TCE(mcrr
, c400000
, ec400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
15143 TCE(mrrc
, c500000
, ec500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
15146 #define ARM_VARIANT &arm_ext_v5j /* ARM Architecture 5TEJ. */
15147 TCE(bxj
, 12fff20
, f3c08f00
, 1, (RR
), bxj
, t_bxj
),
15150 #define ARM_VARIANT &arm_ext_v6 /* ARM V6. */
15151 #undef THUMB_VARIANT
15152 #define THUMB_VARIANT &arm_ext_v6
15153 TUF(cpsie
, 1080000, b660
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
15154 TUF(cpsid
, 10c0000
, b670
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
15155 tCE(rev
, 6bf0f30
, rev
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
15156 tCE(rev16
, 6bf0fb0
, rev16
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
15157 tCE(revsh
, 6ff0fb0
, revsh
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
15158 tCE(sxth
, 6bf0070
, sxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
15159 tCE(uxth
, 6ff0070
, uxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
15160 tCE(sxtb
, 6af0070
, sxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
15161 tCE(uxtb
, 6ef0070
, uxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
15162 TUF(setend
, 1010000, b650
, 1, (ENDI
), setend
, t_setend
),
15164 #undef THUMB_VARIANT
15165 #define THUMB_VARIANT &arm_ext_v6t2
15166 TCE(ldrex
, 1900f9f
, e8500f00
, 2, (RRnpc
, ADDR
), ldrex
, t_ldrex
),
15167 TCE(strex
, 1800f90
, e8400000
, 3, (RRnpc
, RRnpc
, ADDR
), strex
, t_strex
),
15168 TUF(mcrr2
, c400000
, fc400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
15169 TUF(mrrc2
, c500000
, fc500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
15171 TCE(ssat
, 6a00010
, f3000000
, 4, (RRnpc
, I32
, RRnpc
, oSHllar
),ssat
, t_ssat
),
15172 TCE(usat
, 6e00010
, f3800000
, 4, (RRnpc
, I31
, RRnpc
, oSHllar
),usat
, t_usat
),
15174 /* ARM V6 not included in V7M (eg. integer SIMD). */
15175 #undef THUMB_VARIANT
15176 #define THUMB_VARIANT &arm_ext_v6_notm
15177 TUF(cps
, 1020000, f3af8100
, 1, (I31b
), imm0
, t_cps
),
15178 TCE(pkhbt
, 6800010, eac00000
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHll
), pkhbt
, t_pkhbt
),
15179 TCE(pkhtb
, 6800050, eac00020
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHar
), pkhtb
, t_pkhtb
),
15180 TCE(qadd16
, 6200f10
, fa90f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15181 TCE(qadd8
, 6200f90
, fa80f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15182 TCE(qaddsubx
, 6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15183 TCE(qsub16
, 6200f70
, fad0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15184 TCE(qsub8
, 6200ff0
, fac0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15185 TCE(qsubaddx
, 6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15186 TCE(sadd16
, 6100f10
, fa90f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15187 TCE(sadd8
, 6100f90
, fa80f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15188 TCE(saddsubx
, 6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15189 TCE(shadd16
, 6300f10
, fa90f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15190 TCE(shadd8
, 6300f90
, fa80f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15191 TCE(shaddsubx
, 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15192 TCE(shsub16
, 6300f70
, fad0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15193 TCE(shsub8
, 6300ff0
, fac0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15194 TCE(shsubaddx
, 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15195 TCE(ssub16
, 6100f70
, fad0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15196 TCE(ssub8
, 6100ff0
, fac0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15197 TCE(ssubaddx
, 6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15198 TCE(uadd16
, 6500f10
, fa90f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15199 TCE(uadd8
, 6500f90
, fa80f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15200 TCE(uaddsubx
, 6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15201 TCE(uhadd16
, 6700f10
, fa90f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15202 TCE(uhadd8
, 6700f90
, fa80f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15203 TCE(uhaddsubx
, 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15204 TCE(uhsub16
, 6700f70
, fad0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15205 TCE(uhsub8
, 6700ff0
, fac0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15206 TCE(uhsubaddx
, 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15207 TCE(uqadd16
, 6600f10
, fa90f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15208 TCE(uqadd8
, 6600f90
, fa80f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15209 TCE(uqaddsubx
, 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15210 TCE(uqsub16
, 6600f70
, fad0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15211 TCE(uqsub8
, 6600ff0
, fac0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15212 TCE(uqsubaddx
, 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15213 TCE(usub16
, 6500f70
, fad0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15214 TCE(usub8
, 6500ff0
, fac0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15215 TCE(usubaddx
, 6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15216 TUF(rfeia
, 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
15217 UF(rfeib
, 9900a00
, 1, (RRw
), rfe
),
15218 UF(rfeda
, 8100a00
, 1, (RRw
), rfe
),
15219 TUF(rfedb
, 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
15220 TUF(rfefd
, 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
15221 UF(rfefa
, 9900a00
, 1, (RRw
), rfe
),
15222 UF(rfeea
, 8100a00
, 1, (RRw
), rfe
),
15223 TUF(rfeed
, 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
15224 TCE(sxtah
, 6b00070
, fa00f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
15225 TCE(sxtab16
, 6800070, fa20f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
15226 TCE(sxtab
, 6a00070
, fa40f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
15227 TCE(sxtb16
, 68f0070
, fa2ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
15228 TCE(uxtah
, 6f00070
, fa10f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
15229 TCE(uxtab16
, 6c00070
, fa30f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
15230 TCE(uxtab
, 6e00070
, fa50f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
15231 TCE(uxtb16
, 6cf0070
, fa3ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
15232 TCE(sel
, 6800fb0
, faa0f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
15233 TCE(smlad
, 7000010, fb200000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
15234 TCE(smladx
, 7000030, fb200010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
15235 TCE(smlald
, 7400010, fbc000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
15236 TCE(smlaldx
, 7400030, fbc000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
15237 TCE(smlsd
, 7000050, fb400000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
15238 TCE(smlsdx
, 7000070, fb400010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
15239 TCE(smlsld
, 7400050, fbd000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
15240 TCE(smlsldx
, 7400070, fbd000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
15241 TCE(smmla
, 7500010, fb500000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
15242 TCE(smmlar
, 7500030, fb500010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
15243 TCE(smmls
, 75000d0
, fb600000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
15244 TCE(smmlsr
, 75000f0
, fb600010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
15245 TCE(smmul
, 750f010
, fb50f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
15246 TCE(smmulr
, 750f030
, fb50f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
15247 TCE(smuad
, 700f010
, fb20f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
15248 TCE(smuadx
, 700f030
, fb20f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
15249 TCE(smusd
, 700f050
, fb40f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
15250 TCE(smusdx
, 700f070
, fb40f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
15251 TUF(srsia
, 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
15252 UF(srsib
, 9c00500
, 2, (oRRw
, I31w
), srs
),
15253 UF(srsda
, 8400500, 2, (oRRw
, I31w
), srs
),
15254 TUF(srsdb
, 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
15255 TCE(ssat16
, 6a00f30
, f3200000
, 3, (RRnpc
, I16
, RRnpc
), ssat16
, t_ssat16
),
15256 TCE(umaal
, 0400090, fbe00060
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
, t_mlal
),
15257 TCE(usad8
, 780f010
, fb70f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
15258 TCE(usada8
, 7800010, fb700000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
15259 TCE(usat16
, 6e00f30
, f3a00000
, 3, (RRnpc
, I15
, RRnpc
), usat16
, t_usat16
),
15262 #define ARM_VARIANT &arm_ext_v6k
15263 #undef THUMB_VARIANT
15264 #define THUMB_VARIANT &arm_ext_v6k
15265 tCE(yield
, 320f001
, yield
, 0, (), noargs
, t_hint
),
15266 tCE(wfe
, 320f002
, wfe
, 0, (), noargs
, t_hint
),
15267 tCE(wfi
, 320f003
, wfi
, 0, (), noargs
, t_hint
),
15268 tCE(sev
, 320f004
, sev
, 0, (), noargs
, t_hint
),
15270 #undef THUMB_VARIANT
15271 #define THUMB_VARIANT &arm_ext_v6_notm
15272 TCE(ldrexd
, 1b00f9f
, e8d0007f
, 3, (RRnpc
, oRRnpc
, RRnpcb
), ldrexd
, t_ldrexd
),
15273 TCE(strexd
, 1a00f90
, e8c00070
, 4, (RRnpc
, RRnpc
, oRRnpc
, RRnpcb
), strexd
, t_strexd
),
15275 #undef THUMB_VARIANT
15276 #define THUMB_VARIANT &arm_ext_v6t2
15277 TCE(ldrexb
, 1d00f9f
, e8d00f4f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
15278 TCE(ldrexh
, 1f00f9f
, e8d00f5f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
15279 TCE(strexb
, 1c00f90
, e8c00f40
, 3, (RRnpc
, RRnpc
, ADDR
), strex
, rm_rd_rn
),
15280 TCE(strexh
, 1e00f90
, e8c00f50
, 3, (RRnpc
, RRnpc
, ADDR
), strex
, rm_rd_rn
),
15281 TUF(clrex
, 57ff01f
, f3bf8f2f
, 0, (), noargs
, noargs
),
15284 #define ARM_VARIANT &arm_ext_v6z
15285 TCE(smc
, 1600070, f7f08000
, 1, (EXPi
), smc
, t_smc
),
15288 #define ARM_VARIANT &arm_ext_v6t2
15289 TCE(bfc
, 7c0001f
, f36f0000
, 3, (RRnpc
, I31
, I32
), bfc
, t_bfc
),
15290 TCE(bfi
, 7c00010
, f3600000
, 4, (RRnpc
, RRnpc_I0
, I31
, I32
), bfi
, t_bfi
),
15291 TCE(sbfx
, 7a00050
, f3400000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
15292 TCE(ubfx
, 7e00050
, f3c00000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
15294 TCE(mls
, 0600090, fb000010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
15295 TCE(movw
, 3000000, f2400000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
15296 TCE(movt
, 3400000, f2c00000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
15297 TCE(rbit
, 6ff0f30
, fa90f0a0
, 2, (RR
, RR
), rd_rm
, t_rbit
),
15299 TC3(ldrht
, 03000b0
, f8300e00
, 2, (RR
, ADDR
), ldsttv4
, t_ldstt
),
15300 TC3(ldrsht
, 03000f0
, f9300e00
, 2, (RR
, ADDR
), ldsttv4
, t_ldstt
),
15301 TC3(ldrsbt
, 03000d0
, f9100e00
, 2, (RR
, ADDR
), ldsttv4
, t_ldstt
),
15302 TC3(strht
, 02000b0
, f8200e00
, 2, (RR
, ADDR
), ldsttv4
, t_ldstt
),
15304 UT(cbnz
, b900
, 2, (RR
, EXP
), t_cbz
),
15305 UT(cbz
, b100
, 2, (RR
, EXP
), t_cbz
),
15306 /* ARM does not really have an IT instruction, so always allow it. */
15308 #define ARM_VARIANT &arm_ext_v1
15309 TUE(it
, 0, bf08
, 1, (COND
), it
, t_it
),
15310 TUE(itt
, 0, bf0c
, 1, (COND
), it
, t_it
),
15311 TUE(ite
, 0, bf04
, 1, (COND
), it
, t_it
),
15312 TUE(ittt
, 0, bf0e
, 1, (COND
), it
, t_it
),
15313 TUE(itet
, 0, bf06
, 1, (COND
), it
, t_it
),
15314 TUE(itte
, 0, bf0a
, 1, (COND
), it
, t_it
),
15315 TUE(itee
, 0, bf02
, 1, (COND
), it
, t_it
),
15316 TUE(itttt
, 0, bf0f
, 1, (COND
), it
, t_it
),
15317 TUE(itett
, 0, bf07
, 1, (COND
), it
, t_it
),
15318 TUE(ittet
, 0, bf0b
, 1, (COND
), it
, t_it
),
15319 TUE(iteet
, 0, bf03
, 1, (COND
), it
, t_it
),
15320 TUE(ittte
, 0, bf0d
, 1, (COND
), it
, t_it
),
15321 TUE(itete
, 0, bf05
, 1, (COND
), it
, t_it
),
15322 TUE(ittee
, 0, bf09
, 1, (COND
), it
, t_it
),
15323 TUE(iteee
, 0, bf01
, 1, (COND
), it
, t_it
),
15325 /* Thumb2 only instructions. */
15327 #define ARM_VARIANT NULL
15329 TCE(addw
, 0, f2000000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
15330 TCE(subw
, 0, f2a00000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
15331 TCE(tbb
, 0, e8d0f000
, 1, (TB
), 0, t_tb
),
15332 TCE(tbh
, 0, e8d0f010
, 1, (TB
), 0, t_tb
),
15334 /* Thumb-2 hardware division instructions (R and M profiles only). */
15335 #undef THUMB_VARIANT
15336 #define THUMB_VARIANT &arm_ext_div
15337 TCE(sdiv
, 0, fb90f0f0
, 3, (RR
, oRR
, RR
), 0, t_div
),
15338 TCE(udiv
, 0, fbb0f0f0
, 3, (RR
, oRR
, RR
), 0, t_div
),
15340 /* ARM V6M/V7 instructions. */
15342 #define ARM_VARIANT &arm_ext_barrier
15343 #undef THUMB_VARIANT
15344 #define THUMB_VARIANT &arm_ext_barrier
15345 TUF(dmb
, 57ff050
, f3bf8f50
, 1, (oBARRIER
), barrier
, t_barrier
),
15346 TUF(dsb
, 57ff040
, f3bf8f40
, 1, (oBARRIER
), barrier
, t_barrier
),
15347 TUF(isb
, 57ff060
, f3bf8f60
, 1, (oBARRIER
), barrier
, t_barrier
),
15349 /* ARM V7 instructions. */
15351 #define ARM_VARIANT &arm_ext_v7
15352 #undef THUMB_VARIANT
15353 #define THUMB_VARIANT &arm_ext_v7
15354 TUF(pli
, 450f000
, f910f000
, 1, (ADDR
), pli
, t_pld
),
15355 TCE(dbg
, 320f0f0
, f3af80f0
, 1, (I15
), dbg
, t_dbg
),
15358 #define ARM_VARIANT &fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
15359 cCE(wfs
, e200110
, 1, (RR
), rd
),
15360 cCE(rfs
, e300110
, 1, (RR
), rd
),
15361 cCE(wfc
, e400110
, 1, (RR
), rd
),
15362 cCE(rfc
, e500110
, 1, (RR
), rd
),
15364 cCL(ldfs
, c100100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
15365 cCL(ldfd
, c108100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
15366 cCL(ldfe
, c500100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
15367 cCL(ldfp
, c508100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
15369 cCL(stfs
, c000100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
15370 cCL(stfd
, c008100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
15371 cCL(stfe
, c400100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
15372 cCL(stfp
, c408100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
15374 cCL(mvfs
, e008100
, 2, (RF
, RF_IF
), rd_rm
),
15375 cCL(mvfsp
, e008120
, 2, (RF
, RF_IF
), rd_rm
),
15376 cCL(mvfsm
, e008140
, 2, (RF
, RF_IF
), rd_rm
),
15377 cCL(mvfsz
, e008160
, 2, (RF
, RF_IF
), rd_rm
),
15378 cCL(mvfd
, e008180
, 2, (RF
, RF_IF
), rd_rm
),
15379 cCL(mvfdp
, e0081a0
, 2, (RF
, RF_IF
), rd_rm
),
15380 cCL(mvfdm
, e0081c0
, 2, (RF
, RF_IF
), rd_rm
),
15381 cCL(mvfdz
, e0081e0
, 2, (RF
, RF_IF
), rd_rm
),
15382 cCL(mvfe
, e088100
, 2, (RF
, RF_IF
), rd_rm
),
15383 cCL(mvfep
, e088120
, 2, (RF
, RF_IF
), rd_rm
),
15384 cCL(mvfem
, e088140
, 2, (RF
, RF_IF
), rd_rm
),
15385 cCL(mvfez
, e088160
, 2, (RF
, RF_IF
), rd_rm
),
15387 cCL(mnfs
, e108100
, 2, (RF
, RF_IF
), rd_rm
),
15388 cCL(mnfsp
, e108120
, 2, (RF
, RF_IF
), rd_rm
),
15389 cCL(mnfsm
, e108140
, 2, (RF
, RF_IF
), rd_rm
),
15390 cCL(mnfsz
, e108160
, 2, (RF
, RF_IF
), rd_rm
),
15391 cCL(mnfd
, e108180
, 2, (RF
, RF_IF
), rd_rm
),
15392 cCL(mnfdp
, e1081a0
, 2, (RF
, RF_IF
), rd_rm
),
15393 cCL(mnfdm
, e1081c0
, 2, (RF
, RF_IF
), rd_rm
),
15394 cCL(mnfdz
, e1081e0
, 2, (RF
, RF_IF
), rd_rm
),
15395 cCL(mnfe
, e188100
, 2, (RF
, RF_IF
), rd_rm
),
15396 cCL(mnfep
, e188120
, 2, (RF
, RF_IF
), rd_rm
),
15397 cCL(mnfem
, e188140
, 2, (RF
, RF_IF
), rd_rm
),
15398 cCL(mnfez
, e188160
, 2, (RF
, RF_IF
), rd_rm
),
15400 cCL(abss
, e208100
, 2, (RF
, RF_IF
), rd_rm
),
15401 cCL(abssp
, e208120
, 2, (RF
, RF_IF
), rd_rm
),
15402 cCL(abssm
, e208140
, 2, (RF
, RF_IF
), rd_rm
),
15403 cCL(abssz
, e208160
, 2, (RF
, RF_IF
), rd_rm
),
15404 cCL(absd
, e208180
, 2, (RF
, RF_IF
), rd_rm
),
15405 cCL(absdp
, e2081a0
, 2, (RF
, RF_IF
), rd_rm
),
15406 cCL(absdm
, e2081c0
, 2, (RF
, RF_IF
), rd_rm
),
15407 cCL(absdz
, e2081e0
, 2, (RF
, RF_IF
), rd_rm
),
15408 cCL(abse
, e288100
, 2, (RF
, RF_IF
), rd_rm
),
15409 cCL(absep
, e288120
, 2, (RF
, RF_IF
), rd_rm
),
15410 cCL(absem
, e288140
, 2, (RF
, RF_IF
), rd_rm
),
15411 cCL(absez
, e288160
, 2, (RF
, RF_IF
), rd_rm
),
15413 cCL(rnds
, e308100
, 2, (RF
, RF_IF
), rd_rm
),
15414 cCL(rndsp
, e308120
, 2, (RF
, RF_IF
), rd_rm
),
15415 cCL(rndsm
, e308140
, 2, (RF
, RF_IF
), rd_rm
),
15416 cCL(rndsz
, e308160
, 2, (RF
, RF_IF
), rd_rm
),
15417 cCL(rndd
, e308180
, 2, (RF
, RF_IF
), rd_rm
),
15418 cCL(rnddp
, e3081a0
, 2, (RF
, RF_IF
), rd_rm
),
15419 cCL(rnddm
, e3081c0
, 2, (RF
, RF_IF
), rd_rm
),
15420 cCL(rnddz
, e3081e0
, 2, (RF
, RF_IF
), rd_rm
),
15421 cCL(rnde
, e388100
, 2, (RF
, RF_IF
), rd_rm
),
15422 cCL(rndep
, e388120
, 2, (RF
, RF_IF
), rd_rm
),
15423 cCL(rndem
, e388140
, 2, (RF
, RF_IF
), rd_rm
),
15424 cCL(rndez
, e388160
, 2, (RF
, RF_IF
), rd_rm
),
15426 cCL(sqts
, e408100
, 2, (RF
, RF_IF
), rd_rm
),
15427 cCL(sqtsp
, e408120
, 2, (RF
, RF_IF
), rd_rm
),
15428 cCL(sqtsm
, e408140
, 2, (RF
, RF_IF
), rd_rm
),
15429 cCL(sqtsz
, e408160
, 2, (RF
, RF_IF
), rd_rm
),
15430 cCL(sqtd
, e408180
, 2, (RF
, RF_IF
), rd_rm
),
15431 cCL(sqtdp
, e4081a0
, 2, (RF
, RF_IF
), rd_rm
),
15432 cCL(sqtdm
, e4081c0
, 2, (RF
, RF_IF
), rd_rm
),
15433 cCL(sqtdz
, e4081e0
, 2, (RF
, RF_IF
), rd_rm
),
15434 cCL(sqte
, e488100
, 2, (RF
, RF_IF
), rd_rm
),
15435 cCL(sqtep
, e488120
, 2, (RF
, RF_IF
), rd_rm
),
15436 cCL(sqtem
, e488140
, 2, (RF
, RF_IF
), rd_rm
),
15437 cCL(sqtez
, e488160
, 2, (RF
, RF_IF
), rd_rm
),
15439 cCL(logs
, e508100
, 2, (RF
, RF_IF
), rd_rm
),
15440 cCL(logsp
, e508120
, 2, (RF
, RF_IF
), rd_rm
),
15441 cCL(logsm
, e508140
, 2, (RF
, RF_IF
), rd_rm
),
15442 cCL(logsz
, e508160
, 2, (RF
, RF_IF
), rd_rm
),
15443 cCL(logd
, e508180
, 2, (RF
, RF_IF
), rd_rm
),
15444 cCL(logdp
, e5081a0
, 2, (RF
, RF_IF
), rd_rm
),
15445 cCL(logdm
, e5081c0
, 2, (RF
, RF_IF
), rd_rm
),
15446 cCL(logdz
, e5081e0
, 2, (RF
, RF_IF
), rd_rm
),
15447 cCL(loge
, e588100
, 2, (RF
, RF_IF
), rd_rm
),
15448 cCL(logep
, e588120
, 2, (RF
, RF_IF
), rd_rm
),
15449 cCL(logem
, e588140
, 2, (RF
, RF_IF
), rd_rm
),
15450 cCL(logez
, e588160
, 2, (RF
, RF_IF
), rd_rm
),
15452 cCL(lgns
, e608100
, 2, (RF
, RF_IF
), rd_rm
),
15453 cCL(lgnsp
, e608120
, 2, (RF
, RF_IF
), rd_rm
),
15454 cCL(lgnsm
, e608140
, 2, (RF
, RF_IF
), rd_rm
),
15455 cCL(lgnsz
, e608160
, 2, (RF
, RF_IF
), rd_rm
),
15456 cCL(lgnd
, e608180
, 2, (RF
, RF_IF
), rd_rm
),
15457 cCL(lgndp
, e6081a0
, 2, (RF
, RF_IF
), rd_rm
),
15458 cCL(lgndm
, e6081c0
, 2, (RF
, RF_IF
), rd_rm
),
15459 cCL(lgndz
, e6081e0
, 2, (RF
, RF_IF
), rd_rm
),
15460 cCL(lgne
, e688100
, 2, (RF
, RF_IF
), rd_rm
),
15461 cCL(lgnep
, e688120
, 2, (RF
, RF_IF
), rd_rm
),
15462 cCL(lgnem
, e688140
, 2, (RF
, RF_IF
), rd_rm
),
15463 cCL(lgnez
, e688160
, 2, (RF
, RF_IF
), rd_rm
),
15465 cCL(exps
, e708100
, 2, (RF
, RF_IF
), rd_rm
),
15466 cCL(expsp
, e708120
, 2, (RF
, RF_IF
), rd_rm
),
15467 cCL(expsm
, e708140
, 2, (RF
, RF_IF
), rd_rm
),
15468 cCL(expsz
, e708160
, 2, (RF
, RF_IF
), rd_rm
),
15469 cCL(expd
, e708180
, 2, (RF
, RF_IF
), rd_rm
),
15470 cCL(expdp
, e7081a0
, 2, (RF
, RF_IF
), rd_rm
),
15471 cCL(expdm
, e7081c0
, 2, (RF
, RF_IF
), rd_rm
),
15472 cCL(expdz
, e7081e0
, 2, (RF
, RF_IF
), rd_rm
),
15473 cCL(expe
, e788100
, 2, (RF
, RF_IF
), rd_rm
),
15474 cCL(expep
, e788120
, 2, (RF
, RF_IF
), rd_rm
),
15475 cCL(expem
, e788140
, 2, (RF
, RF_IF
), rd_rm
),
15476 cCL(expdz
, e788160
, 2, (RF
, RF_IF
), rd_rm
),
15478 cCL(sins
, e808100
, 2, (RF
, RF_IF
), rd_rm
),
15479 cCL(sinsp
, e808120
, 2, (RF
, RF_IF
), rd_rm
),
15480 cCL(sinsm
, e808140
, 2, (RF
, RF_IF
), rd_rm
),
15481 cCL(sinsz
, e808160
, 2, (RF
, RF_IF
), rd_rm
),
15482 cCL(sind
, e808180
, 2, (RF
, RF_IF
), rd_rm
),
15483 cCL(sindp
, e8081a0
, 2, (RF
, RF_IF
), rd_rm
),
15484 cCL(sindm
, e8081c0
, 2, (RF
, RF_IF
), rd_rm
),
15485 cCL(sindz
, e8081e0
, 2, (RF
, RF_IF
), rd_rm
),
15486 cCL(sine
, e888100
, 2, (RF
, RF_IF
), rd_rm
),
15487 cCL(sinep
, e888120
, 2, (RF
, RF_IF
), rd_rm
),
15488 cCL(sinem
, e888140
, 2, (RF
, RF_IF
), rd_rm
),
15489 cCL(sinez
, e888160
, 2, (RF
, RF_IF
), rd_rm
),
15491 cCL(coss
, e908100
, 2, (RF
, RF_IF
), rd_rm
),
15492 cCL(cossp
, e908120
, 2, (RF
, RF_IF
), rd_rm
),
15493 cCL(cossm
, e908140
, 2, (RF
, RF_IF
), rd_rm
),
15494 cCL(cossz
, e908160
, 2, (RF
, RF_IF
), rd_rm
),
15495 cCL(cosd
, e908180
, 2, (RF
, RF_IF
), rd_rm
),
15496 cCL(cosdp
, e9081a0
, 2, (RF
, RF_IF
), rd_rm
),
15497 cCL(cosdm
, e9081c0
, 2, (RF
, RF_IF
), rd_rm
),
15498 cCL(cosdz
, e9081e0
, 2, (RF
, RF_IF
), rd_rm
),
15499 cCL(cose
, e988100
, 2, (RF
, RF_IF
), rd_rm
),
15500 cCL(cosep
, e988120
, 2, (RF
, RF_IF
), rd_rm
),
15501 cCL(cosem
, e988140
, 2, (RF
, RF_IF
), rd_rm
),
15502 cCL(cosez
, e988160
, 2, (RF
, RF_IF
), rd_rm
),
15504 cCL(tans
, ea08100
, 2, (RF
, RF_IF
), rd_rm
),
15505 cCL(tansp
, ea08120
, 2, (RF
, RF_IF
), rd_rm
),
15506 cCL(tansm
, ea08140
, 2, (RF
, RF_IF
), rd_rm
),
15507 cCL(tansz
, ea08160
, 2, (RF
, RF_IF
), rd_rm
),
15508 cCL(tand
, ea08180
, 2, (RF
, RF_IF
), rd_rm
),
15509 cCL(tandp
, ea081a0
, 2, (RF
, RF_IF
), rd_rm
),
15510 cCL(tandm
, ea081c0
, 2, (RF
, RF_IF
), rd_rm
),
15511 cCL(tandz
, ea081e0
, 2, (RF
, RF_IF
), rd_rm
),
15512 cCL(tane
, ea88100
, 2, (RF
, RF_IF
), rd_rm
),
15513 cCL(tanep
, ea88120
, 2, (RF
, RF_IF
), rd_rm
),
15514 cCL(tanem
, ea88140
, 2, (RF
, RF_IF
), rd_rm
),
15515 cCL(tanez
, ea88160
, 2, (RF
, RF_IF
), rd_rm
),
15517 cCL(asns
, eb08100
, 2, (RF
, RF_IF
), rd_rm
),
15518 cCL(asnsp
, eb08120
, 2, (RF
, RF_IF
), rd_rm
),
15519 cCL(asnsm
, eb08140
, 2, (RF
, RF_IF
), rd_rm
),
15520 cCL(asnsz
, eb08160
, 2, (RF
, RF_IF
), rd_rm
),
15521 cCL(asnd
, eb08180
, 2, (RF
, RF_IF
), rd_rm
),
15522 cCL(asndp
, eb081a0
, 2, (RF
, RF_IF
), rd_rm
),
15523 cCL(asndm
, eb081c0
, 2, (RF
, RF_IF
), rd_rm
),
15524 cCL(asndz
, eb081e0
, 2, (RF
, RF_IF
), rd_rm
),
15525 cCL(asne
, eb88100
, 2, (RF
, RF_IF
), rd_rm
),
15526 cCL(asnep
, eb88120
, 2, (RF
, RF_IF
), rd_rm
),
15527 cCL(asnem
, eb88140
, 2, (RF
, RF_IF
), rd_rm
),
15528 cCL(asnez
, eb88160
, 2, (RF
, RF_IF
), rd_rm
),
15530 cCL(acss
, ec08100
, 2, (RF
, RF_IF
), rd_rm
),
15531 cCL(acssp
, ec08120
, 2, (RF
, RF_IF
), rd_rm
),
15532 cCL(acssm
, ec08140
, 2, (RF
, RF_IF
), rd_rm
),
15533 cCL(acssz
, ec08160
, 2, (RF
, RF_IF
), rd_rm
),
15534 cCL(acsd
, ec08180
, 2, (RF
, RF_IF
), rd_rm
),
15535 cCL(acsdp
, ec081a0
, 2, (RF
, RF_IF
), rd_rm
),
15536 cCL(acsdm
, ec081c0
, 2, (RF
, RF_IF
), rd_rm
),
15537 cCL(acsdz
, ec081e0
, 2, (RF
, RF_IF
), rd_rm
),
15538 cCL(acse
, ec88100
, 2, (RF
, RF_IF
), rd_rm
),
15539 cCL(acsep
, ec88120
, 2, (RF
, RF_IF
), rd_rm
),
15540 cCL(acsem
, ec88140
, 2, (RF
, RF_IF
), rd_rm
),
15541 cCL(acsez
, ec88160
, 2, (RF
, RF_IF
), rd_rm
),
15543 cCL(atns
, ed08100
, 2, (RF
, RF_IF
), rd_rm
),
15544 cCL(atnsp
, ed08120
, 2, (RF
, RF_IF
), rd_rm
),
15545 cCL(atnsm
, ed08140
, 2, (RF
, RF_IF
), rd_rm
),
15546 cCL(atnsz
, ed08160
, 2, (RF
, RF_IF
), rd_rm
),
15547 cCL(atnd
, ed08180
, 2, (RF
, RF_IF
), rd_rm
),
15548 cCL(atndp
, ed081a0
, 2, (RF
, RF_IF
), rd_rm
),
15549 cCL(atndm
, ed081c0
, 2, (RF
, RF_IF
), rd_rm
),
15550 cCL(atndz
, ed081e0
, 2, (RF
, RF_IF
), rd_rm
),
15551 cCL(atne
, ed88100
, 2, (RF
, RF_IF
), rd_rm
),
15552 cCL(atnep
, ed88120
, 2, (RF
, RF_IF
), rd_rm
),
15553 cCL(atnem
, ed88140
, 2, (RF
, RF_IF
), rd_rm
),
15554 cCL(atnez
, ed88160
, 2, (RF
, RF_IF
), rd_rm
),
15556 cCL(urds
, ee08100
, 2, (RF
, RF_IF
), rd_rm
),
15557 cCL(urdsp
, ee08120
, 2, (RF
, RF_IF
), rd_rm
),
15558 cCL(urdsm
, ee08140
, 2, (RF
, RF_IF
), rd_rm
),
15559 cCL(urdsz
, ee08160
, 2, (RF
, RF_IF
), rd_rm
),
15560 cCL(urdd
, ee08180
, 2, (RF
, RF_IF
), rd_rm
),
15561 cCL(urddp
, ee081a0
, 2, (RF
, RF_IF
), rd_rm
),
15562 cCL(urddm
, ee081c0
, 2, (RF
, RF_IF
), rd_rm
),
15563 cCL(urddz
, ee081e0
, 2, (RF
, RF_IF
), rd_rm
),
15564 cCL(urde
, ee88100
, 2, (RF
, RF_IF
), rd_rm
),
15565 cCL(urdep
, ee88120
, 2, (RF
, RF_IF
), rd_rm
),
15566 cCL(urdem
, ee88140
, 2, (RF
, RF_IF
), rd_rm
),
15567 cCL(urdez
, ee88160
, 2, (RF
, RF_IF
), rd_rm
),
15569 cCL(nrms
, ef08100
, 2, (RF
, RF_IF
), rd_rm
),
15570 cCL(nrmsp
, ef08120
, 2, (RF
, RF_IF
), rd_rm
),
15571 cCL(nrmsm
, ef08140
, 2, (RF
, RF_IF
), rd_rm
),
15572 cCL(nrmsz
, ef08160
, 2, (RF
, RF_IF
), rd_rm
),
15573 cCL(nrmd
, ef08180
, 2, (RF
, RF_IF
), rd_rm
),
15574 cCL(nrmdp
, ef081a0
, 2, (RF
, RF_IF
), rd_rm
),
15575 cCL(nrmdm
, ef081c0
, 2, (RF
, RF_IF
), rd_rm
),
15576 cCL(nrmdz
, ef081e0
, 2, (RF
, RF_IF
), rd_rm
),
15577 cCL(nrme
, ef88100
, 2, (RF
, RF_IF
), rd_rm
),
15578 cCL(nrmep
, ef88120
, 2, (RF
, RF_IF
), rd_rm
),
15579 cCL(nrmem
, ef88140
, 2, (RF
, RF_IF
), rd_rm
),
15580 cCL(nrmez
, ef88160
, 2, (RF
, RF_IF
), rd_rm
),
 cCL(adfs, e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfsp, e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfsm, e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfsz, e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfd, e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfdp, e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfdm, e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfdz, e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfe, e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfep, e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfem, e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfez, e080160, 3, (RF, RF, RF_IF), rd_rn_rm),

 cCL(sufs, e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufsp, e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufsm, e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufsz, e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufd, e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufdp, e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufdm, e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufdz, e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufe, e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufep, e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufem, e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufez, e280160, 3, (RF, RF, RF_IF), rd_rn_rm),

 cCL(rsfs, e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfsp, e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfsm, e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfsz, e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfd, e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfdp, e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfdm, e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfdz, e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfe, e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfep, e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfem, e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfez, e380160, 3, (RF, RF, RF_IF), rd_rn_rm),

 cCL(mufs, e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufsp, e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufsm, e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufsz, e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufd, e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufdp, e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufdm, e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufdz, e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufe, e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufep, e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufem, e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufez, e180160, 3, (RF, RF, RF_IF), rd_rn_rm),

 cCL(dvfs, e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfsp, e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfsm, e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfsz, e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfd, e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfdp, e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfdm, e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfdz, e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfe, e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfep, e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfem, e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfez, e480160, 3, (RF, RF, RF_IF), rd_rn_rm),

 cCL(rdfs, e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfsp, e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfsm, e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfsz, e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfd, e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfdp, e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfdm, e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfdz, e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfe, e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfep, e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfem, e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfez, e580160, 3, (RF, RF, RF_IF), rd_rn_rm),

 cCL(pows, e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powsp, e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powsm, e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powsz, e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powd, e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powdp, e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powdm, e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powdz, e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powe, e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powep, e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powem, e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powez, e680160, 3, (RF, RF, RF_IF), rd_rn_rm),

 cCL(rpws, e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwsp, e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwsm, e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwsz, e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwd, e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwdp, e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwdm, e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwdz, e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwe, e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwep, e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwem, e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwez, e780160, 3, (RF, RF, RF_IF), rd_rn_rm),

 cCL(rmfs, e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfsp, e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfsm, e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfsz, e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfd, e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfdp, e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfdm, e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfdz, e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfe, e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfep, e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfem, e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfez, e880160, 3, (RF, RF, RF_IF), rd_rn_rm),

 cCL(fmls, e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmlsp, e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmlsm, e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmlsz, e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmld, e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmldp, e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmldm, e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmldz, e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmle, e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmlep, e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmlem, e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmlez, e980160, 3, (RF, RF, RF_IF), rd_rn_rm),

 cCL(fdvs, ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvsp, ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvsm, ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvsz, ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvd, ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvdp, ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvdm, ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvdz, ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdve, ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvep, ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvem, ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvez, ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),

 cCL(frds, eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frdsp, eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frdsm, eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frdsz, eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frdd, eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frddp, eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frddm, eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frddz, eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frde, eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frdep, eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frdem, eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frdez, eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),

 cCL(pols, ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(polsp, ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(polsm, ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(polsz, ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(pold, ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(poldp, ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(poldm, ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(poldz, ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(pole, ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(polep, ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(polem, ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(polez, ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCE(cmf, e90f110, 2, (RF, RF_IF), fpa_cmp),
 C3E(cmfe, ed0f110, 2, (RF, RF_IF), fpa_cmp),
 cCE(cnf, eb0f110, 2, (RF, RF_IF), fpa_cmp),
 C3E(cnfe, ef0f110, 2, (RF, RF_IF), fpa_cmp),

 cCL(flts, e000110, 2, (RF, RR), rn_rd),
 cCL(fltsp, e000130, 2, (RF, RR), rn_rd),
 cCL(fltsm, e000150, 2, (RF, RR), rn_rd),
 cCL(fltsz, e000170, 2, (RF, RR), rn_rd),
 cCL(fltd, e000190, 2, (RF, RR), rn_rd),
 cCL(fltdp, e0001b0, 2, (RF, RR), rn_rd),
 cCL(fltdm, e0001d0, 2, (RF, RR), rn_rd),
 cCL(fltdz, e0001f0, 2, (RF, RR), rn_rd),
 cCL(flte, e080110, 2, (RF, RR), rn_rd),
 cCL(fltep, e080130, 2, (RF, RR), rn_rd),
 cCL(fltem, e080150, 2, (RF, RR), rn_rd),
 cCL(fltez, e080170, 2, (RF, RR), rn_rd),

 /* The implementation of the FIX instruction is broken on some
    assemblers, in that it accepts a precision specifier as well as a
    rounding specifier, despite the fact that this is meaningless.
    To be more compatible, we accept it as well, though of course it
    does not set any bits.  */
 cCE(fix, e100110, 2, (RR, RF), rd_rm),
 cCL(fixp, e100130, 2, (RR, RF), rd_rm),
 cCL(fixm, e100150, 2, (RR, RF), rd_rm),
 cCL(fixz, e100170, 2, (RR, RF), rd_rm),
 cCL(fixsp, e100130, 2, (RR, RF), rd_rm),
 cCL(fixsm, e100150, 2, (RR, RF), rd_rm),
 cCL(fixsz, e100170, 2, (RR, RF), rd_rm),
 cCL(fixdp, e100130, 2, (RR, RF), rd_rm),
 cCL(fixdm, e100150, 2, (RR, RF), rd_rm),
 cCL(fixdz, e100170, 2, (RR, RF), rd_rm),
 cCL(fixep, e100130, 2, (RR, RF), rd_rm),
 cCL(fixem, e100150, 2, (RR, RF), rd_rm),
 cCL(fixez, e100170, 2, (RR, RF), rd_rm),
 /* Instructions that were new with the real FPA, call them V2. */
#undef ARM_VARIANT
#define ARM_VARIANT &fpu_fpa_ext_v2
 cCE(lfm, c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
 cCL(lfmfd, c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
 cCL(lfmea, d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
 cCE(sfm, c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
 cCL(sfmfd, d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
 cCL(sfmea, c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
#undef ARM_VARIANT
#define ARM_VARIANT &fpu_vfp_ext_v1xd  /* VFP V1xD (single precision). */
 /* Moves and type conversions. */
 cCE(fcpys, eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(fmrs, e100a10, 2, (RR, RVS), vfp_reg_from_sp),
 cCE(fmsr, e000a10, 2, (RVS, RR), vfp_sp_from_reg),
 cCE(fmstat, ef1fa10, 0, (), noargs),
 cCE(fsitos, eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(fuitos, eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(ftosis, ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(ftosizs, ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(ftouis, ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(ftouizs, ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(fmrx, ef00a10, 2, (RR, RVC), rd_rn),
 cCE(fmxr, ee00a10, 2, (RVC, RR), rn_rd),

 /* Memory operations. */
 cCE(flds, d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
 cCE(fsts, d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
 cCE(fldmias, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
 cCE(fldmfds, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
 cCE(fldmdbs, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
 cCE(fldmeas, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
 cCE(fldmiax, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
 cCE(fldmfdx, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
 cCE(fldmdbx, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
 cCE(fldmeax, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
 cCE(fstmias, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
 cCE(fstmeas, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
 cCE(fstmdbs, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
 cCE(fstmfds, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
 cCE(fstmiax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
 cCE(fstmeax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
 cCE(fstmdbx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
 cCE(fstmfdx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),

 /* Monadic operations. */
 cCE(fabss, eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(fnegs, eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(fsqrts, eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),

 /* Dyadic operations. */
 cCE(fadds, e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
 cCE(fsubs, e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
 cCE(fmuls, e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
 cCE(fdivs, e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
 cCE(fmacs, e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
 cCE(fmscs, e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
 cCE(fnmuls, e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
 cCE(fnmacs, e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
 cCE(fnmscs, e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),

 cCE(fcmps, eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(fcmpzs, eb50a40, 1, (RVS), vfp_sp_compare_z),
 cCE(fcmpes, eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(fcmpezs, eb50ac0, 1, (RVS), vfp_sp_compare_z),
#undef ARM_VARIANT
#define ARM_VARIANT &fpu_vfp_ext_v1  /* VFP V1 (Double precision). */
 /* Moves and type conversions. */
 cCE(fcpyd, eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
 cCE(fcvtds, eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
 cCE(fcvtsd, eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
 cCE(fmdhr, e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
 cCE(fmdlr, e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
 cCE(fmrdh, e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
 cCE(fmrdl, e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
 cCE(fsitod, eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
 cCE(fuitod, eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
 cCE(ftosid, ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
 cCE(ftosizd, ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
 cCE(ftouid, ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
 cCE(ftouizd, ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),

 /* Memory operations. */
 cCE(fldd, d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
 cCE(fstd, d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
 cCE(fldmiad, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
 cCE(fldmfdd, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
 cCE(fldmdbd, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
 cCE(fldmead, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
 cCE(fstmiad, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
 cCE(fstmead, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
 cCE(fstmdbd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
 cCE(fstmfdd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),

 /* Monadic operations. */
 cCE(fabsd, eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
 cCE(fnegd, eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
 cCE(fsqrtd, eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),

 /* Dyadic operations. */
 cCE(faddd, e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
 cCE(fsubd, e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
 cCE(fmuld, e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
 cCE(fdivd, e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
 cCE(fmacd, e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
 cCE(fmscd, e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
 cCE(fnmuld, e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
 cCE(fnmacd, e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
 cCE(fnmscd, e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),

 cCE(fcmpd, eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
 cCE(fcmpzd, eb50b40, 1, (RVD), vfp_dp_rd),
 cCE(fcmped, eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
 cCE(fcmpezd, eb50bc0, 1, (RVD), vfp_dp_rd),
#undef ARM_VARIANT
#define ARM_VARIANT &fpu_vfp_ext_v2
 cCE(fmsrr, c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
 cCE(fmrrs, c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
 cCE(fmdrr, c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
 cCE(fmrrd, c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
 /* Instructions which may belong to either the Neon or VFP instruction sets.
    Individual encoder functions perform additional architecture checks. */
#undef ARM_VARIANT
#define ARM_VARIANT &fpu_vfp_ext_v1xd
#undef THUMB_VARIANT
#define THUMB_VARIANT &fpu_vfp_ext_v1xd
 /* These mnemonics are unique to VFP. */
 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
 nCE(vnmul, vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
 nCE(vnmla, vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
 nCE(vnmls, vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
 nCE(vcmp, vcmp, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
 nCE(vcmpe, vcmpe, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),

 /* Mnemonics shared by Neon and VFP. */
 nCEF(vmul, vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
 nCEF(vmla, vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
 nCEF(vmls, vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),

 nCEF(vadd, vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
 nCEF(vsub, vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),

 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),

 NCE(vldm, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm),
 NCE(vldmia, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm),
 NCE(vldmdb, d100b00, 2, (RRw, VRSDLST), neon_ldm_stm),
 NCE(vstm, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm),
 NCE(vstmia, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm),
 NCE(vstmdb, d000b00, 2, (RRw, VRSDLST), neon_ldm_stm),
 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),

 nCEF(vcvt, vcvt, 3, (RNSDQ, RNSDQ, oI32b), neon_cvt),

 /* NOTE: All VMOV encoding is special-cased! */
 NCE(vmov, 0, 1, (VMOV), neon_mov),
 NCE(vmovq, 0, 1, (VMOV), neon_mov),
#undef THUMB_VARIANT
#define THUMB_VARIANT &fpu_neon_ext_v1
#undef ARM_VARIANT
#define ARM_VARIANT &fpu_neon_ext_v1
 /* Data processing with three registers of the same length. */
 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
 /* If not immediate, fall back to neon_dyadic_i64_su.
    shl_imm should accept I8 I16 I32 I64,
    qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
 nUF(vshl, vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
 nUF(vshlq, vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
 nUF(vqshl, vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
 nUF(vqshlq, vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
 /* Logic ops, types optional & ignored. */
 nUF(vand, vand, 2, (RNDQ, NILO), neon_logic),
 nUF(vandq, vand, 2, (RNQ, NILO), neon_logic),
 nUF(vbic, vbic, 2, (RNDQ, NILO), neon_logic),
 nUF(vbicq, vbic, 2, (RNQ, NILO), neon_logic),
 nUF(vorr, vorr, 2, (RNDQ, NILO), neon_logic),
 nUF(vorrq, vorr, 2, (RNQ, NILO), neon_logic),
 nUF(vorn, vorn, 2, (RNDQ, NILO), neon_logic),
 nUF(vornq, vorn, 2, (RNQ, NILO), neon_logic),
 nUF(veor, veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
 nUF(veorq, veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
 /* Bitfield ops, untyped. */
 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
 nUF(vabd, vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
 nUF(vabdq, vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
 nUF(vmax, vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
 nUF(vmaxq, vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
 nUF(vmin, vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
 nUF(vminq, vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
    back to neon_dyadic_if_su. */
 nUF(vcge, vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
 nUF(vcgeq, vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
 nUF(vcgt, vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
 nUF(vcgtq, vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
 nUF(vclt, vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
 nUF(vcltq, vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
 nUF(vcle, vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
 nUF(vcleq, vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
 /* Comparison. Type I8 I16 I32 F32. */
 nUF(vceq, vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
 nUF(vceqq, vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
 /* As above, D registers only. */
 nUF(vpmax, vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
 nUF(vpmin, vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
 /* Int and float variants, signedness unimportant. */
 nUF(vmlaq, vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
 nUF(vmlsq, vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
 nUF(vpadd, vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
 /* Add/sub take types I8 I16 I32 I64 F32. */
 nUF(vaddq, vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
 nUF(vsubq, vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
 /* vtst takes sizes 8, 16, 32. */
 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
 /* VMUL takes I8 I16 I32 F32 P8. */
 nUF(vmulq, vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
 /* VQD{R}MULH takes S16 S32. */
 nUF(vqdmulh, vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
 nUF(vqdmulhq, vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
 nUF(vqrdmulh, vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
 nUF(vqrdmulhq, vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
 /* Two address, int/float. Types S8 S16 S32 F32. */
 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),

 /* Data processing with two registers and a shift amount. */
 /* Right shifts, and variants with rounding.
    Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
 /* Shift and insert. Sizes accepted 8 16 32 64. */
 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
 /* Right shift immediate, saturating & narrowing, with rounding variants.
    Types accepted S16 S32 S64 U16 U32 U64. */
 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
 /* As above, unsigned. Types accepted S16 S32 S64. */
 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
 /* Right shift narrowing. Types accepted I16 I32 I64. */
 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
 nUF(vshll, vshll, 3, (RNQ, RND, I32), neon_shll),
 /* CVT with optional immediate for fixed-point variant. */
 nUF(vcvtq, vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),

 nUF(vmvn, vmvn, 2, (RNDQ, RNDQ_IMVNb), neon_mvn),
 nUF(vmvnq, vmvn, 2, (RNQ, RNDQ_IMVNb), neon_mvn),
 /* Data processing, three registers of different lengths. */
 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
 /* If not scalar, fall back to neon_dyadic_long.
    Vector types as above, scalar types S16 S32 U16 U32. */
 nUF(vmlal, vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
 nUF(vmlsl, vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
 /* Dyadic, narrowing insns. Types I16 I32 I64. */
 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
 /* Saturating doubling multiplies. Types S16 S32. */
 nUF(vqdmlal, vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
 nUF(vqdmlsl, vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
 nUF(vqdmull, vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
    S16 S32 U16 U32. */
 nUF(vmull, vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),

 /* Extract. Size 8. */
 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
 /* Two registers, miscellaneous. */
 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
 /* Vector replicate. Sizes 8 16 32. */
 nCE(vdup, vdup, 2, (RNDQ, RR_RNSC), neon_dup),
 nCE(vdupq, vdup, 2, (RNQ, RR_RNSC), neon_dup),
 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
 /* VMOVN. Types I16 I32 I64. */
 nUF(vmovn, vmovn, 2, (RND, RNQ), neon_movn),
 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
 nUF(vqmovn, vqmovn, 2, (RND, RNQ), neon_qmovn),
 /* VQMOVUN. Types S16 S32 S64. */
 nUF(vqmovun, vqmovun, 2, (RND, RNQ), neon_qmovun),
 /* VZIP / VUZP. Sizes 8 16 32. */
 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
 /* VQABS / VQNEG. Types S8 S16 S32. */
 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
 /* Reciprocal estimates. Types U32 F32. */
 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
 /* VCLS. Types S8 S16 S32. */
 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
 /* VCLZ. Types I8 I16 I32. */
 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
 /* VCNT. Size 8. */
 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
 /* Two address, untyped. */
 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
 /* VTRN. Sizes 8 16 32. */
 nUF(vtrn, vtrn, 2, (RNDQ, RNDQ), neon_trn),
 nUF(vtrnq, vtrn, 2, (RNQ, RNQ), neon_trn),

 /* Table lookup. Size 8. */
 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
#undef THUMB_VARIANT
#define THUMB_VARIANT &fpu_vfp_v3_or_neon_ext
#undef ARM_VARIANT
#define ARM_VARIANT &fpu_vfp_v3_or_neon_ext
 /* Neon element/structure load/store. */
 nUF(vld1, vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vst1, vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vld2, vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vst2, vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vld3, vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vst3, vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vld4, vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vst4, vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
#undef THUMB_VARIANT
#define THUMB_VARIANT &fpu_vfp_ext_v3
#undef ARM_VARIANT
#define ARM_VARIANT &fpu_vfp_ext_v3
 cCE(fconsts, eb00a00, 2, (RVS, I255), vfp_sp_const),
 cCE(fconstd, eb00b00, 2, (RVD, I255), vfp_dp_const),
 cCE(fshtos, eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
 cCE(fshtod, eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
 cCE(fsltos, eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
 cCE(fsltod, eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
 cCE(fuhtos, ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
 cCE(fuhtod, ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
 cCE(fultos, ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
 cCE(fultod, ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
 cCE(ftoshs, ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
 cCE(ftoshd, ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
 cCE(ftosls, ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
 cCE(ftosld, ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
 cCE(ftouhs, ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
 cCE(ftouhd, ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
 cCE(ftouls, ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
 cCE(ftould, ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
#undef THUMB_VARIANT
#undef ARM_VARIANT
#define ARM_VARIANT &arm_cext_xscale  /* Intel XScale extensions. */
 cCE(mia, e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
 cCE(miaph, e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
 cCE(miabb, e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
 cCE(miabt, e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
 cCE(miatb, e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
 cCE(miatt, e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
 cCE(mar, c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
 cCE(mra, c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
#undef ARM_VARIANT
#define ARM_VARIANT &arm_cext_iwmmxt  /* Intel Wireless MMX technology. */
 cCE(tandcb, e13f130, 1, (RR), iwmmxt_tandorc),
 cCE(tandch, e53f130, 1, (RR), iwmmxt_tandorc),
 cCE(tandcw, e93f130, 1, (RR), iwmmxt_tandorc),
 cCE(tbcstb, e400010, 2, (RIWR, RR), rn_rd),
 cCE(tbcsth, e400050, 2, (RIWR, RR), rn_rd),
 cCE(tbcstw, e400090, 2, (RIWR, RR), rn_rd),
 cCE(textrcb, e130170, 2, (RR, I7), iwmmxt_textrc),
 cCE(textrch, e530170, 2, (RR, I7), iwmmxt_textrc),
 cCE(textrcw, e930170, 2, (RR, I7), iwmmxt_textrc),
 cCE(textrmub, e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
 cCE(textrmuh, e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
 cCE(textrmuw, e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
 cCE(textrmsb, e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
 cCE(textrmsh, e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
 cCE(textrmsw, e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
 cCE(tinsrb, e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
 cCE(tinsrh, e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
 cCE(tinsrw, e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
 cCE(tmcr, e000110, 2, (RIWC_RIWG, RR), rn_rd),
 cCE(tmcrr, c400000, 3, (RIWR, RR, RR), rm_rd_rn),
 cCE(tmia, e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
 cCE(tmiaph, e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
 cCE(tmiabb, e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
 cCE(tmiabt, e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
 cCE(tmiatb, e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
 cCE(tmiatt, e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
 cCE(tmovmskb, e100030, 2, (RR, RIWR), rd_rn),
 cCE(tmovmskh, e500030, 2, (RR, RIWR), rd_rn),
 cCE(tmovmskw, e900030, 2, (RR, RIWR), rd_rn),
 cCE(tmrc, e100110, 2, (RR, RIWC_RIWG), rd_rn),
 cCE(tmrrc, c500000, 3, (RR, RR, RIWR), rd_rn_rm),
 cCE(torcb, e13f150, 1, (RR), iwmmxt_tandorc),
 cCE(torch, e53f150, 1, (RR), iwmmxt_tandorc),
 cCE(torcw, e93f150, 1, (RR), iwmmxt_tandorc),
 cCE(waccb, e0001c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wacch, e4001c0, 2, (RIWR, RIWR), rd_rn),
 cCE(waccw, e8001c0, 2, (RIWR, RIWR), rd_rn),
 cCE(waddbss, e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddb, e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddbus, e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddhss, e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddh, e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddhus, e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddwss, eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddw, e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddwus, e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waligni, e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
 cCE(walignr0, e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(walignr1, e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(walignr2, ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(walignr3, eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wand, e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wandn, e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wavg2b, e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wavg2br, e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wavg2h, ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wavg2hr, ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpeqb, e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpeqh, e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpeqw, e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpgtub, e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpgtuh, e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpgtuw, e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpgtsb, e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpgtsh, e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpgtsw, eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wldrb, c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
 cCE(wldrh, c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
 cCE(wldrw, c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
 cCE(wldrd, c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
 cCE(wmacs, e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmacsz, e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmacu, e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmacuz, e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmadds, ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaddu, e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaxsb, e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaxsh, e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaxsw, ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaxub, e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaxuh, e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaxuw, e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wminsb, e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wminsh, e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wminsw, eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wminub, e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wminuh, e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wminuw, e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmov, e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
 cCE(wmulsm, e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulsl, e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulum, e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulul, e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wor, e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wpackhss, e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wpackhus, e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wpackwss, eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wpackwus, e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wpackdss, ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wpackdus, ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wrorh, e700040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wrorhg, e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wrorw, eb00040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wrorwg, eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wrord, ef00040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wrordg, ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wsadb, e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsadbz, e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsadh, e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsadhz, e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wshufh, e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
 cCE(wsllh, e500040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wsllhg, e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wsllw, e900040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wsllwg, e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wslld, ed00040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wslldg, ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wsrah, e400040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wsrahg, e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wsraw, e800040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wsrawg, e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wsrad, ec00040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wsradg, ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wsrlh, e600040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wsrlhg, e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wsrlw, ea00040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wsrlwg, ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wsrld, ee00040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wsrldg, ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wstrb, c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
 cCE(wstrh, c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
 cCE(wstrw, c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
 cCE(wstrd, c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
 cCE(wsubbss, e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsubb, e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsubbus, e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsubhss, e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsubh, e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsubhus, e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsubwss, eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsubw, e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsubwus, e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wunpckehub, e0000c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckehuh, e4000c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckehuw, e8000c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckehsb, e2000c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckehsh, e6000c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckehsw, ea000c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckihb, e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wunpckihh, e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wunpckihw, e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wunpckelub, e0000e0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckeluh, e4000e0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckeluw, e8000e0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckelsb, e2000e0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckelsh, e6000e0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckelsw, ea000e0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckilb, e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wunpckilh, e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wunpckilw, e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wxor, e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wzero, e300000, 1, (RIWR), iwmmxt_wzero),
#undef ARM_VARIANT
#define ARM_VARIANT &arm_cext_iwmmxt2  /* Intel Wireless MMX technology, version 2. */
 cCE(torvscb, e13f190, 1, (RR), iwmmxt_tandorc),
 cCE(torvsch, e53f190, 1, (RR), iwmmxt_tandorc),
 cCE(torvscw, e93f190, 1, (RR), iwmmxt_tandorc),
 cCE(wabsb, e2001c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wabsh, e6001c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wabsw, ea001c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wabsdiffb, e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wabsdiffh, e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wabsdiffw, e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddbhusl, e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddbhusm, e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddhc, e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddwc, ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddsubhx, ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wavg4, e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wavg4r, e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaddsn, ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaddsx, eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaddun, ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaddux, e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmerge, e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
 cCE(wmiabb, e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiabt, e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiatb, e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiatt, e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiabbn, e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiabtn, e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiatbn, e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiattn, e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiawbb, e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiawbt, e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiawtb, ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiawtt, eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiawbbn, ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiawbtn, ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiawtbn, ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiawttn, ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulsmr, ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulumr, ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulwumr, ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulwsmr, ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulwum, ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulwsm, ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulwl, eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmiabb, e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmiabt, e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmiatb, ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmiatt, eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmiabbn, ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmiabtn, ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmiatbn, ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmiattn, ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmulm, e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmulmr, e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmulwm, ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmulwmr, ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsubaddhx, ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16464 #define ARM_VARIANT &arm_cext_maverick /* Cirrus Maverick instructions. */
16465 cCE(cfldrs
, c100400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
16466 cCE(cfldrd
, c500400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
16467 cCE(cfldr32
, c100500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
16468 cCE(cfldr64
, c500500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
16469 cCE(cfstrs
, c000400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
16470 cCE(cfstrd
, c400400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
16471 cCE(cfstr32
, c000500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
16472 cCE(cfstr64
, c400500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
16473 cCE(cfmvsr
, e000450
, 2, (RMF
, RR
), rn_rd
),
16474 cCE(cfmvrs
, e100450
, 2, (RR
, RMF
), rd_rn
),
16475 cCE(cfmvdlr
, e000410
, 2, (RMD
, RR
), rn_rd
),
16476 cCE(cfmvrdl
, e100410
, 2, (RR
, RMD
), rd_rn
),
16477 cCE(cfmvdhr
, e000430
, 2, (RMD
, RR
), rn_rd
),
16478 cCE(cfmvrdh
, e100430
, 2, (RR
, RMD
), rd_rn
),
16479 cCE(cfmv64lr
, e000510
, 2, (RMDX
, RR
), rn_rd
),
16480 cCE(cfmvr64l
, e100510
, 2, (RR
, RMDX
), rd_rn
),
16481 cCE(cfmv64hr
, e000530
, 2, (RMDX
, RR
), rn_rd
),
16482 cCE(cfmvr64h
, e100530
, 2, (RR
, RMDX
), rd_rn
),
16483 cCE(cfmval32
, e200440
, 2, (RMAX
, RMFX
), rd_rn
),
16484 cCE(cfmv32al
, e100440
, 2, (RMFX
, RMAX
), rd_rn
),
16485 cCE(cfmvam32
, e200460
, 2, (RMAX
, RMFX
), rd_rn
),
16486 cCE(cfmv32am
, e100460
, 2, (RMFX
, RMAX
), rd_rn
),
16487 cCE(cfmvah32
, e200480
, 2, (RMAX
, RMFX
), rd_rn
),
16488 cCE(cfmv32ah
, e100480
, 2, (RMFX
, RMAX
), rd_rn
),
16489 cCE(cfmva32
, e2004a0
, 2, (RMAX
, RMFX
), rd_rn
),
16490 cCE(cfmv32a
, e1004a0
, 2, (RMFX
, RMAX
), rd_rn
),
16491 cCE(cfmva64
, e2004c0
, 2, (RMAX
, RMDX
), rd_rn
),
16492 cCE(cfmv64a
, e1004c0
, 2, (RMDX
, RMAX
), rd_rn
),
16493 cCE(cfmvsc32
, e2004e0
, 2, (RMDS
, RMDX
), mav_dspsc
),
16494 cCE(cfmv32sc
, e1004e0
, 2, (RMDX
, RMDS
), rd
),
16495 cCE(cfcpys
, e000400
, 2, (RMF
, RMF
), rd_rn
),
16496 cCE(cfcpyd
, e000420
, 2, (RMD
, RMD
), rd_rn
),
16497 cCE(cfcvtsd
, e000460
, 2, (RMD
, RMF
), rd_rn
),
16498 cCE(cfcvtds
, e000440
, 2, (RMF
, RMD
), rd_rn
),
16499 cCE(cfcvt32s
, e000480
, 2, (RMF
, RMFX
), rd_rn
),
16500 cCE(cfcvt32d
, e0004a0
, 2, (RMD
, RMFX
), rd_rn
),
16501 cCE(cfcvt64s
, e0004c0
, 2, (RMF
, RMDX
), rd_rn
),
16502 cCE(cfcvt64d
, e0004e0
, 2, (RMD
, RMDX
), rd_rn
),
16503 cCE(cfcvts32
, e100580
, 2, (RMFX
, RMF
), rd_rn
),
16504 cCE(cfcvtd32
, e1005a0
, 2, (RMFX
, RMD
), rd_rn
),
16505 cCE(cftruncs32
,e1005c0
, 2, (RMFX
, RMF
), rd_rn
),
16506 cCE(cftruncd32
,e1005e0
, 2, (RMFX
, RMD
), rd_rn
),
16507 cCE(cfrshl32
, e000550
, 3, (RMFX
, RMFX
, RR
), mav_triple
),
16508 cCE(cfrshl64
, e000570
, 3, (RMDX
, RMDX
, RR
), mav_triple
),
16509 cCE(cfsh32
, e000500
, 3, (RMFX
, RMFX
, I63s
), mav_shift
),
16510 cCE(cfsh64
, e200500
, 3, (RMDX
, RMDX
, I63s
), mav_shift
),
16511 cCE(cfcmps
, e100490
, 3, (RR
, RMF
, RMF
), rd_rn_rm
),
16512 cCE(cfcmpd
, e1004b0
, 3, (RR
, RMD
, RMD
), rd_rn_rm
),
16513 cCE(cfcmp32
, e100590
, 3, (RR
, RMFX
, RMFX
), rd_rn_rm
),
16514 cCE(cfcmp64
, e1005b0
, 3, (RR
, RMDX
, RMDX
), rd_rn_rm
),
16515 cCE(cfabss
, e300400
, 2, (RMF
, RMF
), rd_rn
),
16516 cCE(cfabsd
, e300420
, 2, (RMD
, RMD
), rd_rn
),
16517 cCE(cfnegs
, e300440
, 2, (RMF
, RMF
), rd_rn
),
16518 cCE(cfnegd
, e300460
, 2, (RMD
, RMD
), rd_rn
),
16519 cCE(cfadds
, e300480
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
16520 cCE(cfaddd
, e3004a0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
16521 cCE(cfsubs
, e3004c0
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
16522 cCE(cfsubd
, e3004e0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
16523 cCE(cfmuls
, e100400
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
16524 cCE(cfmuld
, e100420
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
16525 cCE(cfabs32
, e300500
, 2, (RMFX
, RMFX
), rd_rn
),
16526 cCE(cfabs64
, e300520
, 2, (RMDX
, RMDX
), rd_rn
),
16527 cCE(cfneg32
, e300540
, 2, (RMFX
, RMFX
), rd_rn
),
16528 cCE(cfneg64
, e300560
, 2, (RMDX
, RMDX
), rd_rn
),
16529 cCE(cfadd32
, e300580
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
16530 cCE(cfadd64
, e3005a0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
16531 cCE(cfsub32
, e3005c0
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
16532 cCE(cfsub64
, e3005e0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
16533 cCE(cfmul32
, e100500
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
16534 cCE(cfmul64
, e100520
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
16535 cCE(cfmac32
, e100540
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
16536 cCE(cfmsc32
, e100560
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
16537 cCE(cfmadd32
, e000600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
16538 cCE(cfmsub32
, e100600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
16539 cCE(cfmadda32
, e200600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
16540 cCE(cfmsuba32
, e300600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
16543 #undef THUMB_VARIANT
16570 /* MD interface: bits in the object file. */
16572 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
16573 for use in the a.out file, and stores them in the array pointed to by buf.
16574 This knows about the endian-ness of the target machine and does
16575 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
16576 2 (short) and 4 (long) Floating numbers are put out as a series of
16577 LITTLENUMS (shorts, here at least). */
16580 md_number_to_chars (char * buf
, valueT val
, int n
)
16582 if (target_big_endian
)
16583 number_to_chars_bigendian (buf
, val
, n
);
16585 number_to_chars_littleendian (buf
, val
, n
);
16589 md_chars_to_number (char * buf
, int n
)
16592 unsigned char * where
= (unsigned char *) buf
;
16594 if (target_big_endian
)
16599 result
|= (*where
++ & 255);
16607 result
|= (where
[n
] & 255);
16614 /* MD interface: Sections. */
16616 /* Estimate the size of a frag before relaxing. Assume everything fits in
16620 md_estimate_size_before_relax (fragS
* fragp
,
16621 segT segtype ATTRIBUTE_UNUSED
)
16627 /* Convert a machine dependent frag. */
16630 md_convert_frag (bfd
*abfd
, segT asec ATTRIBUTE_UNUSED
, fragS
*fragp
)
16632 unsigned long insn
;
16633 unsigned long old_op
;
16641 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
16643 old_op
= bfd_get_16(abfd
, buf
);
16644 if (fragp
->fr_symbol
)
16646 exp
.X_op
= O_symbol
;
16647 exp
.X_add_symbol
= fragp
->fr_symbol
;
16651 exp
.X_op
= O_constant
;
16653 exp
.X_add_number
= fragp
->fr_offset
;
16654 opcode
= fragp
->fr_subtype
;
16657 case T_MNEM_ldr_pc
:
16658 case T_MNEM_ldr_pc2
:
16659 case T_MNEM_ldr_sp
:
16660 case T_MNEM_str_sp
:
16667 if (fragp
->fr_var
== 4)
16669 insn
= THUMB_OP32 (opcode
);
16670 if ((old_op
>> 12) == 4 || (old_op
>> 12) == 9)
16672 insn
|= (old_op
& 0x700) << 4;
16676 insn
|= (old_op
& 7) << 12;
16677 insn
|= (old_op
& 0x38) << 13;
16679 insn
|= 0x00000c00;
16680 put_thumb32_insn (buf
, insn
);
16681 reloc_type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
16685 reloc_type
= BFD_RELOC_ARM_THUMB_OFFSET
;
16687 pc_rel
= (opcode
== T_MNEM_ldr_pc2
);
16690 if (fragp
->fr_var
== 4)
16692 insn
= THUMB_OP32 (opcode
);
16693 insn
|= (old_op
& 0xf0) << 4;
16694 put_thumb32_insn (buf
, insn
);
16695 reloc_type
= BFD_RELOC_ARM_T32_ADD_PC12
;
16699 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
16700 exp
.X_add_number
-= 4;
16708 if (fragp
->fr_var
== 4)
16710 int r0off
= (opcode
== T_MNEM_mov
16711 || opcode
== T_MNEM_movs
) ? 0 : 8;
16712 insn
= THUMB_OP32 (opcode
);
16713 insn
= (insn
& 0xe1ffffff) | 0x10000000;
16714 insn
|= (old_op
& 0x700) << r0off
;
16715 put_thumb32_insn (buf
, insn
);
16716 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
16720 reloc_type
= BFD_RELOC_ARM_THUMB_IMM
;
16725 if (fragp
->fr_var
== 4)
16727 insn
= THUMB_OP32(opcode
);
16728 put_thumb32_insn (buf
, insn
);
16729 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
16732 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
16736 if (fragp
->fr_var
== 4)
16738 insn
= THUMB_OP32(opcode
);
16739 insn
|= (old_op
& 0xf00) << 14;
16740 put_thumb32_insn (buf
, insn
);
16741 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
16744 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
16747 case T_MNEM_add_sp
:
16748 case T_MNEM_add_pc
:
16749 case T_MNEM_inc_sp
:
16750 case T_MNEM_dec_sp
:
16751 if (fragp
->fr_var
== 4)
16753 /* ??? Choose between add and addw. */
16754 insn
= THUMB_OP32 (opcode
);
16755 insn
|= (old_op
& 0xf0) << 4;
16756 put_thumb32_insn (buf
, insn
);
16757 if (opcode
== T_MNEM_add_pc
)
16758 reloc_type
= BFD_RELOC_ARM_T32_IMM12
;
16760 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
16763 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
16771 if (fragp
->fr_var
== 4)
16773 insn
= THUMB_OP32 (opcode
);
16774 insn
|= (old_op
& 0xf0) << 4;
16775 insn
|= (old_op
& 0xf) << 16;
16776 put_thumb32_insn (buf
, insn
);
16777 if (insn
& (1 << 20))
16778 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
16780 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
16783 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
16789 fixp
= fix_new_exp (fragp
, fragp
->fr_fix
, fragp
->fr_var
, &exp
, pc_rel
,
16791 fixp
->fx_file
= fragp
->fr_file
;
16792 fixp
->fx_line
= fragp
->fr_line
;
16793 fragp
->fr_fix
+= fragp
->fr_var
;
16796 /* Return the size of a relaxable immediate operand instruction.
16797 SHIFT and SIZE specify the form of the allowable immediate. */
16799 relax_immediate (fragS
*fragp
, int size
, int shift
)
16805 /* ??? Should be able to do better than this. */
16806 if (fragp
->fr_symbol
)
16809 low
= (1 << shift
) - 1;
16810 mask
= (1 << (shift
+ size
)) - (1 << shift
);
16811 offset
= fragp
->fr_offset
;
16812 /* Force misaligned offsets to 32-bit variant. */
16815 if (offset
& ~mask
)
16820 /* Get the address of a symbol during relaxation. */
16822 relaxed_symbol_addr (fragS
*fragp
, long stretch
)
16828 sym
= fragp
->fr_symbol
;
16829 sym_frag
= symbol_get_frag (sym
);
16830 know (S_GET_SEGMENT (sym
) != absolute_section
16831 || sym_frag
== &zero_address_frag
);
16832 addr
= S_GET_VALUE (sym
) + fragp
->fr_offset
;
16834 /* If frag has yet to be reached on this pass, assume it will
16835 move by STRETCH just as we did. If this is not so, it will
16836 be because some frag between grows, and that will force
16840 && sym_frag
->relax_marker
!= fragp
->relax_marker
)
16844 /* Adjust stretch for any alignment frag. Note that if have
16845 been expanding the earlier code, the symbol may be
16846 defined in what appears to be an earlier frag. FIXME:
16847 This doesn't handle the fr_subtype field, which specifies
16848 a maximum number of bytes to skip when doing an
16850 for (f
= fragp
; f
!= NULL
&& f
!= sym_frag
; f
= f
->fr_next
)
16852 if (f
->fr_type
== rs_align
|| f
->fr_type
== rs_align_code
)
16855 stretch
= - ((- stretch
)
16856 & ~ ((1 << (int) f
->fr_offset
) - 1));
16858 stretch
&= ~ ((1 << (int) f
->fr_offset
) - 1);
16870 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
16873 relax_adr (fragS
*fragp
, asection
*sec
, long stretch
)
16878 /* Assume worst case for symbols not known to be in the same section. */
16879 if (!S_IS_DEFINED (fragp
->fr_symbol
)
16880 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
))
16883 val
= relaxed_symbol_addr (fragp
, stretch
);
16884 addr
= fragp
->fr_address
+ fragp
->fr_fix
;
16885 addr
= (addr
+ 4) & ~3;
16886 /* Force misaligned targets to 32-bit variant. */
16890 if (val
< 0 || val
> 1020)
16895 /* Return the size of a relaxable add/sub immediate instruction. */
16897 relax_addsub (fragS
*fragp
, asection
*sec
)
16902 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
16903 op
= bfd_get_16(sec
->owner
, buf
);
16904 if ((op
& 0xf) == ((op
>> 4) & 0xf))
16905 return relax_immediate (fragp
, 8, 0);
16907 return relax_immediate (fragp
, 3, 0);
16911 /* Return the size of a relaxable branch instruction. BITS is the
16912 size of the offset field in the narrow instruction. */
16915 relax_branch (fragS
*fragp
, asection
*sec
, int bits
, long stretch
)
16921 /* Assume worst case for symbols not known to be in the same section. */
16922 if (!S_IS_DEFINED (fragp
->fr_symbol
)
16923 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
))
16926 val
= relaxed_symbol_addr (fragp
, stretch
);
16927 addr
= fragp
->fr_address
+ fragp
->fr_fix
+ 4;
16930 /* Offset is a signed value *2 */
16932 if (val
>= limit
|| val
< -limit
)
16938 /* Relax a machine dependent frag. This returns the amount by which
16939 the current size of the frag should change. */
16942 arm_relax_frag (asection
*sec
, fragS
*fragp
, long stretch
)
16947 oldsize
= fragp
->fr_var
;
16948 switch (fragp
->fr_subtype
)
16950 case T_MNEM_ldr_pc2
:
16951 newsize
= relax_adr (fragp
, sec
, stretch
);
16953 case T_MNEM_ldr_pc
:
16954 case T_MNEM_ldr_sp
:
16955 case T_MNEM_str_sp
:
16956 newsize
= relax_immediate (fragp
, 8, 2);
16960 newsize
= relax_immediate (fragp
, 5, 2);
16964 newsize
= relax_immediate (fragp
, 5, 1);
16968 newsize
= relax_immediate (fragp
, 5, 0);
16971 newsize
= relax_adr (fragp
, sec
, stretch
);
16977 newsize
= relax_immediate (fragp
, 8, 0);
16980 newsize
= relax_branch (fragp
, sec
, 11, stretch
);
16983 newsize
= relax_branch (fragp
, sec
, 8, stretch
);
16985 case T_MNEM_add_sp
:
16986 case T_MNEM_add_pc
:
16987 newsize
= relax_immediate (fragp
, 8, 2);
16989 case T_MNEM_inc_sp
:
16990 case T_MNEM_dec_sp
:
16991 newsize
= relax_immediate (fragp
, 7, 2);
16997 newsize
= relax_addsub (fragp
, sec
);
17003 fragp
->fr_var
= newsize
;
17004 /* Freeze wide instructions that are at or before the same location as
17005 in the previous pass. This avoids infinite loops.
17006 Don't freeze them unconditionally because targets may be artificially
17007 misaligned by the expansion of preceding frags. */
17008 if (stretch
<= 0 && newsize
> 2)
17010 md_convert_frag (sec
->owner
, sec
, fragp
);
17014 return newsize
- oldsize
;
17017 /* Round up a section size to the appropriate boundary. */
17020 md_section_align (segT segment ATTRIBUTE_UNUSED
,
17023 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
17024 if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
)
17026 /* For a.out, force the section size to be aligned. If we don't do
17027 this, BFD will align it for us, but it will not write out the
17028 final bytes of the section. This may be a bug in BFD, but it is
17029 easier to fix it here since that is how the other a.out targets
17033 align
= bfd_get_section_alignment (stdoutput
, segment
);
17034 size
= ((size
+ (1 << align
) - 1) & ((valueT
) -1 << align
));
17041 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
17042 of an rs_align_code fragment. */
17045 arm_handle_align (fragS
* fragP
)
17047 static char const arm_noop
[4] = { 0x00, 0x00, 0xa0, 0xe1 };
17048 static char const thumb_noop
[2] = { 0xc0, 0x46 };
17049 static char const arm_bigend_noop
[4] = { 0xe1, 0xa0, 0x00, 0x00 };
17050 static char const thumb_bigend_noop
[2] = { 0x46, 0xc0 };
17052 int bytes
, fix
, noop_size
;
17056 if (fragP
->fr_type
!= rs_align_code
)
17059 bytes
= fragP
->fr_next
->fr_address
- fragP
->fr_address
- fragP
->fr_fix
;
17060 p
= fragP
->fr_literal
+ fragP
->fr_fix
;
17063 if (bytes
> MAX_MEM_FOR_RS_ALIGN_CODE
)
17064 bytes
&= MAX_MEM_FOR_RS_ALIGN_CODE
;
17066 if (fragP
->tc_frag_data
)
17068 if (target_big_endian
)
17069 noop
= thumb_bigend_noop
;
17072 noop_size
= sizeof (thumb_noop
);
17076 if (target_big_endian
)
17077 noop
= arm_bigend_noop
;
17080 noop_size
= sizeof (arm_noop
);
17083 if (bytes
& (noop_size
- 1))
17085 fix
= bytes
& (noop_size
- 1);
17086 memset (p
, 0, fix
);
17091 while (bytes
>= noop_size
)
17093 memcpy (p
, noop
, noop_size
);
17095 bytes
-= noop_size
;
17099 fragP
->fr_fix
+= fix
;
17100 fragP
->fr_var
= noop_size
;
17103 /* Called from md_do_align. Used to create an alignment
17104 frag in a code section. */
17107 arm_frag_align_code (int n
, int max
)
17111 /* We assume that there will never be a requirement
17112 to support alignments greater than 32 bytes. */
17113 if (max
> MAX_MEM_FOR_RS_ALIGN_CODE
)
17114 as_fatal (_("alignments greater than 32 bytes not supported in .text sections."));
17116 p
= frag_var (rs_align_code
,
17117 MAX_MEM_FOR_RS_ALIGN_CODE
,
17119 (relax_substateT
) max
,
17126 /* Perform target specific initialisation of a frag. */
17129 arm_init_frag (fragS
* fragP
)
17131 /* Record whether this frag is in an ARM or a THUMB area. */
17132 fragP
->tc_frag_data
= thumb_mode
;
17136 /* When we change sections we need to issue a new mapping symbol. */
17139 arm_elf_change_section (void)
17142 segment_info_type
*seginfo
;
17144 /* Link an unlinked unwind index table section to the .text section. */
17145 if (elf_section_type (now_seg
) == SHT_ARM_EXIDX
17146 && elf_linked_to_section (now_seg
) == NULL
)
17147 elf_linked_to_section (now_seg
) = text_section
;
17149 if (!SEG_NORMAL (now_seg
))
17152 flags
= bfd_get_section_flags (stdoutput
, now_seg
);
17154 /* We can ignore sections that only contain debug info. */
17155 if ((flags
& SEC_ALLOC
) == 0)
17158 seginfo
= seg_info (now_seg
);
17159 mapstate
= seginfo
->tc_segment_info_data
.mapstate
;
17160 marked_pr_dependency
= seginfo
->tc_segment_info_data
.marked_pr_dependency
;
17164 arm_elf_section_type (const char * str
, size_t len
)
17166 if (len
== 5 && strncmp (str
, "exidx", 5) == 0)
17167 return SHT_ARM_EXIDX
;
17172 /* Code to deal with unwinding tables. */
17174 static void add_unwind_adjustsp (offsetT
);
17176 /* Generate any deferred unwind frame offset. */
17179 flush_pending_unwind (void)
17183 offset
= unwind
.pending_offset
;
17184 unwind
.pending_offset
= 0;
17186 add_unwind_adjustsp (offset
);
17189 /* Add an opcode to this list for this function. Two-byte opcodes should
17190 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
17194 add_unwind_opcode (valueT op
, int length
)
17196 /* Add any deferred stack adjustment. */
17197 if (unwind
.pending_offset
)
17198 flush_pending_unwind ();
17200 unwind
.sp_restored
= 0;
17202 if (unwind
.opcode_count
+ length
> unwind
.opcode_alloc
)
17204 unwind
.opcode_alloc
+= ARM_OPCODE_CHUNK_SIZE
;
17205 if (unwind
.opcodes
)
17206 unwind
.opcodes
= xrealloc (unwind
.opcodes
,
17207 unwind
.opcode_alloc
);
17209 unwind
.opcodes
= xmalloc (unwind
.opcode_alloc
);
17214 unwind
.opcodes
[unwind
.opcode_count
] = op
& 0xff;
17216 unwind
.opcode_count
++;
17220 /* Add unwind opcodes to adjust the stack pointer. */
17223 add_unwind_adjustsp (offsetT offset
)
17227 if (offset
> 0x200)
17229 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
17234 /* Long form: 0xb2, uleb128. */
17235 /* This might not fit in a word so add the individual bytes,
17236 remembering the list is built in reverse order. */
17237 o
= (valueT
) ((offset
- 0x204) >> 2);
17239 add_unwind_opcode (0, 1);
17241 /* Calculate the uleb128 encoding of the offset. */
17245 bytes
[n
] = o
& 0x7f;
17251 /* Add the insn. */
17253 add_unwind_opcode (bytes
[n
- 1], 1);
17254 add_unwind_opcode (0xb2, 1);
17256 else if (offset
> 0x100)
17258 /* Two short opcodes. */
17259 add_unwind_opcode (0x3f, 1);
17260 op
= (offset
- 0x104) >> 2;
17261 add_unwind_opcode (op
, 1);
17263 else if (offset
> 0)
17265 /* Short opcode. */
17266 op
= (offset
- 4) >> 2;
17267 add_unwind_opcode (op
, 1);
17269 else if (offset
< 0)
17272 while (offset
> 0x100)
17274 add_unwind_opcode (0x7f, 1);
17277 op
= ((offset
- 4) >> 2) | 0x40;
17278 add_unwind_opcode (op
, 1);
17282 /* Finish the list of unwind opcodes for this function. */
17284 finish_unwind_opcodes (void)
17288 if (unwind
.fp_used
)
17290 /* Adjust sp as necessary. */
17291 unwind
.pending_offset
+= unwind
.fp_offset
- unwind
.frame_size
;
17292 flush_pending_unwind ();
17294 /* After restoring sp from the frame pointer. */
17295 op
= 0x90 | unwind
.fp_reg
;
17296 add_unwind_opcode (op
, 1);
17299 flush_pending_unwind ();
17303 /* Start an exception table entry. If idx is nonzero this is an index table
17307 start_unwind_section (const segT text_seg
, int idx
)
17309 const char * text_name
;
17310 const char * prefix
;
17311 const char * prefix_once
;
17312 const char * group_name
;
17316 size_t sec_name_len
;
17323 prefix
= ELF_STRING_ARM_unwind
;
17324 prefix_once
= ELF_STRING_ARM_unwind_once
;
17325 type
= SHT_ARM_EXIDX
;
17329 prefix
= ELF_STRING_ARM_unwind_info
;
17330 prefix_once
= ELF_STRING_ARM_unwind_info_once
;
17331 type
= SHT_PROGBITS
;
17334 text_name
= segment_name (text_seg
);
17335 if (streq (text_name
, ".text"))
17338 if (strncmp (text_name
, ".gnu.linkonce.t.",
17339 strlen (".gnu.linkonce.t.")) == 0)
17341 prefix
= prefix_once
;
17342 text_name
+= strlen (".gnu.linkonce.t.");
17345 prefix_len
= strlen (prefix
);
17346 text_len
= strlen (text_name
);
17347 sec_name_len
= prefix_len
+ text_len
;
17348 sec_name
= xmalloc (sec_name_len
+ 1);
17349 memcpy (sec_name
, prefix
, prefix_len
);
17350 memcpy (sec_name
+ prefix_len
, text_name
, text_len
);
17351 sec_name
[prefix_len
+ text_len
] = '\0';
17357 /* Handle COMDAT group. */
17358 if (prefix
!= prefix_once
&& (text_seg
->flags
& SEC_LINK_ONCE
) != 0)
17360 group_name
= elf_group_name (text_seg
);
17361 if (group_name
== NULL
)
17363 as_bad (_("Group section `%s' has no group signature"),
17364 segment_name (text_seg
));
17365 ignore_rest_of_line ();
17368 flags
|= SHF_GROUP
;
17372 obj_elf_change_section (sec_name
, type
, flags
, 0, group_name
, linkonce
, 0);
17374 /* Set the section link for index tables. */
17376 elf_linked_to_section (now_seg
) = text_seg
;
17380 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
17381 personality routine data. Returns zero, or the index table value for
17382 and inline entry. */
17385 create_unwind_entry (int have_data
)
17390 /* The current word of data. */
17392 /* The number of bytes left in this word. */
17395 finish_unwind_opcodes ();
17397 /* Remember the current text section. */
17398 unwind
.saved_seg
= now_seg
;
17399 unwind
.saved_subseg
= now_subseg
;
17401 start_unwind_section (now_seg
, 0);
17403 if (unwind
.personality_routine
== NULL
)
17405 if (unwind
.personality_index
== -2)
17408 as_bad (_("handlerdata in cantunwind frame"));
17409 return 1; /* EXIDX_CANTUNWIND. */
17412 /* Use a default personality routine if none is specified. */
17413 if (unwind
.personality_index
== -1)
17415 if (unwind
.opcode_count
> 3)
17416 unwind
.personality_index
= 1;
17418 unwind
.personality_index
= 0;
17421 /* Space for the personality routine entry. */
17422 if (unwind
.personality_index
== 0)
17424 if (unwind
.opcode_count
> 3)
17425 as_bad (_("too many unwind opcodes for personality routine 0"));
17429 /* All the data is inline in the index table. */
17432 while (unwind
.opcode_count
> 0)
17434 unwind
.opcode_count
--;
17435 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
17439 /* Pad with "finish" opcodes. */
17441 data
= (data
<< 8) | 0xb0;
17448 /* We get two opcodes "free" in the first word. */
17449 size
= unwind
.opcode_count
- 2;
17452 /* An extra byte is required for the opcode count. */
17453 size
= unwind
.opcode_count
+ 1;
17455 size
= (size
+ 3) >> 2;
17457 as_bad (_("too many unwind opcodes"));
17459 frag_align (2, 0, 0);
17460 record_alignment (now_seg
, 2);
17461 unwind
.table_entry
= expr_build_dot ();
17463 /* Allocate the table entry. */
17464 ptr
= frag_more ((size
<< 2) + 4);
17465 where
= frag_now_fix () - ((size
<< 2) + 4);
17467 switch (unwind
.personality_index
)
17470 /* ??? Should this be a PLT generating relocation? */
17471 /* Custom personality routine. */
17472 fix_new (frag_now
, where
, 4, unwind
.personality_routine
, 0, 1,
17473 BFD_RELOC_ARM_PREL31
);
17478 /* Set the first byte to the number of additional words. */
17483 /* ABI defined personality routines. */
17485 /* Three opcodes bytes are packed into the first word. */
17492 /* The size and first two opcode bytes go in the first word. */
17493 data
= ((0x80 + unwind
.personality_index
) << 8) | size
;
17498 /* Should never happen. */
17502 /* Pack the opcodes into words (MSB first), reversing the list at the same
17504 while (unwind
.opcode_count
> 0)
17508 md_number_to_chars (ptr
, data
, 4);
17513 unwind
.opcode_count
--;
17515 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
17518 /* Finish off the last word. */
17521 /* Pad with "finish" opcodes. */
17523 data
= (data
<< 8) | 0xb0;
17525 md_number_to_chars (ptr
, data
, 4);
17530 /* Add an empty descriptor if there is no user-specified data. */
17531 ptr
= frag_more (4);
17532 md_number_to_chars (ptr
, 0, 4);
17539 /* Initialize the DWARF-2 unwind information for this procedure. */
17542 tc_arm_frame_initial_instructions (void)
17544 cfi_add_CFA_def_cfa (REG_SP
, 0);
17546 #endif /* OBJ_ELF */
17548 /* Convert REGNAME to a DWARF-2 register number. */
17551 tc_arm_regname_to_dw2regnum (char *regname
)
17553 int reg
= arm_reg_parse (®name
, REG_TYPE_RN
);
17563 tc_pe_dwarf2_emit_offset (symbolS
*symbol
, unsigned int size
)
17567 expr
.X_op
= O_secrel
;
17568 expr
.X_add_symbol
= symbol
;
17569 expr
.X_add_number
= 0;
17570 emit_expr (&expr
, size
);
17574 /* MD interface: Symbol and relocation handling. */
17576 /* Return the address within the segment that a PC-relative fixup is
17577 relative to. For ARM, PC-relative fixups applied to instructions
17578 are generally relative to the location of the fixup plus 8 bytes.
17579 Thumb branches are offset by 4, and Thumb loads relative to PC
17580 require special handling. */
17583 md_pcrel_from_section (fixS
* fixP
, segT seg
)
17585 offsetT base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
17587 /* If this is pc-relative and we are going to emit a relocation
17588 then we just want to put out any pipeline compensation that the linker
17589 will need. Otherwise we want to use the calculated base.
17590 For WinCE we skip the bias for externals as well, since this
17591 is how the MS ARM-CE assembler behaves and we want to be compatible. */
17593 && ((fixP
->fx_addsy
&& S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
17594 || (arm_force_relocation (fixP
)
17596 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
17601 switch (fixP
->fx_r_type
)
17603 /* PC relative addressing on the Thumb is slightly odd as the
17604 bottom two bits of the PC are forced to zero for the
17605 calculation. This happens *after* application of the
17606 pipeline offset. However, Thumb adrl already adjusts for
17607 this, so we need not do it again. */
17608 case BFD_RELOC_ARM_THUMB_ADD
:
17611 case BFD_RELOC_ARM_THUMB_OFFSET
:
17612 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
17613 case BFD_RELOC_ARM_T32_ADD_PC12
:
17614 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
17615 return (base
+ 4) & ~3;
17617 /* Thumb branches are simply offset by +4. */
17618 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
17619 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
17620 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
17621 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
17622 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
17623 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
17624 case BFD_RELOC_THUMB_PCREL_BLX
:
17627 /* ARM mode branches are offset by +8. However, the Windows CE
17628 loader expects the relocation not to take this into account. */
17629 case BFD_RELOC_ARM_PCREL_BRANCH
:
17630 case BFD_RELOC_ARM_PCREL_CALL
:
17631 case BFD_RELOC_ARM_PCREL_JUMP
:
17632 case BFD_RELOC_ARM_PCREL_BLX
:
17633 case BFD_RELOC_ARM_PLT32
:
17635 /* When handling fixups immediately, because we have already
17636 discovered the value of a symbol, or the address of the frag involved
17637 we must account for the offset by +8, as the OS loader will never see the reloc.
17638 see fixup_segment() in write.c
17639 The S_IS_EXTERNAL test handles the case of global symbols.
17640 Those need the calculated base, not just the pipe compensation the linker will need. */
17642 && fixP
->fx_addsy
!= NULL
17643 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
17644 && (S_IS_EXTERNAL (fixP
->fx_addsy
) || !arm_force_relocation (fixP
)))
17651 /* ARM mode loads relative to PC are also offset by +8. Unlike
17652 branches, the Windows CE loader *does* expect the relocation
17653 to take this into account. */
17654 case BFD_RELOC_ARM_OFFSET_IMM
:
17655 case BFD_RELOC_ARM_OFFSET_IMM8
:
17656 case BFD_RELOC_ARM_HWLITERAL
:
17657 case BFD_RELOC_ARM_LITERAL
:
17658 case BFD_RELOC_ARM_CP_OFF_IMM
:
17662 /* Other PC-relative relocations are un-offset. */
17668 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
17669 Otherwise we have no need to default values of symbols. */
17672 md_undefined_symbol (char * name ATTRIBUTE_UNUSED
)
17675 if (name
[0] == '_' && name
[1] == 'G'
17676 && streq (name
, GLOBAL_OFFSET_TABLE_NAME
))
17680 if (symbol_find (name
))
17681 as_bad (_("GOT already in the symbol table"));
17683 GOT_symbol
= symbol_new (name
, undefined_section
,
17684 (valueT
) 0, & zero_address_frag
);
17694 /* Subroutine of md_apply_fix. Check to see if an immediate can be
17695 computed as two separate immediate values, added together. We
17696 already know that this value cannot be computed by just one ARM
17699 static unsigned int
17700 validate_immediate_twopart (unsigned int val
,
17701 unsigned int * highpart
)
17706 for (i
= 0; i
< 32; i
+= 2)
17707 if (((a
= rotate_left (val
, i
)) & 0xff) != 0)
17713 * highpart
= (a
>> 8) | ((i
+ 24) << 7);
17715 else if (a
& 0xff0000)
17717 if (a
& 0xff000000)
17719 * highpart
= (a
>> 16) | ((i
+ 16) << 7);
17723 assert (a
& 0xff000000);
17724 * highpart
= (a
>> 24) | ((i
+ 8) << 7);
17727 return (a
& 0xff) | (i
<< 7);
17734 validate_offset_imm (unsigned int val
, int hwse
)
17736 if ((hwse
&& val
> 255) || val
> 4095)
17741 /* Subroutine of md_apply_fix. Do those data_ops which can take a
17742 negative immediate constant by altering the instruction. A bit of
17747 by inverting the second operand, and
17750 by negating the second operand. */
17753 negate_data_op (unsigned long * instruction
,
17754 unsigned long value
)
17757 unsigned long negated
, inverted
;
17759 negated
= encode_arm_immediate (-value
);
17760 inverted
= encode_arm_immediate (~value
);
17762 op
= (*instruction
>> DATA_OP_SHIFT
) & 0xf;
17765 /* First negates. */
17766 case OPCODE_SUB
: /* ADD <-> SUB */
17767 new_inst
= OPCODE_ADD
;
17772 new_inst
= OPCODE_SUB
;
17776 case OPCODE_CMP
: /* CMP <-> CMN */
17777 new_inst
= OPCODE_CMN
;
17782 new_inst
= OPCODE_CMP
;
17786 /* Now Inverted ops. */
17787 case OPCODE_MOV
: /* MOV <-> MVN */
17788 new_inst
= OPCODE_MVN
;
17793 new_inst
= OPCODE_MOV
;
17797 case OPCODE_AND
: /* AND <-> BIC */
17798 new_inst
= OPCODE_BIC
;
17803 new_inst
= OPCODE_AND
;
17807 case OPCODE_ADC
: /* ADC <-> SBC */
17808 new_inst
= OPCODE_SBC
;
17813 new_inst
= OPCODE_ADC
;
17817 /* We cannot do anything. */
17822 if (value
== (unsigned) FAIL
)
17825 *instruction
&= OPCODE_MASK
;
17826 *instruction
|= new_inst
<< DATA_OP_SHIFT
;
17830 /* Like negate_data_op, but for Thumb-2. */
17832 static unsigned int
17833 thumb32_negate_data_op (offsetT
*instruction
, unsigned int value
)
17837 unsigned int negated
, inverted
;
17839 negated
= encode_thumb32_immediate (-value
);
17840 inverted
= encode_thumb32_immediate (~value
);
17842 rd
= (*instruction
>> 8) & 0xf;
17843 op
= (*instruction
>> T2_DATA_OP_SHIFT
) & 0xf;
17846 /* ADD <-> SUB. Includes CMP <-> CMN. */
17847 case T2_OPCODE_SUB
:
17848 new_inst
= T2_OPCODE_ADD
;
17852 case T2_OPCODE_ADD
:
17853 new_inst
= T2_OPCODE_SUB
;
17857 /* ORR <-> ORN. Includes MOV <-> MVN. */
17858 case T2_OPCODE_ORR
:
17859 new_inst
= T2_OPCODE_ORN
;
17863 case T2_OPCODE_ORN
:
17864 new_inst
= T2_OPCODE_ORR
;
17868 /* AND <-> BIC. TST has no inverted equivalent. */
17869 case T2_OPCODE_AND
:
17870 new_inst
= T2_OPCODE_BIC
;
17877 case T2_OPCODE_BIC
:
17878 new_inst
= T2_OPCODE_AND
;
17883 case T2_OPCODE_ADC
:
17884 new_inst
= T2_OPCODE_SBC
;
17888 case T2_OPCODE_SBC
:
17889 new_inst
= T2_OPCODE_ADC
;
17893 /* We cannot do anything. */
17898 if (value
== (unsigned int)FAIL
)
17901 *instruction
&= T2_OPCODE_MASK
;
17902 *instruction
|= new_inst
<< T2_DATA_OP_SHIFT
;
17906 /* Read a 32-bit thumb instruction from buf. */
17907 static unsigned long
17908 get_thumb32_insn (char * buf
)
17910 unsigned long insn
;
17911 insn
= md_chars_to_number (buf
, THUMB_SIZE
) << 16;
17912 insn
|= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
17918 /* We usually want to set the low bit on the address of thumb function
17919 symbols. In particular .word foo - . should have the low bit set.
17920 Generic code tries to fold the difference of two symbols to
17921 a constant. Prevent this and force a relocation when the first symbols
17922 is a thumb function. */
17924 arm_optimize_expr (expressionS
*l
, operatorT op
, expressionS
*r
)
17926 if (op
== O_subtract
17927 && l
->X_op
== O_symbol
17928 && r
->X_op
== O_symbol
17929 && THUMB_IS_FUNC (l
->X_add_symbol
))
17931 l
->X_op
= O_subtract
;
17932 l
->X_op_symbol
= r
->X_add_symbol
;
17933 l
->X_add_number
-= r
->X_add_number
;
17936 /* Process as normal. */
17941 md_apply_fix (fixS
* fixP
,
17945 offsetT value
= * valP
;
17947 unsigned int newimm
;
17948 unsigned long temp
;
17950 char * buf
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
17952 assert (fixP
->fx_r_type
<= BFD_RELOC_UNUSED
);
17954 /* Note whether this will delete the relocation. */
17956 if (fixP
->fx_addsy
== 0 && !fixP
->fx_pcrel
)
17959 /* On a 64-bit host, silently truncate 'value' to 32 bits for
17960 consistency with the behaviour on 32-bit hosts. Remember value
17962 value
&= 0xffffffff;
17963 value
^= 0x80000000;
17964 value
-= 0x80000000;
17967 fixP
->fx_addnumber
= value
;
17969 /* Same treatment for fixP->fx_offset. */
17970 fixP
->fx_offset
&= 0xffffffff;
17971 fixP
->fx_offset
^= 0x80000000;
17972 fixP
->fx_offset
-= 0x80000000;
17974 switch (fixP
->fx_r_type
)
17976 case BFD_RELOC_NONE
:
17977 /* This will need to go in the object file. */
17981 case BFD_RELOC_ARM_IMMEDIATE
:
17982 /* We claim that this fixup has been processed here,
17983 even if in fact we generate an error because we do
17984 not have a reloc for it, so tc_gen_reloc will reject it. */
17988 && ! S_IS_DEFINED (fixP
->fx_addsy
))
17990 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
17991 _("undefined symbol %s used as an immediate value"),
17992 S_GET_NAME (fixP
->fx_addsy
));
17996 newimm
= encode_arm_immediate (value
);
17997 temp
= md_chars_to_number (buf
, INSN_SIZE
);
17999 /* If the instruction will fail, see if we can fix things up by
18000 changing the opcode. */
18001 if (newimm
== (unsigned int) FAIL
18002 && (newimm
= negate_data_op (&temp
, value
)) == (unsigned int) FAIL
)
18004 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18005 _("invalid constant (%lx) after fixup"),
18006 (unsigned long) value
);
18010 newimm
|= (temp
& 0xfffff000);
18011 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
18014 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
18016 unsigned int highpart
= 0;
18017 unsigned int newinsn
= 0xe1a00000; /* nop. */
18019 newimm
= encode_arm_immediate (value
);
18020 temp
= md_chars_to_number (buf
, INSN_SIZE
);
18022 /* If the instruction will fail, see if we can fix things up by
18023 changing the opcode. */
18024 if (newimm
== (unsigned int) FAIL
18025 && (newimm
= negate_data_op (& temp
, value
)) == (unsigned int) FAIL
)
18027 /* No ? OK - try using two ADD instructions to generate
18029 newimm
= validate_immediate_twopart (value
, & highpart
);
18031 /* Yes - then make sure that the second instruction is
18033 if (newimm
!= (unsigned int) FAIL
)
18035 /* Still No ? Try using a negated value. */
18036 else if ((newimm
= validate_immediate_twopart (- value
, & highpart
)) != (unsigned int) FAIL
)
18037 temp
= newinsn
= (temp
& OPCODE_MASK
) | OPCODE_SUB
<< DATA_OP_SHIFT
;
18038 /* Otherwise - give up. */
18041 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18042 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
18047 /* Replace the first operand in the 2nd instruction (which
18048 is the PC) with the destination register. We have
18049 already added in the PC in the first instruction and we
18050 do not want to do it again. */
18051 newinsn
&= ~ 0xf0000;
18052 newinsn
|= ((newinsn
& 0x0f000) << 4);
18055 newimm
|= (temp
& 0xfffff000);
18056 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
18058 highpart
|= (newinsn
& 0xfffff000);
18059 md_number_to_chars (buf
+ INSN_SIZE
, (valueT
) highpart
, INSN_SIZE
);
18063 case BFD_RELOC_ARM_OFFSET_IMM
:
18064 if (!fixP
->fx_done
&& seg
->use_rela_p
)
18067 case BFD_RELOC_ARM_LITERAL
:
18073 if (validate_offset_imm (value
, 0) == FAIL
)
18075 if (fixP
->fx_r_type
== BFD_RELOC_ARM_LITERAL
)
18076 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18077 _("invalid literal constant: pool needs to be closer"));
18079 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18080 _("bad immediate value for offset (%ld)"),
18085 newval
= md_chars_to_number (buf
, INSN_SIZE
);
18086 newval
&= 0xff7ff000;
18087 newval
|= value
| (sign
? INDEX_UP
: 0);
18088 md_number_to_chars (buf
, newval
, INSN_SIZE
);
18091 case BFD_RELOC_ARM_OFFSET_IMM8
:
18092 case BFD_RELOC_ARM_HWLITERAL
:
18098 if (validate_offset_imm (value
, 1) == FAIL
)
18100 if (fixP
->fx_r_type
== BFD_RELOC_ARM_HWLITERAL
)
18101 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18102 _("invalid literal constant: pool needs to be closer"));
18104 as_bad (_("bad immediate value for 8-bit offset (%ld)"),
18109 newval
= md_chars_to_number (buf
, INSN_SIZE
);
18110 newval
&= 0xff7ff0f0;
18111 newval
|= ((value
>> 4) << 8) | (value
& 0xf) | (sign
? INDEX_UP
: 0);
18112 md_number_to_chars (buf
, newval
, INSN_SIZE
);
18115 case BFD_RELOC_ARM_T32_OFFSET_U8
:
18116 if (value
< 0 || value
> 1020 || value
% 4 != 0)
18117 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18118 _("bad immediate value for offset (%ld)"), (long) value
);
18121 newval
= md_chars_to_number (buf
+2, THUMB_SIZE
);
18123 md_number_to_chars (buf
+2, newval
, THUMB_SIZE
);
18126 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
18127 /* This is a complicated relocation used for all varieties of Thumb32
18128 load/store instruction with immediate offset:
18130 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
18131 *4, optional writeback(W)
18132 (doubleword load/store)
18134 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
18135 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
18136 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
18137 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
18138 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
18140 Uppercase letters indicate bits that are already encoded at
18141 this point. Lowercase letters are our problem. For the
18142 second block of instructions, the secondary opcode nybble
18143 (bits 8..11) is present, and bit 23 is zero, even if this is
18144 a PC-relative operation. */
18145 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
18147 newval
|= md_chars_to_number (buf
+THUMB_SIZE
, THUMB_SIZE
);
18149 if ((newval
& 0xf0000000) == 0xe0000000)
18151 /* Doubleword load/store: 8-bit offset, scaled by 4. */
18153 newval
|= (1 << 23);
18156 if (value
% 4 != 0)
18158 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18159 _("offset not a multiple of 4"));
18165 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18166 _("offset out of range"));
18171 else if ((newval
& 0x000f0000) == 0x000f0000)
18173 /* PC-relative, 12-bit offset. */
18175 newval
|= (1 << 23);
18180 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18181 _("offset out of range"));
18186 else if ((newval
& 0x00000100) == 0x00000100)
18188 /* Writeback: 8-bit, +/- offset. */
18190 newval
|= (1 << 9);
18195 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18196 _("offset out of range"));
18201 else if ((newval
& 0x00000f00) == 0x00000e00)
18203 /* T-instruction: positive 8-bit offset. */
18204 if (value
< 0 || value
> 0xff)
18206 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18207 _("offset out of range"));
18215 /* Positive 12-bit or negative 8-bit offset. */
18219 newval
|= (1 << 23);
18229 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18230 _("offset out of range"));
18237 md_number_to_chars (buf
, (newval
>> 16) & 0xffff, THUMB_SIZE
);
18238 md_number_to_chars (buf
+ THUMB_SIZE
, newval
& 0xffff, THUMB_SIZE
);
18241 case BFD_RELOC_ARM_SHIFT_IMM
:
18242 newval
= md_chars_to_number (buf
, INSN_SIZE
);
18243 if (((unsigned long) value
) > 32
18245 && (((newval
& 0x60) == 0) || (newval
& 0x60) == 0x60)))
18247 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18248 _("shift expression is too large"));
18253 /* Shifts of zero must be done as lsl. */
18255 else if (value
== 32)
18257 newval
&= 0xfffff07f;
18258 newval
|= (value
& 0x1f) << 7;
18259 md_number_to_chars (buf
, newval
, INSN_SIZE
);
18262 case BFD_RELOC_ARM_T32_IMMEDIATE
:
18263 case BFD_RELOC_ARM_T32_ADD_IMM
:
18264 case BFD_RELOC_ARM_T32_IMM12
:
18265 case BFD_RELOC_ARM_T32_ADD_PC12
:
18266 /* We claim that this fixup has been processed here,
18267 even if in fact we generate an error because we do
18268 not have a reloc for it, so tc_gen_reloc will reject it. */
18272 && ! S_IS_DEFINED (fixP
->fx_addsy
))
18274 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18275 _("undefined symbol %s used as an immediate value"),
18276 S_GET_NAME (fixP
->fx_addsy
));
18280 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
18282 newval
|= md_chars_to_number (buf
+2, THUMB_SIZE
);
18285 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
18286 || fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
18288 newimm
= encode_thumb32_immediate (value
);
18289 if (newimm
== (unsigned int) FAIL
)
18290 newimm
= thumb32_negate_data_op (&newval
, value
);
18292 if (fixP
->fx_r_type
!= BFD_RELOC_ARM_T32_IMMEDIATE
18293 && newimm
== (unsigned int) FAIL
)
18295 /* Turn add/sum into addw/subw. */
18296 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
18297 newval
= (newval
& 0xfeffffff) | 0x02000000;
18299 /* 12 bit immediate for addw/subw. */
18303 newval
^= 0x00a00000;
18306 newimm
= (unsigned int) FAIL
;
18311 if (newimm
== (unsigned int)FAIL
)
18313 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18314 _("invalid constant (%lx) after fixup"),
18315 (unsigned long) value
);
18319 newval
|= (newimm
& 0x800) << 15;
18320 newval
|= (newimm
& 0x700) << 4;
18321 newval
|= (newimm
& 0x0ff);
18323 md_number_to_chars (buf
, (valueT
) ((newval
>> 16) & 0xffff), THUMB_SIZE
);
18324 md_number_to_chars (buf
+2, (valueT
) (newval
& 0xffff), THUMB_SIZE
);
18327 case BFD_RELOC_ARM_SMC
:
18328 if (((unsigned long) value
) > 0xffff)
18329 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18330 _("invalid smc expression"));
18331 newval
= md_chars_to_number (buf
, INSN_SIZE
);
18332 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
18333 md_number_to_chars (buf
, newval
, INSN_SIZE
);
18336 case BFD_RELOC_ARM_SWI
:
18337 if (fixP
->tc_fix_data
!= 0)
18339 if (((unsigned long) value
) > 0xff)
18340 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18341 _("invalid swi expression"));
18342 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
18344 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
18348 if (((unsigned long) value
) > 0x00ffffff)
18349 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18350 _("invalid swi expression"));
18351 newval
= md_chars_to_number (buf
, INSN_SIZE
);
18353 md_number_to_chars (buf
, newval
, INSN_SIZE
);
18357 case BFD_RELOC_ARM_MULTI
:
18358 if (((unsigned long) value
) > 0xffff)
18359 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18360 _("invalid expression in load/store multiple"));
18361 newval
= value
| md_chars_to_number (buf
, INSN_SIZE
);
18362 md_number_to_chars (buf
, newval
, INSN_SIZE
);
18366 case BFD_RELOC_ARM_PCREL_CALL
:
18367 newval
= md_chars_to_number (buf
, INSN_SIZE
);
18368 if ((newval
& 0xf0000000) == 0xf0000000)
18372 goto arm_branch_common
;
18374 case BFD_RELOC_ARM_PCREL_JUMP
:
18375 case BFD_RELOC_ARM_PLT32
:
18377 case BFD_RELOC_ARM_PCREL_BRANCH
:
18379 goto arm_branch_common
;
18381 case BFD_RELOC_ARM_PCREL_BLX
:
18384 /* We are going to store value (shifted right by two) in the
18385 instruction, in a 24 bit, signed field. Bits 26 through 32 either
18386 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
18387 also be be clear. */
18389 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18390 _("misaligned branch destination"));
18391 if ((value
& (offsetT
)0xfe000000) != (offsetT
)0
18392 && (value
& (offsetT
)0xfe000000) != (offsetT
)0xfe000000)
18393 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18394 _("branch out of range"));
18396 if (fixP
->fx_done
|| !seg
->use_rela_p
)
18398 newval
= md_chars_to_number (buf
, INSN_SIZE
);
18399 newval
|= (value
>> 2) & 0x00ffffff;
18400 /* Set the H bit on BLX instructions. */
18404 newval
|= 0x01000000;
18406 newval
&= ~0x01000000;
18408 md_number_to_chars (buf
, newval
, INSN_SIZE
);
18412 case BFD_RELOC_THUMB_PCREL_BRANCH7
: /* CBZ */
18413 /* CBZ can only branch forward. */
18415 /* Attempts to use CBZ to branch to the next instruction
18416 (which, strictly speaking, are prohibited) will be turned into
18419 FIXME: It may be better to remove the instruction completely and
18420 perform relaxation. */
18423 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
18424 newval
= 0xbf00; /* NOP encoding T1 */
18425 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
18430 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18431 _("branch out of range"));
18433 if (fixP
->fx_done
|| !seg
->use_rela_p
)
18435 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
18436 newval
|= ((value
& 0x3e) << 2) | ((value
& 0x40) << 3);
18437 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
18442 case BFD_RELOC_THUMB_PCREL_BRANCH9
: /* Conditional branch. */
18443 if ((value
& ~0xff) && ((value
& ~0xff) != ~0xff))
18444 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18445 _("branch out of range"));
18447 if (fixP
->fx_done
|| !seg
->use_rela_p
)
18449 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
18450 newval
|= (value
& 0x1ff) >> 1;
18451 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
18455 case BFD_RELOC_THUMB_PCREL_BRANCH12
: /* Unconditional branch. */
18456 if ((value
& ~0x7ff) && ((value
& ~0x7ff) != ~0x7ff))
18457 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18458 _("branch out of range"));
18460 if (fixP
->fx_done
|| !seg
->use_rela_p
)
18462 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
18463 newval
|= (value
& 0xfff) >> 1;
18464 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
18468 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
18469 if ((value
& ~0x1fffff) && ((value
& ~0x1fffff) != ~0x1fffff))
18470 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18471 _("conditional branch out of range"));
18473 if (fixP
->fx_done
|| !seg
->use_rela_p
)
18476 addressT S
, J1
, J2
, lo
, hi
;
18478 S
= (value
& 0x00100000) >> 20;
18479 J2
= (value
& 0x00080000) >> 19;
18480 J1
= (value
& 0x00040000) >> 18;
18481 hi
= (value
& 0x0003f000) >> 12;
18482 lo
= (value
& 0x00000ffe) >> 1;
18484 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
18485 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
18486 newval
|= (S
<< 10) | hi
;
18487 newval2
|= (J1
<< 13) | (J2
<< 11) | lo
;
18488 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
18489 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
18493 case BFD_RELOC_THUMB_PCREL_BLX
:
18494 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
18495 if ((value
& ~0x3fffff) && ((value
& ~0x3fffff) != ~0x3fffff))
18496 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18497 _("branch out of range"));
18499 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
18500 /* For a BLX instruction, make sure that the relocation is rounded up
18501 to a word boundary. This follows the semantics of the instruction
18502 which specifies that bit 1 of the target address will come from bit
18503 1 of the base address. */
18504 value
= (value
+ 1) & ~ 1;
18506 if (fixP
->fx_done
|| !seg
->use_rela_p
)
18510 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
18511 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
18512 newval
|= (value
& 0x7fffff) >> 12;
18513 newval2
|= (value
& 0xfff) >> 1;
18514 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
18515 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
18519 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
18520 if ((value
& ~0x1ffffff) && ((value
& ~0x1ffffff) != ~0x1ffffff))
18521 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18522 _("branch out of range"));
18524 if (fixP
->fx_done
|| !seg
->use_rela_p
)
18527 addressT S
, I1
, I2
, lo
, hi
;
18529 S
= (value
& 0x01000000) >> 24;
18530 I1
= (value
& 0x00800000) >> 23;
18531 I2
= (value
& 0x00400000) >> 22;
18532 hi
= (value
& 0x003ff000) >> 12;
18533 lo
= (value
& 0x00000ffe) >> 1;
18538 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
18539 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
18540 newval
|= (S
<< 10) | hi
;
18541 newval2
|= (I1
<< 13) | (I2
<< 11) | lo
;
18542 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
18543 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
18548 if (fixP
->fx_done
|| !seg
->use_rela_p
)
18549 md_number_to_chars (buf
, value
, 1);
18553 if (fixP
->fx_done
|| !seg
->use_rela_p
)
18554 md_number_to_chars (buf
, value
, 2);
18558 case BFD_RELOC_ARM_TLS_GD32
:
18559 case BFD_RELOC_ARM_TLS_LE32
:
18560 case BFD_RELOC_ARM_TLS_IE32
:
18561 case BFD_RELOC_ARM_TLS_LDM32
:
18562 case BFD_RELOC_ARM_TLS_LDO32
:
18563 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
18566 case BFD_RELOC_ARM_GOT32
:
18567 case BFD_RELOC_ARM_GOTOFF
:
18568 case BFD_RELOC_ARM_TARGET2
:
18569 if (fixP
->fx_done
|| !seg
->use_rela_p
)
18570 md_number_to_chars (buf
, 0, 4);
18574 case BFD_RELOC_RVA
:
18576 case BFD_RELOC_ARM_TARGET1
:
18577 case BFD_RELOC_ARM_ROSEGREL32
:
18578 case BFD_RELOC_ARM_SBREL32
:
18579 case BFD_RELOC_32_PCREL
:
18581 case BFD_RELOC_32_SECREL
:
18583 if (fixP
->fx_done
|| !seg
->use_rela_p
)
18585 /* For WinCE we only do this for pcrel fixups. */
18586 if (fixP
->fx_done
|| fixP
->fx_pcrel
)
18588 md_number_to_chars (buf
, value
, 4);
18592 case BFD_RELOC_ARM_PREL31
:
18593 if (fixP
->fx_done
|| !seg
->use_rela_p
)
18595 newval
= md_chars_to_number (buf
, 4) & 0x80000000;
18596 if ((value
^ (value
>> 1)) & 0x40000000)
18598 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18599 _("rel31 relocation overflow"));
18601 newval
|= value
& 0x7fffffff;
18602 md_number_to_chars (buf
, newval
, 4);
18607 case BFD_RELOC_ARM_CP_OFF_IMM
:
18608 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
18609 if (value
< -1023 || value
> 1023 || (value
& 3))
18610 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18611 _("co-processor offset out of range"));
18616 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
18617 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
18618 newval
= md_chars_to_number (buf
, INSN_SIZE
);
18620 newval
= get_thumb32_insn (buf
);
18621 newval
&= 0xff7fff00;
18622 newval
|= (value
>> 2) | (sign
? INDEX_UP
: 0);
18623 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
18624 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
18625 md_number_to_chars (buf
, newval
, INSN_SIZE
);
18627 put_thumb32_insn (buf
, newval
);
18630 case BFD_RELOC_ARM_CP_OFF_IMM_S2
:
18631 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
:
18632 if (value
< -255 || value
> 255)
18633 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18634 _("co-processor offset out of range"));
18636 goto cp_off_common
;
18638 case BFD_RELOC_ARM_THUMB_OFFSET
:
18639 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
18640 /* Exactly what ranges, and where the offset is inserted depends
18641 on the type of instruction, we can establish this from the
18643 switch (newval
>> 12)
18645 case 4: /* PC load. */
18646 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
18647 forced to zero for these loads; md_pcrel_from has already
18648 compensated for this. */
18650 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18651 _("invalid offset, target not word aligned (0x%08lX)"),
18652 (((unsigned long) fixP
->fx_frag
->fr_address
18653 + (unsigned long) fixP
->fx_where
) & ~3)
18654 + (unsigned long) value
);
18656 if (value
& ~0x3fc)
18657 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18658 _("invalid offset, value too big (0x%08lX)"),
18661 newval
|= value
>> 2;
18664 case 9: /* SP load/store. */
18665 if (value
& ~0x3fc)
18666 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18667 _("invalid offset, value too big (0x%08lX)"),
18669 newval
|= value
>> 2;
18672 case 6: /* Word load/store. */
18674 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18675 _("invalid offset, value too big (0x%08lX)"),
18677 newval
|= value
<< 4; /* 6 - 2. */
18680 case 7: /* Byte load/store. */
18682 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18683 _("invalid offset, value too big (0x%08lX)"),
18685 newval
|= value
<< 6;
18688 case 8: /* Halfword load/store. */
18690 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18691 _("invalid offset, value too big (0x%08lX)"),
18693 newval
|= value
<< 5; /* 6 - 1. */
18697 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18698 "Unable to process relocation for thumb opcode: %lx",
18699 (unsigned long) newval
);
18702 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
18705 case BFD_RELOC_ARM_THUMB_ADD
:
18706 /* This is a complicated relocation, since we use it for all of
18707 the following immediate relocations:
18711 9bit ADD/SUB SP word-aligned
18712 10bit ADD PC/SP word-aligned
18714 The type of instruction being processed is encoded in the
18721 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
18723 int rd
= (newval
>> 4) & 0xf;
18724 int rs
= newval
& 0xf;
18725 int subtract
= !!(newval
& 0x8000);
18727 /* Check for HI regs, only very restricted cases allowed:
18728 Adjusting SP, and using PC or SP to get an address. */
18729 if ((rd
> 7 && (rd
!= REG_SP
|| rs
!= REG_SP
))
18730 || (rs
> 7 && rs
!= REG_SP
&& rs
!= REG_PC
))
18731 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18732 _("invalid Hi register with immediate"));
18734 /* If value is negative, choose the opposite instruction. */
18738 subtract
= !subtract
;
18740 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18741 _("immediate value out of range"));
18746 if (value
& ~0x1fc)
18747 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18748 _("invalid immediate for stack address calculation"));
18749 newval
= subtract
? T_OPCODE_SUB_ST
: T_OPCODE_ADD_ST
;
18750 newval
|= value
>> 2;
18752 else if (rs
== REG_PC
|| rs
== REG_SP
)
18754 if (subtract
|| value
& ~0x3fc)
18755 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18756 _("invalid immediate for address calculation (value = 0x%08lX)"),
18757 (unsigned long) value
);
18758 newval
= (rs
== REG_PC
? T_OPCODE_ADD_PC
: T_OPCODE_ADD_SP
);
18760 newval
|= value
>> 2;
18765 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18766 _("immediate value out of range"));
18767 newval
= subtract
? T_OPCODE_SUB_I8
: T_OPCODE_ADD_I8
;
18768 newval
|= (rd
<< 8) | value
;
18773 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18774 _("immediate value out of range"));
18775 newval
= subtract
? T_OPCODE_SUB_I3
: T_OPCODE_ADD_I3
;
18776 newval
|= rd
| (rs
<< 3) | (value
<< 6);
18779 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
18782 case BFD_RELOC_ARM_THUMB_IMM
:
18783 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
18784 if (value
< 0 || value
> 255)
18785 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18786 _("invalid immediate: %ld is out of range"),
18789 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
    case BFD_RELOC_ARM_THUMB_SHIFT:
      /* 5bit shift value (0..32).  LSL cannot take 32.  */
      newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
      temp = newval & 0xf800;
      if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid shift value: %ld"), (long) value);
      /* Shifts of zero must be encoded as LSL.  */
      if (value == 0)
	newval = (newval & 0x003f) | T_OPCODE_LSL_I;
      /* Shifts of 32 are encoded as zero.  */
      else if (value == 32)
	value = 0;
      else
	newval |= value << 6;
      md_number_to_chars (buf, newval, THUMB_SIZE);
      break;
    case BFD_RELOC_VTABLE_INHERIT:
    case BFD_RELOC_VTABLE_ENTRY:
      fixP->fx_done = 0;
      return;

    case BFD_RELOC_ARM_MOVW:
    case BFD_RELOC_ARM_MOVT:
    case BFD_RELOC_ARM_THUMB_MOVW:
    case BFD_RELOC_ARM_THUMB_MOVT:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  /* REL format relocations are limited to a 16-bit addend.  */
	  if (!fixP->fx_done)
	    {
	      if (value < -0x8000 || value > 0x7fff)
		as_bad_where (fixP->fx_file, fixP->fx_line,
			      _("offset out of range"));
	    }
	  else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
		   || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
	    {
	      value >>= 16;
	    }

	  if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
	      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
	    {
	      newval = get_thumb32_insn (buf);
	      newval &= 0xfbf08f00;
	      newval |= (value & 0xf000) << 4;
	      newval |= (value & 0x0800) << 15;
	      newval |= (value & 0x0700) << 4;
	      newval |= (value & 0x00ff);
	      put_thumb32_insn (buf, newval);
	    }
	  else
	    {
	      newval = md_chars_to_number (buf, 4);
	      newval &= 0xfff0f000;
	      newval |= value & 0x0fff;
	      newval |= (value & 0xf000) << 4;
	      md_number_to_chars (buf, newval, 4);
	    }
	}
      return;
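
      /* For illustration only: how a 16-bit MOVW/MOVT value is scattered
	 into the instruction fields by the code above.  The value 0x1234
	 here is hypothetical.  */
#if 0
      /* ARM encoding: imm4 goes to bits 16-19, imm12 to bits 0-11.  */
      newval = (md_chars_to_number (buf, 4) & 0xfff0f000)
	       | (0x1234 & 0x0fff)		/* imm12 = 0x234 */
	       | ((0x1234 & 0xf000) << 4);	/* imm4  = 0x1   */

      /* Thumb-2 encoding: the same value is split into imm4:i:imm3:imm8.  */
      newval = (get_thumb32_insn (buf) & 0xfbf08f00)
	       | ((0x1234 & 0xf000) << 4)	/* imm4 */
	       | ((0x1234 & 0x0800) << 15)	/* i    */
	       | ((0x1234 & 0x0700) << 4)	/* imm3 */
	       | (0x1234 & 0x00ff);		/* imm8 */
#endif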
    case BFD_RELOC_ARM_ALU_PC_G0_NC:
    case BFD_RELOC_ARM_ALU_PC_G0:
    case BFD_RELOC_ARM_ALU_PC_G1_NC:
    case BFD_RELOC_ARM_ALU_PC_G1:
    case BFD_RELOC_ARM_ALU_PC_G2:
    case BFD_RELOC_ARM_ALU_SB_G0_NC:
    case BFD_RELOC_ARM_ALU_SB_G0:
    case BFD_RELOC_ARM_ALU_SB_G1_NC:
    case BFD_RELOC_ARM_ALU_SB_G1:
    case BFD_RELOC_ARM_ALU_SB_G2:
      assert (!fixP->fx_done);
      if (!seg->use_rela_p)
	{
	  bfd_vma encoded_addend;
	  bfd_vma addend_abs = abs (value);

	  /* Check that the absolute value of the addend can be
	     expressed as an 8-bit constant plus a rotation.  */
	  encoded_addend = encode_arm_immediate (addend_abs);
	  if (encoded_addend == (unsigned int) FAIL)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("the offset 0x%08lX is not representable"),
			  (unsigned long) addend_abs);

	  /* Extract the instruction.  */
	  insn = md_chars_to_number (buf, INSN_SIZE);

	  /* If the addend is positive, use an ADD instruction.
	     Otherwise use a SUB.  Take care not to destroy the S bit.  */
	  insn &= 0xff1fffff;

	  /* Place the encoded addend into the first 12 bits of the
	     instruction.  */
	  insn &= 0xfffff000;
	  insn |= encoded_addend;

	  /* Update the instruction.  */
	  md_number_to_chars (buf, insn, INSN_SIZE);
	}
      break;
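
      /* The group relocations above rely on encode_arm_immediate () to test
	 whether the addend fits the ARM "8-bit constant rotated right by an
	 even amount" immediate format.  A minimal sketch of such a test is
	 shown below for illustration only; it is not the routine used
	 elsewhere in this file.  */
#if 0
static unsigned int
rotated_immediate_sketch (unsigned int val)
{
  unsigned int rot;

  for (rot = 0; rot < 32; rot += 2)
    {
      /* Rotate VAL left by ROT bits and see if the result fits in 8 bits.  */
      unsigned int v = rot == 0 ? val : ((val << rot) | (val >> (32 - rot)));

      if ((v & ~0xffu) == 0)
	/* 12-bit encoded form: rotation/2 in bits 8-11, constant in 0-7.  */
	return v | (rot << 7);
    }
  return FAIL;
}
#endif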
    case BFD_RELOC_ARM_LDR_PC_G0:
    case BFD_RELOC_ARM_LDR_PC_G1:
    case BFD_RELOC_ARM_LDR_PC_G2:
    case BFD_RELOC_ARM_LDR_SB_G0:
    case BFD_RELOC_ARM_LDR_SB_G1:
    case BFD_RELOC_ARM_LDR_SB_G2:
      assert (!fixP->fx_done);
      if (!seg->use_rela_p)
	{
	  bfd_vma addend_abs = abs (value);

	  /* Check that the absolute value of the addend can be
	     encoded in 12 bits.  */
	  if (addend_abs >= 0x1000)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
			  (unsigned long) addend_abs);

	  /* Extract the instruction.  */
	  insn = md_chars_to_number (buf, INSN_SIZE);

	  /* If the addend is negative, clear bit 23 of the instruction.
	     Otherwise set it.  */
	  if (value < 0)
	    insn &= ~(1 << 23);
	  else
	    insn |= 1 << 23;

	  /* Place the absolute value of the addend into the first 12 bits
	     of the instruction.  */
	  insn &= 0xfffff000;
	  insn |= addend_abs;

	  /* Update the instruction.  */
	  md_number_to_chars (buf, insn, INSN_SIZE);
	}
      break;
    case BFD_RELOC_ARM_LDRS_PC_G0:
    case BFD_RELOC_ARM_LDRS_PC_G1:
    case BFD_RELOC_ARM_LDRS_PC_G2:
    case BFD_RELOC_ARM_LDRS_SB_G0:
    case BFD_RELOC_ARM_LDRS_SB_G1:
    case BFD_RELOC_ARM_LDRS_SB_G2:
      assert (!fixP->fx_done);
      if (!seg->use_rela_p)
	{
	  bfd_vma addend_abs = abs (value);

	  /* Check that the absolute value of the addend can be
	     encoded in 8 bits.  */
	  if (addend_abs >= 0x100)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
			  (unsigned long) addend_abs);

	  /* Extract the instruction.  */
	  insn = md_chars_to_number (buf, INSN_SIZE);

	  /* If the addend is negative, clear bit 23 of the instruction.
	     Otherwise set it.  */
	  if (value < 0)
	    insn &= ~(1 << 23);
	  else
	    insn |= 1 << 23;

	  /* Place the first four bits of the absolute value of the addend
	     into the first 4 bits of the instruction, and the remaining
	     four into bits 8 .. 11.  */
	  insn &= 0xfffff0f0;
	  insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);

	  /* Update the instruction.  */
	  md_number_to_chars (buf, insn, INSN_SIZE);
	}
      break;
    case BFD_RELOC_ARM_LDC_PC_G0:
    case BFD_RELOC_ARM_LDC_PC_G1:
    case BFD_RELOC_ARM_LDC_PC_G2:
    case BFD_RELOC_ARM_LDC_SB_G0:
    case BFD_RELOC_ARM_LDC_SB_G1:
    case BFD_RELOC_ARM_LDC_SB_G2:
      assert (!fixP->fx_done);
      if (!seg->use_rela_p)
	{
	  bfd_vma addend_abs = abs (value);

	  /* Check that the absolute value of the addend is a multiple of
	     four and, when divided by four, fits in 8 bits.  */
	  if (addend_abs & 0x3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("bad offset 0x%08lX (must be word-aligned)"),
			  (unsigned long) addend_abs);

	  if ((addend_abs >> 2) > 0xff)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("bad offset 0x%08lX (must be an 8-bit number of words)"),
			  (unsigned long) addend_abs);

	  /* Extract the instruction.  */
	  insn = md_chars_to_number (buf, INSN_SIZE);

	  /* If the addend is negative, clear bit 23 of the instruction.
	     Otherwise set it.  */
	  if (value < 0)
	    insn &= ~(1 << 23);
	  else
	    insn |= 1 << 23;

	  /* Place the addend (divided by four) into the first eight
	     bits of the instruction.  */
	  insn &= 0xfffffff0;
	  insn |= addend_abs >> 2;

	  /* Update the instruction.  */
	  md_number_to_chars (buf, insn, INSN_SIZE);
	}
      break;
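
      /* Worked example for the LDC group handling above (illustrative
	 values only): an addend of -40 gives addend_abs == 40, which is
	 word-aligned, and 40 >> 2 == 10 fits in 8 bits, so 10 is placed in
	 the low bits of the instruction and bit 23 is cleared because the
	 addend was negative.  */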
    case BFD_RELOC_ARM_V4BX:
      /* This will need to go in the object file.  */
      fixP->fx_done = 0;
      break;

    case BFD_RELOC_UNUSED:
    default:
      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("bad relocation fixup type (%d)"), fixP->fx_r_type);
    }
}
/* Translate internal representation of relocation info to BFD target
   format.  */

arelent *
tc_gen_reloc (asection *section, fixS *fixp)
{
  arelent * reloc;
  bfd_reloc_code_real_type code;

  reloc = xmalloc (sizeof (arelent));

  reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;

  if (fixp->fx_pcrel)
    {
      if (section->use_rela_p)
	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
      else
	fixp->fx_offset = reloc->address;
    }
  reloc->addend = fixp->fx_offset;

  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_8:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_8_PCREL;
	  break;
	}

    case BFD_RELOC_16:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_16_PCREL;
	  break;
	}

    case BFD_RELOC_32:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_32_PCREL;
	  break;
	}

    case BFD_RELOC_ARM_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVW_PCREL;
	  break;
	}

    case BFD_RELOC_ARM_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVT_PCREL;
	  break;
	}

    case BFD_RELOC_ARM_THUMB_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
	  break;
	}

    case BFD_RELOC_ARM_THUMB_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
	  break;
	}

    case BFD_RELOC_NONE:
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_RVA:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_THUMB_PCREL_BLX:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
    case BFD_RELOC_32_SECREL:
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_HWLITERAL:
      /* If this is called then a literal has been referenced across
	 a section boundary.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("literal referenced across section boundary"));
      return NULL;

    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_ARM_PREL31:
    case BFD_RELOC_ARM_TARGET2:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_LDO32:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_ALU_PC_G0_NC:
    case BFD_RELOC_ARM_ALU_PC_G0:
    case BFD_RELOC_ARM_ALU_PC_G1_NC:
    case BFD_RELOC_ARM_ALU_PC_G1:
    case BFD_RELOC_ARM_ALU_PC_G2:
    case BFD_RELOC_ARM_LDR_PC_G0:
    case BFD_RELOC_ARM_LDR_PC_G1:
    case BFD_RELOC_ARM_LDR_PC_G2:
    case BFD_RELOC_ARM_LDRS_PC_G0:
    case BFD_RELOC_ARM_LDRS_PC_G1:
    case BFD_RELOC_ARM_LDRS_PC_G2:
    case BFD_RELOC_ARM_LDC_PC_G0:
    case BFD_RELOC_ARM_LDC_PC_G1:
    case BFD_RELOC_ARM_LDC_PC_G2:
    case BFD_RELOC_ARM_ALU_SB_G0_NC:
    case BFD_RELOC_ARM_ALU_SB_G0:
    case BFD_RELOC_ARM_ALU_SB_G1_NC:
    case BFD_RELOC_ARM_ALU_SB_G1:
    case BFD_RELOC_ARM_ALU_SB_G2:
    case BFD_RELOC_ARM_LDR_SB_G0:
    case BFD_RELOC_ARM_LDR_SB_G1:
    case BFD_RELOC_ARM_LDR_SB_G2:
    case BFD_RELOC_ARM_LDRS_SB_G0:
    case BFD_RELOC_ARM_LDRS_SB_G1:
    case BFD_RELOC_ARM_LDRS_SB_G2:
    case BFD_RELOC_ARM_LDC_SB_G0:
    case BFD_RELOC_ARM_LDC_SB_G1:
    case BFD_RELOC_ARM_LDC_SB_G2:
    case BFD_RELOC_ARM_V4BX:
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_LDM32:
      /* BFD will include the symbol's address in the addend.
	 But we don't want that, so subtract it out again here.  */
      if (!S_IS_COMMON (fixp->fx_addsy))
	reloc->addend -= (*reloc->sym_ptr_ptr)->value;
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal relocation (type: IMMEDIATE) not fixed up"));
      return NULL;

    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("ADRL used for a symbol not defined in the same file"));
      return NULL;

    case BFD_RELOC_ARM_OFFSET_IMM:
      if (section->use_rela_p)
	{
	  code = fixp->fx_r_type;
	  break;
	}

      if (fixp->fx_addsy != NULL
	  && !S_IS_DEFINED (fixp->fx_addsy)
	  && S_IS_LOCAL (fixp->fx_addsy))
	{
	  as_bad_where (fixp->fx_file, fixp->fx_line,
			_("undefined local label `%s'"),
			S_GET_NAME (fixp->fx_addsy));
	  return NULL;
	}

      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
      return NULL;

    default:
      {
	char * type;

	switch (fixp->fx_r_type)
	  {
	  case BFD_RELOC_NONE:		     type = "NONE"; break;
	  case BFD_RELOC_ARM_OFFSET_IMM8:    type = "OFFSET_IMM8"; break;
	  case BFD_RELOC_ARM_SHIFT_IMM:	     type = "SHIFT_IMM"; break;
	  case BFD_RELOC_ARM_SMC:	     type = "SMC"; break;
	  case BFD_RELOC_ARM_SWI:	     type = "SWI"; break;
	  case BFD_RELOC_ARM_MULTI:	     type = "MULTI"; break;
	  case BFD_RELOC_ARM_CP_OFF_IMM:     type = "CP_OFF_IMM"; break;
	  case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
	  case BFD_RELOC_ARM_THUMB_ADD:	     type = "THUMB_ADD"; break;
	  case BFD_RELOC_ARM_THUMB_SHIFT:    type = "THUMB_SHIFT"; break;
	  case BFD_RELOC_ARM_THUMB_IMM:	     type = "THUMB_IMM"; break;
	  case BFD_RELOC_ARM_THUMB_OFFSET:   type = "THUMB_OFFSET"; break;
	  default:			     type = _("<unknown>"); break;
	  }
	as_bad_where (fixp->fx_file, fixp->fx_line,
		      _("cannot represent %s relocation in this object file format"),
		      type);
	return NULL;
      }
    }

  if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_ARM_GOTPC;
      reloc->addend = fixp->fx_offset = reloc->address;
    }

  reloc->howto = bfd_reloc_type_lookup (stdoutput, code);

  if (reloc->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent %s relocation in this object file format"),
		    bfd_get_reloc_code_name (code));
      return NULL;
    }

  /* HACK: Since arm ELF uses Rel instead of Rela, encode the
     vtable entry to be used in the relocation's section offset.  */
  if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    reloc->address = fixp->fx_offset;

  return reloc;
}
/* This fix_new is called by cons via TC_CONS_FIX_NEW.	*/

void
cons_fix_new_arm (fragS *	frag,
		  int		where,
		  int		size,
		  expressionS * exp)
{
  bfd_reloc_code_real_type type;
  int pcrel = 0;

  /* Pick a reloc.
     FIXME: @@ Should look at CPU word size.  */
  switch (size)
    {
    case 1:
      type = BFD_RELOC_8;
      break;
    case 2:
      type = BFD_RELOC_16;
      break;
    case 4:
    default:
      type = BFD_RELOC_32;
      break;
    case 8:
      type = BFD_RELOC_64;
      break;
    }

  if (exp->X_op == O_secrel)
    {
      exp->X_op = O_symbol;
      type = BFD_RELOC_32_SECREL;
    }

  fix_new_exp (frag, where, (int) size, exp, pcrel, type);
}
#if defined OBJ_COFF || defined OBJ_ELF
void
arm_validate_fix (fixS * fixP)
{
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.	 */
  if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
      && fixP->fx_addsy != NULL
      && S_IS_DEFINED (fixP->fx_addsy)
      && ! THUMB_IS_FUNC (fixP->fx_addsy))
    {
      fixP->fx_addsy = find_real_start (fixP->fx_addsy);
    }
}
#endif
int
arm_force_relocation (struct fix * fixp)
{
#if defined (OBJ_COFF) && defined (TE_PE)
  if (fixp->fx_r_type == BFD_RELOC_RVA)
    return 1;
#endif

  /* Resolve these relocations even if the symbol is extern or weak.  */
  if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12)
    return 0;

  /* Always leave these relocations for the linker.  */
  if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
       && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
      || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
    return 1;

  /* Always generate relocations against function symbols.  */
  if (fixp->fx_r_type == BFD_RELOC_32
      && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
    return 1;

  return generic_force_reloc (fixp);
}
#if defined (OBJ_ELF) || defined (OBJ_COFF)
/* Relocations against function names must be left unadjusted,
   so that the linker can use this information to generate interworking
   stubs.  The MIPS version of this function
   also prevents relocations that are mips-16 specific, but I do not
   know why it does this.

   There is one other problem that ought to be addressed here, but
   which currently is not: Taking the address of a label (rather
   than a function) and then later jumping to that address.  Such
   addresses also ought to have their bottom bit set (assuming that
   they reside in Thumb code), but at the moment they will not.  */

int
arm_fix_adjustable (fixS * fixP)
{
  if (fixP->fx_addsy == NULL)
    return 1;

  /* Preserve relocations against symbols with function type.  */
  if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
    return 0;

  if (THUMB_IS_FUNC (fixP->fx_addsy)
      && fixP->fx_subsy == NULL)
    return 0;

  /* We need the symbol name for the VTABLE entries.  */
  if (	 fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
      || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    return 0;

  /* Don't allow symbols to be discarded on GOT related relocs.  */
  if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
      || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
      || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
      || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
    return 0;

  /* Similarly for group relocations.  */
  if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
       && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
      || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
    return 0;

  /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols.  */
  if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
    return 0;

  return 1;
}
#endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
const char *
elf32_arm_target_format (void)
{
#ifdef TE_SYMBIAN
  return (target_big_endian
	  ? "elf32-bigarm-symbian"
	  : "elf32-littlearm-symbian");
#elif defined (TE_VXWORKS)
  return (target_big_endian
	  ? "elf32-bigarm-vxworks"
	  : "elf32-littlearm-vxworks");
#else
  if (target_big_endian)
    return "elf32-bigarm";
  else
    return "elf32-littlearm";
#endif
}

void
armelf_frob_symbol (symbolS * symp,
		    int *     puntp)
{
  elf_frob_symbol (symp, puntp);
}
/* MD interface: Finalization.	*/

/* A good place to do this, although this was probably not intended
   for this kind of use.  We need to dump the literal pool before
   references are made to a null symbol pointer.  */

void
arm_cleanup (void)
{
  literal_pool * pool;

  for (pool = list_of_pools; pool; pool = pool->next)
    {
      /* Put it at the end of the relevant section.  */
      subseg_set (pool->section, pool->sub_section);
#ifdef OBJ_ELF
      arm_elf_change_section ();
#endif
    }
}
/* Adjust the symbol table.  This marks Thumb symbols as distinct from
   normal ones.  */

void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!	 */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);
	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    }
	}

      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char	    bind;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
						BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_TFUNC);
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }
#endif
}
/* MD interface: Initialization.  */

static void
set_constant_flonums (void)
{
  int i;

  for (i = 0; i < NUM_FLOAT_VALS; i++)
    if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
      abort ();
}

/* Auto-select Thumb mode if it's the only available instruction set for the
   given architecture.  */

static void
autoselect_thumb_from_cpu_variant (void)
{
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    opcode_select (16);
}
void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  if (	 (arm_ops_hsh = hash_new ()) == NULL
      || (arm_cond_hsh = hash_new ()) == NULL
      || (arm_shift_hsh = hash_new ()) == NULL
      || (arm_psr_hsh = hash_new ()) == NULL
      || (arm_v7m_psr_hsh = hash_new ()) == NULL
      || (arm_reg_hsh = hash_new ()) == NULL
      || (arm_reloc_hsh = hash_new ()) == NULL
      || (arm_barrier_opt_hsh = hash_new ()) == NULL)
    as_fatal (_("virtual memory exhausted"));

  for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
    hash_insert (arm_ops_hsh, insns[i].template, (void *) (insns + i));
  for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
    hash_insert (arm_cond_hsh, conds[i].template, (void *) (conds + i));
  for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
    hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
  for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_psr_hsh, psrs[i].template, (void *) (psrs + i));
  for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template, (void *) (v7m_psrs + i));
  for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
    hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
  for (i = 0;
       i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
       i++)
    hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template,
		 (void *) (barrier_opt_names + i));
  for (i = 0; i < sizeof (reloc_names) / sizeof (struct reloc_entry); i++)
    hash_insert (arm_reloc_hsh, reloc_names[i].name, (void *) (reloc_names + i));

  set_constant_flonums ();
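
  /* For illustration only: once the tables above are populated, lookups go
     through the usual GAS hash_find () interface (assumed here), keyed by
     the same strings that were inserted, e.g.:  */
#if 0
  const struct asm_opcode * opcode = hash_find (arm_ops_hsh, "add");
  const struct reg_entry *  reg	   = hash_find (arm_reg_hsh, "sp");
#endif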
  /* Set the cpu variant based on the command-line options.  We prefer
     -mcpu= over -march= if both are set (as for GCC); and we prefer
     -mfpu= over any other way of setting the floating point unit.
     Use of legacy options with new options is faulted.  */
  if (legacy_cpu)
    {
      if (mcpu_cpu_opt || march_cpu_opt)
	as_bad (_("use of old and new-style options to set CPU type"));

      mcpu_cpu_opt = legacy_cpu;
    }
  else if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (legacy_fpu)
    {
      if (mfpu_opt)
	as_bad (_("use of old and new-style options to set FPU type"));

      mfpu_opt = legacy_fpu;
    }
  else if (!mfpu_opt)
    {
#if !(defined (TE_LINUX) || defined (TE_NetBSD) || defined (TE_VXWORKS))
      /* Some environments specify a default FPU.  If they don't, infer it
	 from the processor.  */
      if (mcpu_fpu_opt)
	mfpu_opt = mcpu_fpu_opt;
      else
	mfpu_opt = march_fpu_opt;
#else
      mfpu_opt = &fpu_default;
#endif
    }

  if (!mfpu_opt)
    {
      if (mcpu_cpu_opt != NULL)
	mfpu_opt = &fpu_default;
      else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
	mfpu_opt = &fpu_arch_vfp_v2;
      else
	mfpu_opt = &fpu_arch_fpa;
    }

#ifdef CPU_DEFAULT
  if (!mcpu_cpu_opt)
    {
      mcpu_cpu_opt = &cpu_default;
      selected_cpu = cpu_default;
    }
#else
  if (mcpu_cpu_opt)
    selected_cpu = *mcpu_cpu_opt;
  else
    mcpu_cpu_opt = &arm_arch_any;
#endif

  ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);

  autoselect_thumb_from_cpu_variant ();

  arm_arch_used = thumb_arch_used = arm_arch_none;
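
  /* For illustration only: with "-mcpu=arm7tdmi -mfpu=vfp" on the command
     line, mcpu_cpu_opt selects ARM_ARCH_V4T and mfpu_opt selects the VFP
     feature set, so the merge above leaves cpu_variant describing an ARMv4T
     core with VFP; with no FPU option at all, the per-CPU default FPU from
     the arm_cpus table further below is used instead.  */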
#if defined OBJ_COFF || defined OBJ_ELF
  {
    unsigned int flags = 0;

#if defined OBJ_ELF
    flags = meabi_flags;

    switch (meabi_flags)
      {
      case EF_ARM_EABI_UNKNOWN:
#endif
	/* Set the flags in the private structure.  */
	if (uses_apcs_26)      flags |= F_APCS26;
	if (support_interwork) flags |= F_INTERWORK;
	if (uses_apcs_float)   flags |= F_APCS_FLOAT;
	if (pic_code)	       flags |= F_PIC;
	if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
	  flags |= F_SOFT_FLOAT;

	switch (mfloat_abi_opt)
	  {
	  case ARM_FLOAT_ABI_SOFT:
	  case ARM_FLOAT_ABI_SOFTFP:
	    flags |= F_SOFT_FLOAT;
	    break;

	  case ARM_FLOAT_ABI_HARD:
	    if (flags & F_SOFT_FLOAT)
	      as_bad (_("hard-float conflicts with specified fpu"));
	    break;
	  }

	/* Using pure-endian doubles (even if soft-float).  */
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	  flags |= F_VFP_FLOAT;

#if defined OBJ_ELF
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
	  flags |= EF_ARM_MAVERICK_FLOAT;
	break;

      case EF_ARM_EABI_VER4:
      case EF_ARM_EABI_VER5:
	/* No additional flags to set.	*/
	break;
      }
#endif
    bfd_set_private_flags (stdoutput, flags);

    /* We have run out of flags in the COFF header to encode the
       status of ATPCS support, so instead we create a dummy,
       empty, debug section called .arm.atpcs.	*/
    if (atpcs)
      {
	asection * sec;

	sec = bfd_make_section (stdoutput, ".arm.atpcs");

	if (sec != NULL)
	  {
	    bfd_set_section_flags
	      (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
	    bfd_set_section_size (stdoutput, sec, 0);
	    bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
	  }
      }
  }
#endif

  /* Record the CPU type as well.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
    mach = bfd_mach_arm_iWMMXt2;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
    mach = bfd_mach_arm_iWMMXt;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
    mach = bfd_mach_arm_XScale;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
    mach = bfd_mach_arm_ep9312;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
    mach = bfd_mach_arm_5TE;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_5T;
      else
	mach = bfd_mach_arm_5;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_4T;
      else
	mach = bfd_mach_arm_4;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
    mach = bfd_mach_arm_3M;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
    mach = bfd_mach_arm_3;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
    mach = bfd_mach_arm_2a;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
    mach = bfd_mach_arm_2;
  else
    mach = bfd_mach_arm_unknown;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
/* Command line processing.  */

/* md_parse_option
      Invocation line includes a switch not recognized by the base assembler.
      See if it's a processor-specific option.

      This routine is somewhat complicated by the need for backwards
      compatibility (since older releases of gcc can't be changed).
      The new options try to make the interface as compatible as
      possible with GCC.

      New options (supported) are:

	      -mcpu=<cpu name>		 Assemble for selected processor
	      -march=<architecture name> Assemble for selected architecture
	      -mfpu=<fpu architecture>	 Assemble for selected FPU.
	      -EB/-mbig-endian		 Big-endian
	      -EL/-mlittle-endian	 Little-endian
	      -k			 Generate PIC code
	      -mthumb			 Start in Thumb mode
	      -mthumb-interwork		 Code supports ARM/Thumb interworking

      For now we will also provide support for:

	      -mapcs-32			 32-bit Program counter
	      -mapcs-26			 26-bit Program counter
	      -mapcs-float		 Floats passed in FP registers
	      -mapcs-reentrant		 Reentrant code

      (sometime these will probably be replaced with -mapcs=<list of options>
      and -matpcs=<list of options>)

      The remaining options are only supported for backwards compatibility.
      Cpu variants, the arm part is optional:
	      -m[arm]1			 Currently not supported.
	      -m[arm]2, -m[arm]250	 Arm 2 and Arm 250 processor
	      -m[arm]3			 Arm 3 processor
	      -m[arm]6[xx],		 Arm 6 processors
	      -m[arm]7[xx][t][[d]m]	 Arm 7 processors
	      -m[arm]8[10]		 Arm 8 processors
	      -m[arm]9[20][tdmi]	 Arm 9 processors
	      -mstrongarm[110[0]]	 StrongARM processors
	      -mxscale			 XScale processors
	      -m[arm]v[2345[t[e]]]	 Arm architectures
	      -mall			 All (except the ARM1)

	      -mfpa10, -mfpa11		 FPA10 and 11 co-processor instructions
	      -mfpe-old			 (No float load/store multiples)
	      -mvfpxd			 VFP Single precision
	      -mno-fpu			 Disable all floating point instructions

      The following CPU names are recognized:
	      arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
	      arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
	      arm700i, arm710, arm710t, arm720, arm720t, arm740t, arm710c,
	      arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
	      arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
	      arm10t, arm10e, arm1020t, arm1020e, arm10200e,
	      strongarm, strongarm110, strongarm1100, strongarm1110, xscale.  */
const char * md_shortopts = "m:k";

#ifdef ARM_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif
#define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)

struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
struct arm_option_table
{
  char *option;		/* Option name to match.  */
  char *help;		/* Help information.  */
  int  *var;		/* Variable to change.	*/
  int	value;		/* What to change it to.  */
  char *deprecated;	/* If non-null, print this message.  */
};

struct arm_option_table arm_opts[] =
{
  {"k",	     N_("generate PIC code"),	   &pic_code,	 1, NULL},
  {"mthumb", N_("assemble Thumb code"),	   &thumb_mode,	 1, NULL},
  {"mthumb-interwork", N_("support ARM/Thumb interworking"),
   &support_interwork, 1, NULL},
  {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
  {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
  {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
   1, NULL},
  {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
  {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},

  /* These are recognized by the assembler, but have no effect on code.	 */
  {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
  {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
struct arm_legacy_option_table
{
  char *option;				/* Option name to match.  */
  const arm_feature_set	**var;		/* Variable to change.	*/
  const arm_feature_set	value;		/* What to change it to.  */
  char *deprecated;			/* If non-null, print this message.  */
};

const struct arm_legacy_option_table arm_legacy_opts[] =
{
  /* DON'T add any new processors to this list -- we want the whole list
     to go away...  Add them to the processors table instead.	*/
  {"marm1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"m1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"marm2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"m2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"marm250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"m250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"marm3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"m3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"marm6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"m6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"marm600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"m600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"marm610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"m610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"marm620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"m620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"marm7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"m7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"marm70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"m70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"marm700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"m700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"marm700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"m700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"marm710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"m710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"marm710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"m710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"marm720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"m720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"marm7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"m7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"marm7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"m7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"marm7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"m7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"marm7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"m7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"marm7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"m7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"marm7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"m7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"marm7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"m7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"m7500fe",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"marm7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"m710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"marm720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"m720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"marm740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"m740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"marm8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"m8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"marm810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"m810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"marm9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"m9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"marm9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"m9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"marm920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"m920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"marm940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"m940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"mstrongarm",     &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=strongarm")},
  {"mstrongarm110",  &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm110")},
  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1100")},
  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1110")},
  {"mxscale",	 &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
  {"miwmmxt",	 &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
  {"mall",	 &legacy_cpu, ARM_ANY,	    N_("use -mcpu=all")},

  /* Architecture variants -- don't add any more to this list either.	*/
  {"mv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"marmv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"mv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"marmv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"mv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"marmv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"mv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"marmv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"mv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"marmv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"mv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"marmv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"mv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"marmv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"mv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"marmv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"mv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
  {"marmv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},

  /* Floating point variants -- don't add any more to this list either.  */
  {"mfpe-old",	 &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
  {"mfpa10",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
  {"mfpa11",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
  {"mno-fpu",	 &legacy_fpu, ARM_ARCH_NONE,
   N_("use either -mfpu=softfpa or -mfpu=softvfp")},

  {NULL, NULL, ARM_ARCH_NONE, NULL}
};
struct arm_cpu_option_table
{
  const arm_feature_set	value;
  /* For some CPUs we assume an FPU unless the user explicitly sets
     -mfpu=...	*/
  const arm_feature_set	default_fpu;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};

/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
static const struct arm_cpu_option_table arm_cpus[] =
{
  {"all",		ARM_ANY,	 FPU_ARCH_FPA,	  NULL},
  {"arm1",		ARM_ARCH_V1,	 FPU_ARCH_FPA,	  NULL},
  {"arm2",		ARM_ARCH_V2,	 FPU_ARCH_FPA,	  NULL},
  {"arm250",		ARM_ARCH_V2S,	 FPU_ARCH_FPA,	  NULL},
  {"arm3",		ARM_ARCH_V2S,	 FPU_ARCH_FPA,	  NULL},
  {"arm6",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm60",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm600",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm610",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm620",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7m",		ARM_ARCH_V3M,	 FPU_ARCH_FPA,	  NULL},
  {"arm7d",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7dm",		ARM_ARCH_V3M,	 FPU_ARCH_FPA,	  NULL},
  {"arm7di",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7dmi",		ARM_ARCH_V3M,	 FPU_ARCH_FPA,	  NULL},
  {"arm70",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm700",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm700i",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm710",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm710t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm720",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm720t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm740t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm710c",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7100",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7500",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7500fe",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm7tdmi",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm7tdmi-s",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm8",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"arm810",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"strongarm",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"strongarm1",	ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"strongarm110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"strongarm1100",	ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"strongarm1110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"arm9",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm920",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  "ARM920T"},
  {"arm920t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm922t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm940t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm9tdmi",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"fa526",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"fa626",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  /* For V5 or later processors we default to using VFP; but the user
     should really set the FPU type explicitly.	 */
  {"arm9e-r0",		ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
  {"arm9e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm926ej",		ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"},
  {"arm926ejs",		ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"},
  {"arm926ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL},
  {"arm946e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
  {"arm946e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM946E-S"},
  {"arm946e-s",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm966e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
  {"arm966e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM966E-S"},
  {"arm966e-s",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm968e-s",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm10t",		ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL},
  {"arm10tdmi",		ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL},
  {"arm10e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm1020",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM1020E"},
  {"arm1020t",		ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL},
  {"arm1020e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm1022e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm1026ejs",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM1026EJ-S"},
  {"arm1026ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL},
  {"fa626te",		ARM_ARCH_V5TE,	 FPU_NONE,	  NULL},
  {"fa726te",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm1136js",		ARM_ARCH_V6,	 FPU_NONE,	  "ARM1136J-S"},
  {"arm1136j-s",	ARM_ARCH_V6,	 FPU_NONE,	  NULL},
  {"arm1136jfs",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2, "ARM1136JF-S"},
  {"arm1136jf-s",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2, NULL},
  {"mpcore",		ARM_ARCH_V6K,	 FPU_ARCH_VFP_V2, NULL},
  {"mpcorenovfp",	ARM_ARCH_V6K,	 FPU_NONE,	  NULL},
  {"arm1156t2-s",	ARM_ARCH_V6T2,	 FPU_NONE,	  NULL},
  {"arm1156t2f-s",	ARM_ARCH_V6T2,	 FPU_ARCH_VFP_V2, NULL},
  {"arm1176jz-s",	ARM_ARCH_V6ZK,	 FPU_NONE,	  NULL},
  {"arm1176jzf-s",	ARM_ARCH_V6ZK,	 FPU_ARCH_VFP_V2, NULL},
  {"cortex-a8",		ARM_ARCH_V7A,	 ARM_FEATURE(0, FPU_VFP_V3
						     | FPU_NEON_EXT_V1)},
  {"cortex-a9",		ARM_ARCH_V7A,	 ARM_FEATURE(0, FPU_VFP_V3
						     | FPU_NEON_EXT_V1)},
  {"cortex-r4",		ARM_ARCH_V7R,	 FPU_NONE,	  NULL},
  {"cortex-m3",		ARM_ARCH_V7M,	 FPU_NONE,	  NULL},
  {"cortex-m1",		ARM_ARCH_V6M,	 FPU_NONE,	  NULL},
  /* ??? XSCALE is really an architecture.  */
  {"xscale",		ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
  /* ??? iwmmxt is not a processor.  */
  {"iwmmxt",		ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL},
  {"iwmmxt2",		ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL},
  {"i80200",		ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
  {"ep9312",	ARM_FEATURE(ARM_AEXT_V4T, ARM_CEXT_MAVERICK), FPU_ARCH_MAVERICK, "ARM920T"},
  {NULL,		ARM_ARCH_NONE,	 ARM_ARCH_NONE,	  NULL}
};
struct arm_arch_option_table
{
  const arm_feature_set	value;
  const arm_feature_set	default_fpu;
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
static const struct arm_arch_option_table arm_archs[] =
{
  {"all",		ARM_ANY,	 FPU_ARCH_FPA},
  {"armv1",		ARM_ARCH_V1,	 FPU_ARCH_FPA},
  {"armv2",		ARM_ARCH_V2,	 FPU_ARCH_FPA},
  {"armv2a",		ARM_ARCH_V2S,	 FPU_ARCH_FPA},
  {"armv2s",		ARM_ARCH_V2S,	 FPU_ARCH_FPA},
  {"armv3",		ARM_ARCH_V3,	 FPU_ARCH_FPA},
  {"armv3m",		ARM_ARCH_V3M,	 FPU_ARCH_FPA},
  {"armv4",		ARM_ARCH_V4,	 FPU_ARCH_FPA},
  {"armv4xm",		ARM_ARCH_V4xM,	 FPU_ARCH_FPA},
  {"armv4t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA},
  {"armv4txm",		ARM_ARCH_V4TxM,	 FPU_ARCH_FPA},
  {"armv5",		ARM_ARCH_V5,	 FPU_ARCH_VFP},
  {"armv5t",		ARM_ARCH_V5T,	 FPU_ARCH_VFP},
  {"armv5txm",		ARM_ARCH_V5TxM,	 FPU_ARCH_VFP},
  {"armv5te",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP},
  {"armv5texp",		ARM_ARCH_V5TExP, FPU_ARCH_VFP},
  {"armv5tej",		ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP},
  {"armv6",		ARM_ARCH_V6,	 FPU_ARCH_VFP},
  {"armv6j",		ARM_ARCH_V6,	 FPU_ARCH_VFP},
  {"armv6k",		ARM_ARCH_V6K,	 FPU_ARCH_VFP},
  {"armv6z",		ARM_ARCH_V6Z,	 FPU_ARCH_VFP},
  {"armv6zk",		ARM_ARCH_V6ZK,	 FPU_ARCH_VFP},
  {"armv6t2",		ARM_ARCH_V6T2,	 FPU_ARCH_VFP},
  {"armv6kt2",		ARM_ARCH_V6KT2,	 FPU_ARCH_VFP},
  {"armv6zt2",		ARM_ARCH_V6ZT2,	 FPU_ARCH_VFP},
  {"armv6zkt2",		ARM_ARCH_V6ZKT2, FPU_ARCH_VFP},
  {"armv6-m",		ARM_ARCH_V6M,	 FPU_ARCH_VFP},
  {"armv7",		ARM_ARCH_V7,	 FPU_ARCH_VFP},
  /* The official spelling of the ARMv7 profile variants is the dashed form.
     Accept the non-dashed form for compatibility with old toolchains.	 */
  {"armv7a",		ARM_ARCH_V7A,	 FPU_ARCH_VFP},
  {"armv7r",		ARM_ARCH_V7R,	 FPU_ARCH_VFP},
  {"armv7m",		ARM_ARCH_V7M,	 FPU_ARCH_VFP},
  {"armv7-a",		ARM_ARCH_V7A,	 FPU_ARCH_VFP},
  {"armv7-r",		ARM_ARCH_V7R,	 FPU_ARCH_VFP},
  {"armv7-m",		ARM_ARCH_V7M,	 FPU_ARCH_VFP},
  {"xscale",		ARM_ARCH_XSCALE, FPU_ARCH_VFP},
  {"iwmmxt",		ARM_ARCH_IWMMXT, FPU_ARCH_VFP},
  {"iwmmxt2",		ARM_ARCH_IWMMXT2,FPU_ARCH_VFP},
  {NULL,		ARM_ARCH_NONE,	 ARM_ARCH_NONE}
};
/* ISA extensions in the co-processor space.  */
struct arm_option_cpu_value_table
{
  const char *name;
  const arm_feature_set value;
};

static const struct arm_option_cpu_value_table arm_extensions[] =
{
  {"maverick",		ARM_FEATURE (0, ARM_CEXT_MAVERICK)},
  {"xscale",		ARM_FEATURE (0, ARM_CEXT_XSCALE)},
  {"iwmmxt",		ARM_FEATURE (0, ARM_CEXT_IWMMXT)},
  {"iwmmxt2",		ARM_FEATURE (0, ARM_CEXT_IWMMXT2)},
  {NULL,		ARM_ARCH_NONE}
};
/* This list should, at a minimum, contain all the fpu names
   recognized by GCC.  */
static const struct arm_option_cpu_value_table arm_fpus[] =
{
  {"softfpa",		FPU_NONE},
  {"fpe",		FPU_ARCH_FPE},
  {"fpe2",		FPU_ARCH_FPE},
  {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
  {"fpa",		FPU_ARCH_FPA},
  {"fpa10",		FPU_ARCH_FPA},
  {"fpa11",		FPU_ARCH_FPA},
  {"arm7500fe",		FPU_ARCH_FPA},
  {"softvfp",		FPU_ARCH_VFP},
  {"softvfp+vfp",	FPU_ARCH_VFP_V2},
  {"vfp",		FPU_ARCH_VFP_V2},
  {"vfp9",		FPU_ARCH_VFP_V2},
  {"vfp3",		FPU_ARCH_VFP_V3}, /* For backwards compatibility.  */
  {"vfp10",		FPU_ARCH_VFP_V2},
  {"vfp10-r0",		FPU_ARCH_VFP_V1},
  {"vfpxd",		FPU_ARCH_VFP_V1xD},
  {"vfpv2",		FPU_ARCH_VFP_V2},
  {"vfpv3",		FPU_ARCH_VFP_V3},
  {"vfpv3-d16",		FPU_ARCH_VFP_V3D16},
  {"arm1020t",		FPU_ARCH_VFP_V1},
  {"arm1020e",		FPU_ARCH_VFP_V2},
  {"arm1136jfs",	FPU_ARCH_VFP_V2},
  {"arm1136jf-s",	FPU_ARCH_VFP_V2},
  {"maverick",		FPU_ARCH_MAVERICK},
  {"neon",		FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {NULL,		ARM_ARCH_NONE}
};
20270 static const struct arm_option_value_table arm_float_abis
[] =
20272 {"hard", ARM_FLOAT_ABI_HARD
},
20273 {"softfp", ARM_FLOAT_ABI_SOFTFP
},
20274 {"soft", ARM_FLOAT_ABI_SOFT
},
20279 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
20280 static const struct arm_option_value_table arm_eabis
[] =
20282 {"gnu", EF_ARM_EABI_UNKNOWN
},
20283 {"4", EF_ARM_EABI_VER4
},
20284 {"5", EF_ARM_EABI_VER5
},
struct arm_long_option_table
{
  char * option;		/* Substring to match.	*/
  char * help;			/* Help information.  */
  int (* func) (char * subopt);	/* Function to decode sub-option.  */
  char * deprecated;		/* If non-null, print this message.  */
};

static int
arm_parse_extension (char * str, const arm_feature_set **opt_p)
{
  arm_feature_set *ext_set = xmalloc (sizeof (arm_feature_set));

  /* Copy the feature set, so that we can modify it.  */
  *ext_set = **opt_p;
  *opt_p = ext_set;

  while (str != NULL && *str != 0)
    {
      const struct arm_option_cpu_value_table * opt;
      char * ext;
      int optlen;

      if (*str != '+')
	{
	  as_bad (_("invalid architectural extension"));
	  return 0;
	}

      str++;
      ext = strchr (str, '+');

      if (ext != NULL)
	optlen = ext - str;
      else
	optlen = strlen (str);

      if (optlen == 0)
	{
	  as_bad (_("missing architectural extension"));
	  return 0;
	}

      for (opt = arm_extensions; opt->name != NULL; opt++)
	if (strncmp (opt->name, str, optlen) == 0)
	  {
	    ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
	    break;
	  }

      if (opt->name == NULL)
	{
	  as_bad (_("unknown architectural extension `%s'"), str);
	  return 0;
	}

      str = ext;
    }

  return 1;
}
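
/* For illustration only: "-mcpu=xscale+iwmmxt" reaches this function with
   STR pointing at "+iwmmxt"; the loop above matches the "iwmmxt" entry in
   arm_extensions and merges ARM_CEXT_IWMMXT into the copied feature set.  */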
static int
arm_parse_cpu (char * str)
{
  const struct arm_cpu_option_table * opt;
  char * ext = strchr (str, '+');
  int optlen;

  if (ext != NULL)
    optlen = ext - str;
  else
    optlen = strlen (str);

  if (optlen == 0)
    {
      as_bad (_("missing cpu name `%s'"), str);
      return 0;
    }

  for (opt = arm_cpus; opt->name != NULL; opt++)
    if (strncmp (opt->name, str, optlen) == 0)
      {
	mcpu_cpu_opt = &opt->value;
	mcpu_fpu_opt = &opt->default_fpu;
	if (opt->canonical_name)
	  strcpy (selected_cpu_name, opt->canonical_name);
	else
	  {
	    int i;

	    for (i = 0; i < optlen; i++)
	      selected_cpu_name[i] = TOUPPER (opt->name[i]);
	    selected_cpu_name[i] = 0;
	  }

	if (ext != NULL)
	  return arm_parse_extension (ext, &mcpu_cpu_opt);

	return 1;
      }

  as_bad (_("unknown cpu `%s'"), str);
  return 0;
}
static int
arm_parse_arch (char * str)
{
  const struct arm_arch_option_table *opt;
  char *ext = strchr (str, '+');
  int optlen;

  if (ext != NULL)
    optlen = ext - str;
  else
    optlen = strlen (str);

  if (optlen == 0)
    {
      as_bad (_("missing architecture name `%s'"), str);
      return 0;
    }

  for (opt = arm_archs; opt->name != NULL; opt++)
    if (streq (opt->name, str))
      {
	march_cpu_opt = &opt->value;
	march_fpu_opt = &opt->default_fpu;
	strcpy (selected_cpu_name, opt->name);

	if (ext != NULL)
	  return arm_parse_extension (ext, &march_cpu_opt);

	return 1;
      }

  as_bad (_("unknown architecture `%s'\n"), str);
  return 0;
}
static int
arm_parse_fpu (char * str)
{
  const struct arm_option_cpu_value_table * opt;

  for (opt = arm_fpus; opt->name != NULL; opt++)
    if (streq (opt->name, str))
      {
	mfpu_opt = &opt->value;
	return 1;
      }

  as_bad (_("unknown floating point format `%s'\n"), str);
  return 0;
}

static int
arm_parse_float_abi (char * str)
{
  const struct arm_option_value_table * opt;

  for (opt = arm_float_abis; opt->name != NULL; opt++)
    if (streq (opt->name, str))
      {
	mfloat_abi_opt = opt->value;
	return 1;
      }

  as_bad (_("unknown floating point abi `%s'\n"), str);
  return 0;
}

static int
arm_parse_eabi (char * str)
{
  const struct arm_option_value_table *opt;

  for (opt = arm_eabis; opt->name != NULL; opt++)
    if (streq (opt->name, str))
      {
	meabi_flags = opt->value;
	return 1;
      }
  as_bad (_("unknown EABI `%s'\n"), str);
  return 0;
}
struct arm_long_option_table arm_long_opts[] =
{
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   arm_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   arm_parse_arch, NULL},
  {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
   arm_parse_fpu, NULL},
  {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
   arm_parse_float_abi, NULL},
  {"meabi=", N_("<ver>\t\t  assemble for eabi version <ver>"),
   arm_parse_eabi, NULL},
  {NULL, NULL, 0, NULL}
};
int
md_parse_option (int c, char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case OPTION_FIX_V4BX:
      break;

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.	*/
      return 0;

    default:
      for (opt = arm_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
#if WARN_DEPRECATED
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));
#endif

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
	{
	  if (c == fopt->option[0]
	      && ((arg == NULL && fopt->option[1] == 0)
		  || streq (arg, fopt->option + 1)))
	    {
#if WARN_DEPRECATED
	      /* If the option is deprecated, tell the user.  */
	      if (fopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(fopt->deprecated));
#endif

	      if (fopt->var != NULL)
		*fopt->var = &fopt->value;

	      return 1;
	    }
	}

      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
#if WARN_DEPRECATED
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));
#endif

	      /* Call the sub-option parser.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
void
md_show_usage (FILE * fp)
{
  struct arm_option_table *opt;
  struct arm_long_option_table *lopt;

  fprintf (fp, _(" ARM-specific assembler options:\n"));

  for (opt = arm_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));

  for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));

  fprintf (fp, _("\
  -EB                     assemble code for a big-endian cpu\n"));

  fprintf (fp, _("\
  -EL                     assemble code for a little-endian cpu\n"));

  fprintf (fp, _("\
  --fix-v4bx              Allow BX in ARMv4 code\n"));
}
typedef struct
{
  int val;
  arm_feature_set flags;
} cpu_arch_ver_table;

/* Mapping from CPU features to EABI CPU arch values.  Table must be sorted
   least features first.  */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
    {4, ARM_ARCH_V5TE},
    {5, ARM_ARCH_V5TEJ},
    {8, ARM_ARCH_V6T2},
    {10, ARM_ARCH_V7A},
    {10, ARM_ARCH_V7R},
    {10, ARM_ARCH_V7M},
    {0, ARM_ARCH_NONE}
};

/* Set the public EABI object attributes.  */

static void
aeabi_set_public_attributes (void)
{
  int arch;
  arm_feature_set flags;
  arm_feature_set tmp;
  const cpu_arch_ver_table *p;

  /* Choose the architecture based on the capabilities of the requested cpu
     (if any) and/or the instructions actually used.  */
  ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
  ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
  ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);

  /* Allow the user to override the reported architecture.  */
  if (object_arch)
    {
      ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
      ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
    }

  tmp = flags;
  arch = 0;
  for (p = cpu_arch_ver; p->val; p++)
    {
      if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
	{
	  arch = p->val;
	  ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
	}
    }

  /* Tag_CPU_name.  */
  if (selected_cpu_name[0])
    {
      char *p;

      p = selected_cpu_name;
      if (strncmp (p, "armv", 4) == 0)
	{
	  int i;

	  p += 4;
	  for (i = 0; p[i]; i++)
	    p[i] = TOUPPER (p[i]);
	}

      bfd_elf_add_proc_attr_string (stdoutput, 5, p);
    }

  /* Tag_CPU_arch.  */
  bfd_elf_add_proc_attr_int (stdoutput, 6, arch);

  /* Tag_CPU_arch_profile.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
    bfd_elf_add_proc_attr_int (stdoutput, 7, 'A');
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
    bfd_elf_add_proc_attr_int (stdoutput, 7, 'R');
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_m))
    bfd_elf_add_proc_attr_int (stdoutput, 7, 'M');

  /* Tag_ARM_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_full))
    bfd_elf_add_proc_attr_int (stdoutput, 8, 1);

  /* Tag_THUMB_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_full))
    bfd_elf_add_proc_attr_int (stdoutput, 9,
			       ARM_CPU_HAS_FEATURE (thumb_arch_used,
						    arm_arch_t2) ? 2 : 1);

  /* Tag_VFP_arch.  */
  if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_d32)
      || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_d32))
    bfd_elf_add_proc_attr_int (stdoutput, 10, 4);
  else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v3)
	   || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v3))
    bfd_elf_add_proc_attr_int (stdoutput, 10, 3);
  else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v2)
	   || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v2))
    bfd_elf_add_proc_attr_int (stdoutput, 10, 2);
  else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1xd)
	   || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1xd))
    bfd_elf_add_proc_attr_int (stdoutput, 10, 1);

  /* Tag_WMMX_arch.  */
  if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_cext_iwmmxt)
      || ARM_CPU_HAS_FEATURE (arm_arch_used, arm_cext_iwmmxt))
    bfd_elf_add_proc_attr_int (stdoutput, 11, 1);

  /* Tag_NEON_arch.  */
  if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_neon_ext_v1)
      || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_neon_ext_v1))
    bfd_elf_add_proc_attr_int (stdoutput, 12, 1);
}
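
/* Illustrative only: for an object assembled with, say,
   "-mcpu=cortex-a8 -mfpu=neon" the attributes recorded above would be
   roughly

	Tag_CPU_arch         (6)  = 10  (v7)
	Tag_CPU_arch_profile (7)  = 'A'
	Tag_ARM_ISA_use      (8)  = 1
	Tag_THUMB_ISA_use    (9)  = 2   (Thumb-2)
	Tag_VFP_arch         (10) = 3 or 4, depending on the VFPv3
				    D-register bank implied by the FPU
	Tag_NEON_arch        (12) = 1

   The exact values depend on the cpu/fpu tables and on the instructions
   actually assembled, so treat this as a sketch, not a guarantee.  */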

/* Add the default contents for the .ARM.attributes section.  */

void
arm_md_end (void)
{
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
    return;

  aeabi_set_public_attributes ();
}
#endif /* OBJ_ELF */


/* Parse a .cpu directive.  */

static void
s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_cpu_option_table *opt;
  char *name;
  char saved_char;

  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  /* Skip the first "all" entry.  */
  for (opt = arm_cpus + 1; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	mcpu_cpu_opt = &opt->value;
	selected_cpu = opt->value;
	if (opt->canonical_name)
	  strcpy (selected_cpu_name, opt->canonical_name);
	else
	  {
	    int i;

	    for (i = 0; opt->name[i]; i++)
	      selected_cpu_name[i] = TOUPPER (opt->name[i]);
	    selected_cpu_name[i] = 0;
	  }
	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }

  as_bad (_("unknown cpu `%s'"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
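
/* Typical use, for illustration:

	.cpu arm7tdmi
	...
	.cpu cortex-a8

   The names accepted are those in the arm_cpus table (the same names as
   -mcpu=); the two shown here are only examples.  Switching CPU rebuilds
   cpu_variant, so subsequent code is checked against the new CPU.  */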

/* Parse a .arch directive.  */

static void
s_arm_arch (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_arch_option_table *opt;
  char saved_char;
  char *name;

  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  /* Skip the first "all" entry.  */
  for (opt = arm_archs + 1; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	mcpu_cpu_opt = &opt->value;
	selected_cpu = opt->value;
	strcpy (selected_cpu_name, opt->name);
	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }

  as_bad (_("unknown architecture `%s'\n"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
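
/* Typical use, for illustration:

	.arch armv5te

   The names accepted are those in the arm_archs table (the same names as
   -march=); like .cpu this rewrites cpu_variant, so subsequent code is
   checked against the new architecture.  */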

/* Parse a .object_arch directive.  */

static void
s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_arch_option_table *opt;
  char saved_char;
  char *name;

  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  /* Skip the first "all" entry.  */
  for (opt = arm_archs + 1; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	object_arch = &opt->value;

	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }

  as_bad (_("unknown architecture `%s'\n"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
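
/* Typical use, for illustration:

	.arch		armv5t
	.object_arch	armv4t

   Unlike .cpu and .arch, .object_arch only changes the architecture
   recorded in the object attributes (via object_arch above); it does not
   change which instructions the assembler accepts.  This is intended for
   code that selects newer instructions at run time.  */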

/* Parse a .fpu directive.  */

static void
s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_option_cpu_value_table *opt;
  char saved_char;
  char *name;

  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  for (opt = arm_fpus; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	mfpu_opt = &opt->value;
	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }

  as_bad (_("unknown floating point format `%s'\n"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
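
/* Typical use, for illustration:

	.fpu vfpv2
	...
	.fpu neon

   The names accepted are those in the arm_fpus table (the same names as
   -mfpu=); the two shown here are only examples.  */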

/* Copy symbol information.  */

void
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
}