/* tc-arm.c -- Assemble for the ARM
   Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
   Free Software Foundation, Inc.
   Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
	Modified by David Taylor (dtaylor@armltd.co.uk)
	Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
	Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
	Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)

   This file is part of GAS, the GNU Assembler.

   GAS is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   GAS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GAS; see the file COPYING.  If not, write to the Free
   Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
   02110-1301, USA.  */
#include "safe-ctype.h"

/* Need TARGET_CPU.  */
#include "opcode/arm.h"

#include "dwarf2dbg.h"
#include "dw2gencfi.h"

/* XXX Set this to 1 after the next binutils release.  */
#define WARN_DEPRECATED 0

/* Must be at least the size of the largest unwind opcode (currently two).  */
#define ARM_OPCODE_CHUNK_SIZE 8
/* This structure holds the unwinding state.  */

  symbolS *       table_entry;
  symbolS *       personality_routine;
  int             personality_index;
  /* The segment containing the function.  */
  /* Opcodes generated from this function.  */
  unsigned char * opcodes;
  /* The number of bytes pushed to the stack.  */
  /* We don't add stack adjustment opcodes immediately so that we can merge
     multiple adjustments.  We can also omit the final adjustment
     when using a frame pointer.  */
  offsetT         pending_offset;
  /* These two fields are set by both unwind_movsp and unwind_setfp.  They
     hold the reg+offset to use when restoring sp from a frame pointer.  */
  /* Nonzero if an unwind_setfp directive has been seen.  */
  /* Nonzero if the last opcode restores sp from fp_reg.  */
  unsigned        sp_restored:1;

/* Bit N indicates that an R_ARM_NONE relocation has been output for
   __aeabi_unwind_cpp_prN already if set.  This enables dependencies to be
   emitted only once per section, to save unnecessary bloat.  */
static unsigned int marked_pr_dependency = 0;
/* Types of processor to assemble for.  */
#if defined __XSCALE__
#define CPU_DEFAULT	ARM_ARCH_XSCALE
#if defined __thumb__
#define CPU_DEFAULT	ARM_ARCH_V5T

# define FPU_DEFAULT FPU_ARCH_FPA
# elif defined (TE_NetBSD)
#  define FPU_DEFAULT FPU_ARCH_VFP	/* Soft-float, but VFP order.  */
   /* Legacy a.out format.  */
#  define FPU_DEFAULT FPU_ARCH_FPA	/* Soft-float, but FPA order.  */
# elif defined (TE_VXWORKS)
#  define FPU_DEFAULT FPU_ARCH_VFP	/* Soft-float, VFP order.  */
  /* For backwards compatibility, default to FPA.  */
# define FPU_DEFAULT FPU_ARCH_FPA
#endif /* ifndef FPU_DEFAULT */
#define streq(a, b)	      (strcmp (a, b) == 0)

static arm_feature_set cpu_variant;
static arm_feature_set arm_arch_used;
static arm_feature_set thumb_arch_used;

/* Flags stored in private area of BFD structure.  */
static int uses_apcs_26	     = FALSE;
static int atpcs	     = FALSE;
static int support_interwork = FALSE;
static int uses_apcs_float   = FALSE;
static int pic_code	     = FALSE;
/* Variables that we set while parsing command-line options.  Once all
   options have been read we re-process these values to set the real
   assembly flags.  */
static const arm_feature_set *legacy_cpu = NULL;
static const arm_feature_set *legacy_fpu = NULL;

static const arm_feature_set *mcpu_cpu_opt = NULL;
static const arm_feature_set *mcpu_fpu_opt = NULL;
static const arm_feature_set *march_cpu_opt = NULL;
static const arm_feature_set *march_fpu_opt = NULL;
static const arm_feature_set *mfpu_opt = NULL;
/* Constants for known architecture features.  */
static const arm_feature_set fpu_default = FPU_DEFAULT;
static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;

static const arm_feature_set cpu_default = CPU_DEFAULT;
static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0);
static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V2, 0);
static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0);
static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0);
static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0);
static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0);
static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0);
static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0);
static const arm_feature_set arm_ext_v4t_5 =
  ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0);
static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0);
static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0);
static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0);
static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0);
static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0);
static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0);
static const arm_feature_set arm_ext_v6z = ARM_FEATURE (ARM_EXT_V6Z, 0);
static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0);
static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0);
static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0);
static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0);
static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0);
static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0);
static const arm_feature_set arm_ext_v7m = ARM_FEATURE (ARM_EXT_V7M, 0);

static const arm_feature_set arm_arch_any = ARM_ANY;
static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1);
static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;

static const arm_feature_set arm_cext_iwmmxt =
  ARM_FEATURE (0, ARM_CEXT_IWMMXT);
static const arm_feature_set arm_cext_xscale =
  ARM_FEATURE (0, ARM_CEXT_XSCALE);
static const arm_feature_set arm_cext_maverick =
  ARM_FEATURE (0, ARM_CEXT_MAVERICK);
static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1);
static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2);
static const arm_feature_set fpu_vfp_ext_v1xd =
  ARM_FEATURE (0, FPU_VFP_EXT_V1xD);
static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1);
static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2);
static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3);
static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1);
static const arm_feature_set fpu_vfp_v3_or_neon_ext =
  ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
static int mfloat_abi_opt = -1;
/* Record user cpu selection for object attributes.  */
static arm_feature_set selected_cpu = ARM_ARCH_NONE;
/* Must be long enough to hold any of the names in arm_cpus.  */
static char selected_cpu_name[16];

static int meabi_flags = EABI_DEFAULT;
static int meabi_flags = EF_ARM_EABI_UNKNOWN;
/* Pre-defined "_GLOBAL_OFFSET_TABLE_"	*/
symbolS * GOT_symbol;

/* 0: assemble for ARM,
   1: assemble for Thumb,
   2: assemble for Thumb even though target CPU does not support thumb
      instructions.  */
static int thumb_mode = 0;

/* If unified_syntax is true, we are processing the new unified
   ARM/Thumb syntax.  Important differences from the old ARM mode:

     - Immediate operands do not require a # prefix.
     - Conditional affixes always appear at the end of the
       instruction.  (For backward compatibility, those instructions
       that formerly had them in the middle, continue to accept them
       there.)
     - The IT instruction may appear, and if it does is validated
       against subsequent conditional affixes.  It does not generate
       machine code.

   Important differences from the old Thumb mode:

     - Immediate operands do not require a # prefix.
     - Most of the V6T2 instructions are only available in unified mode.
     - The .N and .W suffixes are recognized and honored (it is an error
       if they cannot be honored).
     - All instructions set the flags if and only if they have an 's' affix.
     - Conditional affixes may be used.  They are validated against
       preceding IT instructions.  Unlike ARM mode, you cannot use a
       conditional affix except in the scope of an IT instruction.  */

static bfd_boolean unified_syntax = FALSE;
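
/* Illustrative note (not from the original source): with the divided
   (pre-unified) ARM syntax a conditional, flag-setting add is written
   "addeqs r0, r0, #1", whereas unified syntax puts the condition last,
   "addseq r0, r0, 1", and tolerates the missing '#'.  The exact
   spellings accepted are determined by the opcode tables and operand
   parsing later in this file.  */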
  enum neon_el_type type;

#define NEON_MAX_TYPE_ELS 4
  struct neon_type_el el[NEON_MAX_TYPE_ELS];

  unsigned long instruction;
  struct neon_type vectype;
  /* Set to the opcode if the instruction needs relaxation.
     Zero if the instruction is not relaxed.  */
  bfd_reloc_code_real_type type;

    struct neon_type_el vectype;
    unsigned present	: 1;  /* Operand present.  */
    unsigned isreg	: 1;  /* Operand was a register.  */
    unsigned immisreg	: 1;  /* .imm field is a second register.  */
    unsigned isscalar	: 1;  /* Operand is a (Neon) scalar.  */
    unsigned immisalign	: 1;  /* Immediate is an alignment specifier.  */
    /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
       instructions.  This allows us to disambiguate ARM <-> vector insns.  */
    unsigned regisimm	: 1;  /* 64-bit immediate, reg forms high 32 bits.  */
    unsigned isquad	: 1;  /* Operand is Neon quad-precision register.  */
    unsigned hasreloc	: 1;  /* Operand has relocation suffix.  */
    unsigned writeback	: 1;  /* Operand has trailing !  */
    unsigned preind	: 1;  /* Preindexed address.  */
    unsigned postind	: 1;  /* Postindexed address.  */
    unsigned negative	: 1;  /* Index register was negated.  */
    unsigned shifted	: 1;  /* Shift applied to operation.  */
    unsigned shift_kind : 3;  /* Shift operation (enum shift_kind).  */

static struct arm_it inst;

#define NUM_FLOAT_VALS 8

const char * fp_const[] =
{
  "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
};

/* Number of littlenums required to hold an extended precision number.  */
#define MAX_LITTLENUMS 6

LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
#define CP_T_X	 0x00008000
#define CP_T_Y	 0x00400000

#define CONDS_BIT	 0x00100000
#define LOAD_BIT	 0x00100000

#define DOUBLE_LOAD_FLAG 0x00000001

  const char * template;

#define COND_ALWAYS 0xE

  const char *template;

struct asm_barrier_opt
  const char *template;

/* The bit that distinguishes CPSR and SPSR.  */
#define SPSR_BIT   (1 << 22)

/* The individual PSR flag bits.  */
#define PSR_c	(1 << 16)
#define PSR_x	(1 << 17)
#define PSR_s	(1 << 18)
#define PSR_f	(1 << 19)

  bfd_reloc_code_real_type reloc;

  VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
  VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn

  VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX

/* Bits for DEFINED field in neon_typed_alias.  */
#define NTA_HASTYPE  1
#define NTA_HASINDEX 2

struct neon_typed_alias
  unsigned char defined;
  struct neon_type_el eltype;
/* ARM register categories.  This includes coprocessor numbers and various
   architecture extensions' registers.  */

/* Structure for a hash table entry for a register.
   If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
   information which states whether a vector type or index is specified (for a
   register alias created with .dn or .qn).  Otherwise NEON should be NULL.  */
  unsigned char number;
  unsigned char builtin;
  struct neon_typed_alias *neon;

/* Diagnostics used when we don't get a register of the expected type.  */
const char *const reg_expected_msgs[] =
  N_("ARM register expected"),
  N_("bad or missing co-processor number"),
  N_("co-processor register expected"),
  N_("FPA register expected"),
  N_("VFP single precision register expected"),
  N_("VFP/Neon double precision register expected"),
  N_("Neon quad precision register expected"),
  N_("Neon double or quad precision register expected"),
  N_("VFP system register expected"),
  N_("Maverick MVF register expected"),
  N_("Maverick MVD register expected"),
  N_("Maverick MVFX register expected"),
  N_("Maverick MVDX register expected"),
  N_("Maverick MVAX register expected"),
  N_("Maverick DSPSC register expected"),
  N_("iWMMXt data register expected"),
  N_("iWMMXt control register expected"),
  N_("iWMMXt scalar register expected"),
  N_("XScale accumulator register expected"),
/* Some well known registers that we refer to directly elsewhere.  */

/* ARM instructions take 4 bytes in the object file, Thumb instructions
   take 2.  */

  /* Basic string to match.  */
  const char *template;

  /* Parameters to instruction.  */
  unsigned char operands[8];

  /* Conditional tag - see opcode_lookup.  */
  unsigned int tag : 4;

  /* Basic instruction code.  */
  unsigned int avalue : 28;

  /* Thumb-format instruction code.  */

  /* Which architecture variant provides this instruction.  */
  const arm_feature_set *avariant;
  const arm_feature_set *tvariant;

  /* Function to call to encode instruction in ARM format.  */
  void (* aencode) (void);

  /* Function to call to encode instruction in Thumb format.  */
  void (* tencode) (void);
/* Defines for various bits that we will want to toggle.  */
#define INST_IMMEDIATE	0x02000000
#define OFFSET_REG	0x02000000
#define HWOFFSET_IMM	0x00400000
#define SHIFT_BY_REG	0x00000010
#define PRE_INDEX	0x01000000
#define INDEX_UP	0x00800000
#define WRITE_BACK	0x00200000
#define LDM_TYPE_2_OR_3	0x00400000

#define LITERAL_MASK	0xf000f000
#define OPCODE_MASK	0xfe1fffff
#define V4_STR_BIT	0x00000020

#define DATA_OP_SHIFT	21

#define T2_OPCODE_MASK	0xfe1fffff
#define T2_DATA_OP_SHIFT 21

/* Codes to distinguish the arithmetic instructions.  */
#define OPCODE_CMP	10
#define OPCODE_CMN	11
#define OPCODE_ORR	12
#define OPCODE_MOV	13
#define OPCODE_BIC	14
#define OPCODE_MVN	15

#define T2_OPCODE_AND	0
#define T2_OPCODE_BIC	1
#define T2_OPCODE_ORR	2
#define T2_OPCODE_ORN	3
#define T2_OPCODE_EOR	4
#define T2_OPCODE_ADD	8
#define T2_OPCODE_ADC	10
#define T2_OPCODE_SBC	11
#define T2_OPCODE_SUB	13
#define T2_OPCODE_RSB	14

#define T_OPCODE_MUL 0x4340
#define T_OPCODE_TST 0x4200
#define T_OPCODE_CMN 0x42c0
#define T_OPCODE_NEG 0x4240
#define T_OPCODE_MVN 0x43c0

#define T_OPCODE_ADD_R3	0x1800
#define T_OPCODE_SUB_R3 0x1a00
#define T_OPCODE_ADD_HI 0x4400
#define T_OPCODE_ADD_ST 0xb000
#define T_OPCODE_SUB_ST 0xb080
#define T_OPCODE_ADD_SP 0xa800
#define T_OPCODE_ADD_PC 0xa000
#define T_OPCODE_ADD_I8 0x3000
#define T_OPCODE_SUB_I8 0x3800
#define T_OPCODE_ADD_I3 0x1c00
#define T_OPCODE_SUB_I3 0x1e00

#define T_OPCODE_ASR_R	0x4100
#define T_OPCODE_LSL_R	0x4080
#define T_OPCODE_LSR_R	0x40c0
#define T_OPCODE_ROR_R	0x41c0
#define T_OPCODE_ASR_I	0x1000
#define T_OPCODE_LSL_I	0x0000
#define T_OPCODE_LSR_I	0x0800

#define T_OPCODE_MOV_I8	0x2000
#define T_OPCODE_CMP_I8 0x2800
#define T_OPCODE_CMP_LR 0x4280
#define T_OPCODE_MOV_HR 0x4600
#define T_OPCODE_CMP_HR 0x4500

#define T_OPCODE_LDR_PC 0x4800
#define T_OPCODE_LDR_SP 0x9800
#define T_OPCODE_STR_SP 0x9000
#define T_OPCODE_LDR_IW 0x6800
#define T_OPCODE_STR_IW 0x6000
#define T_OPCODE_LDR_IH 0x8800
#define T_OPCODE_STR_IH 0x8000
#define T_OPCODE_LDR_IB 0x7800
#define T_OPCODE_STR_IB 0x7000
#define T_OPCODE_LDR_RW 0x5800
#define T_OPCODE_STR_RW 0x5000
#define T_OPCODE_LDR_RH 0x5a00
#define T_OPCODE_STR_RH 0x5200
#define T_OPCODE_LDR_RB 0x5c00
#define T_OPCODE_STR_RB 0x5400

#define T_OPCODE_PUSH	0xb400
#define T_OPCODE_POP	0xbc00

#define T_OPCODE_BRANCH 0xe000

#define THUMB_SIZE	2	/* Size of thumb instruction.  */
#define THUMB_PP_PC_LR 0x0100
#define THUMB_LOAD_BIT 0x0800
#define THUMB2_LOAD_BIT 0x00100000
#define BAD_ARGS	_("bad arguments to instruction")
#define BAD_PC		_("r15 not allowed here")
#define BAD_COND	_("instruction cannot be conditional")
#define BAD_OVERLAP	_("registers may not be the same")
#define BAD_HIREG	_("lo register required")
#define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
#define BAD_ADDR_MODE	_("instruction does not accept this addressing mode")
#define BAD_BRANCH	_("branch must be last instruction in IT block")
#define BAD_NOT_IT	_("instruction not allowed in IT block")
static struct hash_control *arm_ops_hsh;
static struct hash_control *arm_cond_hsh;
static struct hash_control *arm_shift_hsh;
static struct hash_control *arm_psr_hsh;
static struct hash_control *arm_v7m_psr_hsh;
static struct hash_control *arm_reg_hsh;
static struct hash_control *arm_reloc_hsh;
static struct hash_control *arm_barrier_opt_hsh;
/* Stuff needed to resolve the label ambiguity introduced when a label
   appears on a line of its own rather than immediately before an
   instruction.  */

symbolS *  last_label_seen;
static int label_is_thumb_function_name = FALSE;
/* Literal pool structure.  Held on a per-section
   and per-sub-section basis.  */

#define MAX_LITERAL_POOL_SIZE 1024
typedef struct literal_pool
  expressionS	 literals[MAX_LITERAL_POOL_SIZE];
  unsigned int	 next_free_entry;
  struct literal_pool * next;

/* Pointer to a linked list of literal pools.  */
literal_pool * list_of_pools = NULL;

/* State variables for IT block handling.  */
static bfd_boolean current_it_mask = 0;
static int current_cc;
/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.  */
const char comment_chars[] = "@";

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.  */
/* Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.  */
/* Also note that comments like this one will always work.  */
const char line_comment_chars[] = "#";

const char line_separator_chars[] = ";";

/* Chars that can be used to separate mant
   from exp in floating point numbers.  */
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant.  */
const char FLT_CHARS[] = "rRsSfFdDxXeEpP";

/* Prefix characters that indicate the start of an immediate
   value.  */
#define is_immediate_prefix(C) ((C) == '#' || (C) == '$')

/* Separator character handling.  */

#define skip_whitespace(str)  do { if (*(str) == ' ') ++(str); } while (0)

skip_past_char (char ** str, char c)

#define skip_past_comma(str) skip_past_char (str, ',')
/* Arithmetic expressions (possibly involving symbols).  */

/* Return TRUE if anything in the expression is a bignum.  */
walk_no_bignums (symbolS * sp)
  if (symbol_get_value_expression (sp)->X_op == O_big)

  if (symbol_get_value_expression (sp)->X_add_symbol)
    return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
	    || (symbol_get_value_expression (sp)->X_op_symbol
		&& walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));

static int in_my_get_expression = 0;

/* Third argument to my_get_expression.  */
#define GE_NO_PREFIX 0
#define GE_IMM_PREFIX 1
#define GE_OPT_PREFIX 2
/* This is a bit of a hack.  Use an optional prefix, and also allow big (64-bit)
   immediates, as can be used in Neon VMVN and VMOV immediate instructions.  */
#define GE_OPT_PREFIX_BIG 3
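
/* Illustrative note (not from the original source): GE_OPT_PREFIX_BIG is
   used for operands such as the 64-bit immediate in
       vmov.i64  d0, #0xff0000ff0000ffff
   where the constant does not fit in 32 bits and the '#' may be omitted
   in unified syntax.  */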
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
  /* In unified syntax, all prefixes are optional.  */
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode

    case GE_NO_PREFIX: break;
      if (!is_immediate_prefix (**str))
	  inst.error = _("immediate expression requires a # prefix");

    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))

  memset (ep, 0, sizeof (expressionS));

  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = 1;
  seg = expression (ep);
  in_my_get_expression = 0;

  if (ep->X_op == O_illegal)
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
	inst.error = _("bad expression");

  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
      inst.error = _("bad segment");
      *str = input_line_pointer;
      input_line_pointer = save_in;

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.  Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
	  && (walk_no_bignums (ep->X_add_symbol)
	      && walk_no_bignums (ep->X_op_symbol))))))
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;

  *str = input_line_pointer;
  input_line_pointer = save_in;
/* Turn a string in input_line_pointer into a floating point constant
   of type TYPE, and store the appropriate bytes in *LITP.  The number
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   returned, or NULL on OK.

   Note that fp constants aren't represented in the normal way on the ARM.
   In big endian mode, things are as expected.  However, in little endian
   mode fp constants are big-endian word-wise, and little-endian byte-wise
   within the words.  For example, (double) 1.1 in big endian mode is
   the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
   the byte sequence 99 99 f1 3f 9a 99 99 99.

   ??? The format of 12 byte floats is uncertain according to gcc's arm.h.  */
md_atof (int type, char * litP, int * sizeP)
  LITTLENUM_TYPE words[MAX_LITTLENUMS];

      return _("bad call to MD_ATOF()");

  t = atof_ieee (input_line_pointer, type, words);
    input_line_pointer = t;

  if (target_big_endian)
      for (i = 0; i < prec; i++)
	  md_number_to_chars (litP, (valueT) words[i], 2);

      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	for (i = prec - 1; i >= 0; i--)
	    md_number_to_chars (litP, (valueT) words[i], 2);

	/* For a 4 byte float the order of elements in `words' is 1 0.
	   For an 8 byte float the order is 1 0 3 2.  */
	for (i = 0; i < prec; i += 2)
	    md_number_to_chars (litP, (valueT) words[i + 1], 2);
	    md_number_to_chars (litP + 2, (valueT) words[i], 2);
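
/* Illustrative sketch (not part of the assembler): the little-endian FPA
   byte order described in the comment above md_atof can be produced from
   a host little-endian IEEE double by swapping the two 32-bit words while
   leaving the bytes within each word alone.  The helper below is
   hypothetical and is not used anywhere in this file.  */
#if 0
#include <string.h>

static void
fpa_order_double (const unsigned char in[8], unsigned char out[8])
{
  memcpy (out, in + 4, 4);	/* High word first ...  */
  memcpy (out + 4, in, 4);	/* ... then low word, bytes unchanged.  */
}
/* For (double) 1.1 the host bytes 9a 99 99 99 99 99 f1 3f become
   99 99 f1 3f 9a 99 99 99, matching the example in the comment above.  */
#endif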
/* We handle all bad expressions here, so that we can report the faulty
   instruction in the error message.  */
md_operand (expressionS * expr)
  if (in_my_get_expression)
    expr->X_op = O_illegal;
/* Immediate values.  */

/* Generic immediate-value read function for use in directives.
   Accepts anything that 'expression' can fold to a constant.
   *val receives the number.  */

immediate_for_directive (int *val)
  exp.X_op = O_illegal;

  if (is_immediate_prefix (*input_line_pointer))
      input_line_pointer++;

  if (exp.X_op != O_constant)
      as_bad (_("expected #constant"));
      ignore_rest_of_line ();

  *val = exp.X_add_number;
/* Register parsing.  */

/* Generic register parser.  CCP points to what should be the
   beginning of a register name.  If it is indeed a valid register
   name, advance CCP over it and return the reg_entry structure;
   otherwise return NULL.  Does not issue diagnostics.  */

static struct reg_entry *
arm_reg_parse_multi (char **ccp)
  struct reg_entry *reg;

#ifdef REGISTER_PREFIX
  if (*start != REGISTER_PREFIX)

#ifdef OPTIONAL_REGISTER_PREFIX
  if (*start == OPTIONAL_REGISTER_PREFIX)

  if (!ISALPHA (*p) || !is_name_beginner (*p))

  while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');

  reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
		    enum arm_reg_type type)
  /* Alternative syntaxes are accepted for a few register classes.  */
      /* Generic coprocessor register names are allowed for these.  */
      if (reg && reg->type == REG_TYPE_CN)

      /* For backward compatibility, a bare number is valid here.  */
	unsigned long processor = strtoul (start, ccp, 10);
	if (*ccp != start && processor <= 15)

    case REG_TYPE_MMXWC:
      /* WC includes WCG.  ??? I'm not sure this is true for all
	 instructions that take WC registers.  */
      if (reg && reg->type == REG_TYPE_MMXWCG)
/* As arm_reg_parse_multi, but the register must be of type TYPE, and the
   return value is the register number or FAIL.  */

arm_reg_parse (char **ccp, enum arm_reg_type type)
  struct reg_entry *reg = arm_reg_parse_multi (ccp);

  /* Do not allow a scalar (reg+index) to parse as a register.  */
  if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))

  if (reg && reg->type == type)

  if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
/* Parse a Neon type specifier.  *STR should point at the leading '.'
   character.  Does no verification at this stage that the type fits the
   opcode properly; specifiers such as ".i32", ".s8" or ".i32.i32.s16"
   can all be legally parsed by this function.

   Fills in neon_type struct pointer with parsed information, and updates STR
   to point after the parsed type specifier.  Returns SUCCESS if this was a legal
   type, FAIL if not.  */

parse_neon_type (struct neon_type *type, char **str)
  while (type->elems < NEON_MAX_TYPE_ELS)
      enum neon_el_type thistype = NT_untyped;
      unsigned thissize = -1u;

	  /* Just a size without an explicit type.  */
	  switch (TOLOWER (*ptr))
	    case 'i': thistype = NT_integer; break;
	    case 'f': thistype = NT_float; break;
	    case 'p': thistype = NT_poly; break;
	    case 's': thistype = NT_signed; break;
	    case 'u': thistype = NT_unsigned; break;
	      as_bad (_("unexpected character `%c' in type specifier"), *ptr);

	  /* .f is an abbreviation for .f32.  */
	  if (thistype == NT_float && !ISDIGIT (*ptr))

	  thissize = strtoul (ptr, &ptr, 10);

	  if (thissize != 8 && thissize != 16 && thissize != 32
	      as_bad (_("bad size %d in type specifier"), thissize);

      type->el[type->elems].type = thistype;
      type->el[type->elems].size = thissize;

  /* Empty/missing type is not a successful parse.  */
  if (type->elems == 0)
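
/* Illustrative note (not from the original source): a specifier such as
   ".s32.f32" parses to two elements, { NT_signed, 32 } and
   { NT_float, 32 }, while ".f" alone yields { NT_float, 32 } because of
   the .f == .f32 abbreviation handled above.  */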
/* Errors may be set multiple times during parsing or bit encoding
   (particularly in the Neon bits), but usually the earliest error which is set
   will be the most meaningful.  Avoid overwriting it with later (cascading)
   errors by calling this function.  */

first_error (const char *err)

/* Parse a single type, e.g. ".s32", leading period included.  */
parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
  struct neon_type optype;

  if (parse_neon_type (&optype, &str) == SUCCESS)
      if (optype.elems == 1)
	*vectype = optype.el[0];

	  first_error (_("only one type should be specified for operand"));

      first_error (_("vector type expected"));
/* Special meanings for indices (which have a range of 0-7), which will fit
   into a 4-bit field.  */

#define NEON_ALL_LANES		15
#define NEON_INTERLEAVE_LANES	14
/* Parse either a register or a scalar, with an optional type.  Return the
   register number, and optionally fill in the actual type of the register
   when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
   type/index information in *TYPEINFO.  */

parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
			   enum arm_reg_type *rtype,
			   struct neon_typed_alias *typeinfo)
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register.  Note these are mutually
     exclusive with the Neon syntax extensions.  */
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);

  /* Undo polymorphism for Neon D and Q registers.  */
  if (type == REG_TYPE_NDQ
      && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))

  if (type != reg->type)

  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
      if ((atype.defined & NTA_HASTYPE) != 0)
	  first_error (_("can't redefine type for operand"));

      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;

  if (skip_past_char (&str, '[') == SUCCESS)
      if (type != REG_TYPE_VFD)
	  first_error (_("only D registers may be indexed"));

      if ((atype.defined & NTA_HASINDEX) != 0)
	  first_error (_("can't change index for operand"));

      atype.defined |= NTA_HASINDEX;

      if (skip_past_char (&str, ']') == SUCCESS)
	atype.index = NEON_ALL_LANES;

	  my_get_expression (&exp, &str, GE_NO_PREFIX);

	  if (exp.X_op != O_constant)
	      first_error (_("constant expression required"));

	  if (skip_past_char (&str, ']') == FAIL)

	  atype.index = exp.X_add_number;
/* Like arm_reg_parse, but allow the following extra features:
    - If RTYPE is non-zero, return the (possibly restricted) type of the
      register (e.g. Neon double or quad reg when either has been requested).
    - If this is a Neon vector type with additional type information, fill
      in the struct pointed to by VECTYPE (if non-NULL).
   This function will fault on encountering a scalar.  */

arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
		     enum arm_reg_type *rtype, struct neon_type_el *vectype)
  struct neon_typed_alias atype;

  int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);

  /* Do not allow a scalar (reg+index) to parse as a register.  */
  if ((atype.defined & NTA_HASINDEX) != 0)
      first_error (_("register operand expected, but got scalar"));

    *vectype = atype.eltype;
#define NEON_SCALAR_REG(X)	((X) >> 4)
#define NEON_SCALAR_INDEX(X)	((X) & 15)

/* Parse a Neon scalar.  Most of the time when we're parsing a scalar, we don't
   have enough information to be able to do a good job bounds-checking.  So, we
   just do easy checks here, and do further checks later.  */

parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
  struct neon_typed_alias atype;

  reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);

  if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)

  if (atype.index == NEON_ALL_LANES)
      first_error (_("scalar must have an index"));
  else if (atype.index >= 64 / elsize)
      first_error (_("scalar index out of range"));

    *type = atype.eltype;

  return reg * 16 + atype.index;
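
/* Illustrative note (not from the original source): for the scalar
   "d2[3]" with ELSIZE 32, parse_scalar returns 2 * 16 + 3 = 0x23, so
   NEON_SCALAR_REG yields 2 and NEON_SCALAR_INDEX yields 3.  */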
/* Parse an ARM register list.  Returns the bitmask, or FAIL.  */

parse_reg_list (char ** strp)
  char * str = * strp;

  /* We come back here if we get ranges concatenated by '+' or '|'.  */
	  if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
	      first_error (_(reg_expected_msgs[REG_TYPE_RN]));

		  first_error (_("bad range in register list"));

	      for (i = cur_reg + 1; i < reg; i++)
		  if (range & (1 << i))
		      (_("Warning: duplicated register (r%d) in register list"),

	  if (range & (1 << reg))
	    as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
	  else if (reg <= cur_reg)
	    as_tsktsk (_("Warning: register range not in ascending order"));

      while (skip_past_comma (&str) != FAIL
	     || (in_range = 1, *str++ == '-'));

	  first_error (_("missing `}'"));

      if (my_get_expression (&expr, &str, GE_NO_PREFIX))

      if (expr.X_op == O_constant)
	  if (expr.X_add_number
	      != (expr.X_add_number & 0x0000ffff))
	      inst.error = _("invalid register mask");

	  if ((range & expr.X_add_number) != 0)
	      int regno = range & expr.X_add_number;

	      regno = (1 << regno) - 1;
		(_("Warning: duplicated register (r%d) in register list"),

	  range |= expr.X_add_number;

	  if (inst.reloc.type != 0)
	      inst.error = _("expression too complex");

	  memcpy (&inst.reloc.exp, &expr, sizeof (expressionS));
	  inst.reloc.type = BFD_RELOC_ARM_MULTI;
	  inst.reloc.pc_rel = 0;

      if (*str == '|' || *str == '+')
  while (another_range);
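
/* Illustrative note (not from the original source): a list such as
   "{r0-r3, r7, lr}" yields the bitmask 0x408f; the '+' and '|'
   separators between ranges are accepted above for backwards
   compatibility.  */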
/* Types of registers in a list.  */

/* Parse a VFP register list.  If the string is invalid return FAIL.
   Otherwise return the number of registers, and set PBASE to the first
   register.  Parses registers of type ETYPE.
   If REGLIST_NEON_D is used, several syntax enhancements are enabled:
     - Q registers can be used to specify pairs of D registers
     - { } can be omitted from around a singleton register list
	 FIXME: This is not implemented, as it would require backtracking
	 in some cases.
	 This could be done (the meaning isn't really ambiguous), but doesn't
	 fit in well with the current parsing framework.
     - 32 D registers may be used (also true for VFPv3).
   FIXME: Types are ignored in these register lists, which is probably a
   bug.  */

parse_vfp_reg_list (char **str, unsigned int *pbase, enum reg_list_els etype)
  enum arm_reg_type regtype = 0;
  unsigned long mask = 0;

      inst.error = _("expecting {");

      regtype = REG_TYPE_VFS;

      regtype = REG_TYPE_VFD;
      /* VFPv3 allows 32 D registers.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,

    case REGLIST_NEON_D:
      regtype = REG_TYPE_NDQ;

  base_reg = max_regs;

      int setmask = 1, addregs = 1;

      new_base = arm_typed_reg_parse (str, regtype, &regtype, NULL);

      if (new_base == FAIL)
	  first_error (_(reg_expected_msgs[regtype]));

      /* Note: a value of 2 * n is returned for the register Q<n>.  */
      if (regtype == REG_TYPE_NQ)

      if (new_base < base_reg)
	base_reg = new_base;

      if (mask & (setmask << new_base))
	  first_error (_("invalid register list"));

      if ((mask >> new_base) != 0 && ! warned)
	  as_tsktsk (_("register list not in ascending order"));

      mask |= setmask << new_base;

      if (**str == '-') /* We have the start of a range expression */
	  if ((high_range = arm_typed_reg_parse (str, regtype, NULL, NULL))
	      inst.error = gettext (reg_expected_msgs[regtype]);

	  if (regtype == REG_TYPE_NQ)
	    high_range = high_range + 1;

	  if (high_range <= new_base)
	      inst.error = _("register range not in ascending order");

	  for (new_base += addregs; new_base <= high_range; new_base += addregs)
	      if (mask & (setmask << new_base))
		  inst.error = _("invalid register list");

	      mask |= setmask << new_base;
  while (skip_past_comma (str) != FAIL);

  /* Sanity check -- should have raised a parse error above.  */
  if (count == 0 || count > max_regs)

  /* Final test -- the registers must be consecutive.  */
  for (i = 0; i < count; i++)
      if ((mask & (1u << i)) == 0)
	  inst.error = _("non-contiguous register range");
/* True if two alias types are the same.  */

neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
  if (a->defined != b->defined)

  if ((a->defined & NTA_HASTYPE) != 0
      && (a->eltype.type != b->eltype.type
	  || a->eltype.size != b->eltype.size))

  if ((a->defined & NTA_HASINDEX) != 0
      && (a->index != b->index))
/* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
   The base register is put in *PBASE.
   The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
   the return value.
   The register stride (minus one) is put in bit 4 of the return value.
   Bits [6:5] encode the list length (minus one).
   The type of the list elements is put in *ELTYPE, if non-NULL.  */

#define NEON_LANE(X)		((X) & 0xf)
#define NEON_REG_STRIDE(X)	((((X) >> 4) & 1) + 1)
#define NEON_REGLIST_LENGTH(X)	((((X) >> 5) & 3) + 1)
parse_neon_el_struct_list (char **str, unsigned *pbase,
			   struct neon_type_el *eltype)
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  const char *const incr_error = "register stride must be 1 or 2";
  const char *const type_error = "mismatched element/structure types in list";
  struct neon_typed_alias firsttype;

  if (skip_past_char (&ptr, '{') == SUCCESS)

      struct neon_typed_alias atype;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

	  first_error (_(reg_expected_msgs[rtype]));

      if (rtype == REG_TYPE_NQ)

      else if (reg_incr == -1)
	  reg_incr = getreg - base_reg;
	  if (reg_incr < 1 || reg_incr > 2)
	      first_error (_(incr_error));
      else if (getreg != base_reg + reg_incr * count)
	  first_error (_(incr_error));

      if (!neon_alias_types_same (&atype, &firsttype))
	  first_error (_(type_error));

      /* Handle Dn-Dm or Qn-Qm syntax.  Can only be used with non-indexed list
	 types.  */
	  struct neon_typed_alias htype;
	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
	    lane = NEON_INTERLEAVE_LANES;
	  else if (lane != NEON_INTERLEAVE_LANES)
	      first_error (_(type_error));
	  else if (reg_incr != 1)
	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));

	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
	      first_error (_(reg_expected_msgs[rtype]));

	  if (!neon_alias_types_same (&htype, &firsttype))
	      first_error (_(type_error));

	  count += hireg + dregs - getreg;

      /* If we're using Q registers, we can't use [] or [n] syntax.  */
      if (rtype == REG_TYPE_NQ)

      if ((atype.defined & NTA_HASINDEX) != 0)
	  else if (lane != atype.index)
	      first_error (_(type_error));
      else if (lane == -1)
	lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
	  first_error (_(type_error));
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x].  We must be interleaving structures.  */
    lane = NEON_INTERLEAVE_LANES;

  if (lane == -1 || base_reg == -1 || count < 1 || count > 4
      || (count > 1 && reg_incr == -1))
      first_error (_("error parsing element/structure list"));

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
      first_error (_("expected }"));

    *eltype = firsttype.eltype;

  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
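
/* Illustrative note (not from the original source): for
   "vld4.8 {d0[3], d2[3], d4[3], d6[3]}" the parser above sees base
   register 0, stride 2, length 4 and lane 3, so it returns
   3 | (1 << 4) | (3 << 5) == 0x73.  */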
/* Parse an explicit relocation suffix on an expression.  This is
   either nothing, or a word in parentheses.  Note that if !OBJ_ELF,
   arm_reloc_hsh contains no entries, so this function can only
   succeed if there is no () after the word.  Returns -1 on error,
   BFD_RELOC_UNUSED if there wasn't any suffix.  */

parse_reloc (char **str)
  struct reloc_entry *r;

    return BFD_RELOC_UNUSED;

  while (*q && *q != ')' && *q != ',')

  if ((r = hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
/* Directives: register aliases.  */

static struct reg_entry *
insert_reg_alias (char *str, int number, int type)
  struct reg_entry *new;

  if ((new = hash_find (arm_reg_hsh, str)) != 0)
	as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
      /* Only warn about a redefinition if it's not defined as the
	 same register.  */
      else if (new->number != number || new->type != type)
	as_warn (_("ignoring redefinition of register alias '%s'"), str);

  name = xstrdup (str);
  new = xmalloc (sizeof (struct reg_entry));

  new->number = number;
  new->builtin = FALSE;

  if (hash_insert (arm_reg_hsh, name, (PTR) new))

insert_neon_reg_alias (char *str, int number, int type,
		       struct neon_typed_alias *atype)
  struct reg_entry *reg = insert_reg_alias (str, number, type);

      first_error (_("attempt to redefine typed alias"));

      reg->neon = xmalloc (sizeof (struct neon_typed_alias));
      *reg->neon = *atype;
/* Look for the .req directive.  This is of the form:

	new_register_name .req existing_register_name

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return non-zero.  Otherwise return zero.  */

create_register_alias (char * newname, char *p)
  struct reg_entry *old;
  char *oldname, *nbuf;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  if (strncmp (oldname, " .req ", 6) != 0)

  if (*oldname == '\0')

  old = hash_find (arm_reg_hsh, oldname);
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  newname = original_case_string;
  nlen = strlen (newname);

  nbuf = alloca (nlen + 1);
  memcpy (nbuf, newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  insert_reg_alias (nbuf, old->number, old->type);

  for (p = nbuf; *p; p++)
  if (strncmp (nbuf, newname, nlen))
    insert_reg_alias (nbuf, old->number, old->type);

  for (p = nbuf; *p; p++)
  if (strncmp (nbuf, newname, nlen))
    insert_reg_alias (nbuf, old->number, old->type);
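
/* Illustrative note (not from the original source): after
       acc .req r4
   both "acc" and "ACC" (the stated name plus the case variants created
   above) refer to r4, until removed with ".unreq".  */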
/* Create a Neon typed/indexed register alias using directives, e.g.:

   These typed registers can be used instead of the types specified after the
   Neon mnemonic, so long as all operands given have types.  Types can also be
   specified directly, e.g.:
     vadd d0.s32, d1.s32, d2.s32  */

create_neon_reg_alias (char *newname, char *p)
  enum arm_reg_type basetype;
  struct reg_entry *basereg;
  struct reg_entry mybasereg;
  struct neon_type ntype;
  struct neon_typed_alias typeinfo;
  char *namebuf, *nameend;

  typeinfo.defined = 0;
  typeinfo.eltype.type = NT_invtype;
  typeinfo.eltype.size = -1;
  typeinfo.index = -1;

  if (strncmp (p, " .dn ", 5) == 0)
    basetype = REG_TYPE_VFD;
  else if (strncmp (p, " .qn ", 5) == 0)
    basetype = REG_TYPE_NQ;

  basereg = arm_reg_parse_multi (&p);

  if (basereg && basereg->type != basetype)
      as_bad (_("bad type for register"));

  if (basereg == NULL)
      /* Try parsing as an integer.  */
      my_get_expression (&exp, &p, GE_NO_PREFIX);
      if (exp.X_op != O_constant)
	  as_bad (_("expression must be constant"));
      basereg = &mybasereg;
      basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2

    typeinfo = *basereg->neon;

  if (parse_neon_type (&ntype, &p) == SUCCESS)
      /* We got a type.  */
      if (typeinfo.defined & NTA_HASTYPE)
	  as_bad (_("can't redefine the type of a register alias"));

      typeinfo.defined |= NTA_HASTYPE;
      if (ntype.elems != 1)
	  as_bad (_("you must specify a single type only"));
      typeinfo.eltype = ntype.el[0];

  if (skip_past_char (&p, '[') == SUCCESS)
      /* We got a scalar index.  */
      if (typeinfo.defined & NTA_HASINDEX)
	  as_bad (_("can't redefine the index of a scalar alias"));

      my_get_expression (&exp, &p, GE_NO_PREFIX);

      if (exp.X_op != O_constant)
	  as_bad (_("scalar index must be constant"));

      typeinfo.defined |= NTA_HASINDEX;
      typeinfo.index = exp.X_add_number;

      if (skip_past_char (&p, ']') == FAIL)
	  as_bad (_("expecting ]"));

  namelen = nameend - newname;
  namebuf = alloca (namelen + 1);
  strncpy (namebuf, newname, namelen);
  namebuf[namelen] = '\0';

  insert_neon_reg_alias (namebuf, basereg->number, basetype,
			 typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all uppercase.  */
  for (p = namebuf; *p; p++)
  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all lowercase.  */
  for (p = namebuf; *p; p++)
  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);
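
/* Illustrative note (not from the original source): a typical use of the
   directive handled above is
       x .dn d2.f32
   after which "vmul x, x, x" is equivalent to "vmul.f32 d2, d2, d2".  */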
/* Should never be called, as .req goes between the alias and the
   register name, not at the beginning of the line.  */
s_req (int a ATTRIBUTE_UNUSED)
  as_bad (_("invalid syntax for .req directive"));

s_dn (int a ATTRIBUTE_UNUSED)
  as_bad (_("invalid syntax for .dn directive"));

s_qn (int a ATTRIBUTE_UNUSED)
  as_bad (_("invalid syntax for .qn directive"));

/* The .unreq directive deletes an alias which was previously defined
   by .req.  For example:

       foo .req r0
       .unreq foo  */

s_unreq (int a ATTRIBUTE_UNUSED)
  name = input_line_pointer;

  while (*input_line_pointer != 0
	 && *input_line_pointer != ' '
	 && *input_line_pointer != '\n')
    ++input_line_pointer;

  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

    as_bad (_("invalid syntax for .unreq directive"));

      struct reg_entry *reg = hash_find (arm_reg_hsh, name);

	as_bad (_("unknown register alias '%s'"), name);
      else if (reg->builtin)
	as_warn (_("ignoring attempt to undefine built-in register '%s'"),

	  hash_delete (arm_reg_hsh, name);
	  free ((char *) reg->name);

  *input_line_pointer = saved_char;
  demand_empty_rest_of_line ();
/* Directives: Instruction set selection.  */

/* This code is to handle mapping symbols as defined in the ARM ELF spec.
   (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
   Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
   and $d had type STT_OBJECT (BSF_OBJECT flag).  Now all three are untyped.  */

static enum mstate mapstate = MAP_UNDEFINED;

mapping_state (enum mstate state)
  const char * symname;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */

      type = BSF_NO_FLAGS;
      type = BSF_NO_FLAGS;
      type = BSF_NO_FLAGS;

  seg_info (now_seg)->tc_segment_info_data.mapstate = state;

  symbolP = symbol_new (symname, now_seg, (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (symbolP);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

      THUMB_SET_FUNC (symbolP, 0);
      ARM_SET_THUMB (symbolP, 0);
      ARM_SET_INTERWORK (symbolP, support_interwork);

      THUMB_SET_FUNC (symbolP, 1);
      ARM_SET_THUMB (symbolP, 1);
      ARM_SET_INTERWORK (symbolP, support_interwork);

#define mapping_state(x) /* nothing */
/* Find the real, Thumb encoded start of a Thumb function.  */

find_real_start (symbolS * symbolP)
  const char * name = S_GET_NAME (symbolP);
  symbolS  * new_target;

  /* This definition must agree with the one in gcc/config/arm/thumb.c.  */
#define STUB_NAME ".real_start_of"

  /* The compiler may generate BL instructions to local labels because
     it needs to perform a branch to a far away location.  These labels
     do not have a corresponding ".real_start_of" label.  We check
     both for S_IS_LOCAL and for a leading dot, to give a way to bypass
     the ".real_start_of" convention for nonlocal branches.  */
  if (S_IS_LOCAL (symbolP) || name[0] == '.')

  real_start = ACONCAT ((STUB_NAME, name, NULL));
  new_target = symbol_find (real_start);

  if (new_target == NULL)
      as_warn ("Failed to find real start of function: %s\n", name);
      new_target = symbolP;
opcode_select (int width)
      if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	as_bad (_("selected processor does not support THUMB opcodes"));

	  /* No need to force the alignment, since we will have been
	     coming from ARM mode, which is word-aligned.  */
	  record_alignment (now_seg, 1);

      mapping_state (MAP_THUMB);

      if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
	as_bad (_("selected processor does not support ARM opcodes"));

	    frag_align (2, 0, 0);

	  record_alignment (now_seg, 1);

      mapping_state (MAP_ARM);

      as_bad (_("invalid instruction size selected (%d)"), width);
s_arm (int ignore ATTRIBUTE_UNUSED)
  demand_empty_rest_of_line ();

s_thumb (int ignore ATTRIBUTE_UNUSED)
  demand_empty_rest_of_line ();

s_code (int unused ATTRIBUTE_UNUSED)
  temp = get_absolute_expression ();

      opcode_select (temp);

      as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);

s_force_thumb (int ignore ATTRIBUTE_UNUSED)
  /* If we are not already in thumb mode go into it, EVEN if
     the target processor does not support thumb instructions.
     This is used by gcc/config/arm/lib1funcs.asm for example
     to compile interworking support functions even if the
     target processor should not support interworking.  */
      record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();

s_thumb_func (int ignore ATTRIBUTE_UNUSED)
  /* The following label is the name/address of the start of a Thumb function.
     We need to know this for the interworking support.  */
  label_is_thumb_function_name = TRUE;
/* Perform a .set directive, but also mark the alias as
   being a thumb function.  */

s_thumb_set (int equiv)
  /* XXX the following is a duplicate of the code for s_set() in read.c
     We cannot just call that code as we need to get at the symbol that
     is created.  */

  /* Especial apologies for the random logic:
     This just grew, and could be parsed much more simply!  */
  name = input_line_pointer;
  delim = get_symbol_end ();
  end_name = input_line_pointer;

  if (*input_line_pointer != ',')
      as_bad (_("expected comma after name \"%s\""), name);
      ignore_rest_of_line ();

  input_line_pointer++;

  if (name[0] == '.' && name[1] == '\0')
      /* XXX - this should not happen to .thumb_set.  */

  if ((symbolP = symbol_find (name)) == NULL
      && (symbolP = md_undefined_symbol (name)) == NULL)
      /* When doing symbol listings, play games with dummy fragments living
	 outside the normal fragment chain to record the file and line info
	 for this symbol.  */
      if (listing & LISTING_SYMBOLS)
	  extern struct list_info_struct * listing_tail;
	  fragS * dummy_frag = xmalloc (sizeof (fragS));

	  memset (dummy_frag, 0, sizeof (fragS));
	  dummy_frag->fr_type = rs_fill;
	  dummy_frag->line = listing_tail;
	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
	  dummy_frag->fr_symbol = symbolP;

	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);

      /* "set" symbols are local unless otherwise specified.  */
      SF_SET_LOCAL (symbolP);
#endif /* OBJ_COFF */
    }	/* Make a new symbol.  */

  symbol_table_insert (symbolP);

      && S_IS_DEFINED (symbolP)
      && S_GET_SEGMENT (symbolP) != reg_section)
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));

  pseudo_set (symbolP);

  demand_empty_rest_of_line ();

  /* XXX Now we come to the Thumb specific bit of code.  */

  THUMB_SET_FUNC (symbolP, 1);
  ARM_SET_THUMB (symbolP, 1);
#if defined OBJ_ELF || defined OBJ_COFF
  ARM_SET_INTERWORK (symbolP, support_interwork);
/* Directives: Mode selection.  */

/* .syntax [unified|divided] - choose the new unified syntax
   (same for Arm and Thumb encoding, modulo slight differences in what
   can be represented) or the old divergent syntax for each mode.  */
s_syntax (int unused ATTRIBUTE_UNUSED)
  name = input_line_pointer;
  delim = get_symbol_end ();

  if (!strcasecmp (name, "unified"))
    unified_syntax = TRUE;
  else if (!strcasecmp (name, "divided"))
    unified_syntax = FALSE;
      as_bad (_("unrecognized syntax mode \"%s\""), name);
  *input_line_pointer = delim;
  demand_empty_rest_of_line ();
/* Directives: sectioning and alignment.  */

/* Same as s_align_ptwo but align 0 => align 2.  */
s_align (int unused ATTRIBUTE_UNUSED)
  long max_alignment = 15;

  temp = get_absolute_expression ();
  if (temp > max_alignment)
    as_bad (_("alignment too large: %d assumed"), temp = max_alignment);
    as_bad (_("alignment negative. 0 assumed."));

  if (*input_line_pointer == ',')
      input_line_pointer++;
      temp_fill = get_absolute_expression ();

  /* Only make a frag if we HAVE to.  */
  if (temp && !need_pass_2)
    frag_align (temp, (int) temp_fill, 0);
  demand_empty_rest_of_line ();

  record_alignment (now_seg, temp);
s_bss (int ignore ATTRIBUTE_UNUSED)
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  mapping_state (MAP_DATA);

s_even (int ignore ATTRIBUTE_UNUSED)
  /* Never make frag if expect extra pass.  */
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
/* Directives: Literal pools.  */

static literal_pool *
find_literal_pool (void)
  literal_pool * pool;

  for (pool = list_of_pools; pool != NULL; pool = pool->next)
      if (pool->section == now_seg
	  && pool->sub_section == now_subseg)

static literal_pool *
find_or_make_literal_pool (void)
  /* Next literal pool ID number.  */
  static unsigned int latest_pool_num = 1;
  literal_pool * pool;

  pool = find_literal_pool ();

      /* Create a new pool.  */
      pool = xmalloc (sizeof (* pool));

      pool->next_free_entry = 0;
      pool->section	    = now_seg;
      pool->sub_section	    = now_subseg;
      pool->next	    = list_of_pools;
      pool->symbol	    = NULL;

      /* Add it to the list.  */
      list_of_pools = pool;

  /* New pools, and emptied pools, will have a NULL symbol.  */
  if (pool->symbol == NULL)
      pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
				    (valueT) 0, &zero_address_frag);
      pool->id = latest_pool_num ++;
/* Add the literal in the global 'inst'
   structure to the relevant literal pool.  */

add_to_lit_pool (void)
  literal_pool * pool;

  pool = find_or_make_literal_pool ();

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
      if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	  && (inst.reloc.exp.X_op == O_constant)
	  && (pool->literals[entry].X_add_number
	      == inst.reloc.exp.X_add_number)
	  && (pool->literals[entry].X_unsigned
	      == inst.reloc.exp.X_unsigned))

      if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	  && (inst.reloc.exp.X_op == O_symbol)
	  && (pool->literals[entry].X_add_number
	      == inst.reloc.exp.X_add_number)
	  && (pool->literals[entry].X_add_symbol
	      == inst.reloc.exp.X_add_symbol)
	  && (pool->literals[entry].X_op_symbol
	      == inst.reloc.exp.X_op_symbol))

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
      if (entry >= MAX_LITERAL_POOL_SIZE)
	  inst.error = _("literal pool overflow");

      pool->literals[entry] = inst.reloc.exp;
      pool->next_free_entry += 1;

  inst.reloc.exp.X_op	      = O_symbol;
  inst.reloc.exp.X_add_number = ((int) entry) * 4;
  inst.reloc.exp.X_add_symbol = pool->symbol;
/* Can't use symbol_new here, so we have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.  */
2722 symbol_locate (symbolS
* symbolP
,
2723 const char * name
, /* It is copied, the caller can modify. */
2724 segT segment
, /* Segment identifier (SEG_<something>). */
2725 valueT valu
, /* Symbol value. */
2726 fragS
* frag
) /* Associated fragment. */
2728 unsigned int name_length
;
2729 char * preserved_copy_of_name
;
2731 name_length
= strlen (name
) + 1; /* +1 for \0. */
2732 obstack_grow (¬es
, name
, name_length
);
2733 preserved_copy_of_name
= obstack_finish (¬es
);
2735 #ifdef tc_canonicalize_symbol_name
2736 preserved_copy_of_name
=
2737 tc_canonicalize_symbol_name (preserved_copy_of_name
);
2740 S_SET_NAME (symbolP
, preserved_copy_of_name
);
2742 S_SET_SEGMENT (symbolP
, segment
);
2743 S_SET_VALUE (symbolP
, valu
);
2744 symbol_clear_list_pointers (symbolP
);
2746 symbol_set_frag (symbolP
, frag
);
2748 /* Link to end of symbol chain. */
2750 extern int symbol_table_frozen
;
2752 if (symbol_table_frozen
)
2756 symbol_append (symbolP
, symbol_lastP
, & symbol_rootP
, & symbol_lastP
);
2758 obj_symbol_new_hook (symbolP
);
2760 #ifdef tc_symbol_new_hook
2761 tc_symbol_new_hook (symbolP
);
2765 verify_symbol_chain (symbol_rootP
, symbol_lastP
);
2766 #endif /* DEBUG_SYMS */
2771 s_ltorg (int ignored ATTRIBUTE_UNUSED
)
2774 literal_pool
* pool
;
2777 pool
= find_literal_pool ();
2779 || pool
->symbol
== NULL
2780 || pool
->next_free_entry
== 0)
2783 mapping_state (MAP_DATA
);
/* Align the pool, as we have word accesses.
     Only make a frag if we have to.  */
2788 frag_align (2, 0, 0);
2790 record_alignment (now_seg
, 2);
2792 sprintf (sym_name
, "$$lit_\002%x", pool
->id
);
2794 symbol_locate (pool
->symbol
, sym_name
, now_seg
,
2795 (valueT
) frag_now_fix (), frag_now
);
2796 symbol_table_insert (pool
->symbol
);
2798 ARM_SET_THUMB (pool
->symbol
, thumb_mode
);
2800 #if defined OBJ_COFF || defined OBJ_ELF
2801 ARM_SET_INTERWORK (pool
->symbol
, support_interwork
);
2804 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
2805 /* First output the expression in the instruction to the pool. */
2806 emit_expr (&(pool
->literals
[entry
]), 4); /* .word */
2808 /* Mark the pool as empty. */
2809 pool
->next_free_entry
= 0;
2810 pool
->symbol
= NULL
;
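/* Illustrative only (not part of the original sources): the literal pool
   machinery above is what makes the "ldr rX, =<constant>" pseudo-instruction
   and the .ltorg/.pool directives work together, e.g.:

	ldr	r0, =0x12345678	@ constant is deposited in the pool
	bx	lr
	.ltorg			@ dump the accumulated pool here

   Each pool entry is a 4-byte word emitted by emit_expr, and the ldr is
   fixed up as a PC-relative load of that word.  */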
/* Forward declarations for functions below, in the MD interface
   section.  */
2816 static void fix_new_arm (fragS
*, int, short, expressionS
*, int, int);
2817 static valueT
create_unwind_entry (int);
2818 static void start_unwind_section (const segT
, int);
2819 static void add_unwind_opcode (valueT
, int);
2820 static void flush_pending_unwind (void);
2822 /* Directives: Data. */
2825 s_arm_elf_cons (int nbytes
)
2829 #ifdef md_flush_pending_output
2830 md_flush_pending_output ();
2833 if (is_it_end_of_statement ())
2835 demand_empty_rest_of_line ();
2839 #ifdef md_cons_align
2840 md_cons_align (nbytes
);
2843 mapping_state (MAP_DATA
);
2847 char *base
= input_line_pointer
;
2851 if (exp
.X_op
!= O_symbol
)
2852 emit_expr (&exp
, (unsigned int) nbytes
);
2855 char *before_reloc
= input_line_pointer
;
2856 reloc
= parse_reloc (&input_line_pointer
);
2859 as_bad (_("unrecognized relocation suffix"));
2860 ignore_rest_of_line ();
2863 else if (reloc
== BFD_RELOC_UNUSED
)
2864 emit_expr (&exp
, (unsigned int) nbytes
);
2867 reloc_howto_type
*howto
= bfd_reloc_type_lookup (stdoutput
, reloc
);
2868 int size
= bfd_get_reloc_size (howto
);
2870 if (reloc
== BFD_RELOC_ARM_PLT32
)
2872 as_bad (_("(plt) is only valid on branch targets"));
2873 reloc
= BFD_RELOC_UNUSED
;
2878 as_bad (_("%s relocations do not fit in %d bytes"),
2879 howto
->name
, nbytes
);
2882 /* We've parsed an expression stopping at O_symbol.
2883 But there may be more expression left now that we
2884 have parsed the relocation marker. Parse it again.
2885 XXX Surely there is a cleaner way to do this. */
2886 char *p
= input_line_pointer
;
2888 char *save_buf
= alloca (input_line_pointer
- base
);
2889 memcpy (save_buf
, base
, input_line_pointer
- base
);
2890 memmove (base
+ (input_line_pointer
- before_reloc
),
2891 base
, before_reloc
- base
);
2893 input_line_pointer
= base
+ (input_line_pointer
-before_reloc
);
2895 memcpy (base
, save_buf
, p
- base
);
2897 offset
= nbytes
- size
;
2898 p
= frag_more ((int) nbytes
);
2899 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
+ offset
,
2900 size
, &exp
, 0, reloc
);
2905 while (*input_line_pointer
++ == ',');
2907 /* Put terminator back into stream. */
2908 input_line_pointer
--;
2909 demand_empty_rest_of_line ();
2913 /* Parse a .rel31 directive. */
2916 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED
)
2923 if (*input_line_pointer
== '1')
2924 highbit
= 0x80000000;
2925 else if (*input_line_pointer
!= '0')
2926 as_bad (_("expected 0 or 1"));
2928 input_line_pointer
++;
2929 if (*input_line_pointer
!= ',')
2930 as_bad (_("missing comma"));
2931 input_line_pointer
++;
2933 #ifdef md_flush_pending_output
2934 md_flush_pending_output ();
2937 #ifdef md_cons_align
2941 mapping_state (MAP_DATA
);
2946 md_number_to_chars (p
, highbit
, 4);
2947 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 1,
2948 BFD_RELOC_ARM_PREL31
);
2950 demand_empty_rest_of_line ();
2953 /* Directives: AEABI stack-unwind tables. */
2955 /* Parse an unwind_fnstart directive. Simply records the current location. */
2958 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED
)
2960 demand_empty_rest_of_line ();
2961 /* Mark the start of the function. */
2962 unwind
.proc_start
= expr_build_dot ();
2964 /* Reset the rest of the unwind info. */
2965 unwind
.opcode_count
= 0;
2966 unwind
.table_entry
= NULL
;
2967 unwind
.personality_routine
= NULL
;
2968 unwind
.personality_index
= -1;
2969 unwind
.frame_size
= 0;
2970 unwind
.fp_offset
= 0;
2973 unwind
.sp_restored
= 0;
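/* Illustrative only (not from the original sources): the unwind directives
   handled in this block are normally emitted by the compiler in this shape:

	.fnstart
	.save	{r4, r5, lr}
	push	{r4, r5, lr}
	.pad	#8
	sub	sp, sp, #8
	...
	.fnend

   .fnstart resets the state above; .fnend (below) emits the index table
   entry describing the opcodes accumulated in between.  */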
2977 /* Parse a handlerdata directive. Creates the exception handling table entry
2978 for the function. */
2981 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED
)
2983 demand_empty_rest_of_line ();
2984 if (unwind
.table_entry
)
as_bad (_("duplicate .handlerdata directive"));
2987 create_unwind_entry (1);
2990 /* Parse an unwind_fnend directive. Generates the index table entry. */
2993 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED
)
2999 demand_empty_rest_of_line ();
3001 /* Add eh table entry. */
3002 if (unwind
.table_entry
== NULL
)
3003 val
= create_unwind_entry (0);
3007 /* Add index table entry. This is two words. */
3008 start_unwind_section (unwind
.saved_seg
, 1);
3009 frag_align (2, 0, 0);
3010 record_alignment (now_seg
, 2);
3012 ptr
= frag_more (8);
3013 where
= frag_now_fix () - 8;
3015 /* Self relative offset of the function start. */
3016 fix_new (frag_now
, where
, 4, unwind
.proc_start
, 0, 1,
3017 BFD_RELOC_ARM_PREL31
);
3019 /* Indicate dependency on EHABI-defined personality routines to the
3020 linker, if it hasn't been done already. */
3021 if (unwind
.personality_index
>= 0 && unwind
.personality_index
< 3
3022 && !(marked_pr_dependency
& (1 << unwind
.personality_index
)))
3024 static const char *const name
[] = {
3025 "__aeabi_unwind_cpp_pr0",
3026 "__aeabi_unwind_cpp_pr1",
3027 "__aeabi_unwind_cpp_pr2"
3029 symbolS
*pr
= symbol_find_or_make (name
[unwind
.personality_index
]);
3030 fix_new (frag_now
, where
, 0, pr
, 0, 1, BFD_RELOC_NONE
);
3031 marked_pr_dependency
|= 1 << unwind
.personality_index
;
3032 seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
3033 = marked_pr_dependency
;
3037 /* Inline exception table entry. */
3038 md_number_to_chars (ptr
+ 4, val
, 4);
3040 /* Self relative offset of the table entry. */
3041 fix_new (frag_now
, where
+ 4, 4, unwind
.table_entry
, 0, 1,
3042 BFD_RELOC_ARM_PREL31
);
3044 /* Restore the original section. */
3045 subseg_set (unwind
.saved_seg
, unwind
.saved_subseg
);
3049 /* Parse an unwind_cantunwind directive. */
3052 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED
)
3054 demand_empty_rest_of_line ();
3055 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3056 as_bad (_("personality routine specified for cantunwind frame"));
3058 unwind
.personality_index
= -2;
3062 /* Parse a personalityindex directive. */
3065 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED
)
3069 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3070 as_bad (_("duplicate .personalityindex directive"));
3074 if (exp
.X_op
!= O_constant
3075 || exp
.X_add_number
< 0 || exp
.X_add_number
> 15)
3077 as_bad (_("bad personality routine number"));
3078 ignore_rest_of_line ();
3082 unwind
.personality_index
= exp
.X_add_number
;
3084 demand_empty_rest_of_line ();
3088 /* Parse a personality directive. */
3091 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED
)
3095 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3096 as_bad (_("duplicate .personality directive"));
3098 name
= input_line_pointer
;
3099 c
= get_symbol_end ();
3100 p
= input_line_pointer
;
3101 unwind
.personality_routine
= symbol_find_or_make (name
);
3103 demand_empty_rest_of_line ();
3107 /* Parse a directive saving core registers. */
3110 s_arm_unwind_save_core (void)
3116 range
= parse_reg_list (&input_line_pointer
);
3119 as_bad (_("expected register list"));
3120 ignore_rest_of_line ();
3124 demand_empty_rest_of_line ();
3126 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3127 into .unwind_save {..., sp...}. We aren't bothered about the value of
3128 ip because it is clobbered by calls. */
3129 if (unwind
.sp_restored
&& unwind
.fp_reg
== 12
3130 && (range
& 0x3000) == 0x1000)
3132 unwind
.opcode_count
--;
3133 unwind
.sp_restored
= 0;
3134 range
= (range
| 0x2000) & ~0x1000;
3135 unwind
.pending_offset
= 0;
3141 /* See if we can use the short opcodes. These pop a block of up to 8
3142 registers starting with r4, plus maybe r14. */
3143 for (n
= 0; n
< 8; n
++)
3145 /* Break at the first non-saved register. */
3146 if ((range
& (1 << (n
+ 4))) == 0)
3149 /* See if there are any other bits set. */
3150 if (n
== 0 || (range
& (0xfff0 << n
) & 0xbff0) != 0)
3152 /* Use the long form. */
3153 op
= 0x8000 | ((range
>> 4) & 0xfff);
3154 add_unwind_opcode (op
, 2);
3158 /* Use the short form. */
3160 op
= 0xa8; /* Pop r14. */
3162 op
= 0xa0; /* Do not pop r14. */
3164 add_unwind_opcode (op
, 1);
3171 op
= 0xb100 | (range
& 0xf);
3172 add_unwind_opcode (op
, 2);
3175 /* Record the number of bytes pushed. */
3176 for (n
= 0; n
< 16; n
++)
3178 if (range
& (1 << n
))
3179 unwind
.frame_size
+= 4;
3184 /* Parse a directive saving FPA registers. */
3187 s_arm_unwind_save_fpa (int reg
)
3193 /* Get Number of registers to transfer. */
3194 if (skip_past_comma (&input_line_pointer
) != FAIL
)
3197 exp
.X_op
= O_illegal
;
3199 if (exp
.X_op
!= O_constant
)
3201 as_bad (_("expected , <constant>"));
3202 ignore_rest_of_line ();
3206 num_regs
= exp
.X_add_number
;
3208 if (num_regs
< 1 || num_regs
> 4)
3210 as_bad (_("number of registers must be in the range [1:4]"));
3211 ignore_rest_of_line ();
3215 demand_empty_rest_of_line ();
3220 op
= 0xb4 | (num_regs
- 1);
3221 add_unwind_opcode (op
, 1);
3226 op
= 0xc800 | (reg
<< 4) | (num_regs
- 1);
3227 add_unwind_opcode (op
, 2);
3229 unwind
.frame_size
+= num_regs
* 12;
3233 /* Parse a directive saving VFP registers. */
3236 s_arm_unwind_save_vfp (void)
3242 count
= parse_vfp_reg_list (&input_line_pointer
, ®
, REGLIST_VFP_D
);
3245 as_bad (_("expected register list"));
3246 ignore_rest_of_line ();
3250 demand_empty_rest_of_line ();
3255 op
= 0xb8 | (count
- 1);
3256 add_unwind_opcode (op
, 1);
3261 op
= 0xb300 | (reg
<< 4) | (count
- 1);
3262 add_unwind_opcode (op
, 2);
3264 unwind
.frame_size
+= count
* 8 + 4;
3268 /* Parse a directive saving iWMMXt data registers. */
3271 s_arm_unwind_save_mmxwr (void)
3279 if (*input_line_pointer
== '{')
3280 input_line_pointer
++;
3284 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
3288 as_bad (_(reg_expected_msgs
[REG_TYPE_MMXWR
]));
3293 as_tsktsk (_("register list not in ascending order"));
3296 if (*input_line_pointer
== '-')
3298 input_line_pointer
++;
3299 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
3302 as_bad (_(reg_expected_msgs
[REG_TYPE_MMXWR
]));
3305 else if (reg
>= hi_reg
)
3307 as_bad (_("bad register range"));
3310 for (; reg
< hi_reg
; reg
++)
3314 while (skip_past_comma (&input_line_pointer
) != FAIL
);
3316 if (*input_line_pointer
== '}')
3317 input_line_pointer
++;
3319 demand_empty_rest_of_line ();
/* Generate any deferred opcodes because we're going to be looking at
     the list.  */
3323 flush_pending_unwind ();
3325 for (i
= 0; i
< 16; i
++)
3327 if (mask
& (1 << i
))
3328 unwind
.frame_size
+= 8;
3331 /* Attempt to combine with a previous opcode. We do this because gcc
3332 likes to output separate unwind directives for a single block of
3334 if (unwind
.opcode_count
> 0)
3336 i
= unwind
.opcodes
[unwind
.opcode_count
- 1];
3337 if ((i
& 0xf8) == 0xc0)
3340 /* Only merge if the blocks are contiguous. */
3343 if ((mask
& 0xfe00) == (1 << 9))
3345 mask
|= ((1 << (i
+ 11)) - 1) & 0xfc00;
3346 unwind
.opcode_count
--;
3349 else if (i
== 6 && unwind
.opcode_count
>= 2)
3351 i
= unwind
.opcodes
[unwind
.opcode_count
- 2];
3355 op
= 0xffff << (reg
- 1);
3357 || ((mask
& op
) == (1u << (reg
- 1))))
3359 op
= (1 << (reg
+ i
+ 1)) - 1;
3360 op
&= ~((1 << reg
) - 1);
3362 unwind
.opcode_count
-= 2;
/* We want to generate opcodes in the order the registers have been
     saved, i.e. descending order.  */
3371 for (reg
= 15; reg
>= -1; reg
--)
3373 /* Save registers in blocks. */
3375 || !(mask
& (1 << reg
)))
/* We found an unsaved reg.  Generate opcodes to save the
	 preceding block.  */
3384 op
= 0xc0 | (hi_reg
- 10);
3385 add_unwind_opcode (op
, 1);
3390 op
= 0xc600 | ((reg
+ 1) << 4) | ((hi_reg
- reg
) - 1);
3391 add_unwind_opcode (op
, 2);
3400 ignore_rest_of_line ();
3404 s_arm_unwind_save_mmxwcg (void)
3411 if (*input_line_pointer
== '{')
3412 input_line_pointer
++;
3416 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
3420 as_bad (_(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
3426 as_tsktsk (_("register list not in ascending order"));
3429 if (*input_line_pointer
== '-')
3431 input_line_pointer
++;
3432 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
3435 as_bad (_(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
3438 else if (reg
>= hi_reg
)
3440 as_bad (_("bad register range"));
3443 for (; reg
< hi_reg
; reg
++)
3447 while (skip_past_comma (&input_line_pointer
) != FAIL
);
3449 if (*input_line_pointer
== '}')
3450 input_line_pointer
++;
3452 demand_empty_rest_of_line ();
/* Generate any deferred opcodes because we're going to be looking at
     the list.  */
3456 flush_pending_unwind ();
3458 for (reg
= 0; reg
< 16; reg
++)
3460 if (mask
& (1 << reg
))
3461 unwind
.frame_size
+= 4;
3464 add_unwind_opcode (op
, 2);
3467 ignore_rest_of_line ();
3471 /* Parse an unwind_save directive. */
3474 s_arm_unwind_save (int ignored ATTRIBUTE_UNUSED
)
3477 struct reg_entry
*reg
;
3478 bfd_boolean had_brace
= FALSE
;
3480 /* Figure out what sort of save we have. */
3481 peek
= input_line_pointer
;
3489 reg
= arm_reg_parse_multi (&peek
);
3493 as_bad (_("register expected"));
3494 ignore_rest_of_line ();
3503 as_bad (_("FPA .unwind_save does not take a register list"));
3504 ignore_rest_of_line ();
3507 s_arm_unwind_save_fpa (reg
->number
);
3510 case REG_TYPE_RN
: s_arm_unwind_save_core (); return;
3511 case REG_TYPE_VFD
: s_arm_unwind_save_vfp (); return;
3512 case REG_TYPE_MMXWR
: s_arm_unwind_save_mmxwr (); return;
3513 case REG_TYPE_MMXWCG
: s_arm_unwind_save_mmxwcg (); return;
3516 as_bad (_(".unwind_save does not support this kind of register"));
3517 ignore_rest_of_line ();
3522 /* Parse an unwind_movsp directive. */
3525 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED
)
3530 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
3533 as_bad (_(reg_expected_msgs
[REG_TYPE_RN
]));
3534 ignore_rest_of_line ();
3537 demand_empty_rest_of_line ();
3539 if (reg
== REG_SP
|| reg
== REG_PC
)
3541 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
3545 if (unwind
.fp_reg
!= REG_SP
)
3546 as_bad (_("unexpected .unwind_movsp directive"));
3548 /* Generate opcode to restore the value. */
3550 add_unwind_opcode (op
, 1);
3552 /* Record the information for later. */
3553 unwind
.fp_reg
= reg
;
3554 unwind
.fp_offset
= unwind
.frame_size
;
3555 unwind
.sp_restored
= 1;
3558 /* Parse an unwind_pad directive. */
3561 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED
)
3565 if (immediate_for_directive (&offset
) == FAIL
)
3570 as_bad (_("stack increment must be multiple of 4"));
3571 ignore_rest_of_line ();
3575 /* Don't generate any opcodes, just record the details for later. */
3576 unwind
.frame_size
+= offset
;
3577 unwind
.pending_offset
+= offset
;
3579 demand_empty_rest_of_line ();
3582 /* Parse an unwind_setfp directive. */
3585 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED
)
3591 fp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
3592 if (skip_past_comma (&input_line_pointer
) == FAIL
)
3595 sp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
3597 if (fp_reg
== FAIL
|| sp_reg
== FAIL
)
3599 as_bad (_("expected <reg>, <reg>"));
3600 ignore_rest_of_line ();
3604 /* Optional constant. */
3605 if (skip_past_comma (&input_line_pointer
) != FAIL
)
3607 if (immediate_for_directive (&offset
) == FAIL
)
3613 demand_empty_rest_of_line ();
3615 if (sp_reg
!= 13 && sp_reg
!= unwind
.fp_reg
)
as_bad (_("register must be either sp or set by a previous "
	      "unwind_movsp directive"));
3622 /* Don't generate any opcodes, just record the information for later. */
3623 unwind
.fp_reg
= fp_reg
;
3626 unwind
.fp_offset
= unwind
.frame_size
- offset
;
3628 unwind
.fp_offset
-= offset
;
3631 /* Parse an unwind_raw directive. */
3634 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED
)
3637 /* This is an arbitrary limit. */
3638 unsigned char op
[16];
3642 if (exp
.X_op
== O_constant
3643 && skip_past_comma (&input_line_pointer
) != FAIL
)
3645 unwind
.frame_size
+= exp
.X_add_number
;
3649 exp
.X_op
= O_illegal
;
3651 if (exp
.X_op
!= O_constant
)
3653 as_bad (_("expected <offset>, <opcode>"));
3654 ignore_rest_of_line ();
3660 /* Parse the opcode. */
3665 as_bad (_("unwind opcode too long"));
3666 ignore_rest_of_line ();
3668 if (exp
.X_op
!= O_constant
|| exp
.X_add_number
& ~0xff)
3670 as_bad (_("invalid unwind opcode"));
3671 ignore_rest_of_line ();
3674 op
[count
++] = exp
.X_add_number
;
3676 /* Parse the next byte. */
3677 if (skip_past_comma (&input_line_pointer
) == FAIL
)
3683 /* Add the opcode bytes in reverse order. */
3685 add_unwind_opcode (op
[count
], 1);
3687 demand_empty_rest_of_line ();
3691 /* Parse a .eabi_attribute directive. */
3694 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED
)
3697 bfd_boolean is_string
;
3704 if (exp
.X_op
!= O_constant
)
3707 tag
= exp
.X_add_number
;
3708 if (tag
== 4 || tag
== 5 || tag
== 32 || (tag
> 32 && (tag
& 1) != 0))
3713 if (skip_past_comma (&input_line_pointer
) == FAIL
)
3715 if (tag
== 32 || !is_string
)
3718 if (exp
.X_op
!= O_constant
)
3720 as_bad (_("expected numeric constant"));
3721 ignore_rest_of_line ();
3724 i
= exp
.X_add_number
;
3726 if (tag
== Tag_compatibility
3727 && skip_past_comma (&input_line_pointer
) == FAIL
)
3729 as_bad (_("expected comma"));
3730 ignore_rest_of_line ();
skip_whitespace (input_line_pointer);
3736 if (*input_line_pointer
!= '"')
3738 input_line_pointer
++;
3739 s
= input_line_pointer
;
3740 while (*input_line_pointer
&& *input_line_pointer
!= '"')
3741 input_line_pointer
++;
3742 if (*input_line_pointer
!= '"')
3744 saved_char
= *input_line_pointer
;
3745 *input_line_pointer
= 0;
3753 if (tag
== Tag_compatibility
)
3754 elf32_arm_add_eabi_attr_compat (stdoutput
, i
, s
);
3756 elf32_arm_add_eabi_attr_string (stdoutput
, tag
, s
);
3758 elf32_arm_add_eabi_attr_int (stdoutput
, tag
, i
);
3762 *input_line_pointer
= saved_char
;
3763 input_line_pointer
++;
3765 demand_empty_rest_of_line ();
3768 as_bad (_("bad string constant"));
3769 ignore_rest_of_line ();
3772 as_bad (_("expected <tag> , <value>"));
3773 ignore_rest_of_line ();
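/* Illustrative only (not from the original sources): typical uses of the
   directive handled above, assuming the usual AEABI tag numbering in which
   tag 4 is a string-valued CPU name tag and tag 32 is Tag_compatibility:

	.eabi_attribute 4, "ARM7TDMI"
	.eabi_attribute 32, 1, "gnu"

   Integer-valued tags take a numeric constant instead of a string.  */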
3775 #endif /* OBJ_ELF */
3777 static void s_arm_arch (int);
3778 static void s_arm_cpu (int);
3779 static void s_arm_fpu (int);
3781 /* This table describes all the machine specific pseudo-ops the assembler
3782 has to support. The fields are:
3783 pseudo-op name without dot
3784 function to call to execute this pseudo-op
3785 Integer arg to pass to the function. */
3787 const pseudo_typeS md_pseudo_table
[] =
3789 /* Never called because '.req' does not start a line. */
3790 { "req", s_req
, 0 },
3791 /* Following two are likewise never called. */
3794 { "unreq", s_unreq
, 0 },
3795 { "bss", s_bss
, 0 },
3796 { "align", s_align
, 0 },
3797 { "arm", s_arm
, 0 },
3798 { "thumb", s_thumb
, 0 },
3799 { "code", s_code
, 0 },
3800 { "force_thumb", s_force_thumb
, 0 },
3801 { "thumb_func", s_thumb_func
, 0 },
3802 { "thumb_set", s_thumb_set
, 0 },
3803 { "even", s_even
, 0 },
3804 { "ltorg", s_ltorg
, 0 },
3805 { "pool", s_ltorg
, 0 },
3806 { "syntax", s_syntax
, 0 },
3807 { "cpu", s_arm_cpu
, 0 },
3808 { "arch", s_arm_arch
, 0 },
3809 { "fpu", s_arm_fpu
, 0 },
3811 { "word", s_arm_elf_cons
, 4 },
3812 { "long", s_arm_elf_cons
, 4 },
3813 { "rel31", s_arm_rel31
, 0 },
3814 { "fnstart", s_arm_unwind_fnstart
, 0 },
3815 { "fnend", s_arm_unwind_fnend
, 0 },
3816 { "cantunwind", s_arm_unwind_cantunwind
, 0 },
3817 { "personality", s_arm_unwind_personality
, 0 },
3818 { "personalityindex", s_arm_unwind_personalityindex
, 0 },
3819 { "handlerdata", s_arm_unwind_handlerdata
, 0 },
3820 { "save", s_arm_unwind_save
, 0 },
3821 { "movsp", s_arm_unwind_movsp
, 0 },
3822 { "pad", s_arm_unwind_pad
, 0 },
3823 { "setfp", s_arm_unwind_setfp
, 0 },
3824 { "unwind_raw", s_arm_unwind_raw
, 0 },
3825 { "eabi_attribute", s_arm_eabi_attribute
, 0 },
3829 { "extend", float_cons
, 'x' },
3830 { "ldouble", float_cons
, 'x' },
3831 { "packed", float_cons
, 'p' },
3835 /* Parser functions used exclusively in instruction operands. */
3837 /* Generic immediate-value read function for use in insn parsing.
3838 STR points to the beginning of the immediate (the leading #);
3839 VAL receives the value; if the value is outside [MIN, MAX]
issue an error.  PREFIX_OPT is true if the immediate prefix is
   optional.  */
3844 parse_immediate (char **str
, int *val
, int min
, int max
,
3845 bfd_boolean prefix_opt
)
3848 my_get_expression (&exp
, str
, prefix_opt
? GE_OPT_PREFIX
: GE_IMM_PREFIX
);
3849 if (exp
.X_op
!= O_constant
)
3851 inst
.error
= _("constant expression required");
3855 if (exp
.X_add_number
< min
|| exp
.X_add_number
> max
)
3857 inst
.error
= _("immediate value out of range");
3861 *val
= exp
.X_add_number
;
3865 /* Less-generic immediate-value read function with the possibility of loading a
3866 big (64-bit) immediate, as required by Neon VMOV and VMVN immediate
3867 instructions. Puts the result directly in inst.operands[i]. */
3870 parse_big_immediate (char **str
, int i
)
3875 my_get_expression (&exp
, &ptr
, GE_OPT_PREFIX_BIG
);
3877 if (exp
.X_op
== O_constant
)
3878 inst
.operands
[i
].imm
= exp
.X_add_number
;
3879 else if (exp
.X_op
== O_big
3880 && LITTLENUM_NUMBER_OF_BITS
* exp
.X_add_number
> 32
3881 && LITTLENUM_NUMBER_OF_BITS
* exp
.X_add_number
<= 64)
3883 unsigned parts
= 32 / LITTLENUM_NUMBER_OF_BITS
, j
, idx
= 0;
3884 /* Bignums have their least significant bits in
3885 generic_bignum[0]. Make sure we put 32 bits in imm and
3886 32 bits in reg, in a (hopefully) portable way. */
3887 assert (parts
!= 0);
3888 inst
.operands
[i
].imm
= 0;
3889 for (j
= 0; j
< parts
; j
++, idx
++)
3890 inst
.operands
[i
].imm
|= generic_bignum
[idx
]
3891 << (LITTLENUM_NUMBER_OF_BITS
* j
);
3892 inst
.operands
[i
].reg
= 0;
3893 for (j
= 0; j
< parts
; j
++, idx
++)
3894 inst
.operands
[i
].reg
|= generic_bignum
[idx
]
3895 << (LITTLENUM_NUMBER_OF_BITS
* j
);
3896 inst
.operands
[i
].regisimm
= 1;
3906 /* Returns the pseudo-register number of an FPA immediate constant,
3907 or FAIL if there isn't a valid constant here. */
3910 parse_fpa_immediate (char ** str
)
3912 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
3918 /* First try and match exact strings, this is to guarantee
3919 that some formats will work even for cross assembly. */
3921 for (i
= 0; fp_const
[i
]; i
++)
3923 if (strncmp (*str
, fp_const
[i
], strlen (fp_const
[i
])) == 0)
3927 *str
+= strlen (fp_const
[i
]);
3928 if (is_end_of_line
[(unsigned char) **str
])
3934 /* Just because we didn't get a match doesn't mean that the constant
3935 isn't valid, just that it is in a format that we don't
3936 automatically recognize. Try parsing it with the standard
3937 expression routines. */
3939 memset (words
, 0, MAX_LITTLENUMS
* sizeof (LITTLENUM_TYPE
));
3941 /* Look for a raw floating point number. */
3942 if ((save_in
= atof_ieee (*str
, 'x', words
)) != NULL
3943 && is_end_of_line
[(unsigned char) *save_in
])
3945 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
3947 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
3949 if (words
[j
] != fp_values
[i
][j
])
3953 if (j
== MAX_LITTLENUMS
)
/* Try and parse a more complex expression; this will probably fail
     unless the code uses a floating point prefix (e.g. "0f").  */
3963 save_in
= input_line_pointer
;
3964 input_line_pointer
= *str
;
3965 if (expression (&exp
) == absolute_section
3966 && exp
.X_op
== O_big
3967 && exp
.X_add_number
< 0)
3969 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
3971 if (gen_to_words (words
, 5, (long) 15) == 0)
3973 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
3975 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
3977 if (words
[j
] != fp_values
[i
][j
])
3981 if (j
== MAX_LITTLENUMS
)
3983 *str
= input_line_pointer
;
3984 input_line_pointer
= save_in
;
3991 *str
= input_line_pointer
;
3992 input_line_pointer
= save_in
;
3993 inst
.error
= _("invalid FPA immediate expression");
/* Returns 1 if a number has "quarter-precision" float format
   0baBbbbbbc defgh000 00000000 00000000.  */

static int
is_quarter_float (unsigned imm)
{
  int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
  return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
}
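/* A worked example (illustrative): 1.0f is 0x3f800000, which has zero low
   mantissa bits and exponent bits matching the 0x3e000000 pattern selected
   above, so is_quarter_float (0x3f800000) is nonzero; 0x3f800001 fails the
   low-bits test and is rejected.  */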
4007 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4008 0baBbbbbbc defgh000 00000000 00000000.
4009 The minus-zero case needs special handling, since it can't be encoded in the
4010 "quarter-precision" float format, but can nonetheless be loaded as an integer
4014 parse_qfloat_immediate (char **ccp
, int *immed
)
4017 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
4019 skip_past_char (&str
, '#');
4021 if ((str
= atof_ieee (str
, 's', words
)) != NULL
)
4023 unsigned fpword
= 0;
4026 /* Our FP word must be 32 bits (single-precision FP). */
4027 for (i
= 0; i
< 32 / LITTLENUM_NUMBER_OF_BITS
; i
++)
4029 fpword
<<= LITTLENUM_NUMBER_OF_BITS
;
4033 if (is_quarter_float (fpword
) || fpword
== 0x80000000)
4046 /* Shift operands. */
4049 SHIFT_LSL
, SHIFT_LSR
, SHIFT_ASR
, SHIFT_ROR
, SHIFT_RRX
4052 struct asm_shift_name
4055 enum shift_kind kind
;
4058 /* Third argument to parse_shift. */
4059 enum parse_shift_mode
4061 NO_SHIFT_RESTRICT
, /* Any kind of shift is accepted. */
4062 SHIFT_IMMEDIATE
, /* Shift operand must be an immediate. */
4063 SHIFT_LSL_OR_ASR_IMMEDIATE
, /* Shift must be LSL or ASR immediate. */
4064 SHIFT_ASR_IMMEDIATE
, /* Shift must be ASR immediate. */
4065 SHIFT_LSL_IMMEDIATE
, /* Shift must be LSL immediate. */
4068 /* Parse a <shift> specifier on an ARM data processing instruction.
4069 This has three forms:
4071 (LSL|LSR|ASL|ASR|ROR) Rs
4072 (LSL|LSR|ASL|ASR|ROR) #imm
4075 Note that ASL is assimilated to LSL in the instruction encoding, and
4076 RRX to ROR #0 (which cannot be written as such). */
4079 parse_shift (char **str
, int i
, enum parse_shift_mode mode
)
4081 const struct asm_shift_name
*shift_name
;
4082 enum shift_kind shift
;
4087 for (p
= *str
; ISALPHA (*p
); p
++)
4092 inst
.error
= _("shift expression expected");
4096 shift_name
= hash_find_n (arm_shift_hsh
, *str
, p
- *str
);
4098 if (shift_name
== NULL
)
4100 inst
.error
= _("shift expression expected");
4104 shift
= shift_name
->kind
;
4108 case NO_SHIFT_RESTRICT
:
4109 case SHIFT_IMMEDIATE
: break;
4111 case SHIFT_LSL_OR_ASR_IMMEDIATE
:
4112 if (shift
!= SHIFT_LSL
&& shift
!= SHIFT_ASR
)
4114 inst
.error
= _("'LSL' or 'ASR' required");
4119 case SHIFT_LSL_IMMEDIATE
:
4120 if (shift
!= SHIFT_LSL
)
4122 inst
.error
= _("'LSL' required");
4127 case SHIFT_ASR_IMMEDIATE
:
4128 if (shift
!= SHIFT_ASR
)
4130 inst
.error
= _("'ASR' required");
4138 if (shift
!= SHIFT_RRX
)
4140 /* Whitespace can appear here if the next thing is a bare digit. */
4141 skip_whitespace (p
);
4143 if (mode
== NO_SHIFT_RESTRICT
4144 && (reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
4146 inst
.operands
[i
].imm
= reg
;
4147 inst
.operands
[i
].immisreg
= 1;
4149 else if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
4152 inst
.operands
[i
].shift_kind
= shift
;
4153 inst
.operands
[i
].shifted
= 1;
4158 /* Parse a <shifter_operand> for an ARM data processing instruction:
4161 #<immediate>, <rotate>
4165 where <shift> is defined by parse_shift above, and <rotate> is a
4166 multiple of 2 between 0 and 30. Validation of immediate operands
4167 is deferred to md_apply_fix. */
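/* Illustrative examples only (not from the original sources) of the forms
   accepted by parse_shifter_operand below:

	add	r0, r1, r2		@ plain register
	add	r0, r1, r2, lsl #3	@ register with shift
	add	r0, r1, #0xff0		@ immediate, encoded by rotation
	add	r0, r1, #0xff, 8	@ immediate with explicit rotation  */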
4170 parse_shifter_operand (char **str
, int i
)
4175 if ((value
= arm_reg_parse (str
, REG_TYPE_RN
)) != FAIL
)
4177 inst
.operands
[i
].reg
= value
;
4178 inst
.operands
[i
].isreg
= 1;
4180 /* parse_shift will override this if appropriate */
4181 inst
.reloc
.exp
.X_op
= O_constant
;
4182 inst
.reloc
.exp
.X_add_number
= 0;
4184 if (skip_past_comma (str
) == FAIL
)
4187 /* Shift operation on register. */
4188 return parse_shift (str
, i
, NO_SHIFT_RESTRICT
);
4191 if (my_get_expression (&inst
.reloc
.exp
, str
, GE_IMM_PREFIX
))
4194 if (skip_past_comma (str
) == SUCCESS
)
4196 /* #x, y -- ie explicit rotation by Y. */
4197 if (my_get_expression (&expr
, str
, GE_NO_PREFIX
))
4200 if (expr
.X_op
!= O_constant
|| inst
.reloc
.exp
.X_op
!= O_constant
)
4202 inst
.error
= _("constant expression expected");
4206 value
= expr
.X_add_number
;
4207 if (value
< 0 || value
> 30 || value
% 2 != 0)
4209 inst
.error
= _("invalid rotation");
4212 if (inst
.reloc
.exp
.X_add_number
< 0 || inst
.reloc
.exp
.X_add_number
> 255)
4214 inst
.error
= _("invalid constant");
4218 /* Convert to decoded value. md_apply_fix will put it back. */
4219 inst
.reloc
.exp
.X_add_number
4220 = (((inst
.reloc
.exp
.X_add_number
<< (32 - value
))
4221 | (inst
.reloc
.exp
.X_add_number
>> value
)) & 0xffffffff);
4224 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
4225 inst
.reloc
.pc_rel
= 0;
4229 /* Parse all forms of an ARM address expression. Information is written
4230 to inst.operands[i] and/or inst.reloc.
4232 Preindexed addressing (.preind=1):
4234 [Rn, #offset] .reg=Rn .reloc.exp=offset
4235 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4236 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4237 .shift_kind=shift .reloc.exp=shift_imm
4239 These three may have a trailing ! which causes .writeback to be set also.
4241 Postindexed addressing (.postind=1, .writeback=1):
4243 [Rn], #offset .reg=Rn .reloc.exp=offset
4244 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4245 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4246 .shift_kind=shift .reloc.exp=shift_imm
4248 Unindexed addressing (.preind=0, .postind=0):
4250 [Rn], {option} .reg=Rn .imm=option .immisreg=0
4254 [Rn]{!} shorthand for [Rn,#0]{!}
4255 =immediate .isreg=0 .reloc.exp=immediate
4256 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
4258 It is the caller's responsibility to check for addressing modes not
4259 supported by the instruction, and to set inst.reloc.type. */
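/* Illustrative examples only (not from the original sources) of source
   operands that reach parse_address below:

	ldr	r0, [r1, #4]		@ preindexed, immediate offset
	ldr	r0, [r1, r2, lsl #2]!	@ preindexed, shifted register, writeback
	ldr	r0, [r1], #4		@ postindexed
	ldr	r0, =0xdeadbeef		@ load-constant pseudo-op
	ldr	r0, label		@ PC-relative literal  */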
4262 parse_address (char **str
, int i
)
4267 if (skip_past_char (&p
, '[') == FAIL
)
4269 if (skip_past_char (&p
, '=') == FAIL
)
4271 /* bare address - translate to PC-relative offset */
4272 inst
.reloc
.pc_rel
= 1;
4273 inst
.operands
[i
].reg
= REG_PC
;
4274 inst
.operands
[i
].isreg
= 1;
4275 inst
.operands
[i
].preind
= 1;
4277 /* else a load-constant pseudo op, no special treatment needed here */
4279 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
4286 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
4288 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
4291 inst
.operands
[i
].reg
= reg
;
4292 inst
.operands
[i
].isreg
= 1;
4294 if (skip_past_comma (&p
) == SUCCESS
)
4296 inst
.operands
[i
].preind
= 1;
4299 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
4301 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
4303 inst
.operands
[i
].imm
= reg
;
4304 inst
.operands
[i
].immisreg
= 1;
4306 if (skip_past_comma (&p
) == SUCCESS
)
4307 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
4310 else if (skip_past_char (&p
, ':') == SUCCESS
)
4312 /* FIXME: '@' should be used here, but it's filtered out by generic
code before we get to see it here.  This may be subject to
	     change.  */
4316 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
4317 if (exp
.X_op
!= O_constant
)
4319 inst
.error
= _("alignment must be constant");
4322 inst
.operands
[i
].imm
= exp
.X_add_number
<< 8;
4323 inst
.operands
[i
].immisalign
= 1;
4324 /* Alignments are not pre-indexes. */
4325 inst
.operands
[i
].preind
= 0;
4329 if (inst
.operands
[i
].negative
)
4331 inst
.operands
[i
].negative
= 0;
4334 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
4339 if (skip_past_char (&p
, ']') == FAIL
)
4341 inst
.error
= _("']' expected");
4345 if (skip_past_char (&p
, '!') == SUCCESS
)
4346 inst
.operands
[i
].writeback
= 1;
4348 else if (skip_past_comma (&p
) == SUCCESS
)
4350 if (skip_past_char (&p
, '{') == SUCCESS
)
4352 /* [Rn], {expr} - unindexed, with option */
4353 if (parse_immediate (&p
, &inst
.operands
[i
].imm
,
4354 0, 255, TRUE
) == FAIL
)
4357 if (skip_past_char (&p
, '}') == FAIL
)
4359 inst
.error
= _("'}' expected at end of 'option' field");
4362 if (inst
.operands
[i
].preind
)
4364 inst
.error
= _("cannot combine index with option");
4372 inst
.operands
[i
].postind
= 1;
4373 inst
.operands
[i
].writeback
= 1;
4375 if (inst
.operands
[i
].preind
)
4377 inst
.error
= _("cannot combine pre- and post-indexing");
4382 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
4384 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
4386 /* We might be using the immediate for alignment already. If we
4387 are, OR the register number into the low-order bits. */
4388 if (inst
.operands
[i
].immisalign
)
4389 inst
.operands
[i
].imm
|= reg
;
4391 inst
.operands
[i
].imm
= reg
;
4392 inst
.operands
[i
].immisreg
= 1;
4394 if (skip_past_comma (&p
) == SUCCESS
)
4395 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
4400 if (inst
.operands
[i
].negative
)
4402 inst
.operands
[i
].negative
= 0;
4405 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
4411 /* If at this point neither .preind nor .postind is set, we have a
4412 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
4413 if (inst
.operands
[i
].preind
== 0 && inst
.operands
[i
].postind
== 0)
4415 inst
.operands
[i
].preind
= 1;
4416 inst
.reloc
.exp
.X_op
= O_constant
;
4417 inst
.reloc
.exp
.X_add_number
= 0;
4423 /* Miscellaneous. */
4425 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
4426 or a bitmask suitable to be or-ed into the ARM msr instruction. */
4428 parse_psr (char **str
)
4431 unsigned long psr_field
;
4432 const struct asm_psr
*psr
;
4435 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
4436 feature for ease of use and backwards compatibility. */
4438 if (strncasecmp (p
, "SPSR", 4) == 0)
4439 psr_field
= SPSR_BIT
;
4440 else if (strncasecmp (p
, "CPSR", 4) == 0)
4447 while (ISALNUM (*p
) || *p
== '_');
4449 psr
= hash_find_n (arm_v7m_psr_hsh
, start
, p
- start
);
4460 /* A suffix follows. */
4466 while (ISALNUM (*p
) || *p
== '_');
4468 psr
= hash_find_n (arm_psr_hsh
, start
, p
- start
);
4472 psr_field
|= psr
->field
;
4477 goto error
; /* Garbage after "[CS]PSR". */
4479 psr_field
|= (PSR_c
| PSR_f
);
4485 inst
.error
= _("flag for {c}psr instruction expected");
4489 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
4490 value suitable for splatting into the AIF field of the instruction. */
4493 parse_cps_flags (char **str
)
4502 case '\0': case ',':
4505 case 'a': case 'A': saw_a_flag
= 1; val
|= 0x4; break;
4506 case 'i': case 'I': saw_a_flag
= 1; val
|= 0x2; break;
4507 case 'f': case 'F': saw_a_flag
= 1; val
|= 0x1; break;
4510 inst
.error
= _("unrecognized CPS flag");
4515 if (saw_a_flag
== 0)
4517 inst
.error
= _("missing CPS flags");
4525 /* Parse an endian specifier ("BE" or "LE", case insensitive);
4526 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
4529 parse_endian_specifier (char **str
)
4534 if (strncasecmp (s
, "BE", 2))
4536 else if (strncasecmp (s
, "LE", 2))
4540 inst
.error
= _("valid endian specifiers are be or le");
4544 if (ISALNUM (s
[2]) || s
[2] == '_')
4546 inst
.error
= _("valid endian specifiers are be or le");
4551 return little_endian
;
4554 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
4555 value suitable for poking into the rotate field of an sxt or sxta
4556 instruction, or FAIL on error. */
4559 parse_ror (char **str
)
4564 if (strncasecmp (s
, "ROR", 3) == 0)
4568 inst
.error
= _("missing rotation field after comma");
4572 if (parse_immediate (&s
, &rot
, 0, 24, FALSE
) == FAIL
)
4577 case 0: *str
= s
; return 0x0;
4578 case 8: *str
= s
; return 0x1;
4579 case 16: *str
= s
; return 0x2;
4580 case 24: *str
= s
; return 0x3;
4583 inst
.error
= _("rotation can only be 0, 8, 16, or 24");
4588 /* Parse a conditional code (from conds[] below). The value returned is in the
4589 range 0 .. 14, or FAIL. */
4591 parse_cond (char **str
)
4594 const struct asm_cond
*c
;
4597 while (ISALPHA (*q
))
4600 c
= hash_find_n (arm_cond_hsh
, p
, q
- p
);
4603 inst
.error
= _("condition required");
/* Parse an option for a barrier instruction.  Returns the encoding for the
   option, or FAIL.  */
4614 parse_barrier (char **str
)
4617 const struct asm_barrier_opt
*o
;
4620 while (ISALPHA (*q
))
4623 o
= hash_find_n (arm_barrier_opt_hsh
, p
, q
- p
);
/* Parse the operands of a table branch instruction.  Similar to a memory
   operand.  */
4634 parse_tb (char **str
)
4639 if (skip_past_char (&p
, '[') == FAIL
)
4641 inst
.error
= _("'[' expected");
4645 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
4647 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
4650 inst
.operands
[0].reg
= reg
;
4652 if (skip_past_comma (&p
) == FAIL
)
4654 inst
.error
= _("',' expected");
4658 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
4660 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
4663 inst
.operands
[0].imm
= reg
;
4665 if (skip_past_comma (&p
) == SUCCESS
)
4667 if (parse_shift (&p
, 0, SHIFT_LSL_IMMEDIATE
) == FAIL
)
4669 if (inst
.reloc
.exp
.X_add_number
!= 1)
4671 inst
.error
= _("invalid shift");
4674 inst
.operands
[0].shifted
= 1;
4677 if (skip_past_char (&p
, ']') == FAIL
)
4679 inst
.error
= _("']' expected");
4686 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
4687 information on the types the operands can take and how they are encoded.
4688 Note particularly the abuse of ".regisimm" to signify a Neon register.
4689 Up to three operands may be read; this function handles setting the
4690 ".present" field for each operand itself.
4691 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
4692 else returns FAIL. */
4695 parse_neon_mov (char **str
, int *which_operand
)
4697 int i
= *which_operand
, val
;
4698 enum arm_reg_type rtype
;
4700 struct neon_type_el optype
;
4702 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
4704 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
4705 inst
.operands
[i
].reg
= val
;
4706 inst
.operands
[i
].isscalar
= 1;
4707 inst
.operands
[i
].vectype
= optype
;
4708 inst
.operands
[i
++].present
= 1;
4710 if (skip_past_comma (&ptr
) == FAIL
)
4713 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
4716 inst
.operands
[i
].reg
= val
;
4717 inst
.operands
[i
].isreg
= 1;
4718 inst
.operands
[i
].present
= 1;
4720 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NDQ
, &rtype
, &optype
))
4723 /* Cases 0, 1, 2, 3, 5 (D only). */
4724 if (skip_past_comma (&ptr
) == FAIL
)
4727 inst
.operands
[i
].reg
= val
;
4728 inst
.operands
[i
].isreg
= 1;
4729 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
4730 inst
.operands
[i
].vectype
= optype
;
4731 inst
.operands
[i
++].present
= 1;
4733 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
4735 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>. */
4736 inst
.operands
[i
-1].regisimm
= 1;
4737 inst
.operands
[i
].reg
= val
;
4738 inst
.operands
[i
].isreg
= 1;
4739 inst
.operands
[i
++].present
= 1;
4741 if (rtype
== REG_TYPE_NQ
)
4743 first_error (_("can't use Neon quad register here"));
4746 if (skip_past_comma (&ptr
) == FAIL
)
4748 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
4750 inst
.operands
[i
].reg
= val
;
4751 inst
.operands
[i
].isreg
= 1;
4752 inst
.operands
[i
].present
= 1;
4754 else if (parse_qfloat_immediate (&ptr
, &inst
.operands
[i
].imm
) == SUCCESS
)
4756 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
4757 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm> */
4758 if (!thumb_mode
&& (inst
.instruction
& 0xf0000000) != 0xe0000000)
4761 else if (parse_big_immediate (&ptr
, i
) == SUCCESS
)
4763 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
4764 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
4765 if (!thumb_mode
&& (inst
.instruction
& 0xf0000000) != 0xe0000000)
4768 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NDQ
, &rtype
, &optype
))
4771 /* Case 0: VMOV<c><q> <Qd>, <Qm>
4772 Case 1: VMOV<c><q> <Dd>, <Dm> */
4773 if (!thumb_mode
&& (inst
.instruction
& 0xf0000000) != 0xe0000000)
4776 inst
.operands
[i
].reg
= val
;
4777 inst
.operands
[i
].isreg
= 1;
4778 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
4779 inst
.operands
[i
].vectype
= optype
;
4780 inst
.operands
[i
].present
= 1;
4784 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
4788 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
4791 inst
.operands
[i
].reg
= val
;
4792 inst
.operands
[i
].isreg
= 1;
4793 inst
.operands
[i
++].present
= 1;
4795 if (skip_past_comma (&ptr
) == FAIL
)
4798 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
4800 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
4801 inst
.operands
[i
].reg
= val
;
4802 inst
.operands
[i
].isscalar
= 1;
4803 inst
.operands
[i
].present
= 1;
4804 inst
.operands
[i
].vectype
= optype
;
4806 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
4808 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
4809 inst
.operands
[i
].reg
= val
;
4810 inst
.operands
[i
].isreg
= 1;
4811 inst
.operands
[i
++].present
= 1;
4813 if (skip_past_comma (&ptr
) == FAIL
)
4816 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFD
, NULL
, &optype
))
4819 first_error (_(reg_expected_msgs
[REG_TYPE_VFD
]));
4823 inst
.operands
[i
].reg
= val
;
4824 inst
.operands
[i
].isreg
= 1;
4825 inst
.operands
[i
].regisimm
= 1;
4826 inst
.operands
[i
].vectype
= optype
;
4827 inst
.operands
[i
].present
= 1;
4832 first_error (_("parse error"));
4836 /* Successfully parsed the operands. Update args. */
4842 first_error (_("expected comma"));
4846 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
4850 first_error (_("instruction cannot be conditionalized"));
4854 /* Matcher codes for parse_operands. */
4855 enum operand_parse_code
4857 OP_stop
, /* end of line */
4859 OP_RR
, /* ARM register */
4860 OP_RRnpc
, /* ARM register, not r15 */
4861 OP_RRnpcb
, /* ARM register, not r15, in square brackets */
4862 OP_RRw
, /* ARM register, not r15, optional trailing ! */
4863 OP_RCP
, /* Coprocessor number */
4864 OP_RCN
, /* Coprocessor register */
4865 OP_RF
, /* FPA register */
4866 OP_RVS
, /* VFP single precision register */
4867 OP_RVD
, /* VFP double precision register (0..15) */
4868 OP_RND
, /* Neon double precision register (0..31) */
4869 OP_RNQ
, /* Neon quad precision register */
4870 OP_RNDQ
, /* Neon double or quad precision register */
4871 OP_RNSC
, /* Neon scalar D[X] */
4872 OP_RVC
, /* VFP control register */
4873 OP_RMF
, /* Maverick F register */
4874 OP_RMD
, /* Maverick D register */
4875 OP_RMFX
, /* Maverick FX register */
4876 OP_RMDX
, /* Maverick DX register */
4877 OP_RMAX
, /* Maverick AX register */
4878 OP_RMDS
, /* Maverick DSPSC register */
4879 OP_RIWR
, /* iWMMXt wR register */
4880 OP_RIWC
, /* iWMMXt wC register */
4881 OP_RIWG
, /* iWMMXt wCG register */
4882 OP_RXA
, /* XScale accumulator register */
4884 OP_REGLST
, /* ARM register list */
4885 OP_VRSLST
, /* VFP single-precision register list */
4886 OP_VRDLST
, /* VFP double-precision register list */
4887 OP_NRDLST
, /* Neon double-precision register list (d0-d31, qN aliases) */
4888 OP_NSTRLST
, /* Neon element/structure list */
4890 OP_NILO
, /* Neon immediate/logic operands 2 or 2+3. (VBIC, VORR...) */
4891 OP_RNDQ_I0
, /* Neon D or Q reg, or immediate zero. */
4892 OP_RR_RNSC
, /* ARM reg or Neon scalar. */
4893 OP_RNDQ_RNSC
, /* Neon D or Q reg, or Neon scalar. */
4894 OP_RND_RNSC
, /* Neon D reg, or Neon scalar. */
4895 OP_VMOV
, /* Neon VMOV operands. */
4896 OP_RNDQ_IMVNb
,/* Neon D or Q reg, or immediate good for VMVN. */
4897 OP_RNDQ_I63b
, /* Neon D or Q reg, or immediate for shift. */
4899 OP_I0
, /* immediate zero */
4900 OP_I7
, /* immediate value 0 .. 7 */
4901 OP_I15
, /* 0 .. 15 */
4902 OP_I16
, /* 1 .. 16 */
4903 OP_I16z
, /* 0 .. 16 */
4904 OP_I31
, /* 0 .. 31 */
4905 OP_I31w
, /* 0 .. 31, optional trailing ! */
4906 OP_I32
, /* 1 .. 32 */
4907 OP_I32z
, /* 0 .. 32 */
4908 OP_I63
, /* 0 .. 63 */
4909 OP_I63s
, /* -64 .. 63 */
4910 OP_I64
, /* 1 .. 64 */
4911 OP_I64z
, /* 0 .. 64 */
4912 OP_I255
, /* 0 .. 255 */
4913 OP_Iffff
, /* 0 .. 65535 */
4915 OP_I4b
, /* immediate, prefix optional, 1 .. 4 */
4916 OP_I7b
, /* 0 .. 7 */
4917 OP_I15b
, /* 0 .. 15 */
4918 OP_I31b
, /* 0 .. 31 */
4920 OP_SH
, /* shifter operand */
4921 OP_ADDR
, /* Memory address expression (any mode) */
4922 OP_EXP
, /* arbitrary expression */
4923 OP_EXPi
, /* same, with optional immediate prefix */
4924 OP_EXPr
, /* same, with optional relocation suffix */
4926 OP_CPSF
, /* CPS flags */
4927 OP_ENDI
, /* Endianness specifier */
4928 OP_PSR
, /* CPSR/SPSR mask for msr */
4929 OP_COND
, /* conditional code */
4930 OP_TB
, /* Table branch. */
4932 OP_RRnpc_I0
, /* ARM register or literal 0 */
4933 OP_RR_EXr
, /* ARM register or expression with opt. reloc suff. */
4934 OP_RR_EXi
, /* ARM register or expression with imm prefix */
4935 OP_RF_IF
, /* FPA register or immediate */
4936 OP_RIWR_RIWC
, /* iWMMXt R or C reg */
4938 /* Optional operands. */
4939 OP_oI7b
, /* immediate, prefix optional, 0 .. 7 */
4940 OP_oI31b
, /* 0 .. 31 */
4941 OP_oI32b
, /* 1 .. 32 */
4942 OP_oIffffb
, /* 0 .. 65535 */
4943 OP_oI255c
, /* curly-brace enclosed, 0 .. 255 */
4945 OP_oRR
, /* ARM register */
4946 OP_oRRnpc
, /* ARM register, not the PC */
4947 OP_oRND
, /* Optional Neon double precision register */
4948 OP_oRNQ
, /* Optional Neon quad precision register */
4949 OP_oRNDQ
, /* Optional Neon double or quad precision register */
4950 OP_oSHll
, /* LSL immediate */
4951 OP_oSHar
, /* ASR immediate */
4952 OP_oSHllar
, /* LSL or ASR immediate */
4953 OP_oROR
, /* ROR 0/8/16/24 */
4954 OP_oBARRIER
, /* Option argument for a barrier instruction. */
4956 OP_FIRST_OPTIONAL
= OP_oI7b
4959 /* Generic instruction operand parser. This does no encoding and no
4960 semantic validation; it merely squirrels values away in the inst
4961 structure. Returns SUCCESS or FAIL depending on whether the
4962 specified grammar matched. */
4964 parse_operands (char *str
, const unsigned char *pattern
)
4966 unsigned const char *upat
= pattern
;
4967 char *backtrack_pos
= 0;
4968 const char *backtrack_error
= 0;
4969 int i
, val
, backtrack_index
= 0;
4970 enum arm_reg_type rtype
;
4972 #define po_char_or_fail(chr) do { \
4973 if (skip_past_char (&str, chr) == FAIL) \
4977 #define po_reg_or_fail(regtype) do { \
4978 val = arm_typed_reg_parse (&str, regtype, &rtype, \
4979 &inst.operands[i].vectype); \
4982 first_error (_(reg_expected_msgs[regtype])); \
4985 inst.operands[i].reg = val; \
4986 inst.operands[i].isreg = 1; \
4987 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
4990 #define po_reg_or_goto(regtype, label) do { \
4991 val = arm_typed_reg_parse (&str, regtype, &rtype, \
4992 &inst.operands[i].vectype); \
4996 inst.operands[i].reg = val; \
4997 inst.operands[i].isreg = 1; \
4998 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
5001 #define po_imm_or_fail(min, max, popt) do { \
5002 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
5004 inst.operands[i].imm = val; \
5007 #define po_scalar_or_goto(elsz, label) do { \
5008 val = parse_scalar (&str, elsz, &inst.operands[i].vectype); \
5011 inst.operands[i].reg = val; \
5012 inst.operands[i].isscalar = 1; \
5015 #define po_misc_or_fail(expr) do { \
5020 skip_whitespace (str
);
5022 for (i
= 0; upat
[i
] != OP_stop
; i
++)
5024 if (upat
[i
] >= OP_FIRST_OPTIONAL
)
5026 /* Remember where we are in case we need to backtrack. */
5027 assert (!backtrack_pos
);
5028 backtrack_pos
= str
;
5029 backtrack_error
= inst
.error
;
5030 backtrack_index
= i
;
5034 po_char_or_fail (',');
5042 case OP_RR
: po_reg_or_fail (REG_TYPE_RN
); break;
5043 case OP_RCP
: po_reg_or_fail (REG_TYPE_CP
); break;
5044 case OP_RCN
: po_reg_or_fail (REG_TYPE_CN
); break;
5045 case OP_RF
: po_reg_or_fail (REG_TYPE_FN
); break;
5046 case OP_RVS
: po_reg_or_fail (REG_TYPE_VFS
); break;
5047 case OP_RVD
: po_reg_or_fail (REG_TYPE_VFD
); break;
5049 case OP_RND
: po_reg_or_fail (REG_TYPE_VFD
); break;
5050 case OP_RVC
: po_reg_or_fail (REG_TYPE_VFC
); break;
5051 case OP_RMF
: po_reg_or_fail (REG_TYPE_MVF
); break;
5052 case OP_RMD
: po_reg_or_fail (REG_TYPE_MVD
); break;
5053 case OP_RMFX
: po_reg_or_fail (REG_TYPE_MVFX
); break;
5054 case OP_RMDX
: po_reg_or_fail (REG_TYPE_MVDX
); break;
5055 case OP_RMAX
: po_reg_or_fail (REG_TYPE_MVAX
); break;
5056 case OP_RMDS
: po_reg_or_fail (REG_TYPE_DSPSC
); break;
5057 case OP_RIWR
: po_reg_or_fail (REG_TYPE_MMXWR
); break;
5058 case OP_RIWC
: po_reg_or_fail (REG_TYPE_MMXWC
); break;
5059 case OP_RIWG
: po_reg_or_fail (REG_TYPE_MMXWCG
); break;
5060 case OP_RXA
: po_reg_or_fail (REG_TYPE_XSCALE
); break;
5062 case OP_RNQ
: po_reg_or_fail (REG_TYPE_NQ
); break;
5064 case OP_RNDQ
: po_reg_or_fail (REG_TYPE_NDQ
); break;
5066 /* Neon scalar. Using an element size of 8 means that some invalid
5067 scalars are accepted here, so deal with those in later code. */
5068 case OP_RNSC
: po_scalar_or_goto (8, failure
); break;
5070 /* WARNING: We can expand to two operands here. This has the potential
5071 to totally confuse the backtracking mechanism! It will be OK at
least as long as we don't try to use optional args as well,
	     though.  */
5076 po_reg_or_goto (REG_TYPE_NDQ
, try_imm
);
5078 skip_past_comma (&str
);
5079 po_reg_or_goto (REG_TYPE_NDQ
, one_reg_only
);
5082 /* Optional register operand was omitted. Unfortunately, it's in
5083 operands[i-1] and we need it to be in inst.operands[i]. Fix that
5084 here (this is a bit grotty). */
5085 inst
.operands
[i
] = inst
.operands
[i
-1];
5086 inst
.operands
[i
-1].present
= 0;
5089 /* Immediate gets verified properly later, so accept any now. */
5090 po_imm_or_fail (INT_MIN
, INT_MAX
, TRUE
);
5096 po_reg_or_goto (REG_TYPE_NDQ
, try_imm0
);
5099 po_imm_or_fail (0, 0, TRUE
);
5105 po_scalar_or_goto (8, try_rr
);
5108 po_reg_or_fail (REG_TYPE_RN
);
5114 po_scalar_or_goto (8, try_ndq
);
5117 po_reg_or_fail (REG_TYPE_NDQ
);
5123 po_scalar_or_goto (8, try_vfd
);
5126 po_reg_or_fail (REG_TYPE_VFD
);
5131 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
5132 not careful then bad things might happen. */
5133 po_misc_or_fail (parse_neon_mov (&str
, &i
) == FAIL
);
5138 po_reg_or_goto (REG_TYPE_NDQ
, try_mvnimm
);
5141 /* There's a possibility of getting a 64-bit immediate here, so
5142 we need special handling. */
5143 if (parse_big_immediate (&str
, i
) == FAIL
)
5145 inst
.error
= _("immediate value is out of range");
5153 po_reg_or_goto (REG_TYPE_NDQ
, try_shimm
);
5156 po_imm_or_fail (0, 63, TRUE
);
5161 po_char_or_fail ('[');
5162 po_reg_or_fail (REG_TYPE_RN
);
5163 po_char_or_fail (']');
5167 po_reg_or_fail (REG_TYPE_RN
);
5168 if (skip_past_char (&str
, '!') == SUCCESS
)
5169 inst
.operands
[i
].writeback
= 1;
5173 case OP_I7
: po_imm_or_fail ( 0, 7, FALSE
); break;
5174 case OP_I15
: po_imm_or_fail ( 0, 15, FALSE
); break;
5175 case OP_I16
: po_imm_or_fail ( 1, 16, FALSE
); break;
5176 case OP_I16z
: po_imm_or_fail ( 0, 16, FALSE
); break;
5177 case OP_I31
: po_imm_or_fail ( 0, 31, FALSE
); break;
5178 case OP_I32
: po_imm_or_fail ( 1, 32, FALSE
); break;
5179 case OP_I32z
: po_imm_or_fail ( 0, 32, FALSE
); break;
5180 case OP_I63s
: po_imm_or_fail (-64, 63, FALSE
); break;
5181 case OP_I63
: po_imm_or_fail ( 0, 63, FALSE
); break;
5182 case OP_I64
: po_imm_or_fail ( 1, 64, FALSE
); break;
5183 case OP_I64z
: po_imm_or_fail ( 0, 64, FALSE
); break;
5184 case OP_I255
: po_imm_or_fail ( 0, 255, FALSE
); break;
5185 case OP_Iffff
: po_imm_or_fail ( 0, 0xffff, FALSE
); break;
5187 case OP_I4b
: po_imm_or_fail ( 1, 4, TRUE
); break;
5189 case OP_I7b
: po_imm_or_fail ( 0, 7, TRUE
); break;
5190 case OP_I15b
: po_imm_or_fail ( 0, 15, TRUE
); break;
5192 case OP_I31b
: po_imm_or_fail ( 0, 31, TRUE
); break;
5193 case OP_oI32b
: po_imm_or_fail ( 1, 32, TRUE
); break;
5194 case OP_oIffffb
: po_imm_or_fail ( 0, 0xffff, TRUE
); break;
5196 /* Immediate variants */
5198 po_char_or_fail ('{');
5199 po_imm_or_fail (0, 255, TRUE
);
5200 po_char_or_fail ('}');
5204 /* The expression parser chokes on a trailing !, so we have
5205 to find it first and zap it. */
5208 while (*s
&& *s
!= ',')
5213 inst
.operands
[i
].writeback
= 1;
5215 po_imm_or_fail (0, 31, TRUE
);
5223 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
5228 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
5233 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
5235 if (inst
.reloc
.exp
.X_op
== O_symbol
)
5237 val
= parse_reloc (&str
);
5240 inst
.error
= _("unrecognized relocation suffix");
5243 else if (val
!= BFD_RELOC_UNUSED
)
5245 inst
.operands
[i
].imm
= val
;
5246 inst
.operands
[i
].hasreloc
= 1;
5251 /* Register or expression */
5252 case OP_RR_EXr
: po_reg_or_goto (REG_TYPE_RN
, EXPr
); break;
5253 case OP_RR_EXi
: po_reg_or_goto (REG_TYPE_RN
, EXPi
); break;
5255 /* Register or immediate */
5256 case OP_RRnpc_I0
: po_reg_or_goto (REG_TYPE_RN
, I0
); break;
5257 I0
: po_imm_or_fail (0, 0, FALSE
); break;
5259 case OP_RF_IF
: po_reg_or_goto (REG_TYPE_FN
, IF
); break;
5261 if (!is_immediate_prefix (*str
))
5264 val
= parse_fpa_immediate (&str
);
5267 /* FPA immediates are encoded as registers 8-15.
5268 parse_fpa_immediate has already applied the offset. */
5269 inst
.operands
[i
].reg
= val
;
5270 inst
.operands
[i
].isreg
= 1;
5273 /* Two kinds of register */
5276 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
5277 if (rege
->type
!= REG_TYPE_MMXWR
5278 && rege
->type
!= REG_TYPE_MMXWC
5279 && rege
->type
!= REG_TYPE_MMXWCG
)
5281 inst
.error
= _("iWMMXt data or control register expected");
5284 inst
.operands
[i
].reg
= rege
->number
;
5285 inst
.operands
[i
].isreg
= (rege
->type
== REG_TYPE_MMXWR
);
5290 case OP_CPSF
: val
= parse_cps_flags (&str
); break;
5291 case OP_ENDI
: val
= parse_endian_specifier (&str
); break;
5292 case OP_oROR
: val
= parse_ror (&str
); break;
5293 case OP_PSR
: val
= parse_psr (&str
); break;
5294 case OP_COND
: val
= parse_cond (&str
); break;
5295 case OP_oBARRIER
:val
= parse_barrier (&str
); break;
5298 po_misc_or_fail (parse_tb (&str
));
5301 /* Register lists */
5303 val
= parse_reg_list (&str
);
5306 inst
.operands
[1].writeback
= 1;
5312 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_S
);
5316 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_D
);
5320 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
5325 val
= parse_neon_el_struct_list (&str
, &inst
.operands
[i
].reg
,
5326 &inst
.operands
[i
].vectype
);
5329 /* Addressing modes */
5331 po_misc_or_fail (parse_address (&str
, i
));
5335 po_misc_or_fail (parse_shifter_operand (&str
, i
));
5339 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_IMMEDIATE
));
5343 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_ASR_IMMEDIATE
));
5347 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_OR_ASR_IMMEDIATE
));
5351 as_fatal ("unhandled operand code %d", upat
[i
]);
5354 /* Various value-based sanity checks and shared operations. We
5355 do not signal immediate failures for the register constraints;
5356 this allows a syntax error to take precedence. */
5364 if (inst
.operands
[i
].isreg
&& inst
.operands
[i
].reg
== REG_PC
)
5365 inst
.error
= BAD_PC
;
5381 inst
.operands
[i
].imm
= val
;
5388 /* If we get here, this operand was successfully parsed. */
5389 inst
.operands
[i
].present
= 1;
5393 inst
.error
= BAD_ARGS
;
  /* The parse routine should already have set inst.error, but set a
     default here just in case.  */
5401 inst
.error
= _("syntax error");
5405 /* Do not backtrack over a trailing optional argument that
5406 absorbed some text. We will only fail again, with the
5407 'garbage following instruction' error message, which is
5408 probably less helpful than the current one. */
5409 if (backtrack_index
== i
&& backtrack_pos
!= str
5410 && upat
[i
+1] == OP_stop
)
5413 inst
.error
= _("syntax error");
5417 /* Try again, skipping the optional argument at backtrack_pos. */
5418 str
= backtrack_pos
;
5419 inst
.error
= backtrack_error
;
5420 inst
.operands
[backtrack_index
].present
= 0;
5421 i
= backtrack_index
;
5425 /* Check that we have parsed all the arguments. */
5426 if (*str
!= '\0' && !inst
.error
)
5427 inst
.error
= _("garbage following instruction");
5429 return inst
.error
? FAIL
: SUCCESS
;
5432 #undef po_char_or_fail
5433 #undef po_reg_or_fail
5434 #undef po_reg_or_goto
5435 #undef po_imm_or_fail
5436 #undef po_scalar_or_fail
5438 /* Shorthand macro for instruction encoding functions issuing errors. */
5439 #define constraint(expr, err) do { \
5447 /* Functions for operand encoding. ARM, then Thumb. */
#define rotate_left(v, n) (v << n | v >> (32 - n))

/* If VAL can be encoded in the immediate field of an ARM instruction,
   return the encoded form.  Otherwise, return FAIL.  */

static unsigned int
encode_arm_immediate (unsigned int val)
{
  unsigned int a, i;

  for (i = 0; i < 32; i += 2)
    if ((a = rotate_left (val, i)) <= 0xff)
      return a | (i << 7); /* 12-bit pack: [shift-cnt,const].  */

  return FAIL;
}
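/* For example, 0xff000000 rotates left by 8 to 0xff, so it encodes as
   0xff | (8 << 7) == 0x4ff: imm8 == 0xff with a rotation field of 4,
   i.e. 0xff rotated right by 8 bits.  */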
/* If VAL can be encoded in the immediate field of a Thumb32 instruction,
   return the encoded form.  Otherwise, return FAIL.  */

static unsigned int
encode_thumb32_immediate (unsigned int val)
{
  unsigned int a, i;

  if (val <= 0xff)
    return val;

  for (i = 1; i <= 24; i++)
    {
      a = val >> i;
      if ((val & ~(0xff << i)) == 0)
	return ((val >> i) & 0x7f) | ((32 - i) << 7);
    }

  a = val & 0xff;
  if (val == ((a << 16) | a))
    return 0x100 | a;
  if (val == ((a << 24) | (a << 16) | (a << 8) | a))
    return 0x300 | a;

  a = val & 0xff00;
  if (val == ((a << 16) | a))
    return 0x200 | (a >> 8);

  return FAIL;
}
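/* The replicated-byte cases above correspond to the Thumb-2 modified
   immediate patterns 0x00XY00XY, 0xXY00XY00 and 0xXYXYXYXY; for
   example, 0x00120012 encodes as 0x112.  */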
/* Encode a VFP SP or DP register number into inst.instruction.  */

static void
encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
{
  if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
      && reg > 15)
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
	{
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_v3);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_v3);
	}
      else
	{
	  first_error (_("D register out of range for selected VFP version"));
	  return;
	}
    }

  switch (pos)
    {
    case VFP_REG_Sd:
      inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
      break;

    case VFP_REG_Sn:
      inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
      break;

    case VFP_REG_Sm:
      inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
      break;

    case VFP_REG_Dd:
      inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
      break;

    case VFP_REG_Dn:
      inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
      break;

    case VFP_REG_Dm:
      inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
      break;

    default:
      abort ();
    }
}
/* Encode a <shift> in an ARM-format instruction.  The immediate,
   if any, is handled by md_apply_fix.  */

static void
encode_arm_shift (int i)
{
  if (inst.operands[i].shift_kind == SHIFT_RRX)
    inst.instruction |= SHIFT_ROR << 5;
  else
    {
      inst.instruction |= inst.operands[i].shift_kind << 5;
      if (inst.operands[i].immisreg)
	{
	  inst.instruction |= SHIFT_BY_REG;
	  inst.instruction |= inst.operands[i].imm << 8;
	}
      else
	inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
    }
}
static void
encode_arm_shifter_operand (int i)
{
  if (inst.operands[i].isreg)
    {
      inst.instruction |= inst.operands[i].reg;
      encode_arm_shift (i);
    }
  else
    inst.instruction |= INST_IMMEDIATE;
}
/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.  */

static void
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
{
  assert (inst.operands[i].isreg);
  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
    {
      if (is_t)
	{
	  inst.error = _("instruction does not accept preindexed addressing");
	  return;
	}
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;
    }
  else if (inst.operands[i].postind)
    {
      assert (inst.operands[i].writeback);
      if (is_t)
	inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
}
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 2 load or store instruction.  If is_t is true,
   reject forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */

static void
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
{
  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      inst.instruction |= INST_IMMEDIATE;  /* yes, this is backwards */
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
	{
	  if (inst.operands[i].shift_kind == SHIFT_RRX)
	    inst.instruction |= SHIFT_ROR << 5;
	  else
	    {
	      inst.instruction |= inst.operands[i].shift_kind << 5;
	      inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
	    }
	}
    }
  else /* immediate offset in inst.reloc */
    {
      if (inst.reloc.type == BFD_RELOC_UNUSED)
	inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
    }
}
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 3 load or store instruction.  Reject forms that
   cannot be used with such instructions.  If is_t is true, reject
   forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */

static void
encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
{
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
    {
      inst.error = _("instruction does not accept scaled register index");
      return;
    }

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
    }
  else /* immediate offset in inst.reloc */
    {
      inst.instruction |= HWOFFSET_IMM;
      if (inst.reloc.type == BFD_RELOC_UNUSED)
	inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
    }
}
5684 /* inst.operands[i] was set up by parse_address. Encode it into an
5685 ARM-format instruction. Reject all forms which cannot be encoded
5686 into a coprocessor load/store instruction. If wb_ok is false,
5687 reject use of writeback; if unind_ok is false, reject use of
5688 unindexed addressing. If reloc_override is not 0, use it instead
5689 of BFD_ARM_CP_OFF_IMM. */
5692 encode_arm_cp_address (int i
, int wb_ok
, int unind_ok
, int reloc_override
)
5694 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
5696 assert (!(inst
.operands
[i
].preind
&& inst
.operands
[i
].postind
));
5698 if (!inst
.operands
[i
].preind
&& !inst
.operands
[i
].postind
) /* unindexed */
5700 assert (!inst
.operands
[i
].writeback
);
5703 inst
.error
= _("instruction does not support unindexed addressing");
5706 inst
.instruction
|= inst
.operands
[i
].imm
;
5707 inst
.instruction
|= INDEX_UP
;
5711 if (inst
.operands
[i
].preind
)
5712 inst
.instruction
|= PRE_INDEX
;
5714 if (inst
.operands
[i
].writeback
)
5716 if (inst
.operands
[i
].reg
== REG_PC
)
5718 inst
.error
= _("pc may not be used with write-back");
5723 inst
.error
= _("instruction does not support writeback");
5726 inst
.instruction
|= WRITE_BACK
;
5730 inst
.reloc
.type
= reloc_override
;
5731 else if (thumb_mode
)
5732 inst
.reloc
.type
= BFD_RELOC_ARM_T32_CP_OFF_IMM
;
5734 inst
.reloc
.type
= BFD_RELOC_ARM_CP_OFF_IMM
;
5738 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
5739 Determine whether it can be performed with a move instruction; if
5740 it can, convert inst.instruction to that move instruction and
5741 return 1; if it can't, convert inst.instruction to a literal-pool
5742 load and return 0. If this is not a valid thing to do in the
5743 current context, set inst.error and return 1.
5745 inst.operands[i] describes the destination register. */
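/* For instance, in ARM state "ldr r0, =0x1000" can become
   "mov r0, #0x1000" and "ldr r0, =0xffffffff" can become "mvn r0, #0",
   while a constant that fits neither form falls back to a PC-relative
   load from the literal pool.  */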
5748 move_or_literal_pool (int i
, bfd_boolean thumb_p
, bfd_boolean mode_3
)
5753 tbit
= (inst
.instruction
> 0xffff) ? THUMB2_LOAD_BIT
: THUMB_LOAD_BIT
;
5757 if ((inst
.instruction
& tbit
) == 0)
5759 inst
.error
= _("invalid pseudo operation");
5762 if (inst
.reloc
.exp
.X_op
!= O_constant
&& inst
.reloc
.exp
.X_op
!= O_symbol
)
5764 inst
.error
= _("constant expression expected");
5767 if (inst
.reloc
.exp
.X_op
== O_constant
)
5771 if (!unified_syntax
&& (inst
.reloc
.exp
.X_add_number
& ~0xFF) == 0)
5773 /* This can be done with a mov(1) instruction. */
5774 inst
.instruction
= T_OPCODE_MOV_I8
| (inst
.operands
[i
].reg
<< 8);
5775 inst
.instruction
|= inst
.reloc
.exp
.X_add_number
;
5781 int value
= encode_arm_immediate (inst
.reloc
.exp
.X_add_number
);
5784 /* This can be done with a mov instruction. */
5785 inst
.instruction
&= LITERAL_MASK
;
5786 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MOV
<< DATA_OP_SHIFT
);
5787 inst
.instruction
|= value
& 0xfff;
5791 value
= encode_arm_immediate (~inst
.reloc
.exp
.X_add_number
);
5794 /* This can be done with a mvn instruction. */
5795 inst
.instruction
&= LITERAL_MASK
;
5796 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MVN
<< DATA_OP_SHIFT
);
5797 inst
.instruction
|= value
& 0xfff;
5803 if (add_to_lit_pool () == FAIL
)
5805 inst
.error
= _("literal pool insertion failed");
5808 inst
.operands
[1].reg
= REG_PC
;
5809 inst
.operands
[1].isreg
= 1;
5810 inst
.operands
[1].preind
= 1;
5811 inst
.reloc
.pc_rel
= 1;
5812 inst
.reloc
.type
= (thumb_p
5813 ? BFD_RELOC_ARM_THUMB_OFFSET
5815 ? BFD_RELOC_ARM_HWLITERAL
5816 : BFD_RELOC_ARM_LITERAL
));
5820 /* Functions for instruction encoding, sorted by subarchitecture.
5821 First some generics; their names are taken from the conventional
5822 bit positions for register arguments in ARM format instructions. */
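/* In these conventional ARM encodings, Rd sits in bits 15:12, Rn in
   bits 19:16, Rm in bits 3:0 and Rs in bits 11:8, so the helpers below
   simply shift the parsed register numbers into those fields.  */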
5832 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5838 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5839 inst
.instruction
|= inst
.operands
[1].reg
;
5845 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5846 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
5852 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
5853 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
5859 unsigned Rn
= inst
.operands
[2].reg
;
5860 /* Enforce restrictions on SWP instruction. */
5861 if ((inst
.instruction
& 0x0fbfffff) == 0x01000090)
5862 constraint (Rn
== inst
.operands
[0].reg
|| Rn
== inst
.operands
[1].reg
,
5863 _("Rn must not overlap other operands"));
5864 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5865 inst
.instruction
|= inst
.operands
[1].reg
;
5866 inst
.instruction
|= Rn
<< 16;
5872 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5873 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
5874 inst
.instruction
|= inst
.operands
[2].reg
;
5880 inst
.instruction
|= inst
.operands
[0].reg
;
5881 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
5882 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
5888 inst
.instruction
|= inst
.operands
[0].imm
;
5894 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5895 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
5898 /* ARM instructions, in alphabetical order by function name (except
5899 that wrapper functions appear immediately after the function they
5902 /* This is a pseudo-op of the form "adr rd, label" to be converted
5903 into a relative address of the form "add rd, pc, #label-.-8". */
5908 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
5910 /* Frag hacking will turn this into a sub instruction if the offset turns
5911 out to be negative. */
5912 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
5913 inst
.reloc
.pc_rel
= 1;
5914 inst
.reloc
.exp
.X_add_number
-= 8;
5917 /* This is a pseudo-op of the form "adrl rd, label" to be converted
5918 into a relative address of the form:
5919 add rd, pc, #low(label-.-8)"
5920 add rd, rd, #high(label-.-8)" */
5925 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
5927 /* Frag hacking will turn this into a sub instruction if the offset turns
5928 out to be negative. */
5929 inst
.reloc
.type
= BFD_RELOC_ARM_ADRL_IMMEDIATE
;
5930 inst
.reloc
.pc_rel
= 1;
5931 inst
.size
= INSN_SIZE
* 2;
5932 inst
.reloc
.exp
.X_add_number
-= 8;
5938 if (!inst
.operands
[1].present
)
5939 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
5940 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5941 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
5942 encode_arm_shifter_operand (2);
5948 if (inst
.operands
[0].present
)
5950 constraint ((inst
.instruction
& 0xf0) != 0x40
5951 && inst
.operands
[0].imm
!= 0xf,
5952 "bad barrier type");
5953 inst
.instruction
|= inst
.operands
[0].imm
;
5956 inst
.instruction
|= 0xf;
5962 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
5963 constraint (msb
> 32, _("bit-field extends past end of register"));
5964 /* The instruction encoding stores the LSB and MSB,
5965 not the LSB and width. */
5966 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5967 inst
.instruction
|= inst
.operands
[1].imm
<< 7;
5968 inst
.instruction
|= (msb
- 1) << 16;
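/* For example, "bfc r0, #4, #8" has lsb == 4 and width == 8, so the
   encoding carries lsb == 4 in bits 11:7 and msb - 1 == 11 in
   bits 20:16.  */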
5976 /* #0 in second position is alternative syntax for bfc, which is
5977 the same instruction but with REG_PC in the Rm field. */
5978 if (!inst
.operands
[1].isreg
)
5979 inst
.operands
[1].reg
= REG_PC
;
5981 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
5982 constraint (msb
> 32, _("bit-field extends past end of register"));
5983 /* The instruction encoding stores the LSB and MSB,
5984 not the LSB and width. */
5985 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5986 inst
.instruction
|= inst
.operands
[1].reg
;
5987 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
5988 inst
.instruction
|= (msb
- 1) << 16;
5994 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
5995 _("bit-field extends past end of register"));
5996 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5997 inst
.instruction
|= inst
.operands
[1].reg
;
5998 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
5999 inst
.instruction
|= (inst
.operands
[3].imm
- 1) << 16;
6002 /* ARM V5 breakpoint instruction (argument parse)
6003 BKPT <16 bit unsigned immediate>
6004 Instruction is not conditional.
6005 The bit pattern given in insns[] has the COND_ALWAYS condition,
6006 and it is an error if the caller tried to override that. */
6011 /* Top 12 of 16 bits to bits 19:8. */
6012 inst
.instruction
|= (inst
.operands
[0].imm
& 0xfff0) << 4;
6014 /* Bottom 4 of 16 bits to bits 3:0. */
6015 inst
.instruction
|= inst
.operands
[0].imm
& 0xf;
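/* For example, "bkpt 0xabcd" places 0xabc in bits 19:8 and 0xd in
   bits 3:0.  */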
6019 encode_branch (int default_reloc
)
6021 if (inst
.operands
[0].hasreloc
)
6023 constraint (inst
.operands
[0].imm
!= BFD_RELOC_ARM_PLT32
,
6024 _("the only suffix valid here is '(plt)'"));
6025 inst
.reloc
.type
= BFD_RELOC_ARM_PLT32
;
6029 inst
.reloc
.type
= default_reloc
;
6031 inst
.reloc
.pc_rel
= 1;
6038 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
6039 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
6042 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
6049 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
6051 if (inst
.cond
== COND_ALWAYS
)
6052 encode_branch (BFD_RELOC_ARM_PCREL_CALL
);
6054 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
6058 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
6061 /* ARM V5 branch-link-exchange instruction (argument parse)
6062 BLX <target_addr> ie BLX(1)
6063 BLX{<condition>} <Rm> ie BLX(2)
6064 Unfortunately, there are two different opcodes for this mnemonic.
6065 So, the insns[].value is not used, and the code here zaps values
6066 into inst.instruction.
6067 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
6072 if (inst
.operands
[0].isreg
)
6074 /* Arg is a register; the opcode provided by insns[] is correct.
6075 It is not illegal to do "blx pc", just useless. */
6076 if (inst
.operands
[0].reg
== REG_PC
)
6077 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
6079 inst
.instruction
|= inst
.operands
[0].reg
;
6083 /* Arg is an address; this instruction cannot be executed
6084 conditionally, and the opcode must be adjusted. */
6085 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
6086 inst
.instruction
= 0xfa000000;
6088 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
6089 encode_branch (BFD_RELOC_ARM_PCREL_CALL
);
6092 encode_branch (BFD_RELOC_ARM_PCREL_BLX
);
6099 if (inst
.operands
[0].reg
== REG_PC
)
6100 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
6102 inst
.instruction
|= inst
.operands
[0].reg
;
6106 /* ARM v5TEJ. Jump to Jazelle code. */
6111 if (inst
.operands
[0].reg
== REG_PC
)
6112 as_tsktsk (_("use of r15 in bxj is not really useful"));
6114 inst
.instruction
|= inst
.operands
[0].reg
;
6117 /* Co-processor data operation:
6118 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
6119 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
6123 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
6124 inst
.instruction
|= inst
.operands
[1].imm
<< 20;
6125 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
6126 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
6127 inst
.instruction
|= inst
.operands
[4].reg
;
6128 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
6134 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6135 encode_arm_shifter_operand (1);
6138 /* Transfer between coprocessor and ARM registers.
6139 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
6144 No special properties. */
6149 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
6150 inst
.instruction
|= inst
.operands
[1].imm
<< 21;
6151 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
6152 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
6153 inst
.instruction
|= inst
.operands
[4].reg
;
6154 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
6157 /* Transfer between coprocessor register and pair of ARM registers.
6158 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
6163 Two XScale instructions are special cases of these:
6165 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
6166 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
   Result unpredictable if Rd or Rn is R15.  */
6173 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
6174 inst
.instruction
|= inst
.operands
[1].imm
<< 4;
6175 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
6176 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
6177 inst
.instruction
|= inst
.operands
[4].reg
;
6183 inst
.instruction
|= inst
.operands
[0].imm
<< 6;
6184 inst
.instruction
|= inst
.operands
[1].imm
;
6190 inst
.instruction
|= inst
.operands
[0].imm
;
6196 /* There is no IT instruction in ARM mode. We
6197 process it but do not generate code for it. */
6204 int base_reg
= inst
.operands
[0].reg
;
6205 int range
= inst
.operands
[1].imm
;
6207 inst
.instruction
|= base_reg
<< 16;
6208 inst
.instruction
|= range
;
6210 if (inst
.operands
[1].writeback
)
6211 inst
.instruction
|= LDM_TYPE_2_OR_3
;
6213 if (inst
.operands
[0].writeback
)
6215 inst
.instruction
|= WRITE_BACK
;
6216 /* Check for unpredictable uses of writeback. */
6217 if (inst
.instruction
& LOAD_BIT
)
6219 /* Not allowed in LDM type 2. */
6220 if ((inst
.instruction
& LDM_TYPE_2_OR_3
)
6221 && ((range
& (1 << REG_PC
)) == 0))
6222 as_warn (_("writeback of base register is UNPREDICTABLE"));
6223 /* Only allowed if base reg not in list for other types. */
6224 else if (range
& (1 << base_reg
))
6225 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
6229 /* Not allowed for type 2. */
6230 if (inst
.instruction
& LDM_TYPE_2_OR_3
)
6231 as_warn (_("writeback of base register is UNPREDICTABLE"));
6232 /* Only allowed if base reg not in list, or first in list. */
6233 else if ((range
& (1 << base_reg
))
6234 && (range
& ((1 << base_reg
) - 1)))
6235 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
6240 /* ARMv5TE load-consecutive (argument parse)
6249 constraint (inst
.operands
[0].reg
% 2 != 0,
6250 _("first destination register must be even"));
6251 constraint (inst
.operands
[1].present
6252 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
6253 _("can only load two consecutive registers"));
6254 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
6255 constraint (!inst
.operands
[2].isreg
, _("'[' expected"));
6257 if (!inst
.operands
[1].present
)
6258 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
6260 if (inst
.instruction
& LOAD_BIT
)
6262 /* encode_arm_addr_mode_3 will diagnose overlap between the base
6263 register and the first register written; we have to diagnose
6264 overlap between the base and the second register written here. */
6266 if (inst
.operands
[2].reg
== inst
.operands
[1].reg
6267 && (inst
.operands
[2].writeback
|| inst
.operands
[2].postind
))
6268 as_warn (_("base register written back, and overlaps "
6269 "second destination register"));
6271 /* For an index-register load, the index register must not overlap the
6272 destination (even if not write-back). */
6273 else if (inst
.operands
[2].immisreg
6274 && ((unsigned) inst
.operands
[2].imm
== inst
.operands
[0].reg
6275 || (unsigned) inst
.operands
[2].imm
== inst
.operands
[1].reg
))
6276 as_warn (_("index register overlaps destination register"));
6279 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6280 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE
);
6286 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
6287 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
6288 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
6289 || inst
.operands
[1].negative
6290 /* This can arise if the programmer has written
6292 or if they have mistakenly used a register name as the last
6295 It is very difficult to distinguish between these two cases
6296 because "rX" might actually be a label. ie the register
6297 name has been occluded by a symbol of the same name. So we
6298 just generate a general 'bad addressing mode' type error
6299 message and leave it up to the programmer to discover the
6300 true cause and fix their mistake. */
6301 || (inst
.operands
[1].reg
== REG_PC
),
6304 constraint (inst
.reloc
.exp
.X_op
!= O_constant
6305 || inst
.reloc
.exp
.X_add_number
!= 0,
6306 _("offset must be zero in ARM encoding"));
6308 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6309 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6310 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
6316 constraint (inst
.operands
[0].reg
% 2 != 0,
6317 _("even register required"));
6318 constraint (inst
.operands
[1].present
6319 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
6320 _("can only load two consecutive registers"));
6321 /* If op 1 were present and equal to PC, this function wouldn't
6322 have been called in the first place. */
6323 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
6325 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6326 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
6332 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6333 if (!inst
.operands
[1].isreg
)
6334 if (move_or_literal_pool (0, /*thumb_p=*/FALSE
, /*mode_3=*/FALSE
))
6336 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE
);
6342 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
6344 if (inst
.operands
[1].preind
)
6346 constraint (inst
.reloc
.exp
.X_op
!= O_constant
||
6347 inst
.reloc
.exp
.X_add_number
!= 0,
6348 _("this instruction requires a post-indexed address"));
6350 inst
.operands
[1].preind
= 0;
6351 inst
.operands
[1].postind
= 1;
6352 inst
.operands
[1].writeback
= 1;
6354 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6355 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE
);
6358 /* Halfword and signed-byte load/store operations. */
6363 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6364 if (!inst
.operands
[1].isreg
)
6365 if (move_or_literal_pool (0, /*thumb_p=*/FALSE
, /*mode_3=*/TRUE
))
6367 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE
);
6373 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
6375 if (inst
.operands
[1].preind
)
6377 constraint (inst
.reloc
.exp
.X_op
!= O_constant
||
6378 inst
.reloc
.exp
.X_add_number
!= 0,
6379 _("this instruction requires a post-indexed address"));
6381 inst
.operands
[1].preind
= 0;
6382 inst
.operands
[1].postind
= 1;
6383 inst
.operands
[1].writeback
= 1;
6385 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6386 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE
);
6389 /* Co-processor register load/store.
6390 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
6394 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
6395 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
6396 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
6402 /* This restriction does not apply to mls (nor to mla in v6, but
6403 that's hard to detect at present). */
6404 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
6405 && !(inst
.instruction
& 0x00400000))
6406 as_tsktsk (_("rd and rm should be different in mla"));
6408 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6409 inst
.instruction
|= inst
.operands
[1].reg
;
6410 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
6411 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
6418 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6419 encode_arm_shifter_operand (1);
6422 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
6426 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6427 /* The value is in two pieces: 0:11, 16:19. */
6428 inst
.instruction
|= (inst
.operands
[1].imm
& 0x00000fff);
6429 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0000f000) << 4;
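/* For example, "movw r0, #0x1234" puts imm12 == 0x234 in bits 11:0 and
   imm4 == 0x1 in bits 19:16.  */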
6435 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
6436 constraint ((inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
))
6438 _("'CPSR' or 'SPSR' expected"));
6439 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6440 inst
.instruction
|= (inst
.operands
[1].imm
& SPSR_BIT
);
6443 /* Two possible forms:
6444 "{C|S}PSR_<field>, Rm",
6445 "{C|S}PSR_f, #expression". */
6450 inst
.instruction
|= inst
.operands
[0].imm
;
6451 if (inst
.operands
[1].isreg
)
6452 inst
.instruction
|= inst
.operands
[1].reg
;
6455 inst
.instruction
|= INST_IMMEDIATE
;
6456 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
6457 inst
.reloc
.pc_rel
= 0;
6464 if (!inst
.operands
[2].present
)
6465 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
6466 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6467 inst
.instruction
|= inst
.operands
[1].reg
;
6468 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
6470 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
6471 as_tsktsk (_("rd and rm should be different in mul"));
6474 /* Long Multiply Parser
6475 UMULL RdLo, RdHi, Rm, Rs
6476 SMULL RdLo, RdHi, Rm, Rs
6477 UMLAL RdLo, RdHi, Rm, Rs
6478 SMLAL RdLo, RdHi, Rm, Rs. */
6483 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6484 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6485 inst
.instruction
|= inst
.operands
[2].reg
;
6486 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
6488 /* rdhi, rdlo and rm must all be different. */
6489 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
6490 || inst
.operands
[0].reg
== inst
.operands
[2].reg
6491 || inst
.operands
[1].reg
== inst
.operands
[2].reg
)
6492 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
6498 if (inst
.operands
[0].present
)
6500 /* Architectural NOP hints are CPSR sets with no bits selected. */
6501 inst
.instruction
&= 0xf0000000;
6502 inst
.instruction
|= 0x0320f000 + inst
.operands
[0].imm
;
6506 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
6507 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
6508 Condition defaults to COND_ALWAYS.
6509 Error if Rd, Rn or Rm are R15. */
6514 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6515 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6516 inst
.instruction
|= inst
.operands
[2].reg
;
6517 if (inst
.operands
[3].present
)
6518 encode_arm_shift (3);
6521 /* ARM V6 PKHTB (Argument Parse). */
6526 if (!inst
.operands
[3].present
)
6528 /* If the shift specifier is omitted, turn the instruction
6529 into pkhbt rd, rm, rn. */
6530 inst
.instruction
&= 0xfff00010;
6531 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6532 inst
.instruction
|= inst
.operands
[1].reg
;
6533 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
6537 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6538 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6539 inst
.instruction
|= inst
.operands
[2].reg
;
6540 encode_arm_shift (3);
6544 /* ARMv5TE: Preload-Cache
6548 Syntactically, like LDR with B=1, W=0, L=1. */
6553 constraint (!inst
.operands
[0].isreg
,
6554 _("'[' expected after PLD mnemonic"));
6555 constraint (inst
.operands
[0].postind
,
6556 _("post-indexed expression used in preload instruction"));
6557 constraint (inst
.operands
[0].writeback
,
6558 _("writeback used in preload instruction"));
6559 constraint (!inst
.operands
[0].preind
,
6560 _("unindexed addressing used in preload instruction"));
6561 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
6564 /* ARMv7: PLI <addr_mode> */
6568 constraint (!inst
.operands
[0].isreg
,
6569 _("'[' expected after PLI mnemonic"));
6570 constraint (inst
.operands
[0].postind
,
6571 _("post-indexed expression used in preload instruction"));
6572 constraint (inst
.operands
[0].writeback
,
6573 _("writeback used in preload instruction"));
6574 constraint (!inst
.operands
[0].preind
,
6575 _("unindexed addressing used in preload instruction"));
6576 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
6577 inst
.instruction
&= ~PRE_INDEX
;
6583 inst
.operands
[1] = inst
.operands
[0];
6584 memset (&inst
.operands
[0], 0, sizeof inst
.operands
[0]);
6585 inst
.operands
[0].isreg
= 1;
6586 inst
.operands
[0].writeback
= 1;
6587 inst
.operands
[0].reg
= REG_SP
;
6591 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
6592 word at the specified address and the following word
6594 Unconditionally executed.
6595 Error if Rn is R15. */
6600 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6601 if (inst
.operands
[0].writeback
)
6602 inst
.instruction
|= WRITE_BACK
;
6605 /* ARM V6 ssat (argument parse). */
6610 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6611 inst
.instruction
|= (inst
.operands
[1].imm
- 1) << 16;
6612 inst
.instruction
|= inst
.operands
[2].reg
;
6614 if (inst
.operands
[3].present
)
6615 encode_arm_shift (3);
6618 /* ARM V6 usat (argument parse). */
6623 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6624 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
6625 inst
.instruction
|= inst
.operands
[2].reg
;
6627 if (inst
.operands
[3].present
)
6628 encode_arm_shift (3);
6631 /* ARM V6 ssat16 (argument parse). */
6636 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6637 inst
.instruction
|= ((inst
.operands
[1].imm
- 1) << 16);
6638 inst
.instruction
|= inst
.operands
[2].reg
;
6644 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6645 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
6646 inst
.instruction
|= inst
.operands
[2].reg
;
6649 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
6650 preserving the other bits.
6652 setend <endian_specifier>, where <endian_specifier> is either
6658 if (inst
.operands
[0].imm
)
6659 inst
.instruction
|= 0x200;
6665 unsigned int Rm
= (inst
.operands
[1].present
6666 ? inst
.operands
[1].reg
6667 : inst
.operands
[0].reg
);
6669 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6670 inst
.instruction
|= Rm
;
6671 if (inst
.operands
[2].isreg
) /* Rd, {Rm,} Rs */
6673 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
6674 inst
.instruction
|= SHIFT_BY_REG
;
6677 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
6683 inst
.reloc
.type
= BFD_RELOC_ARM_SMC
;
6684 inst
.reloc
.pc_rel
= 0;
6690 inst
.reloc
.type
= BFD_RELOC_ARM_SWI
;
6691 inst
.reloc
.pc_rel
= 0;
6694 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
6695 SMLAxy{cond} Rd,Rm,Rs,Rn
6696 SMLAWy{cond} Rd,Rm,Rs,Rn
6697 Error if any register is R15. */
6702 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6703 inst
.instruction
|= inst
.operands
[1].reg
;
6704 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
6705 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
6708 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
6709 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
6710 Error if any register is R15.
6711 Warning if Rdlo == Rdhi. */
6716 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6717 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6718 inst
.instruction
|= inst
.operands
[2].reg
;
6719 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
6721 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
6722 as_tsktsk (_("rdhi and rdlo must be different"));
6725 /* ARM V5E (El Segundo) signed-multiply (argument parse)
6726 SMULxy{cond} Rd,Rm,Rs
6727 Error if any register is R15. */
6732 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6733 inst
.instruction
|= inst
.operands
[1].reg
;
6734 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
6737 /* ARM V6 srs (argument parse). */
6742 inst
.instruction
|= inst
.operands
[0].imm
;
6743 if (inst
.operands
[0].writeback
)
6744 inst
.instruction
|= WRITE_BACK
;
6747 /* ARM V6 strex (argument parse). */
6752 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
6753 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
6754 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
6755 || inst
.operands
[2].negative
6756 /* See comment in do_ldrex(). */
6757 || (inst
.operands
[2].reg
== REG_PC
),
6760 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
6761 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
6763 constraint (inst
.reloc
.exp
.X_op
!= O_constant
6764 || inst
.reloc
.exp
.X_add_number
!= 0,
6765 _("offset must be zero in ARM encoding"));
6767 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6768 inst
.instruction
|= inst
.operands
[1].reg
;
6769 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
6770 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
6776 constraint (inst
.operands
[1].reg
% 2 != 0,
6777 _("even register required"));
6778 constraint (inst
.operands
[2].present
6779 && inst
.operands
[2].reg
!= inst
.operands
[1].reg
+ 1,
6780 _("can only store two consecutive registers"));
6781 /* If op 2 were present and equal to PC, this function wouldn't
6782 have been called in the first place. */
6783 constraint (inst
.operands
[1].reg
== REG_LR
, _("r14 not allowed here"));
6785 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
6786 || inst
.operands
[0].reg
== inst
.operands
[1].reg
+ 1
6787 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
6790 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6791 inst
.instruction
|= inst
.operands
[1].reg
;
6792 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
6795 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
6796 extends it to 32-bits, and adds the result to a value in another
6797 register. You can specify a rotation by 0, 8, 16, or 24 bits
6798 before extracting the 16-bit value.
6799 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
6800 Condition defaults to COND_ALWAYS.
6801 Error if any register uses R15. */
6806 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6807 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6808 inst
.instruction
|= inst
.operands
[2].reg
;
6809 inst
.instruction
|= inst
.operands
[3].imm
<< 10;
6814 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
6815 Condition defaults to COND_ALWAYS.
6816 Error if any register uses R15. */
6821 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6822 inst
.instruction
|= inst
.operands
[1].reg
;
6823 inst
.instruction
|= inst
.operands
[2].imm
<< 10;
/* VFP instructions.  In a logical order: SP variant first, monad
   before dyad, arithmetic then move then load/store.  */

static void
do_vfp_sp_monadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}

static void
do_vfp_sp_dyadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
6845 do_vfp_sp_compare_z (void)
6847 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
6851 do_vfp_dp_sp_cvt (void)
6853 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
6854 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
6858 do_vfp_sp_dp_cvt (void)
6860 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
6861 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
6865 do_vfp_reg_from_sp (void)
6867 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6868 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
6872 do_vfp_reg2_from_sp2 (void)
6874 constraint (inst
.operands
[2].imm
!= 2,
6875 _("only two consecutive VFP SP registers allowed here"));
6876 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6877 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6878 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
6882 do_vfp_sp_from_reg (void)
6884 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sn
);
6885 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
6889 do_vfp_sp2_from_reg2 (void)
6891 constraint (inst
.operands
[0].imm
!= 2,
6892 _("only two consecutive VFP SP registers allowed here"));
6893 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sm
);
6894 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
6895 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
6899 do_vfp_sp_ldst (void)
6901 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
6902 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
6906 do_vfp_dp_ldst (void)
6908 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
6909 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
6914 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type
)
6916 if (inst
.operands
[0].writeback
)
6917 inst
.instruction
|= WRITE_BACK
;
6919 constraint (ldstm_type
!= VFP_LDSTMIA
,
6920 _("this addressing mode requires base-register writeback"));
6921 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6922 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sd
);
6923 inst
.instruction
|= inst
.operands
[1].imm
;
6927 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type
)
6931 if (inst
.operands
[0].writeback
)
6932 inst
.instruction
|= WRITE_BACK
;
6934 constraint (ldstm_type
!= VFP_LDSTMIA
&& ldstm_type
!= VFP_LDSTMIAX
,
6935 _("this addressing mode requires base-register writeback"));
6937 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6938 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
6940 count
= inst
.operands
[1].imm
<< 1;
6941 if (ldstm_type
== VFP_LDSTMIAX
|| ldstm_type
== VFP_LDSTMDBX
)
6944 inst
.instruction
|= count
;
6948 do_vfp_sp_ldstmia (void)
6950 vfp_sp_ldstm (VFP_LDSTMIA
);
6954 do_vfp_sp_ldstmdb (void)
6956 vfp_sp_ldstm (VFP_LDSTMDB
);
6960 do_vfp_dp_ldstmia (void)
6962 vfp_dp_ldstm (VFP_LDSTMIA
);
6966 do_vfp_dp_ldstmdb (void)
6968 vfp_dp_ldstm (VFP_LDSTMDB
);
6972 do_vfp_xp_ldstmia (void)
6974 vfp_dp_ldstm (VFP_LDSTMIAX
);
6978 do_vfp_xp_ldstmdb (void)
6980 vfp_dp_ldstm (VFP_LDSTMDBX
);
6984 do_vfp_dp_rd_rm (void)
6986 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
6987 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
6991 do_vfp_dp_rn_rd (void)
6993 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dn
);
6994 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
6998 do_vfp_dp_rd_rn (void)
7000 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
7001 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
7005 do_vfp_dp_rd_rn_rm (void)
7007 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
7008 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
7009 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dm
);
7015 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
7019 do_vfp_dp_rm_rd_rn (void)
7021 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dm
);
7022 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
7023 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dn
);
7026 /* VFPv3 instructions. */
7028 do_vfp_sp_const (void)
7030 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
7031 inst
.instruction
|= (inst
.operands
[1].imm
& 15) << 16;
7032 inst
.instruction
|= (inst
.operands
[1].imm
>> 4);
7036 do_vfp_dp_const (void)
7038 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
7039 inst
.instruction
|= (inst
.operands
[1].imm
& 15) << 16;
7040 inst
.instruction
|= (inst
.operands
[1].imm
>> 4);
7044 vfp_conv (int srcsize
)
7046 unsigned immbits
= srcsize
- inst
.operands
[1].imm
;
7047 inst
.instruction
|= (immbits
& 1) << 5;
7048 inst
.instruction
|= (immbits
>> 1);
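/* The encoded value is srcsize minus the number of fraction bits; e.g.
   a 32-bit source with 16 fraction bits gives immbits == 16, so bit 5
   is clear and the low nibble holds 8.  */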
7052 do_vfp_sp_conv_16 (void)
7054 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
7059 do_vfp_dp_conv_16 (void)
7061 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
7066 do_vfp_sp_conv_32 (void)
7068 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
7073 do_vfp_dp_conv_32 (void)
7075 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
7080 /* FPA instructions. Also in a logical order. */
7085 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7086 inst
.instruction
|= inst
.operands
[1].reg
;
7090 do_fpa_ldmstm (void)
7092 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7093 switch (inst
.operands
[1].imm
)
7095 case 1: inst
.instruction
|= CP_T_X
; break;
7096 case 2: inst
.instruction
|= CP_T_Y
; break;
7097 case 3: inst
.instruction
|= CP_T_Y
| CP_T_X
; break;
7102 if (inst
.instruction
& (PRE_INDEX
| INDEX_UP
))
7104 /* The instruction specified "ea" or "fd", so we can only accept
7105 [Rn]{!}. The instruction does not really support stacking or
7106 unstacking, so we have to emulate these by setting appropriate
7107 bits and offsets. */
7108 constraint (inst
.reloc
.exp
.X_op
!= O_constant
7109 || inst
.reloc
.exp
.X_add_number
!= 0,
7110 _("this instruction does not support indexing"));
7112 if ((inst
.instruction
& PRE_INDEX
) || inst
.operands
[2].writeback
)
7113 inst
.reloc
.exp
.X_add_number
= 12 * inst
.operands
[1].imm
;
7115 if (!(inst
.instruction
& INDEX_UP
))
7116 inst
.reloc
.exp
.X_add_number
= -inst
.reloc
.exp
.X_add_number
;
7118 if (!(inst
.instruction
& PRE_INDEX
) && inst
.operands
[2].writeback
)
7120 inst
.operands
[2].preind
= 0;
7121 inst
.operands
[2].postind
= 1;
7125 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
7128 /* iWMMXt instructions: strictly in alphabetical order. */
7131 do_iwmmxt_tandorc (void)
7133 constraint (inst
.operands
[0].reg
!= REG_PC
, _("only r15 allowed here"));
7137 do_iwmmxt_textrc (void)
7139 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7140 inst
.instruction
|= inst
.operands
[1].imm
;
7144 do_iwmmxt_textrm (void)
7146 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7147 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7148 inst
.instruction
|= inst
.operands
[2].imm
;
7152 do_iwmmxt_tinsr (void)
7154 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7155 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
7156 inst
.instruction
|= inst
.operands
[2].imm
;
7160 do_iwmmxt_tmia (void)
7162 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
7163 inst
.instruction
|= inst
.operands
[1].reg
;
7164 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
7168 do_iwmmxt_waligni (void)
7170 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7171 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7172 inst
.instruction
|= inst
.operands
[2].reg
;
7173 inst
.instruction
|= inst
.operands
[3].imm
<< 20;
7177 do_iwmmxt_wmov (void)
7179 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
7180 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7181 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7182 inst
.instruction
|= inst
.operands
[1].reg
;
7186 do_iwmmxt_wldstbh (void)
7189 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7191 reloc
= BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
;
7193 reloc
= BFD_RELOC_ARM_CP_OFF_IMM_S2
;
7194 encode_arm_cp_address (1, TRUE
, FALSE
, reloc
);
7198 do_iwmmxt_wldstw (void)
7200 /* RIWR_RIWC clears .isreg for a control register. */
7201 if (!inst
.operands
[0].isreg
)
7203 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
7204 inst
.instruction
|= 0xf0000000;
7207 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7208 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
7212 do_iwmmxt_wldstd (void)
7214 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7215 encode_arm_cp_address (1, TRUE
, FALSE
, 0);
7219 do_iwmmxt_wshufh (void)
7221 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7222 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7223 inst
.instruction
|= ((inst
.operands
[2].imm
& 0xf0) << 16);
7224 inst
.instruction
|= (inst
.operands
[2].imm
& 0x0f);
7228 do_iwmmxt_wzero (void)
7230 /* WZERO reg is an alias for WANDN reg, reg, reg. */
7231 inst
.instruction
|= inst
.operands
[0].reg
;
7232 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7233 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7236 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
7237 operations first, then control, shift, and load/store. */
7239 /* Insns like "foo X,Y,Z". */
7242 do_mav_triple (void)
7244 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7245 inst
.instruction
|= inst
.operands
[1].reg
;
7246 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
7249 /* Insns like "foo W,X,Y,Z".
7250 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
7255 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
7256 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
7257 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
7258 inst
.instruction
|= inst
.operands
[3].reg
;
7261 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
7265 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
7268 /* Maverick shift immediate instructions.
7269 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
7270 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
7275 int imm
= inst
.operands
[2].imm
;
7277 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7278 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7280 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
7281 Bits 5-7 of the insn should have bits 4-6 of the immediate.
7282 Bit 4 should be 0. */
7283 imm
= (imm
& 0xf) | ((imm
& 0x70) << 1);
7285 inst
.instruction
|= imm
;
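/* For example, a shift amount of 0x25 packs as
   (0x25 & 0xf) | ((0x25 & 0x70) << 1) == 0x45: the low nibble stays in
   bits 0-3, bits 4-6 of the amount move to bits 5-7, and bit 4 of the
   encoding is left clear.  */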
7288 /* XScale instructions. Also sorted arithmetic before move. */
7290 /* Xscale multiply-accumulate (argument parse)
7293 MIAxycc acc0,Rm,Rs. */
7298 inst
.instruction
|= inst
.operands
[1].reg
;
7299 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
7302 /* Xscale move-accumulator-register (argument parse)
7304 MARcc acc0,RdLo,RdHi. */
7309 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
7310 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
7313 /* Xscale move-register-accumulator (argument parse)
7315 MRAcc RdLo,RdHi,acc0. */
7320 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
, BAD_OVERLAP
);
7321 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7322 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7325 /* Encoding functions relevant only to Thumb. */
/* inst.operands[i] is a shifted-register operand; encode
   it into inst.instruction in the format used by Thumb32.  */

static void
encode_thumb32_shifted_operand (int i)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      if (value == 0)
	shift = SHIFT_LSL;
      else if (value == 32)
	value = 0;

      inst.instruction |= shift << 4;
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
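/* The 5-bit shift amount is split in the Thumb-2 encoding: bits 4:2 of
   the value become imm3 (instruction bits 14:12) and bits 1:0 become
   imm2 (instruction bits 7:6).  */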
7363 /* inst.operands[i] was set up by parse_address. Encode it into a
7364 Thumb32 format load or store instruction. Reject forms that cannot
7365 be used with such instructions. If is_t is true, reject forms that
7366 cannot be used with a T instruction; if is_d is true, reject forms
7367 that cannot be used with a D instruction. */
7370 encode_thumb32_addr_mode (int i
, bfd_boolean is_t
, bfd_boolean is_d
)
7372 bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
7374 constraint (!inst
.operands
[i
].isreg
,
7375 _("Instruction does not support =N addresses"));
7377 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
7378 if (inst
.operands
[i
].immisreg
)
7380 constraint (is_pc
, _("cannot use register index with PC-relative addressing"));
7381 constraint (is_t
|| is_d
, _("cannot use register index with this instruction"));
7382 constraint (inst
.operands
[i
].negative
,
7383 _("Thumb does not support negative register indexing"));
7384 constraint (inst
.operands
[i
].postind
,
7385 _("Thumb does not support register post-indexing"));
7386 constraint (inst
.operands
[i
].writeback
,
7387 _("Thumb does not support register indexing with writeback"));
7388 constraint (inst
.operands
[i
].shifted
&& inst
.operands
[i
].shift_kind
!= SHIFT_LSL
,
7389 _("Thumb supports only LSL in shifted register indexing"));
7391 inst
.instruction
|= inst
.operands
[i
].imm
;
7392 if (inst
.operands
[i
].shifted
)
7394 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
7395 _("expression too complex"));
7396 constraint (inst
.reloc
.exp
.X_add_number
< 0
7397 || inst
.reloc
.exp
.X_add_number
> 3,
7398 _("shift out of range"));
7399 inst
.instruction
|= inst
.reloc
.exp
.X_add_number
<< 4;
7401 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
7403 else if (inst
.operands
[i
].preind
)
7405 constraint (is_pc
&& inst
.operands
[i
].writeback
,
7406 _("cannot use writeback with PC-relative addressing"));
7407 constraint (is_t
&& inst
.operands
[i
].writeback
,
7408 _("cannot use writeback with this instruction"));
7412 inst
.instruction
|= 0x01000000;
7413 if (inst
.operands
[i
].writeback
)
7414 inst
.instruction
|= 0x00200000;
7418 inst
.instruction
|= 0x00000c00;
7419 if (inst
.operands
[i
].writeback
)
7420 inst
.instruction
|= 0x00000100;
7422 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
7424 else if (inst
.operands
[i
].postind
)
7426 assert (inst
.operands
[i
].writeback
);
7427 constraint (is_pc
, _("cannot use post-indexing with PC-relative addressing"));
7428 constraint (is_t
, _("cannot use post-indexing with this instruction"));
7431 inst
.instruction
|= 0x00200000;
7433 inst
.instruction
|= 0x00000900;
7434 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
7436 else /* unindexed - only for coprocessor */
7437 inst
.error
= _("instruction does not accept unindexed addressing");
7440 /* Table of Thumb instructions which exist in both 16- and 32-bit
7441 encodings (the latter only in post-V6T2 cores). The index is the
7442 value used in the insns table below. When there is more than one
7443 possible 16-bit encoding for the instruction, this table always
7445 Also contains several pseudo-instructions used during relaxation. */
7446 #define T16_32_TAB \
7447 X(adc, 4140, eb400000), \
7448 X(adcs, 4140, eb500000), \
7449 X(add, 1c00, eb000000), \
7450 X(adds, 1c00, eb100000), \
7451 X(addi, 0000, f1000000), \
7452 X(addis, 0000, f1100000), \
7453 X(add_pc,000f, f20f0000), \
7454 X(add_sp,000d, f10d0000), \
7455 X(adr, 000f, f20f0000), \
7456 X(and, 4000, ea000000), \
7457 X(ands, 4000, ea100000), \
7458 X(asr, 1000, fa40f000), \
7459 X(asrs, 1000, fa50f000), \
7460 X(b, e000, f000b000), \
7461 X(bcond, d000, f0008000), \
7462 X(bic, 4380, ea200000), \
7463 X(bics, 4380, ea300000), \
7464 X(cmn, 42c0, eb100f00), \
7465 X(cmp, 2800, ebb00f00), \
7466 X(cpsie, b660, f3af8400), \
7467 X(cpsid, b670, f3af8600), \
7468 X(cpy, 4600, ea4f0000), \
7469 X(dec_sp,80dd, f1bd0d00), \
7470 X(eor, 4040, ea800000), \
7471 X(eors, 4040, ea900000), \
7472 X(inc_sp,00dd, f10d0d00), \
7473 X(ldmia, c800, e8900000), \
7474 X(ldr, 6800, f8500000), \
7475 X(ldrb, 7800, f8100000), \
7476 X(ldrh, 8800, f8300000), \
7477 X(ldrsb, 5600, f9100000), \
7478 X(ldrsh, 5e00, f9300000), \
7479 X(ldr_pc,4800, f85f0000), \
7480 X(ldr_pc2,4800, f85f0000), \
7481 X(ldr_sp,9800, f85d0000), \
7482 X(lsl, 0000, fa00f000), \
7483 X(lsls, 0000, fa10f000), \
7484 X(lsr, 0800, fa20f000), \
7485 X(lsrs, 0800, fa30f000), \
7486 X(mov, 2000, ea4f0000), \
7487 X(movs, 2000, ea5f0000), \
7488 X(mul, 4340, fb00f000), \
7489 X(muls, 4340, ffffffff), /* no 32b muls */ \
7490 X(mvn, 43c0, ea6f0000), \
7491 X(mvns, 43c0, ea7f0000), \
7492 X(neg, 4240, f1c00000), /* rsb #0 */ \
7493 X(negs, 4240, f1d00000), /* rsbs #0 */ \
7494 X(orr, 4300, ea400000), \
7495 X(orrs, 4300, ea500000), \
7496 X(pop, bc00, e8bd0000), /* ldmia sp!,... */ \
7497 X(push, b400, e92d0000), /* stmdb sp!,... */ \
7498 X(rev, ba00, fa90f080), \
7499 X(rev16, ba40, fa90f090), \
7500 X(revsh, bac0, fa90f0b0), \
7501 X(ror, 41c0, fa60f000), \
7502 X(rors, 41c0, fa70f000), \
7503 X(sbc, 4180, eb600000), \
7504 X(sbcs, 4180, eb700000), \
7505 X(stmia, c000, e8800000), \
7506 X(str, 6000, f8400000), \
7507 X(strb, 7000, f8000000), \
7508 X(strh, 8000, f8200000), \
7509 X(str_sp,9000, f84d0000), \
7510 X(sub, 1e00, eba00000), \
7511 X(subs, 1e00, ebb00000), \
7512 X(subi, 8000, f1a00000), \
7513 X(subis, 8000, f1b00000), \
7514 X(sxtb, b240, fa4ff080), \
7515 X(sxth, b200, fa0ff080), \
7516 X(tst, 4200, ea100f00), \
7517 X(uxtb, b2c0, fa5ff080), \
7518 X(uxth, b280, fa1ff080), \
7519 X(nop, bf00, f3af8000), \
7520 X(yield, bf10, f3af8001), \
7521 X(wfe, bf20, f3af8002), \
7522 X(wfi, bf30, f3af8003), \
7523 X(sev, bf40, f3af9004), /* typo, 8004? */
7525 /* To catch errors in encoding functions, the codes are all offset by
7526 0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
7527 as 16-bit instructions. */
7528 #define X(a,b,c) T_MNEM_##a
7529 enum t16_32_codes
{ T16_32_OFFSET
= 0xF7FF, T16_32_TAB
};
7532 #define X(a,b,c) 0x##b
7533 static const unsigned short thumb_op16
[] = { T16_32_TAB
};
7534 #define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
7537 #define X(a,b,c) 0x##c
7538 static const unsigned int thumb_op32
[] = { T16_32_TAB
};
7539 #define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
7540 #define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
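/* For instance, THUMB_OP16 (T_MNEM_add) yields 0x1c00 (the 16-bit
   add-immediate form from the table above), THUMB_OP32 (T_MNEM_add)
   yields 0xeb000000, and THUMB_SETS_FLAGS reports whether bit 20 (the
   S bit) is set in the 32-bit form.  */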
7544 /* Thumb instruction encoders, in alphabetical order. */
7548 do_t_add_sub_w (void)
7552 Rd
= inst
.operands
[0].reg
;
7553 Rn
= inst
.operands
[1].reg
;
7555 constraint (Rd
== 15, _("PC not allowed as destination"));
7556 inst
.instruction
|= (Rn
<< 16) | (Rd
<< 8);
7557 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMM12
;
/* Parse an add or subtract instruction.  We get here with inst.instruction
   equalling any of THUMB_OPCODE_add, adds, sub, or subs.  */

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
        ? inst.operands[1].reg    /* Rd, Rs, foo */
        : inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  if (unified_syntax)
    {
      flags = (inst.instruction == T_MNEM_adds
               || inst.instruction == T_MNEM_subs);
      if (flags)
        narrow = (current_it_mask == 0);
      else
        narrow = (current_it_mask != 0);
      if (!inst.operands[2].isreg)
        {
          opcode = 0;
          if (inst.size_req != 4)
            {
              add = (inst.instruction == T_MNEM_add
                     || inst.instruction == T_MNEM_adds);
              /* Attempt to use a narrow opcode, with relaxation if
                 appropriate.  */
              if (Rd == REG_SP && Rs == REG_SP && !flags)
                opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
              else if (Rd <= 7 && Rs == REG_SP && add && !flags)
                opcode = T_MNEM_add_sp;
              else if (Rd <= 7 && Rs == REG_PC && add && !flags)
                opcode = T_MNEM_add_pc;
              else if (Rd <= 7 && Rs <= 7 && narrow)
                {
                  if (flags)
                    opcode = add ? T_MNEM_addis : T_MNEM_subis;
                  else
                    opcode = add ? T_MNEM_addi : T_MNEM_subi;
                }
              if (opcode)
                {
                  inst.instruction = THUMB_OP16(opcode);
                  inst.instruction |= (Rd << 4) | Rs;
                  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
                  if (inst.size_req != 2)
                    inst.relax = opcode;
                }
              else
                constraint (inst.size_req == 2, BAD_HIREG);
            }
          if (inst.size_req == 4
              || (inst.size_req != 2 && !opcode))
            {
              /* ??? Convert large immediates to addw/subw.  */
              inst.instruction = THUMB_OP32 (inst.instruction);
              inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
              inst.instruction |= inst.operands[0].reg << 8;
              inst.instruction |= inst.operands[1].reg << 16;
              inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
            }
        }
      else
        {
          Rn = inst.operands[2].reg;
          /* See if we can do this with a 16-bit instruction.  */
          if (!inst.operands[2].shifted && inst.size_req != 4)
            {
              if (Rd > 7 || Rs > 7 || Rn > 7)
                narrow = FALSE;

              if (narrow)
                {
                  inst.instruction = ((inst.instruction == T_MNEM_adds
                                       || inst.instruction == T_MNEM_add)
                                      ? T_OPCODE_ADD_R3
                                      : T_OPCODE_SUB_R3);
                  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
                  return;
                }

              if (inst.instruction == T_MNEM_add)
                {
                  if (Rd == Rs)
                    {
                      inst.instruction = T_OPCODE_ADD_HI;
                      inst.instruction |= (Rd & 8) << 4;
                      inst.instruction |= (Rd & 7);
                      inst.instruction |= Rn << 3;
                      return;
                    }
                  /* ... because addition is commutative! */
                  else if (Rd == Rn)
                    {
                      inst.instruction = T_OPCODE_ADD_HI;
                      inst.instruction |= (Rd & 8) << 4;
                      inst.instruction |= (Rd & 7);
                      inst.instruction |= Rs << 3;
                      return;
                    }
                }
            }
          /* If we get here, it can't be done in 16 bits.  */
          constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
                      _("shift must be constant"));
          inst.instruction = THUMB_OP32 (inst.instruction);
          inst.instruction |= Rd << 8;
          inst.instruction |= Rs << 16;
          encode_thumb32_shifted_operand (2);
        }
    }
  else
    {
      constraint (inst.instruction == T_MNEM_adds
                  || inst.instruction == T_MNEM_subs,
                  BAD_THUMB32);

      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
        {
          constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
                      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
                      BAD_HIREG);

          inst.instruction = (inst.instruction == T_MNEM_add
                              ? 0x0000 : 0x8000);
          inst.instruction |= (Rd << 4) | Rs;
          inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
          return;
        }

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
        {
          /* Can't do this for SUB.  */
          constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
          inst.instruction = T_OPCODE_ADD_HI;
          inst.instruction |= (Rd & 8) << 4;
          inst.instruction |= (Rd & 7);
          if (Rs == Rd)
            inst.instruction |= Rn << 3;
          else if (Rn == Rd)
            inst.instruction |= Rs << 3;
          else
            constraint (1, _("dest must overlap one source register"));
        }
      else
        {
          inst.instruction = (inst.instruction == T_MNEM_add
                              ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
          inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
        }
    }
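
/* The (x & 0xe1ffffff) | 0x10000000 manipulation used above, and in several
   encoders below, converts the register-operand Thumb-2 opcode taken from
   the table (0xEAxxxxxx/0xEBxxxxxx) into the matching immediate-operand form
   (0xF0xxxxxx/0xF1xxxxxx) while preserving the operation and S bits.  */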
  if (unified_syntax && inst.size_req == 0 && inst.operands[0].reg <= 7)
    {
      /* Defer to section relaxation.  */
      inst.relax = inst.instruction;
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 4;
    }
  else if (unified_syntax && inst.size_req != 2)
    {
      /* Generate a 32-bit opcode.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
      inst.reloc.pc_rel = 1;
    }
  else
    {
      /* Generate a 16-bit opcode.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
      inst.reloc.exp.X_add_number -= 4; /* PC relative adjust.  */
      inst.reloc.pc_rel = 1;
      inst.instruction |= inst.operands[0].reg << 4;
    }
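
/* Storing an opcode in inst.relax, as done here and in several encoders
   above and below, appears to defer the 16- versus 32-bit choice to the
   generic relaxation pass, which can still pick the narrow form when the
   final operands and offsets fit.  */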
/* Arithmetic instructions for which there is just one 16-bit
   instruction encoding, and it allows only two low registers.
   For maximal compatibility with ARM syntax, we allow three register
   operands even when Thumb-32 instructions are not available, as long
   as the first two are identical.  For instance, both "sbc r0,r1" and
   "sbc r0,r0,r1" are allowed.  */

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
        ? inst.operands[1].reg    /* Rd, Rs, foo */
        : inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
        {
          /* For an immediate, we always generate a 32-bit opcode;
             section relaxation will shrink it later if possible.  */
          inst.instruction = THUMB_OP32 (inst.instruction);
          inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
          inst.instruction |= Rd << 8;
          inst.instruction |= Rs << 16;
          inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
        }
      else
        {
          /* See if we can do this with a 16-bit instruction.  */
          if (THUMB_SETS_FLAGS (inst.instruction))
            narrow = current_it_mask == 0;
          else
            narrow = current_it_mask != 0;

          if (Rd > 7 || Rn > 7 || Rs > 7)
            narrow = FALSE;
          if (inst.operands[2].shifted)
            narrow = FALSE;
          if (inst.size_req == 4)
            narrow = FALSE;

          if (narrow && Rd == Rs)
            {
              inst.instruction = THUMB_OP16 (inst.instruction);
              inst.instruction |= Rd;
              inst.instruction |= Rn << 3;
              return;
            }

          /* If we get here, it can't be done in 16 bits.  */
          constraint (inst.operands[2].shifted
                      && inst.operands[2].immisreg,
                      _("shift must be constant"));
          inst.instruction = THUMB_OP32 (inst.instruction);
          inst.instruction |= Rd << 8;
          inst.instruction |= Rs << 16;
          encode_thumb32_shifted_operand (2);
        }
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
         flags.  However, the only supported mnemonic in this mode
         says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
                  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
      constraint (Rd != Rs,
                  _("dest and source1 must be the same register"));

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rn << 3;
    }
/* Similarly, but for instructions where the arithmetic operation is
   commutative, so we can allow either of them to be different from
   the destination operand in a 16-bit instruction.  For instance, all
   three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
   allowed.  */

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
        ? inst.operands[1].reg    /* Rd, Rs, foo */
        : inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
        {
          /* For an immediate, we always generate a 32-bit opcode;
             section relaxation will shrink it later if possible.  */
          inst.instruction = THUMB_OP32 (inst.instruction);
          inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
          inst.instruction |= Rd << 8;
          inst.instruction |= Rs << 16;
          inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
        }
      else
        {
          /* See if we can do this with a 16-bit instruction.  */
          if (THUMB_SETS_FLAGS (inst.instruction))
            narrow = current_it_mask == 0;
          else
            narrow = current_it_mask != 0;

          if (Rd > 7 || Rn > 7 || Rs > 7)
            narrow = FALSE;
          if (inst.operands[2].shifted)
            narrow = FALSE;
          if (inst.size_req == 4)
            narrow = FALSE;

          if (narrow)
            {
              if (Rd == Rs)
                {
                  inst.instruction = THUMB_OP16 (inst.instruction);
                  inst.instruction |= Rd;
                  inst.instruction |= Rn << 3;
                  return;
                }
              if (Rd == Rn)
                {
                  inst.instruction = THUMB_OP16 (inst.instruction);
                  inst.instruction |= Rd;
                  inst.instruction |= Rs << 3;
                  return;
                }
            }

          /* If we get here, it can't be done in 16 bits.  */
          constraint (inst.operands[2].shifted
                      && inst.operands[2].immisreg,
                      _("shift must be constant"));
          inst.instruction = THUMB_OP32 (inst.instruction);
          inst.instruction |= Rd << 8;
          inst.instruction |= Rs << 16;
          encode_thumb32_shifted_operand (2);
        }
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
         flags.  However, the only supported mnemonic in this mode
         says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
                  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rs)
        inst.instruction |= Rn << 3;
      else if (Rd == Rn)
        inst.instruction |= Rs << 3;
      else
        constraint (1, _("dest must overlap one source register"));
    }
  if (inst.operands[0].present)
    {
      constraint ((inst.instruction & 0xf0) != 0x40
                  && inst.operands[0].imm != 0xf,
                  "bad barrier type");
      inst.instruction |= inst.operands[0].imm;
    }
  else
    inst.instruction |= 0xf;
  unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
  inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
  inst.instruction |= msb - 1;
  /* #0 in second position is alternative syntax for bfc, which is
     the same instruction but with REG_PC in the Rm field.  */
  if (!inst.operands[1].isreg)
    inst.operands[1].reg = REG_PC;

  msb = inst.operands[2].imm + inst.operands[3].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
  inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
  inst.instruction |= msb - 1;
  constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
              _("bit-field extends past end of register"));
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
  inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
  inst.instruction |= inst.operands[3].imm - 1;
/* ARM V5 Thumb BLX (argument parse)
	BLX <target_addr>	which is BLX(1)
	BLX <Rm>		which is BLX(2)
   Unfortunately, there are two different opcodes for this mnemonic.
   So, the insns[].value is not used, and the code here zaps values
	into inst.instruction.

   ??? How to take advantage of the additional two bits of displacement
   available in Thumb32 mode?  Need new relocation?  */

  constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
  if (inst.operands[0].isreg)
    /* We have a register, so this is BLX(2).  */
    inst.instruction |= inst.operands[0].reg << 3;
  else
    {
      /* No register.  This must be BLX(1).  */
      inst.instruction = 0xf000e800;
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
        inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
      else
        inst.reloc.type = BFD_RELOC_THUMB_PCREL_BLX;
      inst.reloc.pc_rel = 1;
    }
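
/* For EABI v4 and later the BL/BLX distinction can be fixed up by the
   linker, hence the plain BFD_RELOC_THUMB_PCREL_BRANCH23 above; older
   EABI versions need the explicit BLX relocation.  */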
  if (current_it_mask)
    {
      /* Conditional branches inside IT blocks are encoded as unconditional
         branches.  */
      cond = COND_ALWAYS;
      /* A branch must be the last instruction in an IT block.  */
      constraint (current_it_mask != 0x10, BAD_BRANCH);
    }
  else
    cond = inst.cond;

  if (cond != COND_ALWAYS)
    opcode = T_MNEM_bcond;
  else
    opcode = inst.instruction;

  if (unified_syntax && inst.size_req == 4)
    {
      inst.instruction = THUMB_OP32(opcode);
      if (cond == COND_ALWAYS)
        inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH25;
      else
        {
          assert (cond != 0xF);
          inst.instruction |= cond << 22;
          inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH20;
        }
    }
  else
    {
      inst.instruction = THUMB_OP16(opcode);
      if (cond == COND_ALWAYS)
        inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      else
        {
          inst.instruction |= cond << 8;
          inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH9;
        }
      /* Allow section relaxation.  */
      if (unified_syntax && inst.size_req != 2)
        inst.relax = opcode;
    }

  inst.reloc.pc_rel = 1;
  constraint (inst.cond != COND_ALWAYS,
              _("instruction is always unconditional"));
  if (inst.operands[0].present)
    {
      constraint (inst.operands[0].imm > 255,
                  _("immediate value out of range"));
      inst.instruction |= inst.operands[0].imm;
    }
static void
do_t_branch23 (void)
{
  constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
  inst.reloc.type   = BFD_RELOC_THUMB_PCREL_BRANCH23;
  inst.reloc.pc_rel = 1;

  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (   inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_symbol =
      find_real_start (inst.reloc.exp.X_add_symbol);
}
  constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
  inst.instruction |= inst.operands[0].reg << 3;
  /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC.  The reloc
     should cause the alignment to be checked once it is known.  This is
     because BX PC only works if the instruction is word aligned.  */
  constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bxj is not really useful"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  constraint (current_it_mask, BAD_NOT_IT);
  inst.instruction |= inst.operands[0].imm;
  constraint (current_it_mask, BAD_NOT_IT);
  if (unified_syntax
      && (inst.operands[1].present || inst.size_req == 4)
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
    {
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
      inst.instruction = 0xf3af8000;
      inst.instruction |= imod << 9;
      inst.instruction |= inst.operands[0].imm << 5;
      if (inst.operands[1].present)
        inst.instruction |= 0x100 | inst.operands[1].imm;
    }
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
                  && (inst.operands[0].imm & 4),
                  _("selected processor does not support 'A' form "
                    "of this instruction"));
      constraint (inst.operands[1].present || inst.size_req == 4,
                  _("Thumb does not support the 2-argument "
                    "form of this instruction"));
      inst.instruction |= inst.operands[0].imm;
    }
/* THUMB CPY instruction (argument parse).  */

  if (inst.size_req == 4)
    {
      inst.instruction = THUMB_OP32 (T_MNEM_mov);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].reg;
    }
  else
    {
      inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
      inst.instruction |= (inst.operands[0].reg & 0x7);
      inst.instruction |= inst.operands[1].reg << 3;
    }
  constraint (current_it_mask, BAD_NOT_IT);
  constraint (inst.operands[0].reg > 7, BAD_HIREG);
  inst.instruction |= inst.operands[0].reg;
  inst.reloc.pc_rel = 1;
  inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
  inst.instruction |= inst.operands[0].imm;
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  if (unified_syntax && inst.size_req == 4)
    inst.instruction = THUMB_OP32 (inst.instruction);
  else
    inst.instruction = THUMB_OP16 (inst.instruction);
  unsigned int cond = inst.operands[0].imm;

  constraint (current_it_mask, BAD_NOT_IT);
  current_it_mask = (inst.instruction & 0xf) | 0x10;

  /* If the condition is a negative condition, invert the mask.  */
  if ((cond & 0x1) == 0x0)
    {
      unsigned int mask = inst.instruction & 0x000f;

      if ((mask & 0x7) == 0)
        /* no conversion needed */;
      else if ((mask & 0x3) == 0)
        mask ^= 0x8;
      else if ((mask & 0x1) == 0)
        mask ^= 0xC;
      else
        mask ^= 0xE;

      inst.instruction &= 0xfff0;
      inst.instruction |= mask;
    }

  inst.instruction |= cond << 4;
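
/* The four mask bits of an IT instruction give the then/else pattern
   relative to bit 0 of the condition, and the lowest set bit terminates the
   block.  The parser builds the mask assuming an odd ("positive") condition,
   so for an even condition code the pattern bits - but not the terminating
   bit - have to be inverted, which is what the xors above appear to do.  */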
  /* This really doesn't seem worth it.  */
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
              _("expression too complex"));
  constraint (inst.operands[1].writeback,
              _("Thumb load/store multiple does not support {reglist}^"));

  if (unified_syntax)
    {
      /* See if we can use a 16-bit instruction.  */
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
          && inst.size_req != 4
          && inst.operands[0].reg <= 7
          && !(inst.operands[1].imm & ~0xff)
          && (inst.instruction == T_MNEM_stmia
              ? inst.operands[0].writeback
              : (inst.operands[0].writeback
                 == !(inst.operands[1].imm & (1 << inst.operands[0].reg)))))
        {
          if (inst.instruction == T_MNEM_stmia
              && (inst.operands[1].imm & (1 << inst.operands[0].reg))
              && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
            as_warn (_("value stored for r%d is UNPREDICTABLE"),
                     inst.operands[0].reg);

          inst.instruction = THUMB_OP16 (inst.instruction);
          inst.instruction |= inst.operands[0].reg << 8;
          inst.instruction |= inst.operands[1].imm;
        }
      else
        {
          if (inst.operands[1].imm & (1 << 13))
            as_warn (_("SP should not be in register list"));
          if (inst.instruction == T_MNEM_stmia)
            {
              if (inst.operands[1].imm & (1 << 15))
                as_warn (_("PC should not be in register list"));
              if (inst.operands[1].imm & (1 << inst.operands[0].reg))
                as_warn (_("value stored for r%d is UNPREDICTABLE"),
                         inst.operands[0].reg);
            }
          else
            {
              if (inst.operands[1].imm & (1 << 14)
                  && inst.operands[1].imm & (1 << 15))
                as_warn (_("LR and PC should not both be in register list"));
              if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
                  && inst.operands[0].writeback)
                as_warn (_("base register should not be in register list "
                           "when written back"));
            }
          if (inst.instruction < 0xffff)
            inst.instruction = THUMB_OP32 (inst.instruction);
          inst.instruction |= inst.operands[0].reg << 16;
          inst.instruction |= inst.operands[1].imm;
          if (inst.operands[0].writeback)
            inst.instruction |= WRITE_BACK;
        }
    }
  else
    {
      constraint (inst.operands[0].reg > 7
                  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
      if (inst.instruction == T_MNEM_stmia)
        {
          if (!inst.operands[0].writeback)
            as_warn (_("this instruction will write back the base register"));
          if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
              && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
            as_warn (_("value stored for r%d is UNPREDICTABLE"),
                     inst.operands[0].reg);
        }
      else
        {
          if (!inst.operands[0].writeback
              && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
            as_warn (_("this instruction will write back the base register"));
          else if (inst.operands[0].writeback
                   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
            as_warn (_("this instruction will not write back the base register"));
        }

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].imm;
    }
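
/* The 16-bit LDMIA/STMIA forms always write back the base register, so they
   are only usable when that matches what the programmer asked for: STMIA
   requires '!', and LDMIA requires '!' exactly when the base register is not
   in the register list (loading the base overrides the writeback).  That is
   the condition tested at the top of the unified-syntax path above.  */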
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
              || inst.operands[1].postind || inst.operands[1].writeback
              || inst.operands[1].immisreg || inst.operands[1].shifted
              || inst.operands[1].negative,
              BAD_ADDR_MODE);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
  if (!inst.operands[1].present)
    {
      constraint (inst.operands[0].reg == REG_LR,
                  _("r14 not allowed as first register "
                    "when second register is omitted"));
      inst.operands[1].reg = inst.operands[0].reg + 1;
    }
  constraint (inst.operands[0].reg == inst.operands[1].reg,
              BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 8;
  inst.instruction |= inst.operands[2].reg << 16;
  unsigned long opcode;

  opcode = inst.instruction;
  if (unified_syntax)
    {
      if (!inst.operands[1].isreg)
        {
          if (opcode <= 0xffff)
            inst.instruction = THUMB_OP32 (opcode);
          if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
            return;
        }
      if (inst.operands[1].isreg
          && !inst.operands[1].writeback
          && !inst.operands[1].shifted && !inst.operands[1].postind
          && !inst.operands[1].negative && inst.operands[0].reg <= 7
          && opcode <= 0xffff
          && inst.size_req != 4)
        {
          /* Insn may have a 16-bit form.  */
          Rn = inst.operands[1].reg;
          if (inst.operands[1].immisreg)
            {
              inst.instruction = THUMB_OP16 (opcode);
              /* [Rn, Ri] */
              if (Rn <= 7 && inst.operands[1].imm <= 7)
                goto op16;
            }
          else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
                    && opcode != T_MNEM_ldrsb)
                   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
                   || (Rn == REG_SP && opcode == T_MNEM_str))
            {
              /* [Rn, #const] */
              if (Rn > 7)
                {
                  if (Rn == REG_PC)
                    {
                      if (inst.reloc.pc_rel)
                        opcode = T_MNEM_ldr_pc2;
                      else
                        opcode = T_MNEM_ldr_pc;
                    }
                  else
                    {
                      if (opcode == T_MNEM_ldr)
                        opcode = T_MNEM_ldr_sp;
                      else
                        opcode = T_MNEM_str_sp;
                    }
                  inst.instruction = inst.operands[0].reg << 8;
                }
              else
                {
                  inst.instruction = inst.operands[0].reg;
                  inst.instruction |= inst.operands[1].reg << 3;
                }
              inst.instruction |= THUMB_OP16 (opcode);
              if (inst.size_req == 2)
                inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
              else
                inst.relax = opcode;
              return;
            }
        }
      /* Definitely a 32-bit variant.  */
      inst.instruction = THUMB_OP32 (opcode);
      inst.instruction |= inst.operands[0].reg << 12;
      encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
      return;
    }

  constraint (inst.operands[0].reg > 7, BAD_HIREG);

  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
    {
      /* Only [Rn,Rm] is acceptable.  */
      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
                  || inst.operands[1].postind || inst.operands[1].shifted
                  || inst.operands[1].negative,
                  _("Thumb does not support this addressing mode"));
      inst.instruction = THUMB_OP16 (inst.instruction);
      goto op16;
    }

  inst.instruction = THUMB_OP16 (inst.instruction);
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
      return;

  constraint (!inst.operands[1].preind
              || inst.operands[1].shifted
              || inst.operands[1].writeback,
              _("Thumb does not support this addressing mode"));
  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
    {
      constraint (inst.instruction & 0x0600,
                  _("byte or halfword not valid for base register"));
      constraint (inst.operands[1].reg == REG_PC
                  && !(inst.instruction & THUMB_LOAD_BIT),
                  _("r15 based store not allowed"));
      constraint (inst.operands[1].immisreg,
                  _("invalid base register for register offset"));

      if (inst.operands[1].reg == REG_PC)
        inst.instruction = T_OPCODE_LDR_PC;
      else if (inst.instruction & THUMB_LOAD_BIT)
        inst.instruction = T_OPCODE_LDR_SP;
      else
        inst.instruction = T_OPCODE_STR_SP;

      inst.instruction |= inst.operands[0].reg << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  constraint (inst.operands[1].reg > 7, BAD_HIREG);
  if (!inst.operands[1].immisreg)
    {
      /* Immediate offset.  */
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  /* Register offset.  */
  constraint (inst.operands[1].imm > 7, BAD_HIREG);
  constraint (inst.operands[1].negative,
              _("Thumb does not support this addressing mode"));

 op16:
  switch (inst.instruction)
    {
    case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
    case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
    case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
    case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
    case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
    case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
    case 0x5600 /* ldrsb */:
    case 0x5e00 /* ldrsh */: break;
    }

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 3;
  inst.instruction |= inst.operands[1].imm << 6;
  if (!inst.operands[1].present)
    {
      inst.operands[1].reg = inst.operands[0].reg + 1;
      constraint (inst.operands[0].reg == REG_LR,
                  _("r14 not allowed here"));
    }
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 8;
  encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
  inst.instruction |= inst.operands[0].reg << 12;
  encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 12;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 8;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.instruction |= inst.operands[3].reg;
  if (unified_syntax)
    {
      int r0off = (inst.instruction == T_MNEM_mov
                   || inst.instruction == T_MNEM_movs) ? 8 : 16;
      unsigned long opcode;
      bfd_boolean low_regs;

      low_regs = (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7);
      opcode = inst.instruction;
      if (current_it_mask)
        narrow = opcode != T_MNEM_movs;
      else
        narrow = opcode != T_MNEM_movs || low_regs;
      if (inst.size_req == 4
          || inst.operands[1].shifted)
        narrow = FALSE;

      if (!inst.operands[1].isreg)
        {
          /* Immediate operand.  */
          if (current_it_mask == 0 && opcode == T_MNEM_mov)
            narrow = 0;
          if (low_regs && narrow)
            {
              inst.instruction = THUMB_OP16 (opcode);
              inst.instruction |= inst.operands[0].reg << 8;
              if (inst.size_req == 2)
                inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
              else
                inst.relax = opcode;
            }
          else
            {
              inst.instruction = THUMB_OP32 (inst.instruction);
              inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
              inst.instruction |= inst.operands[0].reg << r0off;
              inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
            }
        }
      else if (!narrow)
        {
          inst.instruction = THUMB_OP32 (inst.instruction);
          inst.instruction |= inst.operands[0].reg << r0off;
          encode_thumb32_shifted_operand (1);
        }
      else
        switch (inst.instruction)
          {
          case T_MNEM_mov:
            inst.instruction = T_OPCODE_MOV_HR;
            inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
            inst.instruction |= (inst.operands[0].reg & 0x7);
            inst.instruction |= inst.operands[1].reg << 3;
            break;

          case T_MNEM_movs:
            /* We know we have low registers at this point.
               Generate ADD Rd, Rs, #0.  */
            inst.instruction = T_OPCODE_ADD_I3;
            inst.instruction |= inst.operands[0].reg;
            inst.instruction |= inst.operands[1].reg << 3;
            break;

          case T_MNEM_cmp:
            if (low_regs)
              {
                inst.instruction = T_OPCODE_CMP_LR;
                inst.instruction |= inst.operands[0].reg;
                inst.instruction |= inst.operands[1].reg << 3;
              }
            else
              {
                inst.instruction = T_OPCODE_CMP_HR;
                inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
                inst.instruction |= (inst.operands[0].reg & 0x7);
                inst.instruction |= inst.operands[1].reg << 3;
              }
            break;
          }
    }
  else
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      if (inst.operands[1].isreg)
        {
          if (inst.operands[0].reg < 8 && inst.operands[1].reg < 8)
            {
              /* A move of two lowregs is encoded as ADD Rd, Rs, #0
                 since a MOV instruction produces unpredictable results.  */
              if (inst.instruction == T_OPCODE_MOV_I8)
                inst.instruction = T_OPCODE_ADD_I3;
              else
                inst.instruction = T_OPCODE_CMP_LR;

              inst.instruction |= inst.operands[0].reg;
              inst.instruction |= inst.operands[1].reg << 3;
            }
          else
            {
              if (inst.instruction == T_OPCODE_MOV_I8)
                inst.instruction = T_OPCODE_MOV_HR;
              else
                inst.instruction = T_OPCODE_CMP_HR;
              do_t_cpy ();
            }
        }
      else
        {
          constraint (inst.operands[0].reg > 7,
                      _("only lo regs allowed with immediate"));
          inst.instruction |= inst.operands[0].reg << 8;
          inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
        }
    }
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= (inst.operands[1].imm & 0xf000) << 4;
  inst.instruction |= (inst.operands[1].imm & 0x0800) << 15;
  inst.instruction |= (inst.operands[1].imm & 0x0700) << 4;
  inst.instruction |= (inst.operands[1].imm & 0x00ff);
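
/* MOVW/MOVT scatter their 16-bit immediate across the Thumb-2 encoding as
   imm4:i:imm3:imm8, which is what the shifts above put into place.  */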
  if (unified_syntax)
    {
      int r0off = (inst.instruction == T_MNEM_mvn
                   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bfd_boolean narrow;

      if (inst.size_req == 4
          || inst.instruction > 0xffff
          || inst.operands[1].shifted
          || inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
        narrow = FALSE;
      else if (inst.instruction == T_MNEM_cmn)
        narrow = TRUE;
      else if (THUMB_SETS_FLAGS (inst.instruction))
        narrow = (current_it_mask == 0);
      else
        narrow = (current_it_mask != 0);

      if (!inst.operands[1].isreg)
        {
          /* For an immediate, we always generate a 32-bit opcode;
             section relaxation will shrink it later if possible.  */
          if (inst.instruction < 0xffff)
            inst.instruction = THUMB_OP32 (inst.instruction);
          inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
          inst.instruction |= inst.operands[0].reg << r0off;
          inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
        }
      else
        {
          /* See if we can do this with a 16-bit instruction.  */
          if (narrow)
            {
              inst.instruction = THUMB_OP16 (inst.instruction);
              inst.instruction |= inst.operands[0].reg;
              inst.instruction |= inst.operands[1].reg << 3;
            }
          else
            {
              constraint (inst.operands[1].shifted
                          && inst.operands[1].immisreg,
                          _("shift must be constant"));
              if (inst.instruction < 0xffff)
                inst.instruction = THUMB_OP32 (inst.instruction);
              inst.instruction |= inst.operands[0].reg << r0off;
              encode_thumb32_shifted_operand (1);
            }
        }
    }
  else
    {
      constraint (inst.instruction > 0xffff
                  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
                  _("unshifted register required"));
      constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
                  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
    }
  flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
  if (flags == 0)
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m),
                  _("selected processor does not support "
                    "requested special purpose register"));
    }
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
                  _("selected processor does not support "
                    "requested special purpose register %x"));
      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
      constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
                  _("'CPSR' or 'SPSR' expected"));
    }

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= inst.operands[1].imm & 0xff;
  constraint (!inst.operands[1].isreg,
              _("Thumb encoding does not support an immediate here"));
  flags = inst.operands[0].imm;
  if (flags & ~0xff)
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
                  _("selected processor does not support "
                    "requested special purpose register"));
    }
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m),
                  _("selected processor does not support "
                    "requested special purpose register"));
    }
  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & ~SPSR_BIT) >> 8;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= inst.operands[1].reg << 16;
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;

  /* There is no 32-bit MULS and no 16-bit MUL. */
  if (unified_syntax && inst.instruction == T_MNEM_mul)
    {
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[2].reg << 0;
    }
  else
    {
      constraint (!unified_syntax
                  && inst.instruction == T_MNEM_muls, BAD_THUMB32);
      constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
                  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg;

      if (inst.operands[0].reg == inst.operands[1].reg)
        inst.instruction |= inst.operands[2].reg << 3;
      else if (inst.operands[0].reg == inst.operands[2].reg)
        inst.instruction |= inst.operands[1].reg << 3;
      else
        constraint (1, _("dest must overlap one source register"));
    }
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 8;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.instruction |= inst.operands[3].reg;

  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));
  if (unified_syntax)
    {
      if (inst.size_req == 4 || inst.operands[0].imm > 15)
        {
          inst.instruction = THUMB_OP32 (inst.instruction);
          inst.instruction |= inst.operands[0].imm;
        }
      else
        {
          inst.instruction = THUMB_OP16 (inst.instruction);
          inst.instruction |= inst.operands[0].imm << 4;
        }
    }
  else
    {
      constraint (inst.operands[0].present,
                  _("Thumb does not support NOP with hints"));
      inst.instruction = 0x46c0;
    }
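
/* 0x46c0 is "mov r8, r8", the traditional encoding of a Thumb-1 NOP.  */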
  if (unified_syntax)
    {
      bfd_boolean narrow;

      if (THUMB_SETS_FLAGS (inst.instruction))
        narrow = (current_it_mask == 0);
      else
        narrow = (current_it_mask != 0);
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
        narrow = FALSE;
      if (inst.size_req == 4)
        narrow = FALSE;

      if (!narrow)
        {
          inst.instruction = THUMB_OP32 (inst.instruction);
          inst.instruction |= inst.operands[0].reg << 8;
          inst.instruction |= inst.operands[1].reg << 16;
        }
      else
        {
          inst.instruction = THUMB_OP16 (inst.instruction);
          inst.instruction |= inst.operands[0].reg;
          inst.instruction |= inst.operands[1].reg << 3;
        }
    }
  else
    {
      constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
                  BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
    }
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  if (inst.operands[3].present)
    {
      unsigned int val = inst.reloc.exp.X_add_number;
      constraint (inst.reloc.exp.X_op != O_constant,
                  _("expression too complex"));
      inst.instruction |= (val & 0x1c) << 10;
      inst.instruction |= (val & 0x03) << 6;
    }

  if (!inst.operands[3].present)
    inst.instruction &= ~0x00000020;
  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
static void
do_t_push_pop (void)
{
  unsigned mask;

  constraint (inst.operands[0].writeback,
              _("push/pop do not support {reglist}^"));
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
              _("expression too complex"));

  mask = inst.operands[0].imm;
  if ((mask & ~0xff) == 0)
    inst.instruction = THUMB_OP16 (inst.instruction);
  else if ((inst.instruction == T_MNEM_push
            && (mask & ~0xff) == 1 << REG_LR)
           || (inst.instruction == T_MNEM_pop
               && (mask & ~0xff) == 1 << REG_PC))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= THUMB_PP_PC_LR;
      mask &= 0xff;
    }
  else if (unified_syntax)
    {
      if (mask & (1 << 13))
        inst.error = _("SP not allowed in register list");
      if (inst.instruction == T_MNEM_push)
        {
          if (mask & (1 << 15))
            inst.error = _("PC not allowed in register list");
        }
      else
        {
          if (mask & (1 << 14)
              && mask & (1 << 15))
            inst.error = _("LR and PC should not both be in register list");
        }
      if ((mask & (mask - 1)) == 0)
        {
          /* Single register push/pop implemented as str/ldr.  */
          if (inst.instruction == T_MNEM_push)
            inst.instruction = 0xf84d0d04; /* str reg, [sp, #-4]! */
          else
            inst.instruction = 0xf85d0b04; /* ldr reg, [sp], #4 */
          mask = ffs(mask) - 1;
          mask <<= 12;
        }
      else
        inst.instruction = THUMB_OP32 (inst.instruction);
    }
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }

  inst.instruction |= mask;
}
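
/* A single-register push/pop is turned into a pre-decrement STR or
   post-increment LDR on SP; ffs(mask) - 1 recovers the register number from
   the one-bit mask so it can be dropped into the Rt field of that
   encoding.  */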
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
  if (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7
      && inst.size_req != 4)
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
    }
  else if (unified_syntax)
    {
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[1].reg;
    }
  else
    inst.error = BAD_HIREG;
  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
        ? inst.operands[1].reg    /* Rd, Rs, foo */
        : inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  inst.instruction |= Rd << 8;
  inst.instruction |= Rs << 16;
  if (!inst.operands[2].isreg)
    {
      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
    }
  else
    encode_thumb32_shifted_operand (2);
  constraint (current_it_mask, BAD_NOT_IT);
  if (inst.operands[0].imm)
    inst.instruction |= 0x8;
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;

  if (unified_syntax)
    {
      switch (inst.instruction)
        {
        case T_MNEM_asr:
        case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
        case T_MNEM_lsl:
        case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
        case T_MNEM_lsr:
        case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
        case T_MNEM_ror:
        case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
        }

      if (THUMB_SETS_FLAGS (inst.instruction))
        narrow = (current_it_mask == 0);
      else
        narrow = (current_it_mask != 0);
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
        narrow = FALSE;
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
        narrow = FALSE;
      if (inst.operands[2].isreg
          && (inst.operands[1].reg != inst.operands[0].reg
              || inst.operands[2].reg > 7))
        narrow = FALSE;
      if (inst.size_req == 4)
        narrow = FALSE;

      if (!narrow)
        {
          if (inst.operands[2].isreg)
            {
              inst.instruction = THUMB_OP32 (inst.instruction);
              inst.instruction |= inst.operands[0].reg << 8;
              inst.instruction |= inst.operands[1].reg << 16;
              inst.instruction |= inst.operands[2].reg;
            }
          else
            {
              inst.operands[1].shifted = 1;
              inst.operands[1].shift_kind = shift_kind;
              inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
                                             ? T_MNEM_movs : T_MNEM_mov);
              inst.instruction |= inst.operands[0].reg << 8;
              encode_thumb32_shifted_operand (1);
              /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
              inst.reloc.type = BFD_RELOC_UNUSED;
            }
        }
      else
        {
          if (inst.operands[2].isreg)
            {
              switch (shift_kind)
                {
                case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
                case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
                case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
                case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
                }

              inst.instruction |= inst.operands[0].reg;
              inst.instruction |= inst.operands[2].reg << 3;
            }
          else
            {
              switch (shift_kind)
                {
                case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
                case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
                case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
                }
              inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
              inst.instruction |= inst.operands[0].reg;
              inst.instruction |= inst.operands[1].reg << 3;
            }
        }
    }
  else
    {
      constraint (inst.operands[0].reg > 7
                  || inst.operands[1].reg > 7, BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
        {
          constraint (inst.operands[2].reg > 7, BAD_HIREG);
          constraint (inst.operands[0].reg != inst.operands[1].reg,
                      _("source1 and dest must be same register"));

          switch (inst.instruction)
            {
            case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
            case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
            case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
            case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
            }

          inst.instruction |= inst.operands[0].reg;
          inst.instruction |= inst.operands[2].reg << 3;
        }
      else
        {
          switch (inst.instruction)
            {
            case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
            case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
            case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
            case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
            }
          inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
          inst.instruction |= inst.operands[0].reg;
          inst.instruction |= inst.operands[1].reg << 3;
        }
    }
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  unsigned int value = inst.reloc.exp.X_add_number;
  constraint (inst.reloc.exp.X_op != O_constant,
              _("expression too complex"));
  inst.reloc.type = BFD_RELOC_UNUSED;
  inst.instruction |= (value & 0xf000) >> 12;
  inst.instruction |= (value & 0x0ff0);
  inst.instruction |= (value & 0x000f) << 16;
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm - 1;
  inst.instruction |= inst.operands[2].reg << 16;

  if (inst.operands[3].present)
    {
      constraint (inst.reloc.exp.X_op != O_constant,
                  _("expression too complex"));

      if (inst.reloc.exp.X_add_number != 0)
        {
          if (inst.operands[3].shift_kind == SHIFT_ASR)
            inst.instruction |= 0x00200000;  /* sh bit */
          inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
          inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
        }
      inst.reloc.type = BFD_RELOC_UNUSED;
    }
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm - 1;
  inst.instruction |= inst.operands[2].reg << 16;
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
              || inst.operands[2].postind || inst.operands[2].writeback
              || inst.operands[2].immisreg || inst.operands[2].shifted
              || inst.operands[2].negative,
              BAD_ADDR_MODE);

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[1].reg + 1;

  constraint (inst.operands[0].reg == inst.operands[1].reg
              || inst.operands[0].reg == inst.operands[2].reg
              || inst.operands[0].reg == inst.operands[3].reg
              || inst.operands[1].reg == inst.operands[2].reg,
              BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 4;
  if (inst.instruction <= 0xffff && inst.size_req != 4
      && inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
    }
  else if (unified_syntax)
    {
      if (inst.instruction <= 0xffff)
        inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].reg;
      inst.instruction |= inst.operands[2].imm << 4;
    }
  else
    {
      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
                  _("Thumb encoding does not support rotation"));
      constraint (1, BAD_HIREG);
    }
  inst.reloc.type = BFD_RELOC_ARM_SWI;
  half = (inst.instruction & 0x10) != 0;
  constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
  constraint (inst.operands[0].immisreg,
              _("instruction requires register index"));
  constraint (inst.operands[0].imm == 15,
              _("PC is not a valid index register"));
  constraint (!half && inst.operands[0].shifted,
              _("instruction does not allow shifted index"));
  inst.instruction |= (inst.operands[0].reg << 16) | inst.operands[0].imm;
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm;
  inst.instruction |= inst.operands[2].reg << 16;

  if (inst.operands[3].present)
    {
      constraint (inst.reloc.exp.X_op != O_constant,
                  _("expression too complex"));
      if (inst.reloc.exp.X_add_number != 0)
        {
          if (inst.operands[3].shift_kind == SHIFT_ASR)
            inst.instruction |= 0x00200000;  /* sh bit */

          inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
          inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
        }
      inst.reloc.type = BFD_RELOC_UNUSED;
    }
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm;
  inst.instruction |= inst.operands[2].reg << 16;
/* Neon instruction encoder helpers.  */

/* Encodings for the different types for various Neon opcodes.  */

/* An "invalid" code for the following tables.  */
#define N_INV -1u

struct neon_tab_entry
{
  unsigned integer;
  unsigned float_or_poly;
  unsigned scalar_or_imm;
};
9400 #define NEON_ENC_TAB \
9401 X(vabd, 0x0000700, 0x1200d00, N_INV), \
9402 X(vmax, 0x0000600, 0x0000f00, N_INV), \
9403 X(vmin, 0x0000610, 0x0200f00, N_INV), \
9404 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
9405 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
9406 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
9407 X(vadd, 0x0000800, 0x0000d00, N_INV), \
9408 X(vsub, 0x1000800, 0x0200d00, N_INV), \
9409 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
9410 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
9411 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
9412 /* Register variants of the following two instructions are encoded as
9413 vcge / vcgt with the operands reversed. */ \
9414 X(vclt, 0x0000310, 0x1000e00, 0x1b10200), \
9415 X(vcle, 0x0000300, 0x1200e00, 0x1b10180), \
9416 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
9417 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
9418 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
9419 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
9420 X(vmlal, 0x0800800, N_INV, 0x0800240), \
9421 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
9422 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
9423 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
9424 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
9425 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
9426 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
9427 X(vshl, 0x0000400, N_INV, 0x0800510), \
9428 X(vqshl, 0x0000410, N_INV, 0x0800710), \
9429 X(vand, 0x0000110, N_INV, 0x0800030), \
9430 X(vbic, 0x0100110, N_INV, 0x0800030), \
9431 X(veor, 0x1000110, N_INV, N_INV), \
9432 X(vorn, 0x0300110, N_INV, 0x0800010), \
9433 X(vorr, 0x0200110, N_INV, 0x0800010), \
9434 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
9435 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
9436 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
9437 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
9438 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
9439 X(vst1, 0x0000000, 0x0800000, N_INV), \
9440 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
9441 X(vst2, 0x0000100, 0x0800100, N_INV), \
9442 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
9443 X(vst3, 0x0000200, 0x0800200, N_INV), \
9444 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
9445 X(vst4, 0x0000300, 0x0800300, N_INV), \
9446 X(vmovn, 0x1b20200, N_INV, N_INV), \
9447 X(vtrn, 0x1b20080, N_INV, N_INV), \
9448 X(vqmovn, 0x1b20200, N_INV, N_INV), \
9449 X(vqmovun, 0x1b20240, N_INV, N_INV)
#define X(OPC,I,F,S) N_MNEM_##OPC
enum neon_opc
{
  NEON_ENC_TAB
};
#undef X

#define X(OPC,I,F,S) { (I), (F), (S) }
static const struct neon_tab_entry neon_enc_tab[] =
{
  NEON_ENC_TAB
};
#undef X

#define NEON_ENC_INTEGER(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
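
/* The three columns of NEON_ENC_TAB hold the encodings selected by the
   accessor macros above: the plain integer/ARM-register (or interleave)
   form, the float/polynomial (or lane) form, and the scalar/immediate (or
   dup) form; N_INV marks combinations that do not exist.  */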
/* Shapes for instruction operands.  Some (e.g. NS_DDD_QQQ) represent multiple
   shapes which an instruction can accept.  The following mnemonic characters
   are used in the tag names for this enumeration:
     D - Neon D<n> register
     Q - Neon Q<n> register
     L - D<n> register list  */
/* Bit masks used in type checking given instructions.
  'N_EQK' means the type must be the same as (or based on in some way) the key
   type, which itself is marked with the 'N_KEY' bit.  If the 'N_EQK' bit is
   set, various other bits can be set as well in order to modify the meaning of
   the type constraint.  */

enum neon_type_mask
{
  N_KEY = 0x080000, /* key element (main type specifier).  */
  N_EQK = 0x100000, /* given operand has the same type & size as the key.  */
  N_DBL = 0x000001, /* if N_EQK, this operand is twice the size.  */
  N_HLF = 0x000002, /* if N_EQK, this operand is half the size.  */
  N_SGN = 0x000004, /* if N_EQK, this operand is forced to be signed.  */
  N_UNS = 0x000008, /* if N_EQK, this operand is forced to be unsigned.  */
  N_INT = 0x000010, /* if N_EQK, this operand is forced to be integer.  */
  N_FLT = 0x000020, /* if N_EQK, this operand is forced to be float.  */
  N_SIZ = 0x000040, /* if N_EQK, this operand is forced to be size-only.  */
  N_MAX_NONSPECIAL = N_F32
};

#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_SUF_32   (N_SU_32 | N_F32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F32)
/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)

/* Check the shape of a Neon instruction (sizes of registers).  Returns the
   more specific shape when there are two alternatives.  For non-polymorphic
   shapes, checking is done during operand parsing, so is not implemented
   here.  */
static enum neon_shape
neon_check_shape (enum neon_shape req)
{
#define RR(X) (inst.operands[(X)].isreg)
#define RD(X) (inst.operands[(X)].isreg && !inst.operands[(X)].isquad)
#define RQ(X) (inst.operands[(X)].isreg && inst.operands[(X)].isquad)
#define IM(X) (!inst.operands[(X)].isreg && !inst.operands[(X)].isscalar)
#define SC(X) (!inst.operands[(X)].isreg && inst.operands[(X)].isscalar)

  /* Fix missing optional operands.  FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1.  This is true of all current Neon opcodes, I think, but may not be
     true in the future.  */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

      if (RD(0) && RD(1) && RD(2))
      else if (RQ(0) && RQ(1) && RQ(2))
        first_error (_("expected <Qd>, <Qn>, <Qm> or <Dd>, <Dn>, <Dm> "
                       "operands"));

      if (RD(0) && RD(1) && IM(2))
      else if (RQ(0) && RQ(1) && IM(2))
        first_error (_("expected <Qd>, <Qn>, #<imm> or <Dd>, <Dn>, #<imm> "
                       "operands"));

      if (RD(0) && RD(1) && RD(2) && IM(3))
      if (RQ(0) && RQ(1) && RQ(2) && IM(3))
        first_error (_("expected <Qd>, <Qn>, <Qm>, #<imm> or "
                       "<Dd>, <Dn>, <Dm>, #<imm> operands"));

      if (RD(0) && RD(1) && SC(2))
      else if (RQ(0) && RQ(1) && SC(2))
        first_error (_("expected <Qd>, <Qn>, <Dm[x]> or <Dd>, <Dn>, <Dm[x]> "
                       "operands"));

      else if (RQ(0) && RQ(1))
        first_error (_("expected <Qd>, <Qm> or <Dd>, <Dm> operands"));

      else if (RQ(0) && SC(1))
        first_error (_("expected <Qd>, <Dm[x]> or <Dd>, <Dm[x]> operands"));

      else if (RQ(0) && RR(1))
        first_error (_("expected <Qd>, <Rm> or <Dd>, <Rm> operands"));

      else if (RQ(0) && IM(1))
        first_error (_("expected <Qd>, #<imm> or <Dd>, #<imm> operands"));
static void
neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
                       unsigned *g_size)
{
  /* Allow modification to be made to types which are constrained to be
     based on the key element, based on bits set alongside N_EQK.  */
  if ((typebits & N_EQK) != 0)
    {
      if ((typebits & N_HLF) != 0)
        *g_size /= 2;
      else if ((typebits & N_DBL) != 0)
        *g_size *= 2;
      if ((typebits & N_SGN) != 0)
        *g_type = NT_signed;
      else if ((typebits & N_UNS) != 0)
        *g_type = NT_unsigned;
      else if ((typebits & N_INT) != 0)
        *g_type = NT_integer;
      else if ((typebits & N_FLT) != 0)
        *g_type = NT_float;
      else if ((typebits & N_SIZ) != 0)
        *g_type = NT_untyped;
    }
}
/* Return operand OPNO promoted by bits set in THISARG.  KEY should be the
   "key" operand type, i.e. the single type specified in a Neon instruction
   when it is the only one given.  */

static struct neon_type_el
neon_type_promote (struct neon_type_el *key, unsigned thisarg)
{
  struct neon_type_el dest = *key;

  assert ((thisarg & N_EQK) != 0);

  neon_modify_type_size (thisarg, &dest.type, &dest.size);

  return dest;
}
9747 static enum neon_type_mask
9748 type_chk_of_el_type (enum neon_el_type type
, unsigned size
)
9756 case 16: return N_16
;
9757 case 32: return N_32
;
9758 case 64: return N_64
;
9766 case 8: return N_I8
;
9767 case 16: return N_I16
;
9768 case 32: return N_I32
;
9769 case 64: return N_I64
;
9782 case 8: return N_P8
;
9783 case 16: return N_P16
;
9791 case 8: return N_S8
;
9792 case 16: return N_S16
;
9793 case 32: return N_S32
;
9794 case 64: return N_S64
;
9802 case 8: return N_U8
;
9803 case 16: return N_U16
;
9804 case 32: return N_U32
;
9805 case 64: return N_U64
;
/* Convert compact Neon bitmask type representation to a type and size. Only
   handles the case where a single bit is set in the mask.  */

static int
el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
                     enum neon_type_mask mask)
{
  if ((mask & N_EQK) != 0)
    return FAIL;

  if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
    *size = 8;
  else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_P16)) != 0)
    *size = 16;
  else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
    *size = 32;
  else if ((mask & (N_S64 | N_U64 | N_I64 | N_64)) != 0)
    *size = 64;
  else
    return FAIL;

  if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
    *type = NT_signed;
  else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
    *type = NT_unsigned;
  else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
    *type = NT_integer;
  else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
    *type = NT_untyped;
  else if ((mask & (N_P8 | N_P16)) != 0)
    *type = NT_poly;
  else if ((mask & N_F32) != 0)
    *type = NT_float;
  else
    return FAIL;

  return SUCCESS;
}
/* Modify a bitmask of allowed types. This is only needed for type
   relaxation.  */

static unsigned
modify_types_allowed (unsigned allowed, unsigned mods)
{
  unsigned size;
  enum neon_el_type type;
  unsigned destmask = 0;
  int i;

  for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
    {
      if (el_type_of_type_chk (&type, &size, allowed & i) == SUCCESS)
        {
          neon_modify_type_size (mods, &type, &size);
          destmask |= type_chk_of_el_type (type, size);
        }
    }

  return destmask;
}
/* Check type and return type classification.
   The manual states (paraphrase): If one datatype is given, it indicates the
   type for:
     - the second operand, if there is one
     - the operand, if there is no second operand
     - the result, if there are no operands.
   This isn't quite good enough though, so we use a concept of a "key" datatype
   which is set on a per-instruction basis, which is the one which matters when
   only one data type is written.
   Note: this function has side-effects (e.g. filling in missing operands). All
   Neon instructions should call it before performing bit encoding.  */

static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
{
  va_list ap;
  unsigned i, pass, key_el = 0;
  unsigned types[NEON_MAX_TYPE_ELS];
  enum neon_el_type k_type = NT_invtype;
  unsigned k_size = -1u;
  struct neon_type_el badtype = {NT_invtype, -1};
  unsigned key_allowed = 0;

  /* Optional registers in Neon instructions are always (not) in operand 1.
     Fill in the missing operand here, if it was omitted.  */
  if (els > 1 && !inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  /* Suck up all the varargs.  */
  va_start (ap, ns);
  for (i = 0; i < els; i++)
    {
      unsigned thisarg = va_arg (ap, unsigned);
      if (thisarg == N_IGNORE_TYPE)
        {
          va_end (ap);
          return badtype;
        }
      types[i] = thisarg;
      if ((thisarg & N_KEY) != 0)
        key_el = i;
    }
  va_end (ap);

  if (inst.vectype.elems > 0)
    for (i = 0; i < els; i++)
      if (inst.operands[i].vectype.type != NT_invtype)
        {
          first_error (_("types specified in both the mnemonic and operands"));
          return badtype;
        }

  /* Duplicate inst.vectype elements here as necessary.
     FIXME: No idea if this is exactly the same as the ARM assembler,
     particularly when an insn takes one register and one non-register
     operand.  */
  if (inst.vectype.elems == 1 && els > 1)
    {
      unsigned j;
      inst.vectype.elems = els;
      inst.vectype.el[key_el] = inst.vectype.el[0];
      for (j = 0; j < els; j++)
        if (j != key_el)
          inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
                                                  types[j]);
    }
  else if (inst.vectype.elems == 0 && els > 0)
    {
      unsigned j;
      /* No types were given after the mnemonic, so look for types specified
         after each operand. We allow some flexibility here; as long as the
         "key" operand has a type, we can infer the others.  */
      for (j = 0; j < els; j++)
        if (inst.operands[j].vectype.type != NT_invtype)
          inst.vectype.el[j] = inst.operands[j].vectype;

      if (inst.operands[key_el].vectype.type != NT_invtype)
        {
          for (j = 0; j < els; j++)
            if (inst.operands[j].vectype.type == NT_invtype)
              inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
                                                      types[j]);
        }
      else
        {
          first_error (_("operand types can't be inferred"));
          return badtype;
        }
    }
  else if (inst.vectype.elems != els)
    {
      first_error (_("type specifier has the wrong number of parts"));
      return badtype;
    }

  for (pass = 0; pass < 2; pass++)
    {
      for (i = 0; i < els; i++)
        {
          unsigned thisarg = types[i];
          unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
            ? modify_types_allowed (key_allowed, thisarg) : thisarg;
          enum neon_el_type g_type = inst.vectype.el[i].type;
          unsigned g_size = inst.vectype.el[i].size;

          /* Decay more-specific signed & unsigned types to sign-insensitive
             integer types if sign-specific variants are unavailable.  */
          if ((g_type == NT_signed || g_type == NT_unsigned)
              && (types_allowed & N_SU_ALL) == 0)
            g_type = NT_integer;

          /* If only untyped args are allowed, decay any more specific types to
             them. Some instructions only care about signs for some element
             sizes, so handle that properly.  */
          if ((g_size == 8 && (types_allowed & N_8) != 0)
              || (g_size == 16 && (types_allowed & N_16) != 0)
              || (g_size == 32 && (types_allowed & N_32) != 0)
              || (g_size == 64 && (types_allowed & N_64) != 0))
            g_type = NT_untyped;

          if (pass == 0)
            {
              if ((thisarg & N_KEY) != 0)
                {
                  k_type = g_type;
                  k_size = g_size;
                  key_allowed = thisarg & ~N_KEY;
                }
            }
          else
            {
              if ((thisarg & N_EQK) == 0)
                {
                  unsigned given_type = type_chk_of_el_type (g_type, g_size);

                  if ((given_type & types_allowed) == 0)
                    {
                      first_error (_("bad type in Neon instruction"));
                      return badtype;
                    }
                }
              else
                {
                  enum neon_el_type mod_k_type = k_type;
                  unsigned mod_k_size = k_size;
                  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
                  if (g_type != mod_k_type || g_size != mod_k_size)
                    {
                      first_error (_("inconsistent types in Neon instruction"));
                      return badtype;
                    }
                }
            }
        }
    }

  return inst.vectype.el[key_el];
}
/* Fix up Neon data-processing instructions, ORing in the correct bits for
   ARM mode or Thumb mode and moving the encoded bit 24 to bit 28.  */

static unsigned
neon_dp_fixup (unsigned i)
{
  if (thumb_mode)
    {
      /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode.  */
      if (i & (1 << 24))
        i |= 1 << 28;

      i &= ~(1 << 24);

      i |= 0xef000000;
    }
  else
    i |= 0xf2000000;

  return i;
}

/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3).  */

static unsigned
neon_logbits (unsigned x)
{
  return ffs (x) - 4;
}

#define LOW4(R) ((R) & 0xf)
#define HI1(R) (((R) >> 4) & 1)
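
/* Worked examples (illustrative notes, not from the original source):
   neon_logbits (8) == 0 and neon_logbits (32) == 2, since ffs() returns the
   1-based index of the lowest set bit.  For a register number such as 18
   (0x12), LOW4 (0x12) == 0x2 gives the four low bits of the register field
   and HI1 (0x12) == 1 gives the extension bit placed in the D/N/M positions
   of the encodings below.  */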
/* Encode insns with bit pattern:

  |28/24|23|22 |21 20|19 16|15 12|11    8|7|6|5|4|3  0|
  |  U  |x |D  |size | Rn  | Rd  |x x x x|N|Q|M|x| Rm |

  SIZE is passed in bits. -1 means size field isn't changed, in case it has a
  different meaning for some instruction.  */

static void
neon_three_same (int isquad, int ubit, int size)
{
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= (ubit != 0) << 24;
  if (size != -1)
    inst.instruction |= neon_logbits (size) << 20;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
/* Encode instructions of the form:

  |28/24|23|22|21 20|19 18|17 16|15 12|11      7|6|5|4|3  0|
  |  U  |x |D |x  x |size |x  x | Rd  |x x x x x|Q|M|x| Rm |

  Don't write size if SIZE == -1.  */

static void
neon_two_same (int qbit, int ubit, int size)
{
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (qbit != 0) << 6;
  inst.instruction |= (ubit != 0) << 24;
  if (size != -1)
    inst.instruction |= neon_logbits (size) << 18;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
/* Neon instruction encoders, in approximate order of appearance.  */

static void
do_neon_dyadic_i_su (void)
{
  enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_SU_32 | N_KEY);
  neon_three_same (rs == NS_QQQ, et.type == NT_unsigned, et.size);
}

static void
do_neon_dyadic_i64_su (void)
{
  enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_SU_ALL | N_KEY);
  neon_three_same (rs == NS_QQQ, et.type == NT_unsigned, et.size);
}
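
/* Note on the type masks above (illustrative, not from the original source):
   the two N_EQK slots tie operands 0 and 1 to the key operand's type, while
   the key mask names the element types the instruction accepts; N_SU_32
   covers the signed/unsigned 8/16/32-bit variants, and N_SU_ALL additionally
   allows 64-bit elements.  The U bit is then set whenever the resolved key
   type is unsigned.  */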
static void
neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
                unsigned immbits)
{
  unsigned size = et.size >> 3;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= immbits << 16;
  inst.instruction |= (size >> 3) << 7;
  inst.instruction |= (size & 0x7) << 19;
  if (write_ubit)
    inst.instruction |= (uval != 0) << 24;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
static void
do_neon_shl_imm (void)
{
  if (!inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
      inst.instruction = NEON_ENC_IMMED (inst.instruction);
      neon_imm_shift (FALSE, 0, rs == NS_QQI, et, inst.operands[2].imm);
    }
  else
    {
      enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
      struct neon_type_el et = neon_check_type (3, rs,
        N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      neon_three_same (rs == NS_QQQ, et.type == NT_unsigned, et.size);
    }
}

static void
do_neon_qshl_imm (void)
{
  if (!inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
      inst.instruction = NEON_ENC_IMMED (inst.instruction);
      neon_imm_shift (TRUE, et.type == NT_unsigned, rs == NS_QQI, et,
                      inst.operands[2].imm);
    }
  else
    {
      enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
      struct neon_type_el et = neon_check_type (3, rs,
        N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      neon_three_same (rs == NS_QQQ, et.type == NT_unsigned, et.size);
    }
}
static int
neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
{
  /* Handle .I8 and .I64 as pseudo-instructions.  */
  switch (size)
    {
    case 8:
      /* Unfortunately, this will make everything apart from zero out-of-range.
         FIXME is this the intended semantics? There doesn't seem much point in
         accepting .I8 if so.  */
      immediate |= immediate << 8;
      size = 16;
      break;

    case 64:
      /* Similarly, anything other than zero will be replicated in bits [63:32],
         which probably isn't want we want if we specified .I64.  */
      if (immediate != 0)
        goto bad_immediate;
      size = 32;
      break;

    default: ;
    }

  if (immediate == (immediate & 0x000000ff))
    {
      *immbits = immediate;
      return (size == 16) ? 0x9 : 0x1;
    }
  else if (immediate == (immediate & 0x0000ff00))
    {
      *immbits = immediate >> 8;
      return (size == 16) ? 0xb : 0x3;
    }
  else if (immediate == (immediate & 0x00ff0000))
    {
      *immbits = immediate >> 16;
      return 0x5;
    }
  else if (immediate == (immediate & 0xff000000))
    {
      *immbits = immediate >> 24;
      return 0x7;
    }

  bad_immediate:
  first_error (_("immediate value out of range"));
  return FAIL;
}
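
/* Worked example (illustrative, not from the original source): for
   "vorr.i32 d0, #0x00ab0000" the immediate matches the 0x00ff0000 case above,
   so *immbits becomes 0xab and the returned cmode selects the "byte 2" form;
   neon_write_immbits below then scatters 0xab into the a/bcd/efgh fields.  */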
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
         && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
         && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
         && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
}

/* For immediate of above form, return 0bABCD.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
         | ((imm & 0x01000000) >> 21);
}
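
/* Worked example (illustrative, not from the original source):
   imm = 0xff00ff00 passes neon_bits_same_in_bytes (every byte is all-zeros or
   all-ones), and neon_squash_bits (0xff00ff00) == 0xa (0b1010), one bit per
   byte.  */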
/* Compress quarter-float representation to 0b...000 abcdefgh.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
}
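
/* Worked example (illustrative, not from the original source): the IEEE
   single 1.0f is 0x3f800000, and neon_qfloat_bits (0x3f800000) == 0x70, i.e.
   the abcdefgh immediate used by "vmov.f32 <Dd>, #1.0".  */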
/* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
   the instruction. *OP is passed as the initial value of the op field, and
   may be set to a different value depending on the constant (i.e.
   "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
   MVN).  */

static int
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, unsigned *immbits,
                         int *op, int size, enum neon_el_type type)
{
  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
    {
      if (size != 32 || *op == 1)
        return FAIL;
      *immbits = neon_qfloat_bits (immlo);
      return 0xf;
    }
  else if (size == 64 && neon_bits_same_in_bytes (immhi)
           && neon_bits_same_in_bytes (immlo))
    {
      /* Check this one first so we don't have to bother with immhi in later
         tests.  */
      if (*op == 1)
        return FAIL;
      *immbits = (neon_squash_bits (immhi) << 4) | neon_squash_bits (immlo);
      *op = 1;
      return 0xe;
    }
  else if (immhi != 0)
    return FAIL;
  else if (immlo == (immlo & 0x000000ff))
    {
      /* 64-bit case was already handled. Don't allow MVN with 8-bit
         immediate.  */
      if ((size != 8 && size != 16 && size != 32)
          || (size == 8 && *op == 1))
        return FAIL;
      *immbits = immlo;
      return (size == 8) ? 0xe : (size == 16) ? 0x8 : 0x0;
    }
  else if (immlo == (immlo & 0x0000ff00))
    {
      if (size != 16 && size != 32)
        return FAIL;
      *immbits = immlo >> 8;
      return (size == 16) ? 0xa : 0x2;
    }
  else if (immlo == (immlo & 0x00ff0000))
    {
      if (size != 32)
        return FAIL;
      *immbits = immlo >> 16;
      return 0x4;
    }
  else if (immlo == (immlo & 0xff000000))
    {
      if (size != 32)
        return FAIL;
      *immbits = immlo >> 24;
      return 0x6;
    }
  else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
    {
      if (size != 32)
        return FAIL;
      *immbits = (immlo >> 8) & 0xff;
      return 0xc;
    }
  else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
    {
      if (size != 32)
        return FAIL;
      *immbits = (immlo >> 16) & 0xff;
      return 0xd;
    }

  return FAIL;
}
/* Write immediate bits [7:0] to the following locations:

  |28/24|23     19|18 16|15                    4|3     0|
  |  a  |x x x x x|b c d|x x x x x x x x x x x x|e f g h|

  This function is used by VMOV/VMVN/VORR/VBIC.  */

static void
neon_write_immbits (unsigned immbits)
{
  inst.instruction |= immbits & 0xf;
  inst.instruction |= ((immbits >> 4) & 0x7) << 16;
  inst.instruction |= ((immbits >> 7) & 0x1) << 24;
}
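
/* Worked example (illustrative, not from the original source): immbits 0xab
   (0b10101011) is split as a=1 -> bit 24, bcd=0b010 -> bits [18:16], and
   efgh=0b1011 -> bits [3:0], matching the table above.  */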
/* Invert low-order SIZE bits of XHI:XLO.  */

static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned immlo = xlo ? *xlo : 0;
  unsigned immhi = xhi ? *xhi : 0;

  switch (size)
    {
    case 8:
      immlo = (~immlo) & 0xff;
      break;

    case 16:
      immlo = (~immlo) & 0xffff;
      break;

    case 64:
      immhi = (~immhi) & 0xffffffff;
      /* fall through.  */

    case 32:
      immlo = (~immlo) & 0xffffffff;
      break;

    default:
      break;
    }

  if (xlo)
    *xlo = immlo;

  if (xhi)
    *xhi = immhi;
}
static void
do_neon_logic (void)
{
  if (inst.operands[2].present && inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
      neon_check_type (3, rs, N_IGNORE_TYPE);
      /* U bit and size field were set as part of the bitmask.  */
      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      neon_three_same (rs == NS_QQQ, 0, -1);
    }
  else
    {
      enum neon_shape rs = neon_check_shape (NS_DI_QI);
      struct neon_type_el et = neon_check_type (1, rs, N_I8 | N_I16 | N_I32
                                                | N_I64);
      enum neon_opc opcode = inst.instruction & 0x0fffffff;
      unsigned immbits;
      int cmode;

      if (et.type == NT_invtype)
        return;

      inst.instruction = NEON_ENC_IMMED (inst.instruction);

      switch (opcode)
        {
        case N_MNEM_vbic:
          cmode = neon_cmode_for_logic_imm (inst.operands[1].imm, &immbits,
                                            et.size);
          break;

        case N_MNEM_vorr:
          cmode = neon_cmode_for_logic_imm (inst.operands[1].imm, &immbits,
                                            et.size);
          break;

        case N_MNEM_vand:
          /* Pseudo-instruction for VBIC.  */
          immbits = inst.operands[1].imm;
          neon_invert_size (&immbits, 0, et.size);
          cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
          break;

        case N_MNEM_vorn:
          /* Pseudo-instruction for VORR.  */
          immbits = inst.operands[1].imm;
          neon_invert_size (&immbits, 0, et.size);
          cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
          break;

        default:
          abort ();
        }

      if (cmode == FAIL)
        return;

      inst.instruction |= (rs == NS_QI) << 6;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= cmode << 8;
      neon_write_immbits (immbits);

      inst.instruction = neon_dp_fixup (inst.instruction);
    }
}
10480 do_neon_bitfield (void)
10482 enum neon_shape rs
= neon_check_shape (NS_DDD_QQQ
);
10483 neon_check_type (3, rs
, N_IGNORE_TYPE
);
10484 neon_three_same (rs
== NS_QQQ
, 0, -1);
10488 neon_dyadic_misc (enum neon_el_type ubit_meaning
, unsigned types
,
10491 enum neon_shape rs
= neon_check_shape (NS_DDD_QQQ
);
10492 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
| destbits
, N_EQK
,
10494 if (et
.type
== NT_float
)
10496 inst
.instruction
= NEON_ENC_FLOAT (inst
.instruction
);
10497 neon_three_same (rs
== NS_QQQ
, 0, -1);
10501 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
10502 neon_three_same (rs
== NS_QQQ
, et
.type
== ubit_meaning
, et
.size
);
10507 do_neon_dyadic_if_su (void)
10509 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
10513 do_neon_dyadic_if_su_d (void)
10515 /* This version only allow D registers, but that constraint is enforced during
10516 operand parsing so we don't need to do anything extra here. */
10517 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
10521 do_neon_dyadic_if_i (void)
10523 neon_dyadic_misc (NT_unsigned
, N_IF_32
, 0);
10527 do_neon_dyadic_if_i_d (void)
10529 neon_dyadic_misc (NT_unsigned
, N_IF_32
, 0);
10533 do_neon_addsub_if_i (void)
10535 /* The "untyped" case can't happen. Do this to stop the "U" bit being
10536 affected if we specify unsigned args. */
10537 neon_dyadic_misc (NT_untyped
, N_IF_32
| N_I64
, 0);
/* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
   result to be:
     V<op> A,B      (A is operand 0, B is operand 2)
   to mean:
     V<op> A,B,A
   not:
     V<op> A,B,B
   so handle that case specially.  */

static void
neon_exchange_operands (void)
{
  void *scratch = alloca (sizeof (inst.operands[0]));
  if (inst.operands[1].present)
    {
      /* Swap operands[1] and operands[2].  */
      memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
      inst.operands[1] = inst.operands[2];
      memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
    }
  else
    {
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[0];
    }
}
10568 neon_compare (unsigned regtypes
, unsigned immtypes
, int invert
)
10570 if (inst
.operands
[2].isreg
)
10573 neon_exchange_operands ();
10574 neon_dyadic_misc (NT_unsigned
, regtypes
, N_SIZ
);
10578 enum neon_shape rs
= neon_check_shape (NS_DDI_QQI
);
10579 struct neon_type_el et
= neon_check_type (2, rs
,
10580 N_EQK
| N_SIZ
, immtypes
| N_KEY
);
10582 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
10583 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
10584 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
10585 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
10586 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
10587 inst
.instruction
|= (rs
== NS_QQI
) << 6;
10588 inst
.instruction
|= (et
.type
== NT_float
) << 10;
10589 inst
.instruction
|= neon_logbits (et
.size
) << 18;
10591 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
10598 neon_compare (N_SUF_32
, N_S8
| N_S16
| N_S32
| N_F32
, FALSE
);
10602 do_neon_cmp_inv (void)
10604 neon_compare (N_SUF_32
, N_S8
| N_S16
| N_S32
| N_F32
, TRUE
);
10610 neon_compare (N_IF_32
, N_IF_32
, FALSE
);
/* For multiply instructions, we have the possibility of 16-bit or 32-bit
   scalars, which are encoded in 5 bits, M : Rm.
   For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
   M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
   index in M.  */

static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  switch (elsize)
    {
    case 16:
      if (regno > 7 || elno > 3)
        goto bad_scalar;
      return regno | (elno << 3);

    case 32:
      if (regno > 15 || elno > 1)
        goto bad_scalar;
      return regno | (elno << 4);

    default:
    bad_scalar:
      first_error (_("scalar out of range for multiply instruction"));
    }

  return 0;
}
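
/* Worked example (illustrative, not from the original source): the 16-bit
   scalar D3[2] has regno 3 and elno 2, so the function returns
   3 | (2 << 3) == 0x13, i.e. M:Rm = 1:0011 in the encoding.  */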
10645 /* Encode multiply / multiply-accumulate scalar instructions. */
10648 neon_mul_mac (struct neon_type_el et
, int ubit
)
10652 /* Give a more helpful error message if we have an invalid type. */
10653 if (et
.type
== NT_invtype
)
10656 scalar
= neon_scalar_for_mul (inst
.operands
[2].reg
, et
.size
);
10657 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
10658 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
10659 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
10660 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
10661 inst
.instruction
|= LOW4 (scalar
);
10662 inst
.instruction
|= HI1 (scalar
) << 5;
10663 inst
.instruction
|= (et
.type
== NT_float
) << 8;
10664 inst
.instruction
|= neon_logbits (et
.size
) << 20;
10665 inst
.instruction
|= (ubit
!= 0) << 24;
10667 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
10671 do_neon_mac_maybe_scalar (void)
10673 if (inst
.operands
[2].isscalar
)
10675 enum neon_shape rs
= neon_check_shape (NS_DDS_QQS
);
10676 struct neon_type_el et
= neon_check_type (3, rs
,
10677 N_EQK
, N_EQK
, N_I16
| N_I32
| N_F32
| N_KEY
);
10678 inst
.instruction
= NEON_ENC_SCALAR (inst
.instruction
);
10679 neon_mul_mac (et
, rs
== NS_QQS
);
10682 do_neon_dyadic_if_i ();
10688 enum neon_shape rs
= neon_check_shape (NS_DDD_QQQ
);
10689 struct neon_type_el et
= neon_check_type (3, rs
,
10690 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
10691 neon_three_same (rs
== NS_QQQ
, 0, et
.size
);
10694 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
10695 same types as the MAC equivalents. The polynomial type for this instruction
10696 is encoded the same as the integer type. */
10701 if (inst
.operands
[2].isscalar
)
10702 do_neon_mac_maybe_scalar ();
10704 neon_dyadic_misc (NT_poly
, N_I8
| N_I16
| N_I32
| N_F32
| N_P8
, 0);
10708 do_neon_qdmulh (void)
10710 if (inst
.operands
[2].isscalar
)
10712 enum neon_shape rs
= neon_check_shape (NS_DDS_QQS
);
10713 struct neon_type_el et
= neon_check_type (3, rs
,
10714 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
10715 inst
.instruction
= NEON_ENC_SCALAR (inst
.instruction
);
10716 neon_mul_mac (et
, rs
== NS_QQS
);
10720 enum neon_shape rs
= neon_check_shape (NS_DDD_QQQ
);
10721 struct neon_type_el et
= neon_check_type (3, rs
,
10722 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
10723 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
10724 /* The U bit (rounding) comes from bit mask. */
10725 neon_three_same (rs
== NS_QQQ
, 0, et
.size
);
10730 do_neon_fcmp_absolute (void)
10732 enum neon_shape rs
= neon_check_shape (NS_DDD_QQQ
);
10733 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_F32
| N_KEY
);
10734 /* Size field comes from bit mask. */
10735 neon_three_same (rs
== NS_QQQ
, 1, -1);
10739 do_neon_fcmp_absolute_inv (void)
10741 neon_exchange_operands ();
10742 do_neon_fcmp_absolute ();
10746 do_neon_step (void)
10748 enum neon_shape rs
= neon_check_shape (NS_DDD_QQQ
);
10749 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_F32
| N_KEY
);
10750 neon_three_same (rs
== NS_QQQ
, 0, -1);
10754 do_neon_abs_neg (void)
10756 enum neon_shape rs
= neon_check_shape (NS_DD_QQ
);
10757 struct neon_type_el et
= neon_check_type (3, rs
,
10758 N_EQK
, N_EQK
, N_S8
| N_S16
| N_S32
| N_F32
| N_KEY
);
10759 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
10760 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
10761 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
10762 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
10763 inst
.instruction
|= (rs
== NS_QQ
) << 6;
10764 inst
.instruction
|= (et
.type
== NT_float
) << 10;
10765 inst
.instruction
|= neon_logbits (et
.size
) << 18;
10767 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
10773 enum neon_shape rs
= neon_check_shape (NS_DDI_QQI
);
10774 struct neon_type_el et
= neon_check_type (2, rs
,
10775 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
10776 int imm
= inst
.operands
[2].imm
;
10777 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
10778 _("immediate out of range for insert"));
10779 neon_imm_shift (FALSE
, 0, rs
== NS_QQI
, et
, imm
);
10785 enum neon_shape rs
= neon_check_shape (NS_DDI_QQI
);
10786 struct neon_type_el et
= neon_check_type (2, rs
,
10787 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
10788 int imm
= inst
.operands
[2].imm
;
10789 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
10790 _("immediate out of range for insert"));
10791 neon_imm_shift (FALSE
, 0, rs
== NS_QQI
, et
, et
.size
- imm
);
10795 do_neon_qshlu_imm (void)
10797 enum neon_shape rs
= neon_check_shape (NS_DDI_QQI
);
10798 struct neon_type_el et
= neon_check_type (2, rs
,
10799 N_EQK
| N_UNS
, N_S8
| N_S16
| N_S32
| N_S64
| N_KEY
);
10800 int imm
= inst
.operands
[2].imm
;
10801 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
10802 _("immediate out of range for shift"));
10803 /* Only encodes the 'U present' variant of the instruction.
10804 In this case, signed types have OP (bit 8) set to 0.
10805 Unsigned types have OP set to 1. */
10806 inst
.instruction
|= (et
.type
== NT_unsigned
) << 8;
10807 /* The rest of the bits are the same as other immediate shifts. */
10808 neon_imm_shift (FALSE
, 0, rs
== NS_QQI
, et
, imm
);
10812 do_neon_qmovn (void)
10814 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
10815 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
10816 /* Saturating move where operands can be signed or unsigned, and the
10817 destination has the same signedness. */
10818 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
10819 if (et
.type
== NT_unsigned
)
10820 inst
.instruction
|= 0xc0;
10822 inst
.instruction
|= 0x80;
10823 neon_two_same (0, 1, et
.size
/ 2);
10827 do_neon_qmovun (void)
10829 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
10830 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
10831 /* Saturating move with unsigned results. Operands must be signed. */
10832 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
10833 neon_two_same (0, 1, et
.size
/ 2);
10837 do_neon_rshift_sat_narrow (void)
10839 /* FIXME: Types for narrowing. If operands are signed, results can be signed
10840 or unsigned. If operands are unsigned, results must also be unsigned. */
10841 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
10842 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
10843 int imm
= inst
.operands
[2].imm
;
10844 /* This gets the bounds check, size encoding and immediate bits calculation
10848 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
10849 VQMOVN.I<size> <Dd>, <Qm>. */
10852 inst
.operands
[2].present
= 0;
10853 inst
.instruction
= N_MNEM_vqmovn
;
10858 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
10859 _("immediate out of range"));
10860 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, et
.size
- imm
);
10864 do_neon_rshift_sat_narrow_u (void)
10866 /* FIXME: Types for narrowing. If operands are signed, results can be signed
10867 or unsigned. If operands are unsigned, results must also be unsigned. */
10868 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
10869 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
10870 int imm
= inst
.operands
[2].imm
;
10871 /* This gets the bounds check, size encoding and immediate bits calculation
10875 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
10876 VQMOVUN.I<size> <Dd>, <Qm>. */
10879 inst
.operands
[2].present
= 0;
10880 inst
.instruction
= N_MNEM_vqmovun
;
10885 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
10886 _("immediate out of range"));
10887 /* FIXME: The manual is kind of unclear about what value U should have in
10888 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
10890 neon_imm_shift (TRUE
, 1, 0, et
, et
.size
- imm
);
10894 do_neon_movn (void)
10896 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
10897 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
10898 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
10899 neon_two_same (0, 1, et
.size
/ 2);
10903 do_neon_rshift_narrow (void)
10905 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
10906 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
10907 int imm
= inst
.operands
[2].imm
;
10908 /* This gets the bounds check, size encoding and immediate bits calculation
10912 /* If immediate is zero then we are a pseudo-instruction for
10913 VMOVN.I<size> <Dd>, <Qm> */
10916 inst
.operands
[2].present
= 0;
10917 inst
.instruction
= N_MNEM_vmovn
;
10922 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
10923 _("immediate out of range for narrowing operation"));
10924 neon_imm_shift (FALSE
, 0, 0, et
, et
.size
- imm
);
10928 do_neon_shll (void)
10930 /* FIXME: Type checking when lengthening. */
10931 struct neon_type_el et
= neon_check_type (2, NS_QDI
,
10932 N_EQK
| N_DBL
, N_I8
| N_I16
| N_I32
| N_KEY
);
10933 unsigned imm
= inst
.operands
[2].imm
;
10935 if (imm
== et
.size
)
10937 /* Maximum shift variant. */
10938 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
10939 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
10940 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
10941 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
10942 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
10943 inst
.instruction
|= neon_logbits (et
.size
) << 18;
10945 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
10949 /* A more-specific type check for non-max versions. */
10950 et
= neon_check_type (2, NS_QDI
,
10951 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
10952 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
10953 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, imm
);
10957 /* Check the various types for the VCVT instruction, and return the one that
10958 the current instruction is. */
10961 neon_cvt_flavour (enum neon_shape rs
)
10963 #define CVT_VAR(C,X,Y) \
10964 et = neon_check_type (2, rs, (X), (Y)); \
10965 if (et.type != NT_invtype) \
10967 inst.error = NULL; \
10970 struct neon_type_el et
;
10972 CVT_VAR (0, N_S32
, N_F32
);
10973 CVT_VAR (1, N_U32
, N_F32
);
10974 CVT_VAR (2, N_F32
, N_S32
);
10975 CVT_VAR (3, N_F32
, N_U32
);
10984 /* Fixed-point conversion with #0 immediate is encoded as an integer
10986 if (inst
.operands
[2].present
&& inst
.operands
[2].imm
!= 0)
10988 enum neon_shape rs
= neon_check_shape (NS_DDI_QQI
);
10989 int flavour
= neon_cvt_flavour (rs
);
10990 unsigned immbits
= 32 - inst
.operands
[2].imm
;
10991 unsigned enctab
[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
10992 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
10994 inst
.instruction
|= enctab
[flavour
];
10995 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
10996 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
10997 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
10998 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
10999 inst
.instruction
|= (rs
== NS_QQI
) << 6;
11000 inst
.instruction
|= 1 << 21;
11001 inst
.instruction
|= immbits
<< 16;
11005 enum neon_shape rs
= neon_check_shape (NS_DD_QQ
);
11006 int flavour
= neon_cvt_flavour (rs
);
11007 unsigned enctab
[] = { 0x100, 0x180, 0x0, 0x080 };
11008 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
11010 inst
.instruction
|= enctab
[flavour
];
11011 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11012 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
11013 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
11014 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
11015 inst
.instruction
|= (rs
== NS_QQ
) << 6;
11016 inst
.instruction
|= 2 << 18;
11018 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
11022 neon_move_immediate (void)
11024 enum neon_shape rs
= neon_check_shape (NS_DI_QI
);
11025 struct neon_type_el et
= neon_check_type (1, rs
,
11026 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
);
11027 unsigned immlo
, immhi
= 0, immbits
;
11030 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
11031 op
= (inst
.instruction
& (1 << 5)) != 0;
11033 immlo
= inst
.operands
[1].imm
;
11034 if (inst
.operands
[1].regisimm
)
11035 immhi
= inst
.operands
[1].reg
;
11037 constraint (et
.size
< 32 && (immlo
& ~((1 << et
.size
) - 1)) != 0,
11038 _("immediate has bits set outside the operand size"));
11040 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, &immbits
, &op
,
11041 et
.size
, et
.type
)) == FAIL
)
11043 /* Invert relevant bits only. */
11044 neon_invert_size (&immlo
, &immhi
, et
.size
);
11045 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
11046 with one or the other; those cases are caught by
11047 neon_cmode_for_move_imm. */
11049 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, &immbits
, &op
,
11050 et
.size
, et
.type
)) == FAIL
)
11052 first_error (_("immediate out of range"));
11057 inst
.instruction
&= ~(1 << 5);
11058 inst
.instruction
|= op
<< 5;
11060 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11061 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
11062 inst
.instruction
|= (rs
== NS_QI
) << 6;
11063 inst
.instruction
|= cmode
<< 8;
11065 neon_write_immbits (immbits
);
11071 if (inst
.operands
[1].isreg
)
11073 enum neon_shape rs
= neon_check_shape (NS_DD_QQ
);
11075 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
11076 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11077 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
11078 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
11079 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
11080 inst
.instruction
|= (rs
== NS_QQ
) << 6;
11084 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
11085 neon_move_immediate ();
11088 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
11091 /* Encode instructions of form:
11093 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
11094 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm |
11099 neon_mixed_length (struct neon_type_el et
, unsigned size
)
11101 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11102 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
11103 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
11104 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
11105 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
11106 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
11107 inst
.instruction
|= (et
.type
== NT_unsigned
) << 24;
11108 inst
.instruction
|= neon_logbits (size
) << 20;
11110 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
11114 do_neon_dyadic_long (void)
11116 /* FIXME: Type checking for lengthening op. */
11117 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
11118 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
11119 neon_mixed_length (et
, et
.size
);
11123 do_neon_abal (void)
11125 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
11126 N_EQK
| N_INT
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
11127 neon_mixed_length (et
, et
.size
);
11131 neon_mac_reg_scalar_long (unsigned regtypes
, unsigned scalartypes
)
11133 if (inst
.operands
[2].isscalar
)
11135 struct neon_type_el et
= neon_check_type (3, NS_QDS
,
11136 N_EQK
| N_DBL
, N_EQK
, regtypes
| N_KEY
);
11137 inst
.instruction
= NEON_ENC_SCALAR (inst
.instruction
);
11138 neon_mul_mac (et
, et
.type
== NT_unsigned
);
11142 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
11143 N_EQK
| N_DBL
, N_EQK
, scalartypes
| N_KEY
);
11144 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
11145 neon_mixed_length (et
, et
.size
);
11150 do_neon_mac_maybe_scalar_long (void)
11152 neon_mac_reg_scalar_long (N_S16
| N_S32
| N_U16
| N_U32
, N_SU_32
);
11156 do_neon_dyadic_wide (void)
11158 struct neon_type_el et
= neon_check_type (3, NS_QQD
,
11159 N_EQK
| N_DBL
, N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
11160 neon_mixed_length (et
, et
.size
);
11164 do_neon_dyadic_narrow (void)
11166 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
11167 N_EQK
| N_DBL
, N_EQK
, N_I16
| N_I32
| N_I64
| N_KEY
);
11168 neon_mixed_length (et
, et
.size
/ 2);
11172 do_neon_mul_sat_scalar_long (void)
11174 neon_mac_reg_scalar_long (N_S16
| N_S32
, N_S16
| N_S32
);
11178 do_neon_vmull (void)
11180 if (inst
.operands
[2].isscalar
)
11181 do_neon_mac_maybe_scalar_long ();
11184 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
11185 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_P8
| N_KEY
);
11186 if (et
.type
== NT_poly
)
11187 inst
.instruction
= NEON_ENC_POLY (inst
.instruction
);
11189 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
11190 /* For polynomial encoding, size field must be 0b00 and the U bit must be
11191 zero. Should be OK as-is. */
11192 neon_mixed_length (et
, et
.size
);
11199 enum neon_shape rs
= neon_check_shape (NS_DDDI_QQQI
);
11200 struct neon_type_el et
= neon_check_type (3, rs
,
11201 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
11202 unsigned imm
= (inst
.operands
[3].imm
* et
.size
) / 8;
11203 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11204 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
11205 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
11206 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
11207 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
11208 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
11209 inst
.instruction
|= (rs
== NS_QQQI
) << 6;
11210 inst
.instruction
|= imm
<< 8;
11212 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
11218 enum neon_shape rs
= neon_check_shape (NS_DD_QQ
);
11219 struct neon_type_el et
= neon_check_type (2, rs
,
11220 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
11221 unsigned op
= (inst
.instruction
>> 7) & 3;
11222 /* N (width of reversed regions) is encoded as part of the bitmask. We
11223 extract it here to check the elements to be reversed are smaller.
11224 Otherwise we'd get a reserved instruction. */
11225 unsigned elsize
= (op
== 2) ? 16 : (op
== 1) ? 32 : (op
== 0) ? 64 : 0;
11226 assert (elsize
!= 0);
11227 constraint (et
.size
>= elsize
,
11228 _("elements must be smaller than reversal region"));
11229 neon_two_same (rs
== NS_QQ
, 1, et
.size
);
11235 if (inst
.operands
[1].isscalar
)
11237 enum neon_shape rs
= neon_check_shape (NS_DS_QS
);
11238 struct neon_type_el et
= neon_check_type (2, rs
,
11239 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
11240 unsigned sizebits
= et
.size
>> 3;
11241 unsigned dm
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
11242 int logsize
= neon_logbits (et
.size
);
11243 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
) << logsize
;
11244 inst
.instruction
= NEON_ENC_SCALAR (inst
.instruction
);
11245 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11246 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
11247 inst
.instruction
|= LOW4 (dm
);
11248 inst
.instruction
|= HI1 (dm
) << 5;
11249 inst
.instruction
|= (rs
== NS_QS
) << 6;
11250 inst
.instruction
|= x
<< 17;
11251 inst
.instruction
|= sizebits
<< 16;
11253 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
11257 enum neon_shape rs
= neon_check_shape (NS_DR_QR
);
11258 struct neon_type_el et
= neon_check_type (1, rs
,
11259 N_8
| N_16
| N_32
| N_KEY
);
11260 unsigned save_cond
= inst
.instruction
& 0xf0000000;
11261 /* Duplicate ARM register to lanes of vector. */
11262 inst
.instruction
= NEON_ENC_ARMREG (inst
.instruction
);
11265 case 8: inst
.instruction
|= 0x400000; break;
11266 case 16: inst
.instruction
|= 0x000020; break;
11267 case 32: inst
.instruction
|= 0x000000; break;
11270 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
11271 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 16;
11272 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 7;
11273 inst
.instruction
|= (rs
== NS_QR
) << 21;
11274 /* The encoding for this instruction is identical for the ARM and Thumb
11275 variants, except for the condition field. */
11277 inst
.instruction
|= 0xe0000000;
11279 inst
.instruction
|= save_cond
;
11283 /* VMOV has particularly many variations. It can be one of:
11284 0. VMOV<c><q> <Qd>, <Qm>
11285 1. VMOV<c><q> <Dd>, <Dm>
11286 (Register operations, which are VORR with Rm = Rn.)
11287 2. VMOV<c><q>.<dt> <Qd>, #<imm>
11288 3. VMOV<c><q>.<dt> <Dd>, #<imm>
11290 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
11291 (ARM register to scalar.)
11292 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
11293 (Two ARM registers to vector.)
11294 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
11295 (Scalar to ARM register.)
11296 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
11297 (Vector to two ARM registers.)
11299 We should have just enough information to be able to disambiguate most of
11300 these, apart from "Two ARM registers to vector" and "Vector to two ARM
11301 registers" cases. For these, abuse the .regisimm operand field to signify a
11304 All the encoded bits are hardcoded by this function.
11306 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
11307 can specify a type where it doesn't make sense to, and is ignored).
11313 int nargs
= inst
.operands
[0].present
+ inst
.operands
[1].present
11314 + inst
.operands
[2].present
;
11315 unsigned save_cond
= thumb_mode
? 0xe0000000 : inst
.instruction
& 0xf0000000;
11320 /* Cases 0, 1, 2, 3, 4, 6. */
11321 if (inst
.operands
[1].isscalar
)
11324 struct neon_type_el et
= neon_check_type (2, NS_IGNORE
,
11325 N_EQK
, N_S8
| N_S16
| N_U8
| N_U16
| N_32
| N_KEY
);
11326 unsigned logsize
= neon_logbits (et
.size
);
11327 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
11328 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
);
11329 unsigned abcdebits
= 0;
11331 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
11332 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
11336 case 8: abcdebits
= (et
.type
== NT_signed
) ? 0x08 : 0x18; break;
11337 case 16: abcdebits
= (et
.type
== NT_signed
) ? 0x01 : 0x11; break;
11338 case 32: abcdebits
= 0x00; break;
11342 abcdebits
|= x
<< logsize
;
11343 inst
.instruction
= save_cond
;
11344 inst
.instruction
|= 0xe100b10;
11345 inst
.instruction
|= LOW4 (dn
) << 16;
11346 inst
.instruction
|= HI1 (dn
) << 7;
11347 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11348 inst
.instruction
|= (abcdebits
& 3) << 5;
11349 inst
.instruction
|= (abcdebits
>> 2) << 21;
11351 else if (inst
.operands
[1].isreg
)
11353 /* Cases 0, 1, 4. */
11354 if (inst
.operands
[0].isscalar
)
11357 unsigned bcdebits
= 0;
11358 struct neon_type_el et
= neon_check_type (2, NS_IGNORE
,
11359 N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
11360 int logsize
= neon_logbits (et
.size
);
11361 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[0].reg
);
11362 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[0].reg
);
11364 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
11365 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
11369 case 8: bcdebits
= 0x8; break;
11370 case 16: bcdebits
= 0x1; break;
11371 case 32: bcdebits
= 0x0; break;
11375 bcdebits
|= x
<< logsize
;
11376 inst
.instruction
= save_cond
;
11377 inst
.instruction
|= 0xe000b10;
11378 inst
.instruction
|= LOW4 (dn
) << 16;
11379 inst
.instruction
|= HI1 (dn
) << 7;
11380 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
11381 inst
.instruction
|= (bcdebits
& 3) << 5;
11382 inst
.instruction
|= (bcdebits
>> 2) << 21;
11387 enum neon_shape rs
= neon_check_shape (NS_DD_QQ
);
11388 /* The architecture manual I have doesn't explicitly state which
11389 value the U bit should have for register->register moves, but
11390 the equivalent VORR instruction has U = 0, so do that. */
11391 inst
.instruction
= 0x0200110;
11392 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11393 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
11394 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
11395 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
11396 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
11397 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
11398 inst
.instruction
|= (rs
== NS_QQ
) << 6;
11400 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
11406 inst
.instruction
= 0x0800010;
11407 neon_move_immediate ();
11408 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
11414 if (inst
.operands
[0].regisimm
)
11417 inst
.instruction
= save_cond
;
11418 inst
.instruction
|= 0xc400b10;
11419 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
);
11420 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 5;
11421 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
11422 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
11427 inst
.instruction
= save_cond
;
11428 inst
.instruction
|= 0xc500b10;
11429 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11430 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
11431 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
11432 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
11442 do_neon_rshift_round_imm (void)
11444 enum neon_shape rs
= neon_check_shape (NS_DDI_QQI
);
11445 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
11446 int imm
= inst
.operands
[2].imm
;
11448 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
11451 inst
.operands
[2].present
= 0;
11456 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
11457 _("immediate out of range for shift"));
11458 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, rs
== NS_QQI
, et
,
11463 do_neon_movl (void)
11465 struct neon_type_el et
= neon_check_type (2, NS_QD
,
11466 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
11467 unsigned sizebits
= et
.size
>> 3;
11468 inst
.instruction
|= sizebits
<< 19;
11469 neon_two_same (0, et
.type
== NT_unsigned
, -1);
11475 enum neon_shape rs
= neon_check_shape (NS_DD_QQ
);
11476 struct neon_type_el et
= neon_check_type (2, rs
,
11477 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
11478 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
11479 neon_two_same (rs
== NS_QQ
, 1, et
.size
);
11483 do_neon_zip_uzp (void)
11485 enum neon_shape rs
= neon_check_shape (NS_DD_QQ
);
11486 struct neon_type_el et
= neon_check_type (2, rs
,
11487 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
11488 if (rs
== NS_DD
&& et
.size
== 32)
11490 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
11491 inst
.instruction
= N_MNEM_vtrn
;
11495 neon_two_same (rs
== NS_QQ
, 1, et
.size
);
11499 do_neon_sat_abs_neg (void)
11501 enum neon_shape rs
= neon_check_shape (NS_DD_QQ
);
11502 struct neon_type_el et
= neon_check_type (2, rs
,
11503 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
11504 neon_two_same (rs
== NS_QQ
, 1, et
.size
);
11508 do_neon_pair_long (void)
11510 enum neon_shape rs
= neon_check_shape (NS_DD_QQ
);
11511 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_32
| N_KEY
);
11512 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
11513 inst
.instruction
|= (et
.type
== NT_unsigned
) << 7;
11514 neon_two_same (rs
== NS_QQ
, 1, et
.size
);
11518 do_neon_recip_est (void)
11520 enum neon_shape rs
= neon_check_shape (NS_DD_QQ
);
11521 struct neon_type_el et
= neon_check_type (2, rs
,
11522 N_EQK
| N_FLT
, N_F32
| N_U32
| N_KEY
);
11523 inst
.instruction
|= (et
.type
== NT_float
) << 8;
11524 neon_two_same (rs
== NS_QQ
, 1, et
.size
);
11530 enum neon_shape rs
= neon_check_shape (NS_DD_QQ
);
11531 struct neon_type_el et
= neon_check_type (2, rs
,
11532 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
11533 neon_two_same (rs
== NS_QQ
, 1, et
.size
);
11539 enum neon_shape rs
= neon_check_shape (NS_DD_QQ
);
11540 struct neon_type_el et
= neon_check_type (2, rs
,
11541 N_EQK
, N_I8
| N_I16
| N_I32
| N_KEY
);
11542 neon_two_same (rs
== NS_QQ
, 1, et
.size
);
11548 enum neon_shape rs
= neon_check_shape (NS_DD_QQ
);
11549 struct neon_type_el et
= neon_check_type (2, rs
,
11550 N_EQK
| N_INT
, N_8
| N_KEY
);
11551 neon_two_same (rs
== NS_QQ
, 1, et
.size
);
11557 enum neon_shape rs
= neon_check_shape (NS_DD_QQ
);
11558 neon_two_same (rs
== NS_QQ
, 1, -1);
static void
do_neon_tbl_tbx (void)
{
  unsigned listlenbits;
  neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);

  if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
    {
      first_error (_("bad list length for table lookup"));
      return;
    }

  listlenbits = inst.operands[1].imm - 1;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= listlenbits << 8;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
static void
do_neon_ldm_stm (void)
{
  /* P, U and L bits are part of bitmask.  */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  unsigned offsetbits = inst.operands[1].imm * 2;

  constraint (is_dbmode && !inst.operands[0].writeback,
              _("writeback (!) must be used for VLDMDB and VSTMDB"));

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
              _("register list must contain at least 1 and at most 16 "
                "registers"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[0].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;

  inst.instruction |= offsetbits;

  if (thumb_mode)
    inst.instruction |= 0xe0000000;
}
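
/* Note (added for clarity, not from the original source): the offset field of
   VLDM/VSTM counts 32-bit words, and each D register is 64 bits wide, which
   is why offsetbits above is the register count multiplied by two.  */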
static void
do_neon_ldr_str (void)
{
  unsigned offsetbits;
  int offset_up = TRUE;
  int is_ldr = (inst.instruction & (1 << 20)) != 0;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;

  constraint (inst.reloc.pc_rel && !is_ldr,
              _("PC-relative addressing unavailable with VSTR"));

  constraint (!inst.reloc.pc_rel && inst.reloc.exp.X_op != O_constant,
              _("Immediate value must be a constant"));

  if (inst.reloc.exp.X_add_number < 0)
    {
      offset_up = FALSE;
      offsetbits = -inst.reloc.exp.X_add_number / 4;
    }
  else
    offsetbits = inst.reloc.exp.X_add_number / 4;

  /* FIXME: Does this catch everything?  */
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
              || inst.operands[1].postind || inst.operands[1].writeback
              || inst.operands[1].immisreg || inst.operands[1].shifted,
              BAD_ADDR_MODE);
  constraint ((inst.operands[1].imm & 3) != 0,
              _("Offset must be a multiple of 4"));
  constraint (offsetbits != (offsetbits & 0xff),
              _("Immediate offset out of range"));

  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= offsetbits & 0xff;
  inst.instruction |= offset_up << 23;

  if (thumb_mode)
    inst.instruction |= 0xe0000000;

  if (inst.reloc.pc_rel)
    {
      if (thumb_mode)
        inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
        inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
    }
  else
    inst.reloc.type = BFD_RELOC_UNUSED;
}
11662 /* "interleave" version also handles non-interleaving register VLD1/VST1
11666 do_neon_ld_st_interleave (void)
11668 struct neon_type_el et
= neon_check_type (1, NS_IGNORE
,
11669 N_8
| N_16
| N_32
| N_64
);
11670 unsigned alignbits
= 0;
11672 /* The bits in this table go:
11673 0: register stride of one (0) or two (1)
11674 1,2: register list length, minus one (1, 2, 3, 4).
11675 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
11676 We use -1 for invalid entries. */
11677 const int typetable
[] =
11679 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
11680 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
11681 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
11682 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
11686 if (et
.type
== NT_invtype
)
11689 if (inst
.operands
[1].immisalign
)
11690 switch (inst
.operands
[1].imm
>> 8)
11692 case 64: alignbits
= 1; break;
11694 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) == 3)
11695 goto bad_alignment
;
11699 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) == 3)
11700 goto bad_alignment
;
11705 first_error (_("bad alignment"));
11709 inst
.instruction
|= alignbits
<< 4;
11710 inst
.instruction
|= neon_logbits (et
.size
) << 6;
11712 /* Bits [4:6] of the immediate in a list specifier encode register stride
11713 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
11714 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
11715 up the right value for "type" in a table based on this value and the given
11716 list style, then stick it back. */
11717 idx
= ((inst
.operands
[0].imm
>> 4) & 7)
11718 | (((inst
.instruction
>> 8) & 3) << 3);
11720 typebits
= typetable
[idx
];
11722 constraint (typebits
== -1, _("bad list type for instruction"));
11724 inst
.instruction
&= ~0xf00;
11725 inst
.instruction
|= typebits
<< 8;
11728 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
11729 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
11730 otherwise. The variable arguments are a list of pairs of legal (size, align)
11731 values, terminated with -1. */
11734 neon_alignment_bit (int size
, int align
, int *do_align
, ...)
11737 int result
= FAIL
, thissize
, thisalign
;
11739 if (!inst
.operands
[1].immisalign
)
11745 va_start (ap
, do_align
);
11749 thissize
= va_arg (ap
, int);
11750 if (thissize
== -1)
11752 thisalign
= va_arg (ap
, int);
11754 if (size
== thissize
&& align
== thisalign
)
11757 while (result
!= SUCCESS
);
11761 if (result
== SUCCESS
)
11764 first_error (_("unsupported alignment for instruction"));
11770 do_neon_ld_st_lane (void)
11772 struct neon_type_el et
= neon_check_type (1, NS_IGNORE
, N_8
| N_16
| N_32
);
11773 int align_good
, do_align
= 0;
11774 int logsize
= neon_logbits (et
.size
);
11775 int align
= inst
.operands
[1].imm
>> 8;
11776 int n
= (inst
.instruction
>> 8) & 3;
11777 int max_el
= 64 / et
.size
;
11779 if (et
.type
== NT_invtype
)
11782 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != n
+ 1,
11783 _("bad list length"));
11784 constraint (NEON_LANE (inst
.operands
[0].imm
) >= max_el
,
11785 _("scalar index out of range"));
11786 constraint (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2
11788 _("stride of 2 unavailable when element size is 8"));
11792 case 0: /* VLD1 / VST1. */
11793 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 16, 16,
11795 if (align_good
== FAIL
)
11799 unsigned alignbits
= 0;
11802 case 16: alignbits
= 0x1; break;
11803 case 32: alignbits
= 0x3; break;
11806 inst
.instruction
|= alignbits
<< 4;
11810 case 1: /* VLD2 / VST2. */
11811 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 8, 16, 16, 32,
11813 if (align_good
== FAIL
)
11816 inst
.instruction
|= 1 << 4;
11819 case 2: /* VLD3 / VST3. */
11820 constraint (inst
.operands
[1].immisalign
,
11821 _("can't use alignment with this instruction"));
11824 case 3: /* VLD4 / VST4. */
11825 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 8, 32,
11826 16, 64, 32, 64, 32, 128, -1);
11827 if (align_good
== FAIL
)
11831 unsigned alignbits
= 0;
11834 case 8: alignbits
= 0x1; break;
11835 case 16: alignbits
= 0x1; break;
11836 case 32: alignbits
= (align
== 64) ? 0x1 : 0x2; break;
11839 inst
.instruction
|= alignbits
<< 4;
11846 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
11847 if (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
11848 inst
.instruction
|= 1 << (4 + logsize
);
11850 inst
.instruction
|= NEON_LANE (inst
.operands
[0].imm
) << (logsize
+ 5);
11851 inst
.instruction
|= logsize
<< 10;
/* Encode single n-element structure to all lanes VLD<n> instructions.  */

static void
do_neon_ld_dup (void)
{
  struct neon_type_el et = neon_check_type (1, NS_IGNORE, N_8 | N_16 | N_32);
  int align_good, do_align = 0;

  if (et.type == NT_invtype)
    return;

  switch ((inst.instruction >> 8) & 3)
    {
    case 0:  /* VLD1.  */
      assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
                                       &do_align, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
        return;
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
        {
        case 1: break;
        case 2: inst.instruction |= 1 << 5; break;
        default: first_error (_("bad list length")); return;
        }
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1:  /* VLD2.  */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
                                       &do_align, 8, 16, 16, 32, 32, 64, -1);
      if (align_good == FAIL)
        return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
                  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
        inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2:  /* VLD3.  */
      constraint (inst.operands[1].immisalign,
                  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
                  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
        inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3:  /* VLD4.  */
      {
        int align = inst.operands[1].imm >> 8;
        align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
                                         16, 64, 32, 64, 32, 128, -1);
        if (align_good == FAIL)
          return;
        constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
                    _("bad list length"));
        if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
          inst.instruction |= 1 << 5;
        if (et.size == 32 && align == 128)
          inst.instruction |= 0x3 << 6;
        else
          inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default: ;
    }

  inst.instruction |= do_align << 4;
}
/* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
   apart from bits [11:4]).  */

static void
do_neon_ldx_stx (void)
{
  switch (NEON_LANE (inst.operands[0].imm))
    {
    case NEON_INTERLEAVE_LANES:
      inst.instruction = NEON_ENC_INTERLV (inst.instruction);
      do_neon_ld_st_interleave ();
      break;

    case NEON_ALL_LANES:
      inst.instruction = NEON_ENC_DUP (inst.instruction);
      do_neon_ld_dup ();
      break;

    default:
      inst.instruction = NEON_ENC_LANE (inst.instruction);
      do_neon_ld_st_lane ();
    }

  /* L bit comes from bit mask.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].reg << 16;

  if (inst.operands[1].postind)
    {
      int postreg = inst.operands[1].imm & 0xf;
      constraint (!inst.operands[1].immisreg,
                  _("post-index must be a register"));
      constraint (postreg == 0xd || postreg == 0xf,
                  _("bad register for post-index"));
      inst.instruction |= postreg;
    }
  else if (inst.operands[1].writeback)
    {
      inst.instruction |= 0xd;
    }
  else
    inst.instruction |= 0xf;

  if (thumb_mode)
    inst.instruction |= 0xf9000000;
  else
    inst.instruction |= 0xf4000000;
}
/* Overall per-instruction processing.  */

/* We need to be able to fix up arbitrary expressions in some statements.
   This is so that we can handle symbols that are an arbitrary distance from
   the pc.  The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
   which returns part of an address in a form which will be valid for
   a data instruction.  We do this by pushing the expression into a symbol
   in the expr_section, and creating a fix for that.  */

static void
fix_new_arm (fragS *       frag,
             int           where,
             short int     size,
             expressionS * exp,
             int           pc_rel,
             int           reloc)
{
  fixS * new_fix;

  switch (exp->X_op)
    {
    case O_constant:
    case O_symbol:
    case O_add:
    case O_subtract:
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
      break;

    default:
      new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
                         pc_rel, reloc);
      break;
    }

  /* Mark whether the fix is to a THUMB instruction, or an ARM
     instruction.  */
  new_fix->tc_fix_data = thumb_mode;
}
/* Create a frag for an instruction requiring relaxation.  */
static void
output_relax_insn (void)
{
  char * to;
  symbolS *sym;
  int offset;

  /* The size of the instruction is unknown, so tie the debug info to the
     start of the instruction.  */
  dwarf2_emit_insn (0);

  switch (inst.reloc.exp.X_op)
    {
    case O_symbol:
      sym = inst.reloc.exp.X_add_symbol;
      offset = inst.reloc.exp.X_add_number;
      break;
    case O_constant:
      sym = NULL;
      offset = inst.reloc.exp.X_add_number;
      break;
    default:
      sym = make_expr_symbol (&inst.reloc.exp);
      offset = 0;
      break;
    }
  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
                 inst.relax, sym, offset, NULL/*offset, opcode*/);
  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
}
/* Write a 32-bit thumb instruction to buf.  */
static void
put_thumb32_insn (char * buf, unsigned long insn)
{
  md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
}
static void
output_inst (const char * str)
{
  char * to = NULL;

  if (inst.error)
    {
      as_bad ("%s -- `%s'", inst.error, str);
      return;
    }
  if (inst.relax)
    {
      output_relax_insn();
      return;
    }
  if (inst.size == 0)
    return;

  to = frag_more (inst.size);

  if (thumb_mode && (inst.size > THUMB_SIZE))
    {
      assert (inst.size == (2 * THUMB_SIZE));
      put_thumb32_insn (to, inst.instruction);
    }
  else if (inst.size > INSN_SIZE)
    {
      assert (inst.size == (2 * INSN_SIZE));
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
    }
  else
    md_number_to_chars (to, inst.instruction, inst.size);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    fix_new_arm (frag_now, to - frag_now->fr_literal,
                 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
                 inst.reloc.type);

  dwarf2_emit_insn (inst.size);
}
/* Tag values used in struct asm_opcode's tag field.  */
enum opcode_tag
{
  OT_unconditional,     /* Instruction cannot be conditionalized.
                           The ARM condition field is still 0xE.  */
  OT_unconditionalF,    /* Instruction cannot be conditionalized
                           and carries 0xF in its ARM condition field.  */
  OT_csuffix,           /* Instruction takes a conditional suffix.  */
  OT_cinfix3,           /* Instruction takes a conditional infix,
                           beginning at character index 3.  (In
                           unified mode, it becomes a suffix.)  */
  OT_cinfix3_legacy,    /* Legacy instruction takes a conditional infix at
                           character index 3, even in unified mode.  Used for
                           legacy instructions where suffix and infix forms
                           may be ambiguous.  */
  OT_csuf_or_in3,       /* Instruction takes either a conditional
                           suffix or an infix at character index 3.  */
  OT_odd_infix_unc,     /* This is the unconditional variant of an
                           instruction that takes a conditional infix
                           at an unusual position.  In unified mode,
                           this variant will accept a suffix.  */
  OT_odd_infix_0        /* Values greater than or equal to OT_odd_infix_0
                           are the conditional variants of instructions that
                           take conditional infixes in unusual positions.
                           The infix appears at character index
                           (tag - OT_odd_infix_0).  These are not accepted
                           in unified mode.  */
};
/* Subroutine of md_assemble, responsible for looking up the primary
   opcode from the mnemonic the user wrote.  STR points to the
   beginning of the mnemonic.

   This is not simply a hash table lookup, because of conditional
   variants.  Most instructions have conditional variants, which are
   expressed with a _conditional affix_ to the mnemonic.  If we were
   to encode each conditional variant as a literal string in the opcode
   table, it would have approximately 20,000 entries.

   Most mnemonics take this affix as a suffix, and in unified syntax,
   'most' is upgraded to 'all'.  However, in the divided syntax, some
   instructions take the affix as an infix, notably the s-variants of
   the arithmetic instructions.  Of those instructions, all but six
   have the infix appear after the third character of the mnemonic.

   Accordingly, the algorithm for looking up primary opcodes given
   a mnemonic is:

   1. Look up the identifier in the opcode table.
      If we find a match, go to step U.

   2. Look up the last two characters of the identifier in the
      conditions table.  If we find a match, look up the first N-2
      characters of the identifier in the opcode table.  If we
      find a match, go to step CE.

   3. Look up the fourth and fifth characters of the identifier in
      the conditions table.  If we find a match, extract those
      characters from the identifier, and look up the remaining
      characters in the opcode table.  If we find a match, go
      to step CM.

   4. Fail.

   U. Examine the tag field of the opcode structure, in case this is
      one of the six instructions with its conditional infix in an
      unusual place.  If it is, the tag tells us where to find the
      infix; look it up in the conditions table and set inst.cond
      accordingly.  Otherwise, this is an unconditional instruction.
      Again set inst.cond accordingly.  Return the opcode structure.

   CE. Examine the tag field to make sure this is an instruction that
      should receive a conditional suffix.  If it is not, fail.
      Otherwise, set inst.cond from the suffix we already looked up,
      and return the opcode structure.

   CM. Examine the tag field to make sure this is an instruction that
      should receive a conditional infix after the third character.
      If it is not, fail.  Otherwise, undo the edits to the current
      line of input and proceed as for case CE.  */
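
/* Illustrative note (editorial, not part of the original source): in unified
   syntax "addseq" is resolved by step 2 -- the trailing "eq" is found in the
   conditions table and "adds" in the opcode table, giving case CE.  In the
   divided syntax "addeqs" is resolved by step 3 -- "eq" is extracted from
   character indices 3 and 4, the remaining "adds" matches an OT_cinfix3
   entry, giving case CM.  A plain "add" matches directly in step 1 and is
   handled by step U.  */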
static const struct asm_opcode *
opcode_lookup (char **str)
{
  char *end, *base;
  char *affix;
  const struct asm_opcode *opcode;
  const struct asm_cond *cond;
  char save[2];

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.' (in unified mode only), or end of string.  */
  for (base = end = *str; *end != '\0'; end++)
    if (*end == ' ' || (unified_syntax && *end == '.'))
      break;

  if (end == base)
    return 0;

  /* Handle a possible width suffix and/or Neon type suffix.  */
  if (end[0] == '.')
    {
      int offset = 2;

      if (end[1] == 'w')
        inst.size_req = 4;
      else if (end[1] == 'n')
        inst.size_req = 2;
      else
        offset = 0;

      inst.vectype.elems = 0;

      *str = end + offset;

      if (end[offset] == '.')
        {
          /* See if we have a Neon type suffix.  */
          if (parse_neon_type (&inst.vectype, str) == FAIL)
            return 0;
        }
      else if (end[offset] != '\0' && end[offset] != ' ')
        return 0;
    }
  else
    *str = end;

  /* Look for unaffixed or special-case affixed mnemonic.  */
  opcode = hash_find_n (arm_ops_hsh, base, end - base);
  if (opcode)
    {
      /* step U */
      if (opcode->tag < OT_odd_infix_0)
        {
          inst.cond = COND_ALWAYS;
          return opcode;
        }

      if (unified_syntax)
        as_warn (_("conditional infixes are deprecated in unified syntax"));
      affix = base + (opcode->tag - OT_odd_infix_0);
      cond = hash_find_n (arm_cond_hsh, affix, 2);
      assert (cond);

      inst.cond = cond->value;
      return opcode;
    }

  /* Cannot have a conditional suffix on a mnemonic of less than two
     characters.  */
  if (end - base < 3)
    return 0;

  /* Look for suffixed mnemonic.  */
  affix = end - 2;
  cond = hash_find_n (arm_cond_hsh, affix, 2);
  opcode = hash_find_n (arm_ops_hsh, base, affix - base);
  if (opcode && cond)
    {
      /* step CE */
      switch (opcode->tag)
        {
        case OT_cinfix3_legacy:
          /* Ignore conditional suffixes matched on infix only mnemonics.  */
          break;

        case OT_cinfix3:
        case OT_odd_infix_unc:
          if (!unified_syntax)
            return 0;
          /* else fall through */

        case OT_csuffix:
        case OT_csuf_or_in3:
          inst.cond = cond->value;
          return opcode;

        case OT_unconditional:
        case OT_unconditionalF:
          if (thumb_mode)
            {
              inst.cond = cond->value;
            }
          else
            {
              /* delayed diagnostic */
              inst.error = BAD_COND;
              inst.cond = COND_ALWAYS;
            }
          return opcode;

        default:
          return 0;
        }
    }

  /* Cannot have a usual-position infix on a mnemonic of less than
     six characters (five would be a suffix).  */
  if (end - base < 6)
    return 0;

  /* Look for infixed mnemonic in the usual position.  */
  affix = base + 3;
  cond = hash_find_n (arm_cond_hsh, affix, 2);
  if (!cond)
    return 0;

  memcpy (save, affix, 2);
  memmove (affix, affix + 2, (end - affix) - 2);
  opcode = hash_find_n (arm_ops_hsh, base, (end - base) - 2);
  memmove (affix + 2, affix, (end - affix) - 2);
  memcpy (affix, save, 2);

  if (opcode && (opcode->tag == OT_cinfix3 || opcode->tag == OT_csuf_or_in3
                 || opcode->tag == OT_cinfix3_legacy))
    {
      /* step CM */
      if (unified_syntax && opcode->tag == OT_cinfix3)
        as_warn (_("conditional infixes are deprecated in unified syntax"));

      inst.cond = cond->value;
      return opcode;
    }

  return 0;
}
void
md_assemble (char *str)
{
  char *p = str;
  const struct asm_opcode * opcode;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  memset (&inst, '\0', sizeof (inst));
  inst.reloc.type = BFD_RELOC_UNUSED;

  opcode = opcode_lookup (&p);
  if (!opcode)
    {
      /* It wasn't an instruction, but it might be a register alias of
         the form alias .req reg, or a Neon .dn/.qn directive.  */
      if (!create_register_alias (str, p)
          && !create_neon_reg_alias (str, p))
        as_bad (_("bad instruction `%s'"), str);

      return;
    }

  if (thumb_mode)
    {
      arm_feature_set variant;

      variant = cpu_variant;
      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
        ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
      /* Check that this instruction is supported for this CPU.  */
      if (!opcode->tvariant
          || (thumb_mode == 1
              && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
        {
          as_bad (_("selected processor does not support `%s'"), str);
          return;
        }
      if (inst.cond != COND_ALWAYS && !unified_syntax
          && opcode->tencode != do_t_branch)
        {
          as_bad (_("Thumb does not support conditional execution"));
          return;
        }

      /* Check conditional suffixes.  */
      if (current_it_mask)
        {
          int cond;
          cond = current_cc ^ ((current_it_mask >> 4) & 1) ^ 1;
          current_it_mask <<= 1;
          current_it_mask &= 0x1f;
          /* The BKPT instruction is unconditional even in an IT block.  */
          if (!inst.error
              && cond != inst.cond && opcode->tencode != do_t_bkpt)
            {
              as_bad (_("incorrect condition in IT block"));
              return;
            }
        }
      else if (inst.cond != COND_ALWAYS && opcode->tencode != do_t_branch)
        {
          as_bad (_("thumb conditional instruction not in IT block"));
          return;
        }

      mapping_state (MAP_THUMB);
      inst.instruction = opcode->tvalue;

      if (!parse_operands (p, opcode->operands))
        opcode->tencode ();

      /* Clear current_it_mask at the end of an IT block.  */
      if (current_it_mask == 0x10)
        current_it_mask = 0;

      if (!(inst.error || inst.relax))
        {
          assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
          inst.size = (inst.instruction > 0xffff ? 4 : 2);
          if (inst.size_req && inst.size_req != inst.size)
            {
              as_bad (_("cannot honor width suffix -- `%s'"), str);
              return;
            }
        }
      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
                              *opcode->tvariant);
      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
         set those bits when Thumb-2 32-bit instructions are seen.  i.e.
         anything other than bl/blx.
         This is overly pessimistic for relaxable instructions.  */
      if ((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
          || inst.relax)
        ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
                                arm_ext_v6t2);
    }
  else
    {
      /* Check that this instruction is supported for this CPU.  */
      if (!opcode->avariant ||
          !ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
        {
          as_bad (_("selected processor does not support `%s'"), str);
          return;
        }
      if (inst.size_req)
        {
          as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
          return;
        }

      mapping_state (MAP_ARM);
      inst.instruction = opcode->avalue;
      if (opcode->tag == OT_unconditionalF)
        inst.instruction |= 0xF << 28;
      else
        inst.instruction |= inst.cond << 28;
      inst.size = INSN_SIZE;
      if (!parse_operands (p, opcode->operands))
        opcode->aencode ();
      /* Arm mode bx is marked as both v4T and v5 because it's still required
         on a hypothetical non-thumb v5 core.  */
      if (ARM_CPU_HAS_FEATURE (*opcode->avariant, arm_ext_v4t)
          || ARM_CPU_HAS_FEATURE (*opcode->avariant, arm_ext_v5))
        ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
      else
        ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
                                *opcode->avariant);
    }

  output_inst (str);
}
/* Various frobbings of labels and their addresses.  */

void
arm_start_line_hook (void)
{
  last_label_seen = NULL;
}

void
arm_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  /* Note - do not allow local symbols (.Lxxx) to be labeled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.  Also, these labels can be used
     as part of a computed goto or switch statement.  e.g. gcc
     can generate code that looks like this:

                ldr  r2, [pc, .Laaa]
                lsl  r3, r3, #2
                ldr  r2, [r2, r3]
                mov  pc, r2

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
         bit of that address should be set.  This will allow
         interworking between Arm and Thumb functions to work
         correctly.  */

      THUMB_SET_FUNC (sym, 1);

      label_is_thumb_function_name = FALSE;
    }

  dwarf2_emit_label (sym);
}
int
arm_data_in_code (void)
{
  if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
    {
      *input_line_pointer = '/';
      input_line_pointer += 5;
      *input_line_pointer = 0;
      return 1;
    }

  return 0;
}

char *
arm_canonicalize_symbol_name (char * name)
{
  int len;

  if (thumb_mode && (len = strlen (name)) > 5
      && streq (name + len - 5, "/data"))
    *(name + len - 5) = 0;

  return name;
}
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.  Some registers
   also have mixed-case names.  */

#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
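
/* Illustrative note (editorial, not part of the original source): with the
   macros above, REGDEF(sp,13,RN) expands to { "sp", 13, REG_TYPE_RN, TRUE, 0 },
   and REGSET(r, RN) emits the sixteen entries r0..r15 in one go.  Aliases that
   the user creates with a ".req" directive (e.g. "foo .req r0") are handled
   separately by create_register_alias and are not part of this table.  */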
static const struct reg_entry reg_names[] =
{
  /* ARM integer registers.  */
  REGSET(r, RN), REGSET(R, RN),

  /* ATPCS synonyms.  */
  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),

  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),

  /* Well-known aliases.  */
  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),

  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),

  /* Coprocessor numbers.  */
  REGSET(p, CP), REGSET(P, CP),

  /* Coprocessor register numbers.  The "cr" variants are for backward
     compatibility.  */
  REGSET(c,  CN), REGSET(C,  CN),
  REGSET(cr, CN), REGSET(CR, CN),

  /* FPA registers.  */
  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),

  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),

  /* VFP SP registers.  */
  REGSET(s,VFS),  REGSET(S,VFS),
  REGSETH(s,VFS), REGSETH(S,VFS),

  /* VFP DP Registers.  */
  REGSET(d,VFD),  REGSET(D,VFD),
  /* Extra Neon DP registers.  */
  REGSETH(d,VFD), REGSETH(D,VFD),

  /* Neon QP registers.  */
  REGSET2(q,NQ),  REGSET2(Q,NQ),

  /* VFP control registers.  */
  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),

  /* Maverick DSP coprocessor registers.  */
  REGSET(mvf,MVF),  REGSET(mvd,MVD),  REGSET(mvfx,MVFX),  REGSET(mvdx,MVDX),
  REGSET(MVF,MVF),  REGSET(MVD,MVD),  REGSET(MVFX,MVFX),  REGSET(MVDX,MVDX),

  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
  REGDEF(dspsc,0,DSPSC),

  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
  REGDEF(DSPSC,0,DSPSC),

  /* iWMMXt data registers - p0, c0-15.  */
  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),

  /* iWMMXt control registers - p1, c0-3.  */
  REGDEF(wcid,  0,MMXWC),  REGDEF(wCID,  0,MMXWC),  REGDEF(WCID,  0,MMXWC),
  REGDEF(wcon,  1,MMXWC),  REGDEF(wCon,  1,MMXWC),  REGDEF(WCON,  1,MMXWC),
  REGDEF(wcssf, 2,MMXWC),  REGDEF(wCSSF, 2,MMXWC),  REGDEF(WCSSF, 2,MMXWC),
  REGDEF(wcasf, 3,MMXWC),  REGDEF(wCASF, 3,MMXWC),  REGDEF(WCASF, 3,MMXWC),

  /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
  REGDEF(wcgr0, 8,MMXWCG),  REGDEF(wCGR0, 8,MMXWCG),  REGDEF(WCGR0, 8,MMXWCG),
  REGDEF(wcgr1, 9,MMXWCG),  REGDEF(wCGR1, 9,MMXWCG),  REGDEF(WCGR1, 9,MMXWCG),
  REGDEF(wcgr2,10,MMXWCG),  REGDEF(wCGR2,10,MMXWCG),  REGDEF(WCGR2,10,MMXWCG),
  REGDEF(wcgr3,11,MMXWCG),  REGDEF(wCGR3,11,MMXWCG),  REGDEF(WCGR3,11,MMXWCG),

  /* XScale accumulator registers.  */
  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
};
/* Table of all PSR suffixes.  Bare "CPSR" and "SPSR" are handled
   within psr_required_here.  */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all",  PSR_c | PSR_f},

  /* Individual flags.  */
  {"f",    PSR_f},
  {"c",    PSR_c},
  {"x",    PSR_x},
  {"s",    PSR_s},
  /* Combinations of flags.  */
  {"fs",   PSR_f | PSR_s},
  {"fx",   PSR_f | PSR_x},
  {"fc",   PSR_f | PSR_c},
  {"sf",   PSR_s | PSR_f},
  {"sx",   PSR_s | PSR_x},
  {"sc",   PSR_s | PSR_c},
  {"xf",   PSR_x | PSR_f},
  {"xs",   PSR_x | PSR_s},
  {"xc",   PSR_x | PSR_c},
  {"cf",   PSR_c | PSR_f},
  {"cs",   PSR_c | PSR_s},
  {"cx",   PSR_c | PSR_x},
  {"fsx",  PSR_f | PSR_s | PSR_x},
  {"fsc",  PSR_f | PSR_s | PSR_c},
  {"fxs",  PSR_f | PSR_x | PSR_s},
  {"fxc",  PSR_f | PSR_x | PSR_c},
  {"fcs",  PSR_f | PSR_c | PSR_s},
  {"fcx",  PSR_f | PSR_c | PSR_x},
  {"sfx",  PSR_s | PSR_f | PSR_x},
  {"sfc",  PSR_s | PSR_f | PSR_c},
  {"sxf",  PSR_s | PSR_x | PSR_f},
  {"sxc",  PSR_s | PSR_x | PSR_c},
  {"scf",  PSR_s | PSR_c | PSR_f},
  {"scx",  PSR_s | PSR_c | PSR_x},
  {"xfs",  PSR_x | PSR_f | PSR_s},
  {"xfc",  PSR_x | PSR_f | PSR_c},
  {"xsf",  PSR_x | PSR_s | PSR_f},
  {"xsc",  PSR_x | PSR_s | PSR_c},
  {"xcf",  PSR_x | PSR_c | PSR_f},
  {"xcs",  PSR_x | PSR_c | PSR_s},
  {"cfs",  PSR_c | PSR_f | PSR_s},
  {"cfx",  PSR_c | PSR_f | PSR_x},
  {"csf",  PSR_c | PSR_s | PSR_f},
  {"csx",  PSR_c | PSR_s | PSR_x},
  {"cxf",  PSR_c | PSR_x | PSR_f},
  {"cxs",  PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};
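
/* Illustrative note (editorial, not part of the original source): these
   suffixes name the PSR fields written by MSR, so the entries above let the
   assembler accept forms such as

       msr  CPSR_fc, r0      @ write the flags and control fields
       msr  SPSR_fsxc, r1    @ write all four fields

   The bare CPSR/SPSR forms are handled elsewhere, as the table's own comment
   notes.  */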
/* Table of V7M psr names.  */
static const struct asm_psr v7m_psrs[] =
{
  {"basepri_max", 18},
};

/* Table of all shift-in-operand names.  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL },  { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL },  { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR },  { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR },  { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR },  { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX },  { "RRX", SHIFT_RRX }
};
/* Table of all explicit relocation names.  */
#ifdef OBJ_ELF
static struct reloc_entry reloc_names[] =
{
  { "got",     BFD_RELOC_ARM_GOT32   },  { "GOT",     BFD_RELOC_ARM_GOT32   },
  { "gotoff",  BFD_RELOC_ARM_GOTOFF  },  { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
  { "plt",     BFD_RELOC_ARM_PLT32   },  { "PLT",     BFD_RELOC_ARM_PLT32   },
  { "target1", BFD_RELOC_ARM_TARGET1 },  { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 },  { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel",   BFD_RELOC_ARM_SBREL32 },  { "SBREL",   BFD_RELOC_ARM_SBREL32 },
  { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},  { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
  { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32},  { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
  { "tpoff",   BFD_RELOC_ARM_TLS_LE32},  { "TPOFF",   BFD_RELOC_ARM_TLS_LE32}
};
#endif
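
/* Illustrative note (editorial, not part of the original source): these names
   are written in parentheses after a symbol to request a particular
   relocation, for example

       .word  foo(GOTOFF)
       bl     bar(PLT)

   Lower- and uppercase spellings are both listed so either form is
   accepted.  */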
/* Table of all conditional affixes.  0xF is not defined as a condition code.  */
static const struct asm_cond conds[] =
{
  {"eq", 0x0},
  {"ne", 0x1},
  {"cs", 0x2}, {"hs", 0x2},
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}
};

static struct asm_barrier_opt barrier_opt_names[] =
{
  { "sy",   0xf },
  { "un",   0x7 },
  { "st",   0xe },
  { "unst", 0x6 }
};
/* Table of ARM-format instructions.  */

/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.  */
#define OPS0()            { OP_stop, }
#define OPS1(a)           { OP_##a, }
#define OPS2(a,b)         { OP_##a,OP_##b, }
#define OPS3(a,b,c)       { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)     { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)   { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE(mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE(mnem, aop, T_MNEM_##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3(mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3(mnem, aop, T_MNEM_##top, nops, ops, ae, te)

/* Mnemonic with a conditional infix in an unusual place.  Each and every variant has to
   appear in the condition table.  */
#define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te) \
  { #m1 #m2 #m3, OPS##nops ops, \
    sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
    0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }

#define TxCM(m1, m2, op, top, nops, ops, ae, te) \
  TxCM_(m1,   , m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, eq, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, ne, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, cs, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, hs, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, cc, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, ul, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, lo, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, mi, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, pl, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, vs, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, vc, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, hi, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, ls, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, ge, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, lt, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, gt, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, le, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, al, m2, op, top, nops, ops, ae, te)

#define TCM(m1,m2, aop, top, nops, ops, ae, te) \
  TxCM(m1,m2, aop, 0x##top, nops, ops, ae, te)
#define tCM(m1,m2, aop, top, nops, ops, ae, te) \
  TxCM(m1,m2, aop, T_MNEM_##top, nops, ops, ae, te)

/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te) \
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* ARM-only variants of all the above.  */
#define CE(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define C3(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  */
#define cCE(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

#define xCM_(m1, m2, m3, op, nops, ops, ae) \
  { #m1 #m2 #m3, OPS##nops ops, \
    sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define CM(m1, m2, op, nops, ops, ae) \
  xCM_(m1,   , m2, op, nops, ops, ae), \
  xCM_(m1, eq, m2, op, nops, ops, ae), \
  xCM_(m1, ne, m2, op, nops, ops, ae), \
  xCM_(m1, cs, m2, op, nops, ops, ae), \
  xCM_(m1, hs, m2, op, nops, ops, ae), \
  xCM_(m1, cc, m2, op, nops, ops, ae), \
  xCM_(m1, ul, m2, op, nops, ops, ae), \
  xCM_(m1, lo, m2, op, nops, ops, ae), \
  xCM_(m1, mi, m2, op, nops, ops, ae), \
  xCM_(m1, pl, m2, op, nops, ops, ae), \
  xCM_(m1, vs, m2, op, nops, ops, ae), \
  xCM_(m1, vc, m2, op, nops, ops, ae), \
  xCM_(m1, hi, m2, op, nops, ops, ae), \
  xCM_(m1, ls, m2, op, nops, ops, ae), \
  xCM_(m1, ge, m2, op, nops, ops, ae), \
  xCM_(m1, lt, m2, op, nops, ops, ae), \
  xCM_(m1, gt, m2, op, nops, ops, ae), \
  xCM_(m1, le, m2, op, nops, ops, ae), \
  xCM_(m1, al, m2, op, nops, ops, ae)

#define UE(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

#define UF(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

/* Neon data-processing. ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM_##op, N_MNEM_##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   types.  */
#define NCE(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x##op, ARM_VARIANT, \
    THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_csuffix, N_MNEM_##op, N_MNEM_##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define do_0 0

/* Thumb-only, unconditional.  */
#define UT(mnem, op, nops, ops, te) TUE(mnem, 0, op, nops, ops, 0, te)
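
/* Illustrative note (editorial, not part of the original source): as an
   example of how the table below is built, the entry

       TCE(swi, f000000, df00, 1, (EXPi), swi, t_swi)

   expands via TxCE and OPS1 to (roughly)

       { "swi", { OP_EXPi, }, OT_csuffix, 0xf000000, 0xdf00,
         ARM_VARIANT, THUMB_VARIANT, do_swi, do_t_swi }

   i.e. one record giving the mnemonic, its operand parse list, its tag, the
   ARM and Thumb opcode values, the architecture variants, and the two
   encoding functions.  */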
static const struct asm_opcode insns[] =
{
#define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions.  */
#define THUMB_VARIANT &arm_ext_v4t
 tCE(and,  0000000, and,  3, (RR, oRR, SH), arit, t_arit3c),
 tC3(ands, 0100000, ands, 3, (RR, oRR, SH), arit, t_arit3c),
 tCE(eor,  0200000, eor,  3, (RR, oRR, SH), arit, t_arit3c),
 tC3(eors, 0300000, eors, 3, (RR, oRR, SH), arit, t_arit3c),
 tCE(sub,  0400000, sub,  3, (RR, oRR, SH), arit, t_add_sub),
 tC3(subs, 0500000, subs, 3, (RR, oRR, SH), arit, t_add_sub),
 tCE(add,  0800000, add,  3, (RR, oRR, SH), arit, t_add_sub),
 tC3(adds, 0900000, adds, 3, (RR, oRR, SH), arit, t_add_sub),
 tCE(adc,  0a00000, adc,  3, (RR, oRR, SH), arit, t_arit3c),
 tC3(adcs, 0b00000, adcs, 3, (RR, oRR, SH), arit, t_arit3c),
 tCE(sbc,  0c00000, sbc,  3, (RR, oRR, SH), arit, t_arit3),
 tC3(sbcs, 0d00000, sbcs, 3, (RR, oRR, SH), arit, t_arit3),
 tCE(orr,  1800000, orr,  3, (RR, oRR, SH), arit, t_arit3c),
 tC3(orrs, 1900000, orrs, 3, (RR, oRR, SH), arit, t_arit3c),
 tCE(bic,  1c00000, bic,  3, (RR, oRR, SH), arit, t_arit3),
 tC3(bics, 1d00000, bics, 3, (RR, oRR, SH), arit, t_arit3),

 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
    for setting PSR flag bits.  They are obsolete in V6 and do not
    have Thumb equivalents.  */
 tCE(tst,  1100000, tst,  2, (RR, SH), cmp, t_mvn_tst),
 tC3(tsts, 1100000, tst,  2, (RR, SH), cmp, t_mvn_tst),
 CL(tstp,  110f000,       2, (RR, SH), cmp),
 tCE(cmp,  1500000, cmp,  2, (RR, SH), cmp, t_mov_cmp),
 tC3(cmps, 1500000, cmp,  2, (RR, SH), cmp, t_mov_cmp),
 CL(cmpp,  150f000,       2, (RR, SH), cmp),
 tCE(cmn,  1700000, cmn,  2, (RR, SH), cmp, t_mvn_tst),
 tC3(cmns, 1700000, cmn,  2, (RR, SH), cmp, t_mvn_tst),
 CL(cmnp,  170f000,       2, (RR, SH), cmp),

 tCE(mov,  1a00000, mov,  2, (RR, SH), mov, t_mov_cmp),
 tC3(movs, 1b00000, movs, 2, (RR, SH), mov, t_mov_cmp),
 tCE(mvn,  1e00000, mvn,  2, (RR, SH), mov, t_mvn_tst),
 tC3(mvns, 1f00000, mvns, 2, (RR, SH), mov, t_mvn_tst),

 tCE(ldr,  4100000, ldr,  2, (RR, ADDR), ldst, t_ldst),
 tC3(ldrb, 4500000, ldrb, 2, (RR, ADDR), ldst, t_ldst),
 tCE(str,  4000000, str,  2, (RR, ADDR), ldst, t_ldst),
 tC3(strb, 4400000, strb, 2, (RR, ADDR), ldst, t_ldst),

 tCE(stm,   8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
 tC3(stmia, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
 tC3(stmea, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
 tCE(ldm,   8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
 tC3(ldmia, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
 tC3(ldmfd, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),

 TCE(swi,  f000000, df00,     1, (EXPi), swi, t_swi),
 TCE(svc,  f000000, df00,     1, (EXPi), swi, t_swi),
 tCE(b,    a000000, b,        1, (EXPr), branch, t_branch),
 TCE(bl,   b000000, f000f800, 1, (EXPr), bl, t_branch23),

 tCE(adr,  28f0000, adr,  2, (RR, EXP), adr,  t_adr),
 C3(adrl,  28f0000,       2, (RR, EXP), adrl),
 tCE(nop,  1a00000, nop,  1, (oI255c), nop, t_nop),
 /* Thumb-compatibility pseudo ops.  */
 tCE(lsl,  1a00000, lsl,  3, (RR, oRR, SH), shift, t_shift),
 tC3(lsls, 1b00000, lsls, 3, (RR, oRR, SH), shift, t_shift),
 tCE(lsr,  1a00020, lsr,  3, (RR, oRR, SH), shift, t_shift),
 tC3(lsrs, 1b00020, lsrs, 3, (RR, oRR, SH), shift, t_shift),
 tCE(asr,  1a00040, asr,  3, (RR, oRR, SH), shift, t_shift),
 tC3(asrs, 1b00040, asrs, 3, (RR, oRR, SH), shift, t_shift),
 tCE(ror,  1a00060, ror,  3, (RR, oRR, SH), shift, t_shift),
 tC3(rors, 1b00060, rors, 3, (RR, oRR, SH), shift, t_shift),
 tCE(neg,  2600000, neg,  2, (RR, RR), rd_rn, t_neg),
 tC3(negs, 2700000, negs, 2, (RR, RR), rd_rn, t_neg),
 tCE(push, 92d0000, push, 1, (REGLST), push_pop, t_push_pop),
 tCE(pop,  8bd0000, pop,  1, (REGLST), push_pop, t_push_pop),

#undef THUMB_VARIANT
#define THUMB_VARIANT &arm_ext_v6
 TCE(cpy,  1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),

 /* V1 instructions with no Thumb analogue prior to V6T2.  */
#undef THUMB_VARIANT
#define THUMB_VARIANT &arm_ext_v6t2
 TCE(rsb,  0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
 TC3(rsbs, 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
 TCE(teq,  1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
 TC3(teqs, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
 CL(teqp,  130f000,           2, (RR, SH), cmp),

 TC3(ldrt,  4300000, f8500e00, 2, (RR, ADDR), ldstt, t_ldstt),
 TC3(ldrbt, 4700000, f8100e00, 2, (RR, ADDR), ldstt, t_ldstt),
 TC3(strt,  4200000, f8400e00, 2, (RR, ADDR), ldstt, t_ldstt),
 TC3(strbt, 4600000, f8000e00, 2, (RR, ADDR), ldstt, t_ldstt),

 TC3(stmdb, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
 TC3(stmfd, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),

 TC3(ldmdb, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
 TC3(ldmea, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),

 /* V1 instructions with no Thumb analogue at all.  */
 CE(rsc,  0e00000, 3, (RR, oRR, SH), arit),
 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),

 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),

#undef ARM_VARIANT
#define ARM_VARIANT &arm_ext_v2 /* ARM 2 - multiplies.  */
#undef THUMB_VARIANT
#define THUMB_VARIANT &arm_ext_v4t
 tCE(mul,  0000090, mul,  3, (RRnpc, RRnpc, oRR), mul, t_mul),
 tC3(muls, 0100090, muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),

#undef THUMB_VARIANT
#define THUMB_VARIANT &arm_ext_v6t2
 TCE(mla,  0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
 C3(mlas,  0300090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),

 /* Generic coprocessor instructions.  */
 TCE(cdp,  e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
 TCE(ldc,  c100000, ec100000, 3, (RCP, RCN, ADDR), lstc, lstc),
 TC3(ldcl, c500000, ec500000, 3, (RCP, RCN, ADDR), lstc, lstc),
 TCE(stc,  c000000, ec000000, 3, (RCP, RCN, ADDR), lstc, lstc),
 TC3(stcl, c400000, ec400000, 3, (RCP, RCN, ADDR), lstc, lstc),
 TCE(mcr,  e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
 TCE(mrc,  e100010, ee100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),

#undef ARM_VARIANT
#define ARM_VARIANT &arm_ext_v2s /* ARM 3 - swp instructions.  */
 CE(swp,  1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),

#undef ARM_VARIANT
#define ARM_VARIANT &arm_ext_v3 /* ARM 6 Status register instructions.  */
 TCE(mrs, 10f0000, f3ef8000, 2, (RR, PSR), mrs, t_mrs),
 TCE(msr, 120f000, f3808000, 2, (PSR, RR_EXi), msr, t_msr),

#undef ARM_VARIANT
#define ARM_VARIANT &arm_ext_v3m /* ARM 7M long multiplies.  */
 TCE(smull,  0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
 CM(smull,s, 0d00090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
 TCE(umull,  0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
 CM(umull,s, 0900090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
 TCE(smlal,  0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
 CM(smlal,s, 0f00090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
 TCE(umlal,  0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
 CM(umlal,s, 0b00090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),

#undef ARM_VARIANT
13157 #undef THUMB_VARIANT
13158 #define THUMB_VARIANT &arm_ext_v4t
13159 tC3(ldrh
, 01000b0
, ldrh
, 2, (RR
, ADDR
), ldstv4
, t_ldst
),
13160 tC3(strh
, 00000b0
, strh
, 2, (RR
, ADDR
), ldstv4
, t_ldst
),
13161 tC3(ldrsh
, 01000f0
, ldrsh
, 2, (RR
, ADDR
), ldstv4
, t_ldst
),
13162 tC3(ldrsb
, 01000d0
, ldrsb
, 2, (RR
, ADDR
), ldstv4
, t_ldst
),
13163 tCM(ld
,sh
, 01000f0
, ldrsh
, 2, (RR
, ADDR
), ldstv4
, t_ldst
),
13164 tCM(ld
,sb
, 01000d0
, ldrsb
, 2, (RR
, ADDR
), ldstv4
, t_ldst
),
13167 #define ARM_VARIANT &arm_ext_v4t_5
13168 /* ARM Architecture 4T. */
13169 /* Note: bx (and blx) are required on V5, even if the processor does
13170 not support Thumb. */
13171 TCE(bx
, 12fff10
, 4700, 1, (RR
), bx
, t_bx
),
13174 #define ARM_VARIANT &arm_ext_v5 /* ARM Architecture 5T. */
13175 #undef THUMB_VARIANT
13176 #define THUMB_VARIANT &arm_ext_v5t
13177 /* Note: blx has 2 variants; the .value coded here is for
13178 BLX(2). Only this variant has conditional execution. */
13179 TCE(blx
, 12fff30
, 4780, 1, (RR_EXr
), blx
, t_blx
),
13180 TUE(bkpt
, 1200070, be00
, 1, (oIffffb
), bkpt
, t_bkpt
),
13182 #undef THUMB_VARIANT
13183 #define THUMB_VARIANT &arm_ext_v6t2
13184 TCE(clz
, 16f0f10
, fab0f080
, 2, (RRnpc
, RRnpc
), rd_rm
, t_clz
),
13185 TUF(ldc2
, c100000
, fc100000
, 3, (RCP
, RCN
, ADDR
), lstc
, lstc
),
13186 TUF(ldc2l
, c500000
, fc500000
, 3, (RCP
, RCN
, ADDR
), lstc
, lstc
),
13187 TUF(stc2
, c000000
, fc000000
, 3, (RCP
, RCN
, ADDR
), lstc
, lstc
),
13188 TUF(stc2l
, c400000
, fc400000
, 3, (RCP
, RCN
, ADDR
), lstc
, lstc
),
13189 TUF(cdp2
, e000000
, fe000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
13190 TUF(mcr2
, e000010
, fe000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
13191 TUF(mrc2
, e100010
, fe100010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
13194 #define ARM_VARIANT &arm_ext_v5exp /* ARM Architecture 5TExP. */
13195 TCE(smlabb
, 1000080, fb100000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
13196 TCE(smlatb
, 10000a0
, fb100020
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
13197 TCE(smlabt
, 10000c0
, fb100010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
13198 TCE(smlatt
, 10000e0
, fb100030
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
13200 TCE(smlawb
, 1200080, fb300000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
13201 TCE(smlawt
, 12000c0
, fb300010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
13203 TCE(smlalbb
, 1400080, fbc00080
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
13204 TCE(smlaltb
, 14000a0
, fbc000a0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
13205 TCE(smlalbt
, 14000c0
, fbc00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
13206 TCE(smlaltt
, 14000e0
, fbc000b0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
13208 TCE(smulbb
, 1600080, fb10f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
13209 TCE(smultb
, 16000a0
, fb10f020
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
13210 TCE(smulbt
, 16000c0
, fb10f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
13211 TCE(smultt
, 16000e0
, fb10f030
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
13213 TCE(smulwb
, 12000a0
, fb30f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
13214 TCE(smulwt
, 12000e0
, fb30f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
13216 TCE(qadd
, 1000050, fa80f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, rd_rm_rn
),
13217 TCE(qdadd
, 1400050, fa80f090
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, rd_rm_rn
),
13218 TCE(qsub
, 1200050, fa80f0a0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, rd_rm_rn
),
13219 TCE(qdsub
, 1600050, fa80f0b0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, rd_rm_rn
),
#define ARM_VARIANT &arm_ext_v5e /* ARM Architecture 5TE.  */
 TUF(pld,  450f000, f810f000, 1, (ADDR), pld, t_pld),
 TC3(ldrd, 00000d0, e9500000, 3, (RRnpc, oRRnpc, ADDR), ldrd, t_ldstd),
 TC3(strd, 00000f0, e9400000, 3, (RRnpc, oRRnpc, ADDR), ldrd, t_ldstd),

 TCE(mcrr, c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
 TCE(mrrc, c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),

#undef ARM_VARIANT
#define ARM_VARIANT &arm_ext_v5j /* ARM Architecture 5TEJ.  */
 TCE(bxj,  12fff20, f3c08f00, 1, (RR), bxj, t_bxj),

#undef ARM_VARIANT
#define ARM_VARIANT &arm_ext_v6 /* ARM V6.  */
#undef THUMB_VARIANT
#define THUMB_VARIANT &arm_ext_v6
 TUF(cpsie, 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
 TUF(cpsid, 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
 tCE(rev,   6bf0f30, rev,   2, (RRnpc, RRnpc), rd_rm, t_rev),
 tCE(rev16, 6bf0fb0, rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
 tCE(revsh, 6ff0fb0, revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
 tCE(sxth,  6bf0070, sxth,  3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
 tCE(uxth,  6ff0070, uxth,  3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
 tCE(sxtb,  6af0070, sxtb,  3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
 tCE(uxtb,  6ef0070, uxtb,  3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
 TUF(setend, 1010000, b650, 1, (ENDI), setend, t_setend),
#undef THUMB_VARIANT
#define THUMB_VARIANT &arm_ext_v6t2
 TCE(ldrex, 1900f9f, e8500f00, 2, (RRnpc, ADDR), ldrex, t_ldrex),
 TUF(mcrr2, c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
 TUF(mrrc2, c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),

 TCE(ssat, 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar), ssat, t_ssat),
 TCE(usat, 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar), usat, t_usat),

 /* ARM V6 not included in V7M (eg. integer SIMD).  */
#undef THUMB_VARIANT
#define THUMB_VARIANT &arm_ext_v6_notm
 TUF(cps,      1020000, f3af8100, 1, (I31b), imm0, t_cps),
 TCE(pkhbt,    6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
 TCE(pkhtb,    6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
 TCE(qadd16,   6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(qadd8,    6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(qaddsubx, 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(qsub16,   6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(qsub8,    6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(qsubaddx, 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(sadd16,   6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(sadd8,    6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(saddsubx, 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(shadd16,  6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(shadd8,   6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(shaddsubx, 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(shsub16,  6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(shsub8,   6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(shsubaddx, 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(ssub16,   6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(ssub8,    6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(ssubaddx, 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uadd16,   6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uadd8,    6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uaddsubx, 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uhadd16,  6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uhadd8,   6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uhaddsubx, 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uhsub16,  6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uhsub8,   6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uhsubaddx, 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uqadd16,  6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uqadd8,   6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uqaddsubx, 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uqsub16,  6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uqsub8,   6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uqsubaddx, 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(usub16,   6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(usub8,    6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(usubaddx, 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TUF(rfeia, 8900a00, e990c000, 1, (RRw), rfe, rfe),
 UF(rfeib,  9900a00,           1, (RRw), rfe),
 UF(rfeda,  8100a00,           1, (RRw), rfe),
 TUF(rfedb, 9100a00, e810c000, 1, (RRw), rfe, rfe),
 TUF(rfefd, 8900a00, e990c000, 1, (RRw), rfe, rfe),
 UF(rfefa,  9900a00,           1, (RRw), rfe),
 UF(rfeea,  8100a00,           1, (RRw), rfe),
 TUF(rfeed, 9100a00, e810c000, 1, (RRw), rfe, rfe),
 TCE(sxtah,   6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
 TCE(sxtab16, 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
 TCE(sxtab,   6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
 TCE(sxtb16,  68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
 TCE(uxtah,   6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
 TCE(uxtab16, 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
 TCE(uxtab,   6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
 TCE(uxtb16,  6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
 TCE(sel,     6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(smlad,   7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(smladx,  7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(smlald,  7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
 TCE(smlaldx, 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
 TCE(smlsd,   7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(smlsdx,  7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(smlsld,  7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
 TCE(smlsldx, 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
 TCE(smmla,   7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(smmlar,  7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(smmls,   75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(smmlsr,  75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(smmul,   750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE(smmulr,  750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE(smuad,   700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE(smuadx,  700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE(smusd,   700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE(smusdx,  700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TUF(srsia,   8cd0500, e980c000, 1, (I31w), srs, srs),
 UF(srsib,    9cd0500,           1, (I31w), srs),
 UF(srsda,    84d0500,           1, (I31w), srs),
 TUF(srsdb,   94d0500, e800c000, 1, (I31w), srs, srs),
 TCE(ssat16,  6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
 TCE(strex,   1800f90, e8400000, 3, (RRnpc, RRnpc, ADDR), strex, t_strex),
 TCE(umaal,   0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
 TCE(usad8,   780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE(usada8,  7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(usat16,  6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),

#undef ARM_VARIANT
13347 #define ARM_VARIANT &arm_ext_v6k
13348 #undef THUMB_VARIANT
13349 #define THUMB_VARIANT &arm_ext_v6k
13350 tCE(yield
, 320f001
, yield
, 0, (), noargs
, t_hint
),
13351 tCE(wfe
, 320f002
, wfe
, 0, (), noargs
, t_hint
),
13352 tCE(wfi
, 320f003
, wfi
, 0, (), noargs
, t_hint
),
13353 tCE(sev
, 320f004
, sev
, 0, (), noargs
, t_hint
),
#undef THUMB_VARIANT
#define THUMB_VARIANT &arm_ext_v6_notm
 TCE(ldrexd, 1b00f9f, e8d0007f, 3, (RRnpc, oRRnpc, RRnpcb), ldrexd, t_ldrexd),
 TCE(strexd, 1a00f90, e8c00070, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb), strexd, t_strexd),
#undef THUMB_VARIANT
#define THUMB_VARIANT &arm_ext_v6t2
 TCE(ldrexb, 1d00f9f, e8d00f4f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
 TCE(ldrexh, 1f00f9f, e8d00f5f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
 TCE(strexb, 1c00f90, e8c00f40, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
 TCE(strexh, 1e00f90, e8c00f50, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
 TUF(clrex, 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
#undef ARM_VARIANT
#define ARM_VARIANT &arm_ext_v6z
 TCE(smc, 1600070, f7f08000, 1, (EXPi), smc, t_smc),
#undef ARM_VARIANT
#define ARM_VARIANT &arm_ext_v6t2
 TCE(bfc, 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
 TCE(bfi, 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
 TCE(sbfx, 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
 TCE(ubfx, 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
 TCE(mls, 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
 TCE(movw, 3000000, f2400000, 2, (RRnpc, Iffff), mov16, t_mov16),
 TCE(movt, 3400000, f2c00000, 2, (RRnpc, Iffff), mov16, t_mov16),
 TCE(rbit, 3ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
 TC3(ldrht, 03000b0, f8300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
 TC3(ldrsht, 03000f0, f9300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
 TC3(ldrsbt, 03000d0, f9100e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
 TC3(strht, 02000b0, f8200e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
  UT(cbnz, b900, 2, (RR, EXP), t_czb),
  UT(cbz, b100, 2, (RR, EXP), t_czb),
 /* ARM does not really have an IT instruction. */
 TUE(it, 0, bf08, 1, (COND), it, t_it),
 TUE(itt, 0, bf0c, 1, (COND), it, t_it),
 TUE(ite, 0, bf04, 1, (COND), it, t_it),
 TUE(ittt, 0, bf0e, 1, (COND), it, t_it),
 TUE(itet, 0, bf06, 1, (COND), it, t_it),
 TUE(itte, 0, bf0a, 1, (COND), it, t_it),
 TUE(itee, 0, bf02, 1, (COND), it, t_it),
 TUE(itttt, 0, bf0f, 1, (COND), it, t_it),
 TUE(itett, 0, bf07, 1, (COND), it, t_it),
 TUE(ittet, 0, bf0b, 1, (COND), it, t_it),
 TUE(iteet, 0, bf03, 1, (COND), it, t_it),
 TUE(ittte, 0, bf0d, 1, (COND), it, t_it),
 TUE(itete, 0, bf05, 1, (COND), it, t_it),
 TUE(ittee, 0, bf09, 1, (COND), it, t_it),
 TUE(iteee, 0, bf01, 1, (COND), it, t_it),
 /* Thumb2 only instructions. */
#undef ARM_VARIANT
#define ARM_VARIANT NULL
 TCE(addw, 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
 TCE(subw, 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
 TCE(tbb, 0, e8d0f000, 1, (TB), 0, t_tb),
 TCE(tbh, 0, e8d0f010, 1, (TB), 0, t_tb),
 /* Thumb-2 hardware division instructions (R and M profiles only). */
#undef THUMB_VARIANT
#define THUMB_VARIANT &arm_ext_div
 TCE(sdiv, 0, fb90f0f0, 3, (RR, oRR, RR), 0, t_div),
 TCE(udiv, 0, fbb0f0f0, 3, (RR, oRR, RR), 0, t_div),
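 /* Illustrative usage (assumption based on the operand pattern above):
    the middle register is optional (oRR), so "sdiv r0, r1, r2" is the
    full form, and "sdiv r0, r1" divides the destination register by r1.  */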
 /* ARM V7 instructions. */
#undef ARM_VARIANT
#define ARM_VARIANT &arm_ext_v7
#undef THUMB_VARIANT
#define THUMB_VARIANT &arm_ext_v7
 TUF(pli, 450f000, f910f000, 1, (ADDR), pli, t_pld),
 TCE(dbg, 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
 TUF(dmb, 57ff050, f3bf8f50, 1, (oBARRIER), barrier, t_barrier),
 TUF(dsb, 57ff040, f3bf8f40, 1, (oBARRIER), barrier, t_barrier),
 TUF(isb, 57ff060, f3bf8f60, 1, (oBARRIER), barrier, t_barrier),
#undef ARM_VARIANT
#define ARM_VARIANT &fpu_fpa_ext_v1  /* Core FPA instruction set (V1). */
 cCE(wfs, e200110, 1, (RR), rd),
 cCE(rfs, e300110, 1, (RR), rd),
 cCE(wfc, e400110, 1, (RR), rd),
 cCE(rfc, e500110, 1, (RR), rd),
 cCL(ldfs, c100100, 2, (RF, ADDR), rd_cpaddr),
 cCL(ldfd, c108100, 2, (RF, ADDR), rd_cpaddr),
 cCL(ldfe, c500100, 2, (RF, ADDR), rd_cpaddr),
 cCL(ldfp, c508100, 2, (RF, ADDR), rd_cpaddr),
 cCL(stfs, c000100, 2, (RF, ADDR), rd_cpaddr),
 cCL(stfd, c008100, 2, (RF, ADDR), rd_cpaddr),
 cCL(stfe, c400100, 2, (RF, ADDR), rd_cpaddr),
 cCL(stfp, c408100, 2, (RF, ADDR), rd_cpaddr),
 cCL(mvfs, e008100, 2, (RF, RF_IF), rd_rm),
 cCL(mvfsp, e008120, 2, (RF, RF_IF), rd_rm),
 cCL(mvfsm, e008140, 2, (RF, RF_IF), rd_rm),
 cCL(mvfsz, e008160, 2, (RF, RF_IF), rd_rm),
 cCL(mvfd, e008180, 2, (RF, RF_IF), rd_rm),
 cCL(mvfdp, e0081a0, 2, (RF, RF_IF), rd_rm),
 cCL(mvfdm, e0081c0, 2, (RF, RF_IF), rd_rm),
 cCL(mvfdz, e0081e0, 2, (RF, RF_IF), rd_rm),
 cCL(mvfe, e088100, 2, (RF, RF_IF), rd_rm),
 cCL(mvfep, e088120, 2, (RF, RF_IF), rd_rm),
 cCL(mvfem, e088140, 2, (RF, RF_IF), rd_rm),
 cCL(mvfez, e088160, 2, (RF, RF_IF), rd_rm),
 cCL(mnfs, e108100, 2, (RF, RF_IF), rd_rm),
 cCL(mnfsp, e108120, 2, (RF, RF_IF), rd_rm),
 cCL(mnfsm, e108140, 2, (RF, RF_IF), rd_rm),
 cCL(mnfsz, e108160, 2, (RF, RF_IF), rd_rm),
 cCL(mnfd, e108180, 2, (RF, RF_IF), rd_rm),
 cCL(mnfdp, e1081a0, 2, (RF, RF_IF), rd_rm),
 cCL(mnfdm, e1081c0, 2, (RF, RF_IF), rd_rm),
 cCL(mnfdz, e1081e0, 2, (RF, RF_IF), rd_rm),
 cCL(mnfe, e188100, 2, (RF, RF_IF), rd_rm),
 cCL(mnfep, e188120, 2, (RF, RF_IF), rd_rm),
 cCL(mnfem, e188140, 2, (RF, RF_IF), rd_rm),
 cCL(mnfez, e188160, 2, (RF, RF_IF), rd_rm),
 cCL(abss, e208100, 2, (RF, RF_IF), rd_rm),
 cCL(abssp, e208120, 2, (RF, RF_IF), rd_rm),
 cCL(abssm, e208140, 2, (RF, RF_IF), rd_rm),
 cCL(abssz, e208160, 2, (RF, RF_IF), rd_rm),
 cCL(absd, e208180, 2, (RF, RF_IF), rd_rm),
 cCL(absdp, e2081a0, 2, (RF, RF_IF), rd_rm),
 cCL(absdm, e2081c0, 2, (RF, RF_IF), rd_rm),
 cCL(absdz, e2081e0, 2, (RF, RF_IF), rd_rm),
 cCL(abse, e288100, 2, (RF, RF_IF), rd_rm),
 cCL(absep, e288120, 2, (RF, RF_IF), rd_rm),
 cCL(absem, e288140, 2, (RF, RF_IF), rd_rm),
 cCL(absez, e288160, 2, (RF, RF_IF), rd_rm),
 cCL(rnds, e308100, 2, (RF, RF_IF), rd_rm),
 cCL(rndsp, e308120, 2, (RF, RF_IF), rd_rm),
 cCL(rndsm, e308140, 2, (RF, RF_IF), rd_rm),
 cCL(rndsz, e308160, 2, (RF, RF_IF), rd_rm),
 cCL(rndd, e308180, 2, (RF, RF_IF), rd_rm),
 cCL(rnddp, e3081a0, 2, (RF, RF_IF), rd_rm),
 cCL(rnddm, e3081c0, 2, (RF, RF_IF), rd_rm),
 cCL(rnddz, e3081e0, 2, (RF, RF_IF), rd_rm),
 cCL(rnde, e388100, 2, (RF, RF_IF), rd_rm),
 cCL(rndep, e388120, 2, (RF, RF_IF), rd_rm),
 cCL(rndem, e388140, 2, (RF, RF_IF), rd_rm),
 cCL(rndez, e388160, 2, (RF, RF_IF), rd_rm),
 cCL(sqts, e408100, 2, (RF, RF_IF), rd_rm),
 cCL(sqtsp, e408120, 2, (RF, RF_IF), rd_rm),
 cCL(sqtsm, e408140, 2, (RF, RF_IF), rd_rm),
 cCL(sqtsz, e408160, 2, (RF, RF_IF), rd_rm),
 cCL(sqtd, e408180, 2, (RF, RF_IF), rd_rm),
 cCL(sqtdp, e4081a0, 2, (RF, RF_IF), rd_rm),
 cCL(sqtdm, e4081c0, 2, (RF, RF_IF), rd_rm),
 cCL(sqtdz, e4081e0, 2, (RF, RF_IF), rd_rm),
 cCL(sqte, e488100, 2, (RF, RF_IF), rd_rm),
 cCL(sqtep, e488120, 2, (RF, RF_IF), rd_rm),
 cCL(sqtem, e488140, 2, (RF, RF_IF), rd_rm),
 cCL(sqtez, e488160, 2, (RF, RF_IF), rd_rm),
 cCL(logs, e508100, 2, (RF, RF_IF), rd_rm),
 cCL(logsp, e508120, 2, (RF, RF_IF), rd_rm),
 cCL(logsm, e508140, 2, (RF, RF_IF), rd_rm),
 cCL(logsz, e508160, 2, (RF, RF_IF), rd_rm),
 cCL(logd, e508180, 2, (RF, RF_IF), rd_rm),
 cCL(logdp, e5081a0, 2, (RF, RF_IF), rd_rm),
 cCL(logdm, e5081c0, 2, (RF, RF_IF), rd_rm),
 cCL(logdz, e5081e0, 2, (RF, RF_IF), rd_rm),
 cCL(loge, e588100, 2, (RF, RF_IF), rd_rm),
 cCL(logep, e588120, 2, (RF, RF_IF), rd_rm),
 cCL(logem, e588140, 2, (RF, RF_IF), rd_rm),
 cCL(logez, e588160, 2, (RF, RF_IF), rd_rm),
 cCL(lgns, e608100, 2, (RF, RF_IF), rd_rm),
 cCL(lgnsp, e608120, 2, (RF, RF_IF), rd_rm),
 cCL(lgnsm, e608140, 2, (RF, RF_IF), rd_rm),
 cCL(lgnsz, e608160, 2, (RF, RF_IF), rd_rm),
 cCL(lgnd, e608180, 2, (RF, RF_IF), rd_rm),
 cCL(lgndp, e6081a0, 2, (RF, RF_IF), rd_rm),
 cCL(lgndm, e6081c0, 2, (RF, RF_IF), rd_rm),
 cCL(lgndz, e6081e0, 2, (RF, RF_IF), rd_rm),
 cCL(lgne, e688100, 2, (RF, RF_IF), rd_rm),
 cCL(lgnep, e688120, 2, (RF, RF_IF), rd_rm),
 cCL(lgnem, e688140, 2, (RF, RF_IF), rd_rm),
 cCL(lgnez, e688160, 2, (RF, RF_IF), rd_rm),
 cCL(exps, e708100, 2, (RF, RF_IF), rd_rm),
 cCL(expsp, e708120, 2, (RF, RF_IF), rd_rm),
 cCL(expsm, e708140, 2, (RF, RF_IF), rd_rm),
 cCL(expsz, e708160, 2, (RF, RF_IF), rd_rm),
 cCL(expd, e708180, 2, (RF, RF_IF), rd_rm),
 cCL(expdp, e7081a0, 2, (RF, RF_IF), rd_rm),
 cCL(expdm, e7081c0, 2, (RF, RF_IF), rd_rm),
 cCL(expdz, e7081e0, 2, (RF, RF_IF), rd_rm),
 cCL(expe, e788100, 2, (RF, RF_IF), rd_rm),
 cCL(expep, e788120, 2, (RF, RF_IF), rd_rm),
 cCL(expem, e788140, 2, (RF, RF_IF), rd_rm),
 cCL(expdz, e788160, 2, (RF, RF_IF), rd_rm),
 cCL(sins, e808100, 2, (RF, RF_IF), rd_rm),
 cCL(sinsp, e808120, 2, (RF, RF_IF), rd_rm),
 cCL(sinsm, e808140, 2, (RF, RF_IF), rd_rm),
 cCL(sinsz, e808160, 2, (RF, RF_IF), rd_rm),
 cCL(sind, e808180, 2, (RF, RF_IF), rd_rm),
 cCL(sindp, e8081a0, 2, (RF, RF_IF), rd_rm),
 cCL(sindm, e8081c0, 2, (RF, RF_IF), rd_rm),
 cCL(sindz, e8081e0, 2, (RF, RF_IF), rd_rm),
 cCL(sine, e888100, 2, (RF, RF_IF), rd_rm),
 cCL(sinep, e888120, 2, (RF, RF_IF), rd_rm),
 cCL(sinem, e888140, 2, (RF, RF_IF), rd_rm),
 cCL(sinez, e888160, 2, (RF, RF_IF), rd_rm),
 cCL(coss, e908100, 2, (RF, RF_IF), rd_rm),
 cCL(cossp, e908120, 2, (RF, RF_IF), rd_rm),
 cCL(cossm, e908140, 2, (RF, RF_IF), rd_rm),
 cCL(cossz, e908160, 2, (RF, RF_IF), rd_rm),
 cCL(cosd, e908180, 2, (RF, RF_IF), rd_rm),
 cCL(cosdp, e9081a0, 2, (RF, RF_IF), rd_rm),
 cCL(cosdm, e9081c0, 2, (RF, RF_IF), rd_rm),
 cCL(cosdz, e9081e0, 2, (RF, RF_IF), rd_rm),
 cCL(cose, e988100, 2, (RF, RF_IF), rd_rm),
 cCL(cosep, e988120, 2, (RF, RF_IF), rd_rm),
 cCL(cosem, e988140, 2, (RF, RF_IF), rd_rm),
 cCL(cosez, e988160, 2, (RF, RF_IF), rd_rm),
 cCL(tans, ea08100, 2, (RF, RF_IF), rd_rm),
 cCL(tansp, ea08120, 2, (RF, RF_IF), rd_rm),
 cCL(tansm, ea08140, 2, (RF, RF_IF), rd_rm),
 cCL(tansz, ea08160, 2, (RF, RF_IF), rd_rm),
 cCL(tand, ea08180, 2, (RF, RF_IF), rd_rm),
 cCL(tandp, ea081a0, 2, (RF, RF_IF), rd_rm),
 cCL(tandm, ea081c0, 2, (RF, RF_IF), rd_rm),
 cCL(tandz, ea081e0, 2, (RF, RF_IF), rd_rm),
 cCL(tane, ea88100, 2, (RF, RF_IF), rd_rm),
 cCL(tanep, ea88120, 2, (RF, RF_IF), rd_rm),
 cCL(tanem, ea88140, 2, (RF, RF_IF), rd_rm),
 cCL(tanez, ea88160, 2, (RF, RF_IF), rd_rm),
 cCL(asns, eb08100, 2, (RF, RF_IF), rd_rm),
 cCL(asnsp, eb08120, 2, (RF, RF_IF), rd_rm),
 cCL(asnsm, eb08140, 2, (RF, RF_IF), rd_rm),
 cCL(asnsz, eb08160, 2, (RF, RF_IF), rd_rm),
 cCL(asnd, eb08180, 2, (RF, RF_IF), rd_rm),
 cCL(asndp, eb081a0, 2, (RF, RF_IF), rd_rm),
 cCL(asndm, eb081c0, 2, (RF, RF_IF), rd_rm),
 cCL(asndz, eb081e0, 2, (RF, RF_IF), rd_rm),
 cCL(asne, eb88100, 2, (RF, RF_IF), rd_rm),
 cCL(asnep, eb88120, 2, (RF, RF_IF), rd_rm),
 cCL(asnem, eb88140, 2, (RF, RF_IF), rd_rm),
 cCL(asnez, eb88160, 2, (RF, RF_IF), rd_rm),
 cCL(acss, ec08100, 2, (RF, RF_IF), rd_rm),
 cCL(acssp, ec08120, 2, (RF, RF_IF), rd_rm),
 cCL(acssm, ec08140, 2, (RF, RF_IF), rd_rm),
 cCL(acssz, ec08160, 2, (RF, RF_IF), rd_rm),
 cCL(acsd, ec08180, 2, (RF, RF_IF), rd_rm),
 cCL(acsdp, ec081a0, 2, (RF, RF_IF), rd_rm),
 cCL(acsdm, ec081c0, 2, (RF, RF_IF), rd_rm),
 cCL(acsdz, ec081e0, 2, (RF, RF_IF), rd_rm),
 cCL(acse, ec88100, 2, (RF, RF_IF), rd_rm),
 cCL(acsep, ec88120, 2, (RF, RF_IF), rd_rm),
 cCL(acsem, ec88140, 2, (RF, RF_IF), rd_rm),
 cCL(acsez, ec88160, 2, (RF, RF_IF), rd_rm),
 cCL(atns, ed08100, 2, (RF, RF_IF), rd_rm),
 cCL(atnsp, ed08120, 2, (RF, RF_IF), rd_rm),
 cCL(atnsm, ed08140, 2, (RF, RF_IF), rd_rm),
 cCL(atnsz, ed08160, 2, (RF, RF_IF), rd_rm),
 cCL(atnd, ed08180, 2, (RF, RF_IF), rd_rm),
 cCL(atndp, ed081a0, 2, (RF, RF_IF), rd_rm),
 cCL(atndm, ed081c0, 2, (RF, RF_IF), rd_rm),
 cCL(atndz, ed081e0, 2, (RF, RF_IF), rd_rm),
 cCL(atne, ed88100, 2, (RF, RF_IF), rd_rm),
 cCL(atnep, ed88120, 2, (RF, RF_IF), rd_rm),
 cCL(atnem, ed88140, 2, (RF, RF_IF), rd_rm),
 cCL(atnez, ed88160, 2, (RF, RF_IF), rd_rm),
 cCL(urds, ee08100, 2, (RF, RF_IF), rd_rm),
 cCL(urdsp, ee08120, 2, (RF, RF_IF), rd_rm),
 cCL(urdsm, ee08140, 2, (RF, RF_IF), rd_rm),
 cCL(urdsz, ee08160, 2, (RF, RF_IF), rd_rm),
 cCL(urdd, ee08180, 2, (RF, RF_IF), rd_rm),
 cCL(urddp, ee081a0, 2, (RF, RF_IF), rd_rm),
 cCL(urddm, ee081c0, 2, (RF, RF_IF), rd_rm),
 cCL(urddz, ee081e0, 2, (RF, RF_IF), rd_rm),
 cCL(urde, ee88100, 2, (RF, RF_IF), rd_rm),
 cCL(urdep, ee88120, 2, (RF, RF_IF), rd_rm),
 cCL(urdem, ee88140, 2, (RF, RF_IF), rd_rm),
 cCL(urdez, ee88160, 2, (RF, RF_IF), rd_rm),
 cCL(nrms, ef08100, 2, (RF, RF_IF), rd_rm),
 cCL(nrmsp, ef08120, 2, (RF, RF_IF), rd_rm),
 cCL(nrmsm, ef08140, 2, (RF, RF_IF), rd_rm),
 cCL(nrmsz, ef08160, 2, (RF, RF_IF), rd_rm),
 cCL(nrmd, ef08180, 2, (RF, RF_IF), rd_rm),
 cCL(nrmdp, ef081a0, 2, (RF, RF_IF), rd_rm),
 cCL(nrmdm, ef081c0, 2, (RF, RF_IF), rd_rm),
 cCL(nrmdz, ef081e0, 2, (RF, RF_IF), rd_rm),
 cCL(nrme, ef88100, 2, (RF, RF_IF), rd_rm),
 cCL(nrmep, ef88120, 2, (RF, RF_IF), rd_rm),
 cCL(nrmem, ef88140, 2, (RF, RF_IF), rd_rm),
 cCL(nrmez, ef88160, 2, (RF, RF_IF), rd_rm),
 cCL(adfs, e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfsp, e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfsm, e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfsz, e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfd, e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfdp, e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfdm, e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfdz, e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfe, e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfep, e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfem, e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfez, e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufs, e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufsp, e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufsm, e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufsz, e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufd, e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufdp, e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufdm, e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufdz, e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufe, e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufep, e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufem, e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufez, e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfs, e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfsp, e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfsm, e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfsz, e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfd, e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfdp, e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfdm, e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfdz, e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfe, e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfep, e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfem, e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfez, e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufs, e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufsp, e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufsm, e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufsz, e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufd, e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufdp, e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufdm, e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufdz, e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufe, e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufep, e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufem, e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufez, e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfs, e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfsp, e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfsm, e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfsz, e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfd, e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfdp, e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfdm, e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfdz, e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfe, e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfep, e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfem, e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfez, e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfs, e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfsp, e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfsm, e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfsz, e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfd, e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfdp, e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfdm, e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfdz, e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfe, e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfep, e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfem, e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfez, e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(pows, e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powsp, e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powsm, e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powsz, e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powd, e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powdp, e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powdm, e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powdz, e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powe, e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powep, e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powem, e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powez, e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpws, e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwsp, e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwsm, e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwsz, e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwd, e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwdp, e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwdm, e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwdz, e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwe, e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwep, e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwem, e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwez, e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfs, e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfsp, e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfsm, e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfsz, e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfd, e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfdp, e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfdm, e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfdz, e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfe, e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfep, e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfem, e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfez, e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmls, e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmlsp, e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmlsm, e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmlsz, e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmld, e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmldp, e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmldm, e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmldz, e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmle, e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmlep, e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmlem, e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmlez, e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvs, ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvsp, ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvsm, ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvsz, ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvd, ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvdp, ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvdm, ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvdz, ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdve, ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvep, ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvem, ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvez, ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frds, eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frdsp, eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frdsm, eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frdsz, eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frdd, eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frddp, eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frddm, eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frddz, eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frde, eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frdep, eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frdem, eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frdez, eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(pols, ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(polsp, ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(polsm, ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(polsz, ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(pold, ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(poldp, ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(poldm, ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(poldz, ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(pole, ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(polep, ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(polem, ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(polez, ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCE(cmf, e90f110, 2, (RF, RF_IF), fpa_cmp),
 C3E(cmfe, ed0f110, 2, (RF, RF_IF), fpa_cmp),
 cCE(cnf, eb0f110, 2, (RF, RF_IF), fpa_cmp),
 C3E(cnfe, ef0f110, 2, (RF, RF_IF), fpa_cmp),
 cCL(flts, e000110, 2, (RF, RR), rn_rd),
 cCL(fltsp, e000130, 2, (RF, RR), rn_rd),
 cCL(fltsm, e000150, 2, (RF, RR), rn_rd),
 cCL(fltsz, e000170, 2, (RF, RR), rn_rd),
 cCL(fltd, e000190, 2, (RF, RR), rn_rd),
 cCL(fltdp, e0001b0, 2, (RF, RR), rn_rd),
 cCL(fltdm, e0001d0, 2, (RF, RR), rn_rd),
 cCL(fltdz, e0001f0, 2, (RF, RR), rn_rd),
 cCL(flte, e080110, 2, (RF, RR), rn_rd),
 cCL(fltep, e080130, 2, (RF, RR), rn_rd),
 cCL(fltem, e080150, 2, (RF, RR), rn_rd),
 cCL(fltez, e080170, 2, (RF, RR), rn_rd),
 /* The implementation of the FIX instruction is broken on some
    assemblers, in that it accepts a precision specifier as well as a
    rounding specifier, despite the fact that this is meaningless.
    To be more compatible, we accept it as well, though of course it
    does not set any bits. */
 cCE(fix, e100110, 2, (RR, RF), rd_rm),
 cCL(fixp, e100130, 2, (RR, RF), rd_rm),
 cCL(fixm, e100150, 2, (RR, RF), rd_rm),
 cCL(fixz, e100170, 2, (RR, RF), rd_rm),
 cCL(fixsp, e100130, 2, (RR, RF), rd_rm),
 cCL(fixsm, e100150, 2, (RR, RF), rd_rm),
 cCL(fixsz, e100170, 2, (RR, RF), rd_rm),
 cCL(fixdp, e100130, 2, (RR, RF), rd_rm),
 cCL(fixdm, e100150, 2, (RR, RF), rd_rm),
 cCL(fixdz, e100170, 2, (RR, RF), rd_rm),
 cCL(fixep, e100130, 2, (RR, RF), rd_rm),
 cCL(fixem, e100150, 2, (RR, RF), rd_rm),
 cCL(fixez, e100170, 2, (RR, RF), rd_rm),
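 /* Illustrative consequence of the above: "fixsp r0, f1" is accepted and
    assembles to the same opcode as "fixp r0, f1" (both e100130), since the
    precision letter sets no bits.  */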
 /* Instructions that were new with the real FPA, call them V2. */
#undef ARM_VARIANT
#define ARM_VARIANT &fpu_fpa_ext_v2
 cCE(lfm, c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
 cCL(lfmfd, c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
 cCL(lfmea, d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
 cCE(sfm, c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
 cCL(sfmfd, d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
 cCL(sfmea, c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
#undef ARM_VARIANT
#define ARM_VARIANT &fpu_vfp_ext_v1xd  /* VFP V1xD (single precision). */
 /* Moves and type conversions. */
 cCE(fcpys, eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(fmrs, e100a10, 2, (RR, RVS), vfp_reg_from_sp),
 cCE(fmsr, e000a10, 2, (RVS, RR), vfp_sp_from_reg),
 cCE(fmstat, ef1fa10, 0, (), noargs),
 cCE(fsitos, eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(fuitos, eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(ftosis, ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(ftosizs, ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(ftouis, ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(ftouizs, ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(fmrx, ef00a10, 2, (RR, RVC), rd_rn),
 cCE(fmxr, ee00a10, 2, (RVC, RR), rn_rd),
 /* Memory operations. */
 cCE(flds, d100a00, 2, (RVS, ADDR), vfp_sp_ldst),
 cCE(fsts, d000a00, 2, (RVS, ADDR), vfp_sp_ldst),
 cCE(fldmias, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
 cCE(fldmfds, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
 cCE(fldmdbs, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
 cCE(fldmeas, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
 cCE(fldmiax, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
 cCE(fldmfdx, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
 cCE(fldmdbx, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
 cCE(fldmeax, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
 cCE(fstmias, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
 cCE(fstmeas, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
 cCE(fstmdbs, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
 cCE(fstmfds, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
 cCE(fstmiax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
 cCE(fstmeax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
 cCE(fstmdbx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
 cCE(fstmfdx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
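 /* Illustrative note: the FD/EA mnemonics above are stack-style aliases that
    share opcodes with the IA/DB forms, e.g. fldmfds and fldmias both use
    c900a00, so "fldmfds sp!, {s0-s3}" is just the full-descending spelling of
    the increment-after load.  */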
 /* Monadic operations. */
 cCE(fabss, eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(fnegs, eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(fsqrts, eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
 /* Dyadic operations. */
 cCE(fadds, e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
 cCE(fsubs, e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
 cCE(fmuls, e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
 cCE(fdivs, e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
 cCE(fmacs, e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
 cCE(fmscs, e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
 cCE(fnmuls, e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
 cCE(fnmacs, e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
 cCE(fnmscs, e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
 cCE(fcmps, eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(fcmpzs, eb50a40, 1, (RVS), vfp_sp_compare_z),
 cCE(fcmpes, eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(fcmpezs, eb50ac0, 1, (RVS), vfp_sp_compare_z),
#undef ARM_VARIANT
#define ARM_VARIANT &fpu_vfp_ext_v1  /* VFP V1 (Double precision). */
 /* Moves and type conversions. */
 cCE(fcpyd, eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
 cCE(fcvtds, eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
 cCE(fcvtsd, eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
 cCE(fmdhr, e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
 cCE(fmdlr, e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
 cCE(fmrdh, e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
 cCE(fmrdl, e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
 cCE(fsitod, eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
 cCE(fuitod, eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
 cCE(ftosid, ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
 cCE(ftosizd, ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
 cCE(ftouid, ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
 cCE(ftouizd, ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
 /* Memory operations. */
 cCE(fldd, d100b00, 2, (RVD, ADDR), vfp_dp_ldst),
 cCE(fstd, d000b00, 2, (RVD, ADDR), vfp_dp_ldst),
 cCE(fldmiad, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
 cCE(fldmfdd, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
 cCE(fldmdbd, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
 cCE(fldmead, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
 cCE(fstmiad, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
 cCE(fstmead, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
 cCE(fstmdbd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
 cCE(fstmfdd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
 /* Monadic operations. */
 cCE(fabsd, eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
 cCE(fnegd, eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
 cCE(fsqrtd, eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
 /* Dyadic operations. */
 cCE(faddd, e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
 cCE(fsubd, e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
 cCE(fmuld, e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
 cCE(fdivd, e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
 cCE(fmacd, e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
 cCE(fmscd, e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
 cCE(fnmuld, e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
 cCE(fnmacd, e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
 cCE(fnmscd, e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
 cCE(fcmpd, eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
 cCE(fcmpzd, eb50b40, 1, (RVD), vfp_dp_rd),
 cCE(fcmped, eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
 cCE(fcmpezd, eb50bc0, 1, (RVD), vfp_dp_rd),
#undef ARM_VARIANT
#define ARM_VARIANT &fpu_vfp_ext_v2
 cCE(fmsrr, c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
 cCE(fmrrs, c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
 cCE(fmdrr, c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
 cCE(fmrrd, c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
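 /* Illustrative usage (not from the original comments): "fmdrr d0, r0, r1"
    moves a core register pair into one double-precision register, and
    "fmrrd r0, r1, d0" performs the reverse transfer.  */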
#undef THUMB_VARIANT
#define THUMB_VARIANT &fpu_neon_ext_v1
#undef ARM_VARIANT
#define ARM_VARIANT &fpu_neon_ext_v1
 /* Data processing with three registers of the same length. */
 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
 /* If not immediate, fall back to neon_dyadic_i64_su.
    shl_imm should accept I8 I16 I32 I64,
    qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
 nUF(vshl, vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
 nUF(vshlq, vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
 nUF(vqshl, vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
 nUF(vqshlq, vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
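 /* Illustrative examples of the split described above: "vshl.i32 d0, d1, #3"
    takes the immediate (shl_imm) path, while the register form
    "vshl.s32 d0, d1, d2" falls back to neon_dyadic_i64_su.  */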
 /* Logic ops, types optional & ignored. */
 nUF(vand, vand, 2, (RNDQ, NILO), neon_logic),
 nUF(vandq, vand, 2, (RNQ, NILO), neon_logic),
 nUF(vbic, vbic, 2, (RNDQ, NILO), neon_logic),
 nUF(vbicq, vbic, 2, (RNQ, NILO), neon_logic),
 nUF(vorr, vorr, 2, (RNDQ, NILO), neon_logic),
 nUF(vorrq, vorr, 2, (RNQ, NILO), neon_logic),
 nUF(vorn, vorn, 2, (RNDQ, NILO), neon_logic),
 nUF(vornq, vorn, 2, (RNQ, NILO), neon_logic),
 nUF(veor, veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
 nUF(veorq, veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
 /* Bitfield ops, untyped. */
 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
 nUF(vabd, vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
 nUF(vabdq, vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
 nUF(vmax, vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
 nUF(vmaxq, vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
 nUF(vmin, vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
 nUF(vminq, vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
    back to neon_dyadic_if_su. */
 nUF(vcge, vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
 nUF(vcgeq, vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
 nUF(vcgt, vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
 nUF(vcgtq, vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
 nUF(vclt, vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
 nUF(vcltq, vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
 nUF(vcle, vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
 nUF(vcleq, vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
 /* Comparison. Type I8 I16 I32 F32. Non-immediate -> neon_dyadic_if_i. */
 nUF(vceq, vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
 nUF(vceqq, vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
 /* As above, D registers only. */
 nUF(vpmax, vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
 nUF(vpmin, vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
 /* Int and float variants, signedness unimportant. */
 /* If not scalar, fall back to neon_dyadic_if_i. */
 nUF(vmla, vmla, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_mac_maybe_scalar),
 nUF(vmlaq, vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
 nUF(vmls, vmls, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_mac_maybe_scalar),
 nUF(vmlsq, vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
 nUF(vpadd, vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
 /* Add/sub take types I8 I16 I32 I64 F32. */
 nUF(vadd, vadd, 3, (RNDQ, oRNDQ, RNDQ), neon_addsub_if_i),
 nUF(vaddq, vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
 nUF(vsub, vsub, 3, (RNDQ, oRNDQ, RNDQ), neon_addsub_if_i),
 nUF(vsubq, vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
 /* vtst takes sizes 8, 16, 32. */
 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
 /* VMUL takes I8 I16 I32 F32 P8. */
 nUF(vmul, vmul, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_mul),
 nUF(vmulq, vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
 /* VQD{R}MULH takes S16 S32. */
 nUF(vqdmulh, vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
 nUF(vqdmulhq, vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
 nUF(vqrdmulh, vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
 nUF(vqrdmulhq, vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
 NUF(vaclt, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
 NUF(vacltq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
 NUF(vacle, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
 NUF(vacleq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
 /* Two address, int/float. Types S8 S16 S32 F32. */
 NUF(vabs, 1b10300, 2, (RNDQ, RNDQ), neon_abs_neg),
 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
 NUF(vneg, 1b10380, 2, (RNDQ, RNDQ), neon_abs_neg),
 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
 /* Data processing with two registers and a shift amount. */
 /* Right shifts, and variants with rounding.
    Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
 /* Shift and insert. Sizes accepted 8 16 32 64. */
 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
 /* Right shift immediate, saturating & narrowing, with rounding variants.
    Types accepted S16 S32 S64 U16 U32 U64. */
 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
 /* As above, unsigned. Types accepted S16 S32 S64. */
 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
 /* Right shift narrowing. Types accepted I16 I32 I64. */
 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
 nUF(vshll, vshll, 3, (RNQ, RND, I32), neon_shll),
 /* CVT with optional immediate for fixed-point variant. */
 nUF(vcvt, vcvt, 3, (RNDQ, RNDQ, oI32b), neon_cvt),
 nUF(vcvtq, vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
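 /* Illustrative usage: "vcvt.s32.f32 d0, d0" is the plain conversion, while
    "vcvt.s32.f32 d0, d0, #16" uses the optional immediate to select the
    fixed-point variant with 16 fraction bits.  */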
 /* One register and an immediate value. All encoding special-cased! */
 NCE(vmov, 0, 1, (VMOV), neon_mov),
 NCE(vmovq, 0, 1, (VMOV), neon_mov),
 nUF(vmvn, vmvn, 2, (RNDQ, RNDQ_IMVNb), neon_mvn),
 nUF(vmvnq, vmvn, 2, (RNQ, RNDQ_IMVNb), neon_mvn),
 /* Data processing, three registers of different lengths. */
 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
 /* If not scalar, fall back to neon_dyadic_long.
    Vector types as above, scalar types S16 S32 U16 U32. */
 nUF(vmlal, vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
 nUF(vmlsl, vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
 /* Dyadic, narrowing insns. Types I16 I32 I64. */
 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
 /* Saturating doubling multiplies. Types S16 S32. */
 nUF(vqdmlal, vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
 nUF(vqdmlsl, vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
 nUF(vqdmull, vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
    S16 S32 U16 U32. */
 nUF(vmull, vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
 /* Extract. Size 8. */
 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I7), neon_ext),
 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I7), neon_ext),
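 /* Illustrative usage: "vext.8 d0, d1, d2, #3" extracts bytes starting at
    index 3 from the concatenation of d1 and d2; the I7 operand above is that
    byte index.  */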
 /* Two registers, miscellaneous. */
 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
 /* Vector replicate. Sizes 8 16 32. */
 nCE(vdup, vdup, 2, (RNDQ, RR_RNSC), neon_dup),
 nCE(vdupq, vdup, 2, (RNQ, RR_RNSC), neon_dup),
 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
 /* VMOVN. Types I16 I32 I64. */
 nUF(vmovn, vmovn, 2, (RND, RNQ), neon_movn),
 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
 nUF(vqmovn, vqmovn, 2, (RND, RNQ), neon_qmovn),
 /* VQMOVUN. Types S16 S32 S64. */
 nUF(vqmovun, vqmovun, 2, (RND, RNQ), neon_qmovun),
 /* VZIP / VUZP. Sizes 8 16 32. */
 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
 /* VQABS / VQNEG. Types S8 S16 S32. */
 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
 /* Reciprocal estimates. Types U32 F32. */
 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
 /* VCLS. Types S8 S16 S32. */
 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
 /* VCLZ. Types I8 I16 I32. */
 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
 /* VCNT. Size 8. */
 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
 /* Two address, untyped. */
 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
 /* VTRN. Sizes 8 16 32. */
 nUF(vtrn, vtrn, 2, (RNDQ, RNDQ), neon_trn),
 nUF(vtrnq, vtrn, 2, (RNQ, RNQ), neon_trn),
 /* Table lookup. Size 8. */
 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
#undef THUMB_VARIANT
#define THUMB_VARIANT &fpu_vfp_v3_or_neon_ext
#undef ARM_VARIANT
#define ARM_VARIANT &fpu_vfp_v3_or_neon_ext
 /* Load/store instructions. Available in Neon or VFPv3. */
 NCE(vldm, c900b00, 2, (RRw, NRDLST), neon_ldm_stm),
 NCE(vldmia, c900b00, 2, (RRw, NRDLST), neon_ldm_stm),
 NCE(vldmdb, d100b00, 2, (RRw, NRDLST), neon_ldm_stm),
 NCE(vstm, c800b00, 2, (RRw, NRDLST), neon_ldm_stm),
 NCE(vstmia, c800b00, 2, (RRw, NRDLST), neon_ldm_stm),
 NCE(vstmdb, d000b00, 2, (RRw, NRDLST), neon_ldm_stm),
 NCE(vldr, d100b00, 2, (RND, ADDR), neon_ldr_str),
 NCE(vstr, d000b00, 2, (RND, ADDR), neon_ldr_str),
 /* Neon element/structure load/store. */
 nUF(vld1, vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vst1, vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vld2, vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vst2, vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vld3, vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vst3, vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vld4, vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vst4, vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
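 /* Illustrative usage of the element/structure forms: "vld1.32 {d0, d1}, [r0]"
    and "vst4.8 {d0-d3}, [r1]!" are parsed through the NSTRLST register-list
    operand above.  */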
#undef THUMB_VARIANT
#define THUMB_VARIANT &fpu_vfp_ext_v3
#undef ARM_VARIANT
#define ARM_VARIANT &fpu_vfp_ext_v3
 cCE(fconsts, eb00a00, 2, (RVS, I255), vfp_sp_const),
 cCE(fconstd, eb00b00, 2, (RVD, I255), vfp_dp_const),
 cCE(fshtos, eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
 cCE(fshtod, eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
 cCE(fsltos, eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
 cCE(fsltod, eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
 cCE(fuhtos, ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
 cCE(fuhtod, ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
 cCE(fultos, ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
 cCE(fultod, ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
 cCE(ftoshs, ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
 cCE(ftoshd, ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
 cCE(ftosls, ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
 cCE(ftosld, ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
 cCE(ftouhs, ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
 cCE(ftouhd, ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
 cCE(ftouls, ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
 cCE(ftould, ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
#undef THUMB_VARIANT
#undef ARM_VARIANT
#define ARM_VARIANT &arm_cext_xscale  /* Intel XScale extensions. */
 cCE(mia, e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
 cCE(miaph, e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
 cCE(miabb, e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
 cCE(miabt, e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
 cCE(miatb, e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
 cCE(miatt, e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
 cCE(mar, c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
 cCE(mra, c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
#undef ARM_VARIANT
#define ARM_VARIANT &arm_cext_iwmmxt  /* Intel Wireless MMX technology. */
 cCE(tandcb, e13f130, 1, (RR), iwmmxt_tandorc),
 cCE(tandch, e53f130, 1, (RR), iwmmxt_tandorc),
 cCE(tandcw, e93f130, 1, (RR), iwmmxt_tandorc),
 cCE(tbcstb, e400010, 2, (RIWR, RR), rn_rd),
 cCE(tbcsth, e400050, 2, (RIWR, RR), rn_rd),
 cCE(tbcstw, e400090, 2, (RIWR, RR), rn_rd),
 cCE(textrcb, e130170, 2, (RR, I7), iwmmxt_textrc),
 cCE(textrch, e530170, 2, (RR, I7), iwmmxt_textrc),
 cCE(textrcw, e930170, 2, (RR, I7), iwmmxt_textrc),
 cCE(textrmub, e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
 cCE(textrmuh, e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
 cCE(textrmuw, e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
 cCE(textrmsb, e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
 cCE(textrmsh, e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
 cCE(textrmsw, e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
 cCE(tinsrb, e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
 cCE(tinsrh, e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
 cCE(tinsrw, e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
 cCE(tmcr, e000110, 2, (RIWC, RR), rn_rd),
 cCE(tmcrr, c400000, 3, (RIWR, RR, RR), rm_rd_rn),
 cCE(tmia, e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
 cCE(tmiaph, e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
 cCE(tmiabb, e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
 cCE(tmiabt, e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
 cCE(tmiatb, e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
 cCE(tmiatt, e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
 cCE(tmovmskb, e100030, 2, (RR, RIWR), rd_rn),
 cCE(tmovmskh, e500030, 2, (RR, RIWR), rd_rn),
 cCE(tmovmskw, e900030, 2, (RR, RIWR), rd_rn),
 cCE(tmrc, e100110, 2, (RR, RIWC), rd_rn),
 cCE(tmrrc, c500000, 3, (RR, RR, RIWR), rd_rn_rm),
 cCE(torcb, e13f150, 1, (RR), iwmmxt_tandorc),
 cCE(torch, e53f150, 1, (RR), iwmmxt_tandorc),
 cCE(torcw, e93f150, 1, (RR), iwmmxt_tandorc),
 cCE(waccb, e0001c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wacch, e4001c0, 2, (RIWR, RIWR), rd_rn),
 cCE(waccw, e8001c0, 2, (RIWR, RIWR), rd_rn),
 cCE(waddbss, e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddb, e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddbus, e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddhss, e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddh, e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddhus, e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddwss, eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddw, e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddwus, e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waligni, e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
 cCE(walignr0, e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(walignr1, e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(walignr2, ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(walignr3, eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wand, e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wandn, e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wavg2b, e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wavg2br, e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wavg2h, ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wavg2hr, ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpeqb, e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpeqh, e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpeqw, e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpgtub, e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpgtuh, e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpgtuw, e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpgtsb, e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpgtsh, e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpgtsw, eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wldrb, c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
 cCE(wldrh, c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
 cCE(wldrw, c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
 cCE(wldrd, c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
 cCE(wmacs, e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmacsz, e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmacu, e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmacuz, e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmadds, ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaddu, e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaxsb, e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaxsh, e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaxsw, ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaxub, e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaxuh, e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaxuw, e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wminsb, e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wminsh, e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wminsw, eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wminub, e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wminuh, e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wminuw, e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmov, e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
 cCE(wmulsm, e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulsl, e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulum, e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulul, e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wor, e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wpackhss, e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wpackhus, e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wpackwss, eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wpackwus, e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wpackdss, ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wpackdus, ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wrorh, e700040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wrorhg, e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wrorw, eb00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wrorwg, eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wrord, ef00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wrordg, ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wsadb, e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsadbz, e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsadh, e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsadhz, e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wshufh, e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
14408 cCE(wsllh
, e500040
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14409 cCE(wsllhg
, e500148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
14410 cCE(wsllw
, e900040
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14411 cCE(wsllwg
, e900148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
14412 cCE(wslld
, ed00040
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14413 cCE(wslldg
, ed00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
14414 cCE(wsrah
, e400040
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14415 cCE(wsrahg
, e400148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
14416 cCE(wsraw
, e800040
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14417 cCE(wsrawg
, e800148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
14418 cCE(wsrad
, ec00040
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14419 cCE(wsradg
, ec00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
14420 cCE(wsrlh
, e600040
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14421 cCE(wsrlhg
, e600148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
14422 cCE(wsrlw
, ea00040
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14423 cCE(wsrlwg
, ea00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
14424 cCE(wsrld
, ee00040
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14425 cCE(wsrldg
, ee00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
14426 cCE(wstrb
, c000000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
14427 cCE(wstrh
, c400000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
14428 cCE(wstrw
, c000100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
14429 cCE(wstrd
, c400100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
14430 cCE(wsubbss
, e3001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14431 cCE(wsubb
, e0001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14432 cCE(wsubbus
, e1001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14433 cCE(wsubhss
, e7001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14434 cCE(wsubh
, e4001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14435 cCE(wsubhus
, e5001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14436 cCE(wsubwss
, eb001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14437 cCE(wsubw
, e8001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14438 cCE(wsubwus
, e9001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14439 cCE(wunpckehub
,e0000c0
, 2, (RIWR
, RIWR
), rd_rn
),
14440 cCE(wunpckehuh
,e4000c0
, 2, (RIWR
, RIWR
), rd_rn
),
14441 cCE(wunpckehuw
,e8000c0
, 2, (RIWR
, RIWR
), rd_rn
),
14442 cCE(wunpckehsb
,e2000c0
, 2, (RIWR
, RIWR
), rd_rn
),
14443 cCE(wunpckehsh
,e6000c0
, 2, (RIWR
, RIWR
), rd_rn
),
14444 cCE(wunpckehsw
,ea000c0
, 2, (RIWR
, RIWR
), rd_rn
),
14445 cCE(wunpckihb
, e1000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14446 cCE(wunpckihh
, e5000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14447 cCE(wunpckihw
, e9000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14448 cCE(wunpckelub
,e0000e0
, 2, (RIWR
, RIWR
), rd_rn
),
14449 cCE(wunpckeluh
,e4000e0
, 2, (RIWR
, RIWR
), rd_rn
),
14450 cCE(wunpckeluw
,e8000e0
, 2, (RIWR
, RIWR
), rd_rn
),
14451 cCE(wunpckelsb
,e2000e0
, 2, (RIWR
, RIWR
), rd_rn
),
14452 cCE(wunpckelsh
,e6000e0
, 2, (RIWR
, RIWR
), rd_rn
),
14453 cCE(wunpckelsw
,ea000e0
, 2, (RIWR
, RIWR
), rd_rn
),
14454 cCE(wunpckilb
, e1000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14455 cCE(wunpckilh
, e5000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14456 cCE(wunpckilw
, e9000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14457 cCE(wxor
, e100000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14458 cCE(wzero
, e300000
, 1, (RIWR
), iwmmxt_wzero
),
#define ARM_VARIANT &arm_cext_maverick /* Cirrus Maverick instructions.  */
 cCE(cfldrs, c100400, 2, (RMF, ADDR), rd_cpaddr),
 cCE(cfldrd, c500400, 2, (RMD, ADDR), rd_cpaddr),
 cCE(cfldr32, c100500, 2, (RMFX, ADDR), rd_cpaddr),
 cCE(cfldr64, c500500, 2, (RMDX, ADDR), rd_cpaddr),
 cCE(cfstrs, c000400, 2, (RMF, ADDR), rd_cpaddr),
 cCE(cfstrd, c400400, 2, (RMD, ADDR), rd_cpaddr),
 cCE(cfstr32, c000500, 2, (RMFX, ADDR), rd_cpaddr),
 cCE(cfstr64, c400500, 2, (RMDX, ADDR), rd_cpaddr),
 cCE(cfmvsr, e000450, 2, (RMF, RR), rn_rd),
 cCE(cfmvrs, e100450, 2, (RR, RMF), rd_rn),
 cCE(cfmvdlr, e000410, 2, (RMD, RR), rn_rd),
 cCE(cfmvrdl, e100410, 2, (RR, RMD), rd_rn),
 cCE(cfmvdhr, e000430, 2, (RMD, RR), rn_rd),
 cCE(cfmvrdh, e100430, 2, (RR, RMD), rd_rn),
 cCE(cfmv64lr, e000510, 2, (RMDX, RR), rn_rd),
 cCE(cfmvr64l, e100510, 2, (RR, RMDX), rd_rn),
 cCE(cfmv64hr, e000530, 2, (RMDX, RR), rn_rd),
 cCE(cfmvr64h, e100530, 2, (RR, RMDX), rd_rn),
 cCE(cfmval32, e200440, 2, (RMAX, RMFX), rd_rn),
 cCE(cfmv32al, e100440, 2, (RMFX, RMAX), rd_rn),
 cCE(cfmvam32, e200460, 2, (RMAX, RMFX), rd_rn),
 cCE(cfmv32am, e100460, 2, (RMFX, RMAX), rd_rn),
 cCE(cfmvah32, e200480, 2, (RMAX, RMFX), rd_rn),
 cCE(cfmv32ah, e100480, 2, (RMFX, RMAX), rd_rn),
 cCE(cfmva32, e2004a0, 2, (RMAX, RMFX), rd_rn),
 cCE(cfmv32a, e1004a0, 2, (RMFX, RMAX), rd_rn),
 cCE(cfmva64, e2004c0, 2, (RMAX, RMDX), rd_rn),
 cCE(cfmv64a, e1004c0, 2, (RMDX, RMAX), rd_rn),
 cCE(cfmvsc32, e2004e0, 2, (RMDS, RMDX), mav_dspsc),
 cCE(cfmv32sc, e1004e0, 2, (RMDX, RMDS), rd),
 cCE(cfcpys, e000400, 2, (RMF, RMF), rd_rn),
 cCE(cfcpyd, e000420, 2, (RMD, RMD), rd_rn),
 cCE(cfcvtsd, e000460, 2, (RMD, RMF), rd_rn),
 cCE(cfcvtds, e000440, 2, (RMF, RMD), rd_rn),
 cCE(cfcvt32s, e000480, 2, (RMF, RMFX), rd_rn),
 cCE(cfcvt32d, e0004a0, 2, (RMD, RMFX), rd_rn),
 cCE(cfcvt64s, e0004c0, 2, (RMF, RMDX), rd_rn),
 cCE(cfcvt64d, e0004e0, 2, (RMD, RMDX), rd_rn),
 cCE(cfcvts32, e100580, 2, (RMFX, RMF), rd_rn),
 cCE(cfcvtd32, e1005a0, 2, (RMFX, RMD), rd_rn),
 cCE(cftruncs32, e1005c0, 2, (RMFX, RMF), rd_rn),
 cCE(cftruncd32, e1005e0, 2, (RMFX, RMD), rd_rn),
 cCE(cfrshl32, e000550, 3, (RMFX, RMFX, RR), mav_triple),
 cCE(cfrshl64, e000570, 3, (RMDX, RMDX, RR), mav_triple),
 cCE(cfsh32, e000500, 3, (RMFX, RMFX, I63s), mav_shift),
 cCE(cfsh64, e200500, 3, (RMDX, RMDX, I63s), mav_shift),
 cCE(cfcmps, e100490, 3, (RR, RMF, RMF), rd_rn_rm),
 cCE(cfcmpd, e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
 cCE(cfcmp32, e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
 cCE(cfcmp64, e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
 cCE(cfabss, e300400, 2, (RMF, RMF), rd_rn),
 cCE(cfabsd, e300420, 2, (RMD, RMD), rd_rn),
 cCE(cfnegs, e300440, 2, (RMF, RMF), rd_rn),
 cCE(cfnegd, e300460, 2, (RMD, RMD), rd_rn),
 cCE(cfadds, e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
 cCE(cfaddd, e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
 cCE(cfsubs, e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
 cCE(cfsubd, e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
 cCE(cfmuls, e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
 cCE(cfmuld, e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
 cCE(cfabs32, e300500, 2, (RMFX, RMFX), rd_rn),
 cCE(cfabs64, e300520, 2, (RMDX, RMDX), rd_rn),
 cCE(cfneg32, e300540, 2, (RMFX, RMFX), rd_rn),
 cCE(cfneg64, e300560, 2, (RMDX, RMDX), rd_rn),
 cCE(cfadd32, e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
 cCE(cfadd64, e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
 cCE(cfsub32, e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
 cCE(cfsub64, e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
 cCE(cfmul32, e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
 cCE(cfmul64, e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
 cCE(cfmac32, e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
 cCE(cfmsc32, e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
 cCE(cfmadd32, e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
 cCE(cfmsub32, e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
 cCE(cfmadda32, e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
 cCE(cfmsuba32, e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
#undef THUMB_VARIANT
/* MD interface: bits in the object file.  */

/* Turn an integer of n bytes (in val) into a stream of bytes appropriate
   for use in the a.out file, and store them in the array pointed to by buf.
   This knows about the endian-ness of the target machine and does
   THE RIGHT THING, whatever it is.  Possible values for n are 1 (byte),
   2 (short) and 4 (long).  Floating numbers are put out as a series of
   LITTLENUMS (shorts, here at least).  */

md_number_to_chars (char * buf, valueT val, int n)
  if (target_big_endian)
    number_to_chars_bigendian (buf, val, n);
    number_to_chars_littleendian (buf, val, n);
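/* Illustrative note (not part of the original source): for n == 4 and
   val == 0x12345678, a little-endian target writes the bytes
   0x78 0x56 0x34 0x12 into buf, while a big-endian target writes
   0x12 0x34 0x56 0x78.  */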
md_chars_to_number (char * buf, int n)
  unsigned char * where = (unsigned char *) buf;
  if (target_big_endian)
      result |= (*where++ & 255);
      result |= (where[n] & 255);
/* MD interface: Sections.  */

/* Estimate the size of a frag before relaxing.  Assume everything fits in
md_estimate_size_before_relax (fragS * fragp,
                               segT segtype ATTRIBUTE_UNUSED)

/* Convert a machine dependent frag.  */
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
  unsigned long insn;
  unsigned long old_op;
  buf = fragp->fr_literal + fragp->fr_fix;
  old_op = bfd_get_16(abfd, buf);
  if (fragp->fr_symbol) {
      exp.X_op = O_symbol;
      exp.X_add_symbol = fragp->fr_symbol;
      exp.X_op = O_constant;
  exp.X_add_number = fragp->fr_offset;
  opcode = fragp->fr_subtype;
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_pc2:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
      if (fragp->fr_var == 4)
          insn = THUMB_OP32(opcode);
          if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
              insn |= (old_op & 0x700) << 4;
              insn |= (old_op & 7) << 12;
              insn |= (old_op & 0x38) << 13;
          insn |= 0x00000c00;
          put_thumb32_insn (buf, insn);
          reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
        reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
      pc_rel = (opcode == T_MNEM_ldr_pc2);
      if (fragp->fr_var == 4)
          insn = THUMB_OP32 (opcode);
          insn |= (old_op & 0xf0) << 4;
          put_thumb32_insn (buf, insn);
          reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
        reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      exp.X_add_number -= 4;
      if (fragp->fr_var == 4)
          int r0off = (opcode == T_MNEM_mov
                       || opcode == T_MNEM_movs) ? 0 : 8;
          insn = THUMB_OP32 (opcode);
          insn = (insn & 0xe1ffffff) | 0x10000000;
          insn |= (old_op & 0x700) << r0off;
          put_thumb32_insn (buf, insn);
          reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
        reloc_type = BFD_RELOC_ARM_THUMB_IMM;
      if (fragp->fr_var == 4)
          insn = THUMB_OP32(opcode);
          put_thumb32_insn (buf, insn);
          reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
        reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      if (fragp->fr_var == 4)
          insn = THUMB_OP32(opcode);
          insn |= (old_op & 0xf00) << 14;
          put_thumb32_insn (buf, insn);
          reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
        reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      if (fragp->fr_var == 4)
          /* ??? Choose between add and addw.  */
          insn = THUMB_OP32 (opcode);
          insn |= (old_op & 0xf0) << 4;
          put_thumb32_insn (buf, insn);
          reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
        reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      if (fragp->fr_var == 4)
          insn = THUMB_OP32 (opcode);
          insn |= (old_op & 0xf0) << 4;
          insn |= (old_op & 0xf) << 16;
          put_thumb32_insn (buf, insn);
          reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
        reloc_type = BFD_RELOC_ARM_THUMB_ADD;
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
  fixp->fx_file = fragp->fr_file;
  fixp->fx_line = fragp->fr_line;
  fragp->fr_fix += fragp->fr_var;
/* Return the size of a relaxable immediate operand instruction.
   SHIFT and SIZE specify the form of the allowable immediate.  */
relax_immediate (fragS *fragp, int size, int shift)
  /* ??? Should be able to do better than this.  */
  if (fragp->fr_symbol)
  low = (1 << shift) - 1;
  mask = (1 << (shift + size)) - (1 << shift);
  offset = fragp->fr_offset;
  /* Force misaligned offsets to 32-bit variant.  */
  if (offset & ~mask)
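/* Worked example (illustrative, not part of the original source): a Thumb
   "ldr rX, [sp, #imm]" is relaxed here with size == 8 and shift == 2, so
   the 2-byte encoding is kept only for word-aligned offsets in the range
   0..1020; a misaligned or larger offset forces the 32-bit form.  */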
/* Return the size of a relaxable adr pseudo-instruction or PC-relative
relax_adr (fragS *fragp, asection *sec)
  /* Assume worst case for symbols not known to be in the same section.  */
  if (!S_IS_DEFINED(fragp->fr_symbol)
      || sec != S_GET_SEGMENT (fragp->fr_symbol))
  val = S_GET_VALUE(fragp->fr_symbol) + fragp->fr_offset;
  addr = fragp->fr_address + fragp->fr_fix;
  addr = (addr + 4) & ~3;
  /* Fix the insn as the 4-byte version if the target address is not
     sufficiently aligned.  This prevents an infinite loop when two
     instructions have contradictory range/alignment requirements.  */
  if (val < 0 || val > 1020)
/* Return the size of a relaxable add/sub immediate instruction.  */
relax_addsub (fragS *fragp, asection *sec)
  buf = fragp->fr_literal + fragp->fr_fix;
  op = bfd_get_16(sec->owner, buf);
  if ((op & 0xf) == ((op >> 4) & 0xf))
    return relax_immediate (fragp, 8, 0);
  return relax_immediate (fragp, 3, 0);
/* Return the size of a relaxable branch instruction.  BITS is the
   size of the offset field in the narrow instruction.  */
relax_branch (fragS *fragp, asection *sec, int bits)
  /* Assume worst case for symbols not known to be in the same section.  */
  if (!S_IS_DEFINED(fragp->fr_symbol)
      || sec != S_GET_SEGMENT (fragp->fr_symbol))
  val = S_GET_VALUE(fragp->fr_symbol) + fragp->fr_offset;
  addr = fragp->fr_address + fragp->fr_fix + 4;
  /* Offset is a signed value *2 */
  if (val >= limit || val < -limit)
/* Relax a machine dependent frag.  This returns the amount by which
   the current size of the frag should change.  */
arm_relax_frag (asection *sec, fragS *fragp, long stretch ATTRIBUTE_UNUSED)
  oldsize = fragp->fr_var;
  switch (fragp->fr_subtype)
    case T_MNEM_ldr_pc2:
      newsize = relax_adr(fragp, sec);
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
      newsize = relax_immediate(fragp, 8, 2);
      newsize = relax_immediate(fragp, 5, 2);
      newsize = relax_immediate(fragp, 5, 1);
      newsize = relax_immediate(fragp, 5, 0);
      newsize = relax_adr(fragp, sec);
      newsize = relax_immediate(fragp, 8, 0);
      newsize = relax_branch(fragp, sec, 11);
      newsize = relax_branch(fragp, sec, 8);
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
      newsize = relax_immediate (fragp, 8, 2);
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      newsize = relax_immediate (fragp, 7, 2);
      newsize = relax_addsub (fragp, sec);
      fragp->fr_var = -newsize;
      md_convert_frag (sec->owner, sec, fragp);
      return -(newsize + oldsize);
  fragp->fr_var = newsize;
  return newsize - oldsize;
/* Round up a section size to the appropriate boundary.  */
md_section_align (segT segment ATTRIBUTE_UNUSED,
  /* Round all sects to multiple of 4.  */
  return (size + 3) & ~3;
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment.  */
arm_handle_align (fragS * fragP)
  static char const arm_noop[4] = { 0x00, 0x00, 0xa0, 0xe1 };
  static char const thumb_noop[2] = { 0xc0, 0x46 };
  static char const arm_bigend_noop[4] = { 0xe1, 0xa0, 0x00, 0x00 };
  static char const thumb_bigend_noop[2] = { 0x46, 0xc0 };
  int bytes, fix, noop_size;
  if (fragP->fr_type != rs_align_code)
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;
  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
  if (fragP->tc_frag_data)
      if (target_big_endian)
        noop = thumb_bigend_noop;
      noop_size = sizeof (thumb_noop);
      if (target_big_endian)
        noop = arm_bigend_noop;
      noop_size = sizeof (arm_noop);
  if (bytes & (noop_size - 1))
      fix = bytes & (noop_size - 1);
      memset (p, 0, fix);
  while (bytes >= noop_size)
      memcpy (p, noop, noop_size);
      bytes -= noop_size;
  fragP->fr_fix += fix;
  fragP->fr_var = noop_size;
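/* Illustrative note (not part of the original source): the padding words
   above are canonical no-ops, 0xe1a00000 ("mov r0, r0") for ARM code and
   0x46c0 ("mov r8, r8") for Thumb code; the arrays store them in
   little-endian byte order, with big-endian variants alongside.  */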
/* Called from md_do_align.  Used to create an alignment
   frag in a code section.  */
arm_frag_align_code (int n, int max)
  /* We assume that there will never be a requirement
     to support alignments greater than 32 bytes.  */
  if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
    as_fatal (_("alignments greater than 32 bytes not supported in .text sections."));
  p = frag_var (rs_align_code,
                MAX_MEM_FOR_RS_ALIGN_CODE,
                (relax_substateT) max,

/* Perform target specific initialisation of a frag.  */
arm_init_frag (fragS * fragP)
  /* Record whether this frag is in an ARM or a THUMB area.  */
  fragP->tc_frag_data = thumb_mode;
/* When we change sections we need to issue a new mapping symbol.  */
arm_elf_change_section (void)
  segment_info_type *seginfo;
  /* Link an unlinked unwind index table section to the .text section.  */
  if (elf_section_type (now_seg) == SHT_ARM_EXIDX
      && elf_linked_to_section (now_seg) == NULL)
    elf_linked_to_section (now_seg) = text_section;
  if (!SEG_NORMAL (now_seg))
  flags = bfd_get_section_flags (stdoutput, now_seg);
  /* We can ignore sections that only contain debug info.  */
  if ((flags & SEC_ALLOC) == 0)
  seginfo = seg_info (now_seg);
  mapstate = seginfo->tc_segment_info_data.mapstate;
  marked_pr_dependency = seginfo->tc_segment_info_data.marked_pr_dependency;

arm_elf_section_type (const char * str, size_t len)
  if (len == 5 && strncmp (str, "exidx", 5) == 0)
    return SHT_ARM_EXIDX;
/* Code to deal with unwinding tables.  */

static void add_unwind_adjustsp (offsetT);

/* Generate any deferred unwind frame offset.  */
flush_pending_unwind (void)
  offset = unwind.pending_offset;
  unwind.pending_offset = 0;
    add_unwind_adjustsp (offset);
/* Add an opcode to this list for this function.  Two-byte opcodes should
   be passed as op[0] << 8 | op[1].  The list of opcodes is built in reverse
add_unwind_opcode (valueT op, int length)
  /* Add any deferred stack adjustment.  */
  if (unwind.pending_offset)
    flush_pending_unwind ();
  unwind.sp_restored = 0;
  if (unwind.opcode_count + length > unwind.opcode_alloc)
      unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
      if (unwind.opcodes)
        unwind.opcodes = xrealloc (unwind.opcodes,
                                   unwind.opcode_alloc);
        unwind.opcodes = xmalloc (unwind.opcode_alloc);
  unwind.opcodes[unwind.opcode_count] = op & 0xff;
  unwind.opcode_count++;
/* Add unwind opcodes to adjust the stack pointer.  */
add_unwind_adjustsp (offsetT offset)
  if (offset > 0x200)
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
      /* Long form: 0xb2, uleb128.  */
      /* This might not fit in a word so add the individual bytes,
         remembering the list is built in reverse order.  */
      o = (valueT) ((offset - 0x204) >> 2);
      add_unwind_opcode (0, 1);
      /* Calculate the uleb128 encoding of the offset.  */
          bytes[n] = o & 0x7f;
      /* Add the insn.  */
        add_unwind_opcode (bytes[n - 1], 1);
      add_unwind_opcode (0xb2, 1);
  else if (offset > 0x100)
      /* Two short opcodes.  */
      add_unwind_opcode (0x3f, 1);
      op = (offset - 0x104) >> 2;
      add_unwind_opcode (op, 1);
  else if (offset > 0)
      /* Short opcode.  */
      op = (offset - 4) >> 2;
      add_unwind_opcode (op, 1);
  else if (offset < 0)
      while (offset > 0x100)
          add_unwind_opcode (0x7f, 1);
      op = ((offset - 4) >> 2) | 0x40;
      add_unwind_opcode (op, 1);
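/* Worked examples (illustrative, not part of the original source): an
   adjustment of 8 fits the short form and encodes as the single opcode
   0x01 (vsp += (1 << 2) + 4); an adjustment of 0x180 uses two short
   opcodes, 0x3f then 0x1f; an adjustment of 0x304 takes the long form,
   0xb2 followed by the uleb128 value 0x40, i.e. 0x204 + (0x40 << 2).  */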
/* Finish the list of unwind opcodes for this function.  */
finish_unwind_opcodes (void)
  if (unwind.fp_used)
      /* Adjust sp as necessary.  */
      unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
      flush_pending_unwind ();
      /* After restoring sp from the frame pointer.  */
      op = 0x90 | unwind.fp_reg;
      add_unwind_opcode (op, 1);
    flush_pending_unwind ();
/* Start an exception table entry.  If idx is nonzero this is an index table
start_unwind_section (const segT text_seg, int idx)
  const char * text_name;
  const char * prefix;
  const char * prefix_once;
  const char * group_name;
  size_t sec_name_len;
      prefix = ELF_STRING_ARM_unwind;
      prefix_once = ELF_STRING_ARM_unwind_once;
      type = SHT_ARM_EXIDX;
      prefix = ELF_STRING_ARM_unwind_info;
      prefix_once = ELF_STRING_ARM_unwind_info_once;
      type = SHT_PROGBITS;
  text_name = segment_name (text_seg);
  if (streq (text_name, ".text"))
  if (strncmp (text_name, ".gnu.linkonce.t.",
               strlen (".gnu.linkonce.t.")) == 0)
      prefix = prefix_once;
      text_name += strlen (".gnu.linkonce.t.");
  prefix_len = strlen (prefix);
  text_len = strlen (text_name);
  sec_name_len = prefix_len + text_len;
  sec_name = xmalloc (sec_name_len + 1);
  memcpy (sec_name, prefix, prefix_len);
  memcpy (sec_name + prefix_len, text_name, text_len);
  sec_name[prefix_len + text_len] = '\0';
  /* Handle COMDAT group.  */
  if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
      group_name = elf_group_name (text_seg);
      if (group_name == NULL)
          as_bad ("Group section `%s' has no group signature",
                  segment_name (text_seg));
          ignore_rest_of_line ();
      flags |= SHF_GROUP;
  obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);
  /* Set the section link for index tables.  */
    elf_linked_to_section (now_seg) = text_seg;
/* Start an unwind table entry.  HAVE_DATA is nonzero if we have additional
   personality routine data.  Returns zero, or the index table value for
   an inline entry.  */
create_unwind_entry (int have_data)
  /* The current word of data.  */
  /* The number of bytes left in this word.  */
  finish_unwind_opcodes ();
  /* Remember the current text section.  */
  unwind.saved_seg = now_seg;
  unwind.saved_subseg = now_subseg;
  start_unwind_section (now_seg, 0);
  if (unwind.personality_routine == NULL)
      if (unwind.personality_index == -2)
          as_bad (_("handlerdata in cantunwind frame"));
          return 1; /* EXIDX_CANTUNWIND.  */
      /* Use a default personality routine if none is specified.  */
      if (unwind.personality_index == -1)
          if (unwind.opcode_count > 3)
            unwind.personality_index = 1;
            unwind.personality_index = 0;
      /* Space for the personality routine entry.  */
      if (unwind.personality_index == 0)
          if (unwind.opcode_count > 3)
            as_bad (_("too many unwind opcodes for personality routine 0"));
              /* All the data is inline in the index table.  */
              while (unwind.opcode_count > 0)
                  unwind.opcode_count--;
                  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
              /* Pad with "finish" opcodes.  */
                data = (data << 8) | 0xb0;
        /* We get two opcodes "free" in the first word.  */
        size = unwind.opcode_count - 2;
      /* An extra byte is required for the opcode count.  */
      size = unwind.opcode_count + 1;
  size = (size + 3) >> 2;
    as_bad (_("too many unwind opcodes"));
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);
  unwind.table_entry = expr_build_dot ();
  /* Allocate the table entry.  */
  ptr = frag_more ((size << 2) + 4);
  where = frag_now_fix () - ((size << 2) + 4);
  switch (unwind.personality_index)
      /* ??? Should this be a PLT generating relocation?  */
      /* Custom personality routine.  */
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
               BFD_RELOC_ARM_PREL31);
      /* Set the first byte to the number of additional words.  */
      /* ABI defined personality routines.  */
      /* Three opcode bytes are packed into the first word.  */
      /* The size and first two opcode bytes go in the first word.  */
      data = ((0x80 + unwind.personality_index) << 8) | size;
      /* Should never happen.  */
  /* Pack the opcodes into words (MSB first), reversing the list at the same
  while (unwind.opcode_count > 0)
          md_number_to_chars (ptr, data, 4);
      unwind.opcode_count--;
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
  /* Finish off the last word.  */
      /* Pad with "finish" opcodes.  */
        data = (data << 8) | 0xb0;
      md_number_to_chars (ptr, data, 4);
      /* Add an empty descriptor if there is no user-specified data.  */
      ptr = frag_more (4);
      md_number_to_chars (ptr, 0, 4);
/* Convert REGNAME to a DWARF-2 register number.  */
tc_arm_regname_to_dw2regnum (const char *regname)
  int reg = arm_reg_parse ((char **) &regname, REG_TYPE_RN);

/* Initialize the DWARF-2 unwind information for this procedure.  */
tc_arm_frame_initial_instructions (void)
  cfi_add_CFA_def_cfa (REG_SP, 0);

#endif /* OBJ_ELF */
/* MD interface: Symbol and relocation handling.  */

/* Return the address within the segment that a PC-relative fixup is
   relative to.  For ARM, PC-relative fixups applied to instructions
   are generally relative to the location of the fixup plus 8 bytes.
   Thumb branches are offset by 4, and Thumb loads relative to PC
   require special handling.  */
md_pcrel_from_section (fixS * fixP, segT seg)
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.  */
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
          || arm_force_relocation (fixP)))
  switch (fixP->fx_r_type)
      /* PC relative addressing on the Thumb is slightly odd as the
         bottom two bits of the PC are forced to zero for the
         calculation.  This happens *after* application of the
         pipeline offset.  However, Thumb adrl already adjusts for
         this, so we need not do it again.  */
    case BFD_RELOC_ARM_THUMB_ADD:
    case BFD_RELOC_ARM_THUMB_OFFSET:
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
    case BFD_RELOC_ARM_T32_ADD_PC12:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      return (base + 4) & ~3;
      /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_THUMB_PCREL_BLX:
      /* ARM mode branches are offset by +8.  However, the Windows CE
         loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_ARM_PLT32:
      /* ARM mode loads relative to PC are also offset by +8.  Unlike
         branches, the Windows CE loader *does* expect the relocation
         to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      /* Other PC-relative relocations are un-offset.  */
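/* Illustrative summary (not part of the original source): for an ARM-state
   branch the offset is taken relative to the fixup address plus 8 (the
   classic two-instruction prefetch), for a Thumb branch plus 4, and for
   Thumb PC-relative loads the base is additionally rounded down to a word
   boundary, (base + 4) & ~3.  */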
/* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
   Otherwise we have no need to default values of symbols.  */
md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
      if (symbol_find (name))
        as_bad ("GOT already in the symbol table");
      GOT_symbol = symbol_new (name, undefined_section,
                               (valueT) 0, & zero_address_frag);
/* Subroutine of md_apply_fix.   Check to see if an immediate can be
   computed as two separate immediate values, added together.  We
   already know that this value cannot be computed by just one ARM
static unsigned int
validate_immediate_twopart (unsigned int val,
                            unsigned int * highpart)
  for (i = 0; i < 32; i += 2)
    if (((a = rotate_left (val, i)) & 0xff) != 0)
            * highpart = (a >> 8) | ((i + 24) << 7);
        else if (a & 0xff0000)
            if (a & 0xff000000)
            * highpart = (a >> 16) | ((i + 16) << 7);
            assert (a & 0xff000000);
            * highpart = (a >> 24) | ((i + 8) << 7);
        return (a & 0xff) | (i << 7);
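/* Worked example (illustrative, not part of the original source): an ADRL
   that needs a PC offset of 0x10004 cannot use a single 8-bit rotated
   immediate, but it splits cleanly into #4 and #0x10000, so the pseudo-op
   can expand to "add rd, pc, #4" followed by "add rd, rd, #0x10000".  */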
validate_offset_imm (unsigned int val, int hwse)
  if ((hwse && val > 255) || val > 4095)
/* Subroutine of md_apply_fix.   Do those data_ops which can take a
   negative immediate constant by altering the instruction.  A bit of
   by inverting the second operand, and
   by negating the second operand.  */
negate_data_op (unsigned long * instruction,
                unsigned long value)
  unsigned long negated, inverted;
  negated = encode_arm_immediate (-value);
  inverted = encode_arm_immediate (~value);
  op = (*instruction >> DATA_OP_SHIFT) & 0xf;
      /* First negates.  */
    case OPCODE_SUB: /* ADD <-> SUB */
      new_inst = OPCODE_ADD;
      new_inst = OPCODE_SUB;
    case OPCODE_CMP: /* CMP <-> CMN */
      new_inst = OPCODE_CMN;
      new_inst = OPCODE_CMP;
      /* Now Inverted ops.  */
    case OPCODE_MOV: /* MOV <-> MVN */
      new_inst = OPCODE_MVN;
      new_inst = OPCODE_MOV;
    case OPCODE_AND: /* AND <-> BIC */
      new_inst = OPCODE_BIC;
      new_inst = OPCODE_AND;
    case OPCODE_ADC: /* ADC <-> SBC */
      new_inst = OPCODE_SBC;
      new_inst = OPCODE_ADC;
      /* We cannot do anything.  */
  if (value == (unsigned) FAIL)
  *instruction &= OPCODE_MASK;
  *instruction |= new_inst << DATA_OP_SHIFT;
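/* Illustrative note (not part of the original source): this rewriting is
   what lets a fixup such as "mov r0, #-1" be emitted as "mvn r0, #0", or
   "add r0, r1, #-4" as "sub r0, r1, #4", when the original immediate is
   not representable as an 8-bit rotated constant.  */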
/* Like negate_data_op, but for Thumb-2.  */
static unsigned int
thumb32_negate_data_op (offsetT *instruction, offsetT value)
  offsetT negated, inverted;
  negated = encode_thumb32_immediate (-value);
  inverted = encode_thumb32_immediate (~value);
  rd = (*instruction >> 8) & 0xf;
  op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
      /* ADD <-> SUB.  Includes CMP <-> CMN.  */
    case T2_OPCODE_SUB:
      new_inst = T2_OPCODE_ADD;
    case T2_OPCODE_ADD:
      new_inst = T2_OPCODE_SUB;
      /* ORR <-> ORN.  Includes MOV <-> MVN.  */
    case T2_OPCODE_ORR:
      new_inst = T2_OPCODE_ORN;
    case T2_OPCODE_ORN:
      new_inst = T2_OPCODE_ORR;
      /* AND <-> BIC.  TST has no inverted equivalent.  */
    case T2_OPCODE_AND:
      new_inst = T2_OPCODE_BIC;
    case T2_OPCODE_BIC:
      new_inst = T2_OPCODE_AND;
    case T2_OPCODE_ADC:
      new_inst = T2_OPCODE_SBC;
    case T2_OPCODE_SBC:
      new_inst = T2_OPCODE_ADC;
      /* We cannot do anything.  */
  *instruction &= T2_OPCODE_MASK;
  *instruction |= new_inst << T2_DATA_OP_SHIFT;
/* Read a 32-bit thumb instruction from buf.  */
static unsigned long
get_thumb32_insn (char * buf)
  unsigned long insn;
  insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
  insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
/* We usually want to set the low bit on the address of thumb function
   symbols.  In particular .word foo - . should have the low bit set.
   Generic code tries to fold the difference of two symbols to
   a constant.  Prevent this and force a relocation when the first symbol
   is a thumb function.  */
arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
  if (op == O_subtract
      && l->X_op == O_symbol
      && r->X_op == O_symbol
      && THUMB_IS_FUNC (l->X_add_symbol))
      l->X_op = O_subtract;
      l->X_op_symbol = r->X_add_symbol;
      l->X_add_number -= r->X_add_number;
  /* Process as normal.  */
md_apply_fix (fixS * fixP,
  offsetT value = * valP;
  unsigned int newimm;
  unsigned long temp;
  char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
  /* Note whether this will delete the relocation.  */
  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
  /* On a 64-bit host, silently truncate 'value' to 32 bits for
     consistency with the behavior on 32-bit hosts.  Remember value
  value &= 0xffffffff;
  value ^= 0x80000000;
  value -= 0x80000000;
  fixP->fx_addnumber = value;
  /* Same treatment for fixP->fx_offset.  */
  fixP->fx_offset &= 0xffffffff;
  fixP->fx_offset ^= 0x80000000;
  fixP->fx_offset -= 0x80000000;
  switch (fixP->fx_r_type)
    case BFD_RELOC_NONE:
      /* This will need to go in the object file.  */
    case BFD_RELOC_ARM_IMMEDIATE:
      /* We claim that this fixup has been processed here,
         even if in fact we generate an error because we do
         not have a reloc for it, so tc_gen_reloc will reject it.  */
          && ! S_IS_DEFINED (fixP->fx_addsy))
          as_bad_where (fixP->fx_file, fixP->fx_line,
                        _("undefined symbol %s used as an immediate value"),
                        S_GET_NAME (fixP->fx_addsy));
      newimm = encode_arm_immediate (value);
      temp = md_chars_to_number (buf, INSN_SIZE);
      /* If the instruction will fail, see if we can fix things up by
         changing the opcode.  */
      if (newimm == (unsigned int) FAIL
          && (newimm = negate_data_op (&temp, value)) == (unsigned int) FAIL)
          as_bad_where (fixP->fx_file, fixP->fx_line,
                        _("invalid constant (%lx) after fixup"),
                        (unsigned long) value);
      newimm |= (temp & 0xfffff000);
      md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
        unsigned int highpart = 0;
        unsigned int newinsn = 0xe1a00000; /* nop.  */
        newimm = encode_arm_immediate (value);
        temp = md_chars_to_number (buf, INSN_SIZE);
        /* If the instruction will fail, see if we can fix things up by
           changing the opcode.  */
        if (newimm == (unsigned int) FAIL
            && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
            /* No ?  OK - try using two ADD instructions to generate
            newimm = validate_immediate_twopart (value, & highpart);
            /* Yes - then make sure that the second instruction is
            if (newimm != (unsigned int) FAIL)
            /* Still No ?  Try using a negated value.  */
            else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
              temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
            /* Otherwise - give up.  */
                as_bad_where (fixP->fx_file, fixP->fx_line,
                              _("unable to compute ADRL instructions for PC offset of 0x%lx"),
            /* Replace the first operand in the 2nd instruction (which
               is the PC) with the destination register.  We have
               already added in the PC in the first instruction and we
               do not want to do it again.  */
            newinsn &= ~ 0xf0000;
            newinsn |= ((newinsn & 0x0f000) << 4);
        newimm |= (temp & 0xfffff000);
        md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
        highpart |= (newinsn & 0xfffff000);
        md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
    case BFD_RELOC_ARM_OFFSET_IMM:
      if (!fixP->fx_done && seg->use_rela_p)
    case BFD_RELOC_ARM_LITERAL:
      if (validate_offset_imm (value, 0) == FAIL)
          if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("invalid literal constant: pool needs to be closer"));
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("bad immediate value for offset (%ld)"),
      newval = md_chars_to_number (buf, INSN_SIZE);
      newval &= 0xff7ff000;
      newval |= value | (sign ? INDEX_UP : 0);
      md_number_to_chars (buf, newval, INSN_SIZE);
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
      if (validate_offset_imm (value, 1) == FAIL)
          if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("invalid literal constant: pool needs to be closer"));
            as_bad (_("bad immediate value for half-word offset (%ld)"),
      newval = md_chars_to_number (buf, INSN_SIZE);
      newval &= 0xff7ff0f0;
      newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
      md_number_to_chars (buf, newval, INSN_SIZE);
    case BFD_RELOC_ARM_T32_OFFSET_U8:
      if (value < 0 || value > 1020 || value % 4 != 0)
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("bad immediate value for offset (%ld)"), (long) value);
      newval = md_chars_to_number (buf+2, THUMB_SIZE);
      md_number_to_chars (buf+2, newval, THUMB_SIZE);
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
      /* This is a complicated relocation used for all varieties of Thumb32
         load/store instruction with immediate offset:

         1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
                                                   *4, optional writeback(W)
                                                   (doubleword load/store)
         1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
         1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
         1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
         1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
         1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit

         Uppercase letters indicate bits that are already encoded at
         this point.  Lowercase letters are our problem.  For the
         second block of instructions, the secondary opcode nybble
         (bits 8..11) is present, and bit 23 is zero, even if this is
         a PC-relative operation.  */
      newval = md_chars_to_number (buf, THUMB_SIZE);
      newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
      if ((newval & 0xf0000000) == 0xe0000000)
          /* Doubleword load/store: 8-bit offset, scaled by 4.  */
            newval |= (1 << 23);
          if (value % 4 != 0)
              as_bad_where (fixP->fx_file, fixP->fx_line,
                            _("offset not a multiple of 4"));
              as_bad_where (fixP->fx_file, fixP->fx_line,
                            _("offset out of range"));
      else if ((newval & 0x000f0000) == 0x000f0000)
          /* PC-relative, 12-bit offset.  */
            newval |= (1 << 23);
              as_bad_where (fixP->fx_file, fixP->fx_line,
                            _("offset out of range"));
      else if ((newval & 0x00000100) == 0x00000100)
          /* Writeback: 8-bit, +/- offset.  */
            newval |= (1 << 9);
              as_bad_where (fixP->fx_file, fixP->fx_line,
                            _("offset out of range"));
      else if ((newval & 0x00000f00) == 0x00000e00)
          /* T-instruction: positive 8-bit offset.  */
          if (value < 0 || value > 0xff)
              as_bad_where (fixP->fx_file, fixP->fx_line,
                            _("offset out of range"));
          /* Positive 12-bit or negative 8-bit offset.  */
              newval |= (1 << 23);
              as_bad_where (fixP->fx_file, fixP->fx_line,
                            _("offset out of range"));
      md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
      md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
    case BFD_RELOC_ARM_SHIFT_IMM:
      newval = md_chars_to_number (buf, INSN_SIZE);
      if (((unsigned long) value) > 32
          && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
          as_bad_where (fixP->fx_file, fixP->fx_line,
                        _("shift expression is too large"));
      /* Shifts of zero must be done as lsl.  */
      else if (value == 32)
      newval &= 0xfffff07f;
      newval |= (value & 0x1f) << 7;
      md_number_to_chars (buf, newval, INSN_SIZE);
    case BFD_RELOC_ARM_T32_IMMEDIATE:
    case BFD_RELOC_ARM_T32_IMM12:
    case BFD_RELOC_ARM_T32_ADD_PC12:
      /* We claim that this fixup has been processed here,
         even if in fact we generate an error because we do
         not have a reloc for it, so tc_gen_reloc will reject it.  */
          && ! S_IS_DEFINED (fixP->fx_addsy))
          as_bad_where (fixP->fx_file, fixP->fx_line,
                        _("undefined symbol %s used as an immediate value"),
                        S_GET_NAME (fixP->fx_addsy));
      newval = md_chars_to_number (buf, THUMB_SIZE);
      newval |= md_chars_to_number (buf+2, THUMB_SIZE);
      /* FUTURE: Implement analogue of negate_data_op for T32.  */
      if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE)
          newimm = encode_thumb32_immediate (value);
          if (newimm == (unsigned int) FAIL)
            newimm = thumb32_negate_data_op (&newval, value);
          /* 12 bit immediate for addw/subw.  */
              newval ^= 0x00a00000;
            newimm = (unsigned int) FAIL;
      if (newimm == (unsigned int)FAIL)
          as_bad_where (fixP->fx_file, fixP->fx_line,
                        _("invalid constant (%lx) after fixup"),
                        (unsigned long) value);
      newval |= (newimm & 0x800) << 15;
      newval |= (newimm & 0x700) << 4;
      newval |= (newimm & 0x0ff);
      md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
      md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
    case BFD_RELOC_ARM_SMC:
      if (((unsigned long) value) > 0xffff)
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("invalid smc expression"));
      newval = md_chars_to_number (buf, INSN_SIZE);
      newval |= (value & 0xf) | ((value & 0xfff0) << 4);
      md_number_to_chars (buf, newval, INSN_SIZE);
    case BFD_RELOC_ARM_SWI:
      if (fixP->tc_fix_data != 0)
          if (((unsigned long) value) > 0xff)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("invalid swi expression"));
          newval = md_chars_to_number (buf, THUMB_SIZE);
          md_number_to_chars (buf, newval, THUMB_SIZE);
          if (((unsigned long) value) > 0x00ffffff)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("invalid swi expression"));
          newval = md_chars_to_number (buf, INSN_SIZE);
          md_number_to_chars (buf, newval, INSN_SIZE);
    case BFD_RELOC_ARM_MULTI:
      if (((unsigned long) value) > 0xffff)
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("invalid expression in load/store multiple"));
      newval = value | md_chars_to_number (buf, INSN_SIZE);
      md_number_to_chars (buf, newval, INSN_SIZE);
    case BFD_RELOC_ARM_PCREL_CALL:
      newval = md_chars_to_number (buf, INSN_SIZE);
      if ((newval & 0xf0000000) == 0xf0000000)
      goto arm_branch_common;
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_PCREL_BRANCH:
      goto arm_branch_common;
    case BFD_RELOC_ARM_PCREL_BLX:
      /* We are going to store value (shifted right by two) in the
         instruction, in a 24 bit, signed field.  Bits 26 through 32 either
         all clear or all set and bit 0 must be clear.  For B/BL bit 1 must
         also be clear.  */
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("misaligned branch destination"));
      if ((value & (offsetT)0xfe000000) != (offsetT)0
          && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("branch out of range"));
      if (fixP->fx_done || !seg->use_rela_p)
          newval = md_chars_to_number (buf, INSN_SIZE);
          newval |= (value >> 2) & 0x00ffffff;
          /* Set the H bit on BLX instructions.  */
              newval |= 0x01000000;
              newval &= ~0x01000000;
          md_number_to_chars (buf, newval, INSN_SIZE);
    case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CZB */
      /* CZB can only branch forward.  */
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("branch out of range"));
      if (fixP->fx_done || !seg->use_rela_p)
          newval = md_chars_to_number (buf, THUMB_SIZE);
          newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
          md_number_to_chars (buf, newval, THUMB_SIZE);
    case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch.  */
      if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("branch out of range"));
      if (fixP->fx_done || !seg->use_rela_p)
          newval = md_chars_to_number (buf, THUMB_SIZE);
          newval |= (value & 0x1ff) >> 1;
          md_number_to_chars (buf, newval, THUMB_SIZE);
    case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch.  */
      if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("branch out of range"));
      if (fixP->fx_done || !seg->use_rela_p)
          newval = md_chars_to_number (buf, THUMB_SIZE);
          newval |= (value & 0xfff) >> 1;
          md_number_to_chars (buf, newval, THUMB_SIZE);
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
      if ((value & ~0x1fffff) && ((value & ~0x1fffff) != ~0x1fffff))
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("conditional branch out of range"));
      if (fixP->fx_done || !seg->use_rela_p)
          addressT S, J1, J2, lo, hi;
          S = (value & 0x00100000) >> 20;
          J2 = (value & 0x00080000) >> 19;
          J1 = (value & 0x00040000) >> 18;
          hi = (value & 0x0003f000) >> 12;
          lo = (value & 0x00000ffe) >> 1;
          newval = md_chars_to_number (buf, THUMB_SIZE);
          newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
          newval |= (S << 10) | hi;
          newval2 |= (J1 << 13) | (J2 << 11) | lo;
          md_number_to_chars (buf, newval, THUMB_SIZE);
          md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
    case BFD_RELOC_THUMB_PCREL_BLX:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("branch out of range"));
      if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
        /* For a BLX instruction, make sure that the relocation is rounded up
           to a word boundary.  This follows the semantics of the instruction
           which specifies that bit 1 of the target address will come from bit
           1 of the base address.  */
        value = (value + 1) & ~ 1;
      if (fixP->fx_done || !seg->use_rela_p)
          newval = md_chars_to_number (buf, THUMB_SIZE);
          newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
          newval |= (value & 0x7fffff) >> 12;
          newval2 |= (value & 0xfff) >> 1;
          md_number_to_chars (buf, newval, THUMB_SIZE);
          md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
      if ((value & ~0x1ffffff) && ((value & ~0x1ffffff) != ~0x1ffffff))
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("branch out of range"));
      if (fixP->fx_done || !seg->use_rela_p)
          addressT S, I1, I2, lo, hi;
          S = (value & 0x01000000) >> 24;
          I1 = (value & 0x00800000) >> 23;
          I2 = (value & 0x00400000) >> 22;
          hi = (value & 0x003ff000) >> 12;
          lo = (value & 0x00000ffe) >> 1;
          newval = md_chars_to_number (buf, THUMB_SIZE);
          newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
          newval |= (S << 10) | hi;
          newval2 |= (I1 << 13) | (I2 << 11) | lo;
          md_number_to_chars (buf, newval, THUMB_SIZE);
          md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
      if (fixP->fx_done || !seg->use_rela_p)
        md_number_to_chars (buf, value, 1);
      if (fixP->fx_done || !seg->use_rela_p)
        md_number_to_chars (buf, value, 2);
    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_LDM32:
    case BFD_RELOC_ARM_TLS_LDO32:
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_TARGET2:
      if (fixP->fx_done || !seg->use_rela_p)
        md_number_to_chars (buf, 0, 4);
    case BFD_RELOC_RVA:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_32_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
        md_number_to_chars (buf, value, 4);
    case BFD_RELOC_ARM_PREL31:
      if (fixP->fx_done || !seg->use_rela_p)
          newval = md_chars_to_number (buf, 4) & 0x80000000;
          if ((value ^ (value >> 1)) & 0x40000000)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("rel31 relocation overflow"));
          newval |= value & 0x7fffffff;
          md_number_to_chars (buf, newval, 4);
    case BFD_RELOC_ARM_CP_OFF_IMM:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      if (value < -1023 || value > 1023 || (value & 3))
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("co-processor offset out of range"));
      if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
          || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
        newval = md_chars_to_number (buf, INSN_SIZE);
        newval = get_thumb32_insn (buf);
      newval &= 0xff7fff00;
      newval |= (value >> 2) | (sign ? INDEX_UP : 0);
        newval &= ~WRITE_BACK;
      if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
          || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
        md_number_to_chars (buf, newval, INSN_SIZE);
        put_thumb32_insn (buf, newval);
    case BFD_RELOC_ARM_CP_OFF_IMM_S2:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
      if (value < -255 || value > 255)
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("co-processor offset out of range"));
      goto cp_off_common;
    case BFD_RELOC_ARM_THUMB_OFFSET:
      newval = md_chars_to_number (buf, THUMB_SIZE);
      /* Exactly what ranges, and where the offset is inserted depends
         on the type of instruction, we can establish this from the
      switch (newval >> 12)
        case 4: /* PC load.  */
          /* Thumb PC loads are somewhat odd, bit 1 of the PC is
             forced to zero for these loads; md_pcrel_from has already
             compensated for this.  */
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("invalid offset, target not word aligned (0x%08lX)"),
                          (((unsigned long) fixP->fx_frag->fr_address
                            + (unsigned long) fixP->fx_where) & ~3)
                          + (unsigned long) value);
          if (value & ~0x3fc)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("invalid offset, value too big (0x%08lX)"),
          newval |= value >> 2;
        case 9: /* SP load/store.  */
          if (value & ~0x3fc)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("invalid offset, value too big (0x%08lX)"),
          newval |= value >> 2;
        case 6: /* Word load/store.  */
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("invalid offset, value too big (0x%08lX)"),
          newval |= value << 4; /* 6 - 2.  */
        case 7: /* Byte load/store.  */
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("invalid offset, value too big (0x%08lX)"),
          newval |= value << 6;
        case 8: /* Halfword load/store.  */
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("invalid offset, value too big (0x%08lX)"),
          newval |= value << 5; /* 6 - 1.  */
          as_bad_where (fixP->fx_file, fixP->fx_line,
                        "Unable to process relocation for thumb opcode: %lx",
                        (unsigned long) newval);
      md_number_to_chars (buf, newval, THUMB_SIZE);
    case BFD_RELOC_ARM_THUMB_ADD:
      /* This is a complicated relocation, since we use it for all of
         the following immediate relocations:
            9bit ADD/SUB SP word-aligned
           10bit ADD PC/SP word-aligned
         The type of instruction being processed is encoded in the
      newval = md_chars_to_number (buf, THUMB_SIZE);
        int rd = (newval >> 4) & 0xf;
        int rs = newval & 0xf;
        int subtract = !!(newval & 0x8000);
        /* Check for HI regs, only very restricted cases allowed:
           Adjusting SP, and using PC or SP to get an address.  */
        if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
            || (rs > 7 && rs != REG_SP && rs != REG_PC))
          as_bad_where (fixP->fx_file, fixP->fx_line,
                        _("invalid Hi register with immediate"));
        /* If value is negative, choose the opposite instruction.  */
            subtract = !subtract;
              as_bad_where (fixP->fx_file, fixP->fx_line,
                            _("immediate value out of range"));
            if (value & ~0x1fc)
              as_bad_where (fixP->fx_file, fixP->fx_line,
                            _("invalid immediate for stack address calculation"));
            newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
            newval |= value >> 2;
        else if (rs == REG_PC || rs == REG_SP)
            if (subtract || value & ~0x3fc)
              as_bad_where (fixP->fx_file, fixP->fx_line,
                            _("invalid immediate for address calculation (value = 0x%08lX)"),
                            (unsigned long) value);
            newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
            newval |= value >> 2;
              as_bad_where (fixP->fx_file, fixP->fx_line,
                            _("immediate value out of range"));
            newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
            newval |= (rd << 8) | value;
              as_bad_where (fixP->fx_file, fixP->fx_line,
                            _("immediate value out of range"));
            newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
            newval |= rd | (rs << 3) | (value << 6);
        md_number_to_chars (buf, newval, THUMB_SIZE);
    case BFD_RELOC_ARM_THUMB_IMM:
      newval = md_chars_to_number (buf, THUMB_SIZE);
      if (value < 0 || value > 255)
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate: %ld is too large"),
		      (long) value);
      newval |= value;
      md_number_to_chars (buf, newval, THUMB_SIZE);
      break;

    case BFD_RELOC_ARM_THUMB_SHIFT:
      /* 5bit shift value (0..32).  LSL cannot take 32.  */
      newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
      temp = newval & 0xf800;
      if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid shift value: %ld"), (long) value);
      /* Shifts of zero must be encoded as LSL.  */
      if (value == 0)
	newval = (newval & 0x003f) | T_OPCODE_LSL_I;
      /* Shifts of 32 are encoded as zero.  */
      else if (value == 32)
	value = 0;
      newval |= value << 6;
      md_number_to_chars (buf, newval, THUMB_SIZE);
      break;
    case BFD_RELOC_VTABLE_INHERIT:
    case BFD_RELOC_VTABLE_ENTRY:
      fixP->fx_done = 0;
      return;

    case BFD_RELOC_UNUSED:
    default:
      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("bad relocation fixup type (%d)"), fixP->fx_r_type);
    }
}
/* Translate internal representation of relocation info to BFD target
   format.  */

arelent *
tc_gen_reloc (asection *section, fixS *fixp)
{
  arelent *reloc;
  bfd_reloc_code_real_type code;

  reloc = xmalloc (sizeof (arelent));

  reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;

  if (fixp->fx_pcrel)
    {
      if (section->use_rela_p)
	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
      else
	fixp->fx_offset = reloc->address;
    }
  reloc->addend = fixp->fx_offset;

  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_8:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_8_PCREL;
	  break;
	}

    case BFD_RELOC_16:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_16_PCREL;
	  break;
	}

    case BFD_RELOC_32:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_32_PCREL;
	  break;
	}

    case BFD_RELOC_NONE:
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_RVA:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_THUMB_PCREL_BLX:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
      code = fixp->fx_r_type;
      break;
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_HWLITERAL:
      /* If this is called then a literal has
	 been referenced across a section boundary.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("literal referenced across section boundary"));
      return NULL;
    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_ARM_PREL31:
    case BFD_RELOC_ARM_TARGET2:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_LDO32:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_LDM32:
      /* BFD will include the symbol's address in the addend.
	 But we don't want that, so subtract it out again here.  */
      if (!S_IS_COMMON (fixp->fx_addsy))
	reloc->addend -= (*reloc->sym_ptr_ptr)->value;
      code = fixp->fx_r_type;
      break;
    case BFD_RELOC_ARM_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal relocation (type: IMMEDIATE) not fixed up"));
      return NULL;

    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("ADRL used for a symbol not defined in the same file"));
      return NULL;

    case BFD_RELOC_ARM_OFFSET_IMM:
      if (section->use_rela_p)
	{
	  code = fixp->fx_r_type;
	  break;
	}

      if (fixp->fx_addsy != NULL
	  && !S_IS_DEFINED (fixp->fx_addsy)
	  && S_IS_LOCAL (fixp->fx_addsy))
	{
	  as_bad_where (fixp->fx_file, fixp->fx_line,
			_("undefined local label `%s'"),
			S_GET_NAME (fixp->fx_addsy));
	  return NULL;
	}

      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
      return NULL;
    default:
      {
	char * type;

	switch (fixp->fx_r_type)
	  {
	  case BFD_RELOC_NONE:		     type = "NONE";	     break;
	  case BFD_RELOC_ARM_OFFSET_IMM8:    type = "OFFSET_IMM8";   break;
	  case BFD_RELOC_ARM_SHIFT_IMM:	     type = "SHIFT_IMM";     break;
	  case BFD_RELOC_ARM_SMC:	     type = "SMC";	     break;
	  case BFD_RELOC_ARM_SWI:	     type = "SWI";	     break;
	  case BFD_RELOC_ARM_MULTI:	     type = "MULTI";	     break;
	  case BFD_RELOC_ARM_CP_OFF_IMM:     type = "CP_OFF_IMM";    break;
	  case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
	  case BFD_RELOC_ARM_THUMB_ADD:	     type = "THUMB_ADD";     break;
	  case BFD_RELOC_ARM_THUMB_SHIFT:    type = "THUMB_SHIFT";   break;
	  case BFD_RELOC_ARM_THUMB_IMM:	     type = "THUMB_IMM";     break;
	  case BFD_RELOC_ARM_THUMB_OFFSET:   type = "THUMB_OFFSET";  break;
	  default:			     type = _("<unknown>");  break;
	  }
	as_bad_where (fixp->fx_file, fixp->fx_line,
		      _("cannot represent %s relocation in this object file format"),
		      type);
	return NULL;
      }
    }
  if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_ARM_GOTPC;
      reloc->addend = fixp->fx_offset = reloc->address;
    }

  reloc->howto = bfd_reloc_type_lookup (stdoutput, code);

  if (reloc->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent %s relocation in this object file format"),
		    bfd_get_reloc_code_name (code));
      return NULL;
    }

  /* HACK: Since arm ELF uses Rel instead of Rela, encode the
     vtable entry to be used in the relocation's section offset.  */
  if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    reloc->address = fixp->fx_offset;

  return reloc;
}
/* This fix_new is called by cons via TC_CONS_FIX_NEW.	*/

void
cons_fix_new_arm (fragS *	frag,
		  int		where,
		  int		size,
		  expressionS * exp)
{
  bfd_reloc_code_real_type type;
  int pcrel = 0;

  /* Pick a reloc.
     FIXME: @@ Should look at CPU word size.  */
  switch (size)
    {
    case 1:
      type = BFD_RELOC_8;
      break;
    case 2:
      type = BFD_RELOC_16;
      break;
    case 4:
      type = BFD_RELOC_32;
      break;
    case 8:
      type = BFD_RELOC_64;
      break;
    }

  fix_new_exp (frag, where, (int) size, exp, pcrel, type);
}
#if defined OBJ_COFF || defined OBJ_ELF
void
arm_validate_fix (fixS * fixP)
{
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.	 */
  if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
      && fixP->fx_addsy != NULL
      && S_IS_DEFINED (fixP->fx_addsy)
      && ! THUMB_IS_FUNC (fixP->fx_addsy))
    {
      fixP->fx_addsy = find_real_start (fixP->fx_addsy);
    }
}
#endif
int
arm_force_relocation (struct fix * fixp)
{
#if defined (OBJ_COFF) && defined (TE_PE)
  if (fixp->fx_r_type == BFD_RELOC_RVA)
    return 1;
#endif

  /* Resolve these relocations even if the symbol is extern or weak.  */
  if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12)
    return 0;

  return generic_force_reloc (fixp);
}
/* This is a little hack to help the gas/arm/adrl.s test.  It prevents
   local labels from being added to the output symbol table when they
   are used with the ADRL pseudo op.  The ADRL relocation should always
   be resolved before the binary is emitted, so it is safe to say that
   it is adjustable.  */

arm_fix_adjustable (fixS * fixP)
{
  if (fixP->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE)
    return 1;
/* Relocations against Thumb function names must be left unadjusted,
   so that the linker can use this information to correctly set the
   bottom bit of their addresses.  The MIPS version of this function
   also prevents relocations that are mips-16 specific, but I do not
   know why it does this.

   There is one other problem that ought to be addressed here, but
   which currently is not:  Taking the address of a label (rather
   than a function) and then later jumping to that address.  Such
   addresses also ought to have their bottom bit set (assuming that
   they reside in Thumb code), but at the moment they will not.  */

int
arm_fix_adjustable (fixS * fixP)
{
  if (fixP->fx_addsy == NULL)
    return 1;

  if (THUMB_IS_FUNC (fixP->fx_addsy)
      && fixP->fx_subsy == NULL)
    return 0;

  /* We need the symbol name for the VTABLE entries.  */
  if (	 fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
      || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    return 0;

  /* Don't allow symbols to be discarded on GOT related relocs.  */
  if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
      || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
      || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
      || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
    return 0;

  return 1;
}
const char *
elf32_arm_target_format (void)
{
#ifdef TE_SYMBIAN
  return (target_big_endian
	  ? "elf32-bigarm-symbian"
	  : "elf32-littlearm-symbian");
#elif defined (TE_VXWORKS)
  return (target_big_endian
	  ? "elf32-bigarm-vxworks"
	  : "elf32-littlearm-vxworks");
#else
  if (target_big_endian)
    return "elf32-bigarm";
  else
    return "elf32-littlearm";
#endif
}
void
armelf_frob_symbol (symbolS * symp,
		    int *     puntp)
{
  elf_frob_symbol (symp, puntp);
}
/* MD interface: Finalization.	*/

/* A good place to do this, although this was probably not intended
   for this kind of use.  We need to dump the literal pool before
   references are made to a null symbol pointer.  */

void
arm_cleanup (void)
{
  literal_pool * pool;

  for (pool = list_of_pools; pool; pool = pool->next)
    {
      /* Put it at the end of the relevant section.  */
      subseg_set (pool->section, pool->sub_section);
#ifdef OBJ_ELF
      arm_elf_change_section ();
#endif
      s_ltorg (0);
    }
}
17052 /* Adjust the symbol table. This marks Thumb symbols as distinct from
17056 arm_adjust_symtab (void)
17061 for (sym
= symbol_rootP
; sym
!= NULL
; sym
= symbol_next (sym
))
17063 if (ARM_IS_THUMB (sym
))
17065 if (THUMB_IS_FUNC (sym
))
17067 /* Mark the symbol as a Thumb function. */
17068 if ( S_GET_STORAGE_CLASS (sym
) == C_STAT
17069 || S_GET_STORAGE_CLASS (sym
) == C_LABEL
) /* This can happen! */
17070 S_SET_STORAGE_CLASS (sym
, C_THUMBSTATFUNC
);
17072 else if (S_GET_STORAGE_CLASS (sym
) == C_EXT
)
17073 S_SET_STORAGE_CLASS (sym
, C_THUMBEXTFUNC
);
17075 as_bad (_("%s: unexpected function type: %d"),
17076 S_GET_NAME (sym
), S_GET_STORAGE_CLASS (sym
));
17078 else switch (S_GET_STORAGE_CLASS (sym
))
17081 S_SET_STORAGE_CLASS (sym
, C_THUMBEXT
);
17084 S_SET_STORAGE_CLASS (sym
, C_THUMBSTAT
);
17087 S_SET_STORAGE_CLASS (sym
, C_THUMBLABEL
);
17095 if (ARM_IS_INTERWORK (sym
))
17096 coffsymbol (symbol_get_bfdsym (sym
))->native
->u
.syment
.n_flags
= 0xFF;
17103 for (sym
= symbol_rootP
; sym
!= NULL
; sym
= symbol_next (sym
))
17105 if (ARM_IS_THUMB (sym
))
17107 elf_symbol_type
* elf_sym
;
17109 elf_sym
= elf_symbol (symbol_get_bfdsym (sym
));
17110 bind
= ELF_ST_BIND (elf_sym
->internal_elf_sym
.st_info
);
17112 if (! bfd_is_arm_mapping_symbol_name (elf_sym
->symbol
.name
))
17114 /* If it's a .thumb_func, declare it as so,
17115 otherwise tag label as .code 16. */
17116 if (THUMB_IS_FUNC (sym
))
17117 elf_sym
->internal_elf_sym
.st_info
=
17118 ELF_ST_INFO (bind
, STT_ARM_TFUNC
);
17120 elf_sym
->internal_elf_sym
.st_info
=
17121 ELF_ST_INFO (bind
, STT_ARM_16BIT
);
17128 /* MD interface: Initialization. */
17131 set_constant_flonums (void)
17135 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
17136 if (atof_ieee ((char *) fp_const
[i
], 'x', fp_values
[i
]) == NULL
)
17146 if ( (arm_ops_hsh
= hash_new ()) == NULL
17147 || (arm_cond_hsh
= hash_new ()) == NULL
17148 || (arm_shift_hsh
= hash_new ()) == NULL
17149 || (arm_psr_hsh
= hash_new ()) == NULL
17150 || (arm_v7m_psr_hsh
= hash_new ()) == NULL
17151 || (arm_reg_hsh
= hash_new ()) == NULL
17152 || (arm_reloc_hsh
= hash_new ()) == NULL
17153 || (arm_barrier_opt_hsh
= hash_new ()) == NULL
)
17154 as_fatal (_("virtual memory exhausted"));
17156 for (i
= 0; i
< sizeof (insns
) / sizeof (struct asm_opcode
); i
++)
17157 hash_insert (arm_ops_hsh
, insns
[i
].template, (PTR
) (insns
+ i
));
17158 for (i
= 0; i
< sizeof (conds
) / sizeof (struct asm_cond
); i
++)
17159 hash_insert (arm_cond_hsh
, conds
[i
].template, (PTR
) (conds
+ i
));
17160 for (i
= 0; i
< sizeof (shift_names
) / sizeof (struct asm_shift_name
); i
++)
17161 hash_insert (arm_shift_hsh
, shift_names
[i
].name
, (PTR
) (shift_names
+ i
));
17162 for (i
= 0; i
< sizeof (psrs
) / sizeof (struct asm_psr
); i
++)
17163 hash_insert (arm_psr_hsh
, psrs
[i
].template, (PTR
) (psrs
+ i
));
17164 for (i
= 0; i
< sizeof (v7m_psrs
) / sizeof (struct asm_psr
); i
++)
17165 hash_insert (arm_v7m_psr_hsh
, v7m_psrs
[i
].template, (PTR
) (v7m_psrs
+ i
));
17166 for (i
= 0; i
< sizeof (reg_names
) / sizeof (struct reg_entry
); i
++)
17167 hash_insert (arm_reg_hsh
, reg_names
[i
].name
, (PTR
) (reg_names
+ i
));
17169 i
< sizeof (barrier_opt_names
) / sizeof (struct asm_barrier_opt
);
17171 hash_insert (arm_barrier_opt_hsh
, barrier_opt_names
[i
].template,
17172 (PTR
) (barrier_opt_names
+ i
));
17174 for (i
= 0; i
< sizeof (reloc_names
) / sizeof (struct reloc_entry
); i
++)
17175 hash_insert (arm_reloc_hsh
, reloc_names
[i
].name
, (PTR
) (reloc_names
+ i
));
17178 set_constant_flonums ();
17180 /* Set the cpu variant based on the command-line options. We prefer
17181 -mcpu= over -march= if both are set (as for GCC); and we prefer
17182 -mfpu= over any other way of setting the floating point unit.
   Use of legacy options with new options is faulted.  */
17186 if (mcpu_cpu_opt
|| march_cpu_opt
)
17187 as_bad (_("use of old and new-style options to set CPU type"));
17189 mcpu_cpu_opt
= legacy_cpu
;
17191 else if (!mcpu_cpu_opt
)
17192 mcpu_cpu_opt
= march_cpu_opt
;
17197 as_bad (_("use of old and new-style options to set FPU type"));
17199 mfpu_opt
= legacy_fpu
;
17201 else if (!mfpu_opt
)
17203 #if !(defined (TE_LINUX) || defined (TE_NetBSD) || defined (TE_VXWORKS))
17204 /* Some environments specify a default FPU. If they don't, infer it
17205 from the processor. */
17207 mfpu_opt
= mcpu_fpu_opt
;
17209 mfpu_opt
= march_fpu_opt
;
17211 mfpu_opt
= &fpu_default
;
17218 mfpu_opt
= &fpu_default
;
17219 else if (ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt
, arm_ext_v5
))
17220 mfpu_opt
= &fpu_arch_vfp_v2
;
17222 mfpu_opt
= &fpu_arch_fpa
;
17228 mcpu_cpu_opt
= &cpu_default
;
17229 selected_cpu
= cpu_default
;
17233 selected_cpu
= *mcpu_cpu_opt
;
17235 mcpu_cpu_opt
= &arm_arch_any
;
17238 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
17240 arm_arch_used
= thumb_arch_used
= arm_arch_none
;
17242 #if defined OBJ_COFF || defined OBJ_ELF
17244 unsigned int flags
= 0;
17246 #if defined OBJ_ELF
17247 flags
= meabi_flags
;
17249 switch (meabi_flags
)
17251 case EF_ARM_EABI_UNKNOWN
:
17253 /* Set the flags in the private structure. */
17254 if (uses_apcs_26
) flags
|= F_APCS26
;
17255 if (support_interwork
) flags
|= F_INTERWORK
;
17256 if (uses_apcs_float
) flags
|= F_APCS_FLOAT
;
17257 if (pic_code
) flags
|= F_PIC
;
17258 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_any_hard
))
17259 flags
|= F_SOFT_FLOAT
;
17261 switch (mfloat_abi_opt
)
17263 case ARM_FLOAT_ABI_SOFT
:
17264 case ARM_FLOAT_ABI_SOFTFP
:
17265 flags
|= F_SOFT_FLOAT
;
17268 case ARM_FLOAT_ABI_HARD
:
17269 if (flags
& F_SOFT_FLOAT
)
17270 as_bad (_("hard-float conflicts with specified fpu"));
17274 /* Using pure-endian doubles (even if soft-float). */
17275 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
17276 flags
|= F_VFP_FLOAT
;
17278 #if defined OBJ_ELF
17279 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_maverick
))
17280 flags
|= EF_ARM_MAVERICK_FLOAT
;
17283 case EF_ARM_EABI_VER4
:
17284 case EF_ARM_EABI_VER5
:
17285 /* No additional flags to set. */
17292 bfd_set_private_flags (stdoutput
, flags
);
  /* We have run out of flags in the COFF header to encode the
17295 status of ATPCS support, so instead we create a dummy,
17296 empty, debug section called .arm.atpcs. */
17301 sec
= bfd_make_section (stdoutput
, ".arm.atpcs");
17305 bfd_set_section_flags
17306 (stdoutput
, sec
, SEC_READONLY
| SEC_DEBUGGING
/* | SEC_HAS_CONTENTS */);
17307 bfd_set_section_size (stdoutput
, sec
, 0);
17308 bfd_set_section_contents (stdoutput
, sec
, NULL
, 0, 0);
17314 /* Record the CPU type as well. */
17315 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt
))
17316 mach
= bfd_mach_arm_iWMMXt
;
17317 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_xscale
))
17318 mach
= bfd_mach_arm_XScale
;
17319 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_maverick
))
17320 mach
= bfd_mach_arm_ep9312
;
17321 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5e
))
17322 mach
= bfd_mach_arm_5TE
;
17323 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5
))
17325 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
17326 mach
= bfd_mach_arm_5T
;
17328 mach
= bfd_mach_arm_5
;
17330 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4
))
17332 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
17333 mach
= bfd_mach_arm_4T
;
17335 mach
= bfd_mach_arm_4
;
17337 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3m
))
17338 mach
= bfd_mach_arm_3M
;
17339 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3
))
17340 mach
= bfd_mach_arm_3
;
17341 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2s
))
17342 mach
= bfd_mach_arm_2a
;
17343 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2
))
17344 mach
= bfd_mach_arm_2
;
17346 mach
= bfd_mach_arm_unknown
;
17348 bfd_set_arch_mach (stdoutput
, TARGET_ARCH
, mach
);
17351 /* Command line processing. */
17354 Invocation line includes a switch not recognized by the base assembler.
17355 See if it's a processor-specific option.
17357 This routine is somewhat complicated by the need for backwards
17358 compatibility (since older releases of gcc can't be changed).
17359 The new options try to make the interface as compatible as
17362 New options (supported) are:
17364 -mcpu=<cpu name> Assemble for selected processor
17365 -march=<architecture name> Assemble for selected architecture
17366 -mfpu=<fpu architecture> Assemble for selected FPU.
17367 -EB/-mbig-endian Big-endian
17368 -EL/-mlittle-endian Little-endian
17369 -k Generate PIC code
17370 -mthumb Start in Thumb mode
17371 -mthumb-interwork Code supports ARM/Thumb interworking
   For now we will also provide support for:

   -mapcs-32		      32-bit Program counter
   -mapcs-26		      26-bit Program counter
   -mapcs-float		      Floats passed in FP registers
   -mapcs-reentrant	      Reentrant code

   (sometime these will probably be replaced with -mapcs=<list of options>
   and -matpcs=<list of options>)

   The remaining options are only supported for backwards compatibility.
17384 Cpu variants, the arm part is optional:
17385 -m[arm]1 Currently not supported.
17386 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
17387 -m[arm]3 Arm 3 processor
17388 -m[arm]6[xx], Arm 6 processors
17389 -m[arm]7[xx][t][[d]m] Arm 7 processors
17390 -m[arm]8[10] Arm 8 processors
17391 -m[arm]9[20][tdmi] Arm 9 processors
17392 -mstrongarm[110[0]] StrongARM processors
17393 -mxscale XScale processors
17394 -m[arm]v[2345[t[e]]] Arm architectures
17395 -mall All (except the ARM1)
17397 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
17398 -mfpe-old (No float load/store multiples)
17399 -mvfpxd VFP Single precision
17401 -mno-fpu Disable all floating point instructions
17403 The following CPU names are recognized:
17404 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
17405 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
17406 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
17407 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
17408 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
17409 arm10t arm10e, arm1020t, arm1020e, arm10200e,
17410 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
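
   Example invocations (illustrative only; the source file names are
   arbitrary, everything else is drawn from the option lists above):

     as -mcpu=arm7tdmi -mthumb-interwork -o foo.o foo.s
     as -march=armv5te -mfpu=vfp -mfloat-abi=softfp -EL -o bar.o bar.s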
17414 const char * md_shortopts
= "m:k";
17416 #ifdef ARM_BI_ENDIAN
17417 #define OPTION_EB (OPTION_MD_BASE + 0)
17418 #define OPTION_EL (OPTION_MD_BASE + 1)
17420 #if TARGET_BYTES_BIG_ENDIAN
17421 #define OPTION_EB (OPTION_MD_BASE + 0)
17423 #define OPTION_EL (OPTION_MD_BASE + 1)
17427 struct option md_longopts
[] =
17430 {"EB", no_argument
, NULL
, OPTION_EB
},
17433 {"EL", no_argument
, NULL
, OPTION_EL
},
17435 {NULL
, no_argument
, NULL
, 0}
17438 size_t md_longopts_size
= sizeof (md_longopts
);
17440 struct arm_option_table
17442 char *option
; /* Option name to match. */
17443 char *help
; /* Help information. */
17444 int *var
; /* Variable to change. */
17445 int value
; /* What to change it to. */
17446 char *deprecated
; /* If non-null, print this message. */
17449 struct arm_option_table arm_opts
[] =
17451 {"k", N_("generate PIC code"), &pic_code
, 1, NULL
},
17452 {"mthumb", N_("assemble Thumb code"), &thumb_mode
, 1, NULL
},
17453 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
17454 &support_interwork
, 1, NULL
},
17455 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26
, 0, NULL
},
17456 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26
, 1, NULL
},
17457 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float
,
17459 {"mapcs-reentrant", N_("re-entrant code"), &pic_code
, 1, NULL
},
17460 {"matpcs", N_("code is ATPCS conformant"), &atpcs
, 1, NULL
},
17461 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian
, 1, NULL
},
17462 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian
, 0,
  /* These are recognized by the assembler, but have no effect on code.  */
17466 {"mapcs-frame", N_("use frame pointer"), NULL
, 0, NULL
},
17467 {"mapcs-stack-check", N_("use stack size checking"), NULL
, 0, NULL
},
17468 {NULL
, NULL
, NULL
, 0, NULL
}
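
/* Illustrative note (not part of the original sources): a command line
   such as "-mthumb -mbig-endian" is matched against the table above,
   setting thumb_mode and target_big_endian; entries with a NULL var
   (e.g. -mapcs-frame) are accepted but change nothing.  */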
17471 struct arm_legacy_option_table
17473 char *option
; /* Option name to match. */
17474 const arm_feature_set
**var
; /* Variable to change. */
17475 const arm_feature_set value
; /* What to change it to. */
17476 char *deprecated
; /* If non-null, print this message. */
17479 const struct arm_legacy_option_table arm_legacy_opts
[] =
17481 /* DON'T add any new processors to this list -- we want the whole list
17482 to go away... Add them to the processors table instead. */
17483 {"marm1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
17484 {"m1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
17485 {"marm2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
17486 {"m2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
17487 {"marm250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
17488 {"m250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
17489 {"marm3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
17490 {"m3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
17491 {"marm6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
17492 {"m6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
17493 {"marm600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
17494 {"m600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
17495 {"marm610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
17496 {"m610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
17497 {"marm620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
17498 {"m620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
17499 {"marm7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
17500 {"m7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
17501 {"marm70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
17502 {"m70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
17503 {"marm700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
17504 {"m700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
17505 {"marm700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
17506 {"m700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
17507 {"marm710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
17508 {"m710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
17509 {"marm710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
17510 {"m710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
17511 {"marm720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
17512 {"m720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
17513 {"marm7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
17514 {"m7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
17515 {"marm7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
17516 {"m7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
17517 {"marm7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
17518 {"m7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
17519 {"marm7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
17520 {"m7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
17521 {"marm7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
17522 {"m7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
17523 {"marm7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
17524 {"m7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
17525 {"marm7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
17526 {"m7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
17527 {"marm7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
17528 {"m7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
17529 {"marm7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
17530 {"m7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
17531 {"marm7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
17532 {"m7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
17533 {"marm710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
17534 {"m710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
17535 {"marm720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
17536 {"m720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
17537 {"marm740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
17538 {"m740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
17539 {"marm8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
17540 {"m8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
17541 {"marm810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
17542 {"m810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
17543 {"marm9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
17544 {"m9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
17545 {"marm9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
17546 {"m9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
17547 {"marm920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
17548 {"m920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
17549 {"marm940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
17550 {"m940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
17551 {"mstrongarm", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=strongarm")},
17552 {"mstrongarm110", &legacy_cpu
, ARM_ARCH_V4
,
17553 N_("use -mcpu=strongarm110")},
17554 {"mstrongarm1100", &legacy_cpu
, ARM_ARCH_V4
,
17555 N_("use -mcpu=strongarm1100")},
17556 {"mstrongarm1110", &legacy_cpu
, ARM_ARCH_V4
,
17557 N_("use -mcpu=strongarm1110")},
17558 {"mxscale", &legacy_cpu
, ARM_ARCH_XSCALE
, N_("use -mcpu=xscale")},
17559 {"miwmmxt", &legacy_cpu
, ARM_ARCH_IWMMXT
, N_("use -mcpu=iwmmxt")},
17560 {"mall", &legacy_cpu
, ARM_ANY
, N_("use -mcpu=all")},
17562 /* Architecture variants -- don't add any more to this list either. */
17563 {"mv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
17564 {"marmv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
17565 {"mv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
17566 {"marmv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
17567 {"mv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
17568 {"marmv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
17569 {"mv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
17570 {"marmv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
17571 {"mv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
17572 {"marmv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
17573 {"mv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
17574 {"marmv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
17575 {"mv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
17576 {"marmv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
17577 {"mv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
17578 {"marmv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
17579 {"mv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
17580 {"marmv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
17582 /* Floating point variants -- don't add any more to this list either. */
17583 {"mfpe-old", &legacy_fpu
, FPU_ARCH_FPE
, N_("use -mfpu=fpe")},
17584 {"mfpa10", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa10")},
17585 {"mfpa11", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa11")},
17586 {"mno-fpu", &legacy_fpu
, ARM_ARCH_NONE
,
17587 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
17589 {NULL
, NULL
, ARM_ARCH_NONE
, NULL
}
17592 struct arm_cpu_option_table
17595 const arm_feature_set value
;
17596 /* For some CPUs we assume an FPU unless the user explicitly sets
17598 const arm_feature_set default_fpu
;
17599 /* The canonical name of the CPU, or NULL to use NAME converted to upper
17601 const char *canonical_name
;
17604 /* This list should, at a minimum, contain all the cpu names
17605 recognized by GCC. */
17606 static const struct arm_cpu_option_table arm_cpus
[] =
17608 {"all", ARM_ANY
, FPU_ARCH_FPA
, NULL
},
17609 {"arm1", ARM_ARCH_V1
, FPU_ARCH_FPA
, NULL
},
17610 {"arm2", ARM_ARCH_V2
, FPU_ARCH_FPA
, NULL
},
17611 {"arm250", ARM_ARCH_V2S
, FPU_ARCH_FPA
, NULL
},
17612 {"arm3", ARM_ARCH_V2S
, FPU_ARCH_FPA
, NULL
},
17613 {"arm6", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17614 {"arm60", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17615 {"arm600", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17616 {"arm610", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17617 {"arm620", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17618 {"arm7", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17619 {"arm7m", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
},
17620 {"arm7d", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17621 {"arm7dm", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
},
17622 {"arm7di", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17623 {"arm7dmi", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
},
17624 {"arm70", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17625 {"arm700", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17626 {"arm700i", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17627 {"arm710", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17628 {"arm710t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
17629 {"arm720", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17630 {"arm720t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
17631 {"arm740t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
17632 {"arm710c", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17633 {"arm7100", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17634 {"arm7500", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17635 {"arm7500fe", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17636 {"arm7t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
17637 {"arm7tdmi", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
17638 {"arm7tdmi-s", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
17639 {"arm8", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
17640 {"arm810", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
17641 {"strongarm", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
17642 {"strongarm1", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
17643 {"strongarm110", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
17644 {"strongarm1100", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
17645 {"strongarm1110", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
17646 {"arm9", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
17647 {"arm920", ARM_ARCH_V4T
, FPU_ARCH_FPA
, "ARM920T"},
17648 {"arm920t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
17649 {"arm922t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
17650 {"arm940t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
17651 {"arm9tdmi", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
17652 /* For V5 or later processors we default to using VFP; but the user
17653 should really set the FPU type explicitly. */
17654 {"arm9e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
},
17655 {"arm9e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
17656 {"arm926ej", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, "ARM926EJ-S"},
17657 {"arm926ejs", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, "ARM926EJ-S"},
17658 {"arm926ej-s", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, NULL
},
17659 {"arm946e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
},
17660 {"arm946e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM946E-S"},
17661 {"arm946e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
17662 {"arm966e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
},
17663 {"arm966e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM966E-S"},
17664 {"arm966e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
17665 {"arm968e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
17666 {"arm10t", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
},
17667 {"arm10tdmi", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
},
17668 {"arm10e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
17669 {"arm1020", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM1020E"},
17670 {"arm1020t", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
},
17671 {"arm1020e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
17672 {"arm1022e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
17673 {"arm1026ejs", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, "ARM1026EJ-S"},
17674 {"arm1026ej-s", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, NULL
},
17675 {"arm1136js", ARM_ARCH_V6
, FPU_NONE
, "ARM1136J-S"},
17676 {"arm1136j-s", ARM_ARCH_V6
, FPU_NONE
, NULL
},
17677 {"arm1136jfs", ARM_ARCH_V6
, FPU_ARCH_VFP_V2
, "ARM1136JF-S"},
17678 {"arm1136jf-s", ARM_ARCH_V6
, FPU_ARCH_VFP_V2
, NULL
},
17679 {"mpcore", ARM_ARCH_V6K
, FPU_ARCH_VFP_V2
, NULL
},
17680 {"mpcorenovfp", ARM_ARCH_V6K
, FPU_NONE
, NULL
},
17681 {"arm1156t2-s", ARM_ARCH_V6T2
, FPU_NONE
, NULL
},
17682 {"arm1156t2f-s", ARM_ARCH_V6T2
, FPU_ARCH_VFP_V2
, NULL
},
17683 {"arm1176jz-s", ARM_ARCH_V6ZK
, FPU_NONE
, NULL
},
17684 {"arm1176jzf-s", ARM_ARCH_V6ZK
, FPU_ARCH_VFP_V2
, NULL
},
17685 {"cortex-a8", ARM_ARCH_V7A
, ARM_FEATURE(0, FPU_VFP_V3
17686 | FPU_NEON_EXT_V1
),
17688 {"cortex-r4", ARM_ARCH_V7R
, FPU_NONE
, NULL
},
17689 {"cortex-m3", ARM_ARCH_V7M
, FPU_NONE
, NULL
},
17690 /* ??? XSCALE is really an architecture. */
17691 {"xscale", ARM_ARCH_XSCALE
, FPU_ARCH_VFP_V2
, NULL
},
17692 /* ??? iwmmxt is not a processor. */
17693 {"iwmmxt", ARM_ARCH_IWMMXT
, FPU_ARCH_VFP_V2
, NULL
},
17694 {"i80200", ARM_ARCH_XSCALE
, FPU_ARCH_VFP_V2
, NULL
},
17696 {"ep9312", ARM_FEATURE(ARM_AEXT_V4T
, ARM_CEXT_MAVERICK
), FPU_ARCH_MAVERICK
, "ARM920T"},
17697 {NULL
, ARM_ARCH_NONE
, ARM_ARCH_NONE
, NULL
}
17700 struct arm_arch_option_table
17703 const arm_feature_set value
;
17704 const arm_feature_set default_fpu
;
17707 /* This list should, at a minimum, contain all the architecture names
17708 recognized by GCC. */
17709 static const struct arm_arch_option_table arm_archs
[] =
17711 {"all", ARM_ANY
, FPU_ARCH_FPA
},
17712 {"armv1", ARM_ARCH_V1
, FPU_ARCH_FPA
},
17713 {"armv2", ARM_ARCH_V2
, FPU_ARCH_FPA
},
17714 {"armv2a", ARM_ARCH_V2S
, FPU_ARCH_FPA
},
17715 {"armv2s", ARM_ARCH_V2S
, FPU_ARCH_FPA
},
17716 {"armv3", ARM_ARCH_V3
, FPU_ARCH_FPA
},
17717 {"armv3m", ARM_ARCH_V3M
, FPU_ARCH_FPA
},
17718 {"armv4", ARM_ARCH_V4
, FPU_ARCH_FPA
},
17719 {"armv4xm", ARM_ARCH_V4xM
, FPU_ARCH_FPA
},
17720 {"armv4t", ARM_ARCH_V4T
, FPU_ARCH_FPA
},
17721 {"armv4txm", ARM_ARCH_V4TxM
, FPU_ARCH_FPA
},
17722 {"armv5", ARM_ARCH_V5
, FPU_ARCH_VFP
},
17723 {"armv5t", ARM_ARCH_V5T
, FPU_ARCH_VFP
},
17724 {"armv5txm", ARM_ARCH_V5TxM
, FPU_ARCH_VFP
},
17725 {"armv5te", ARM_ARCH_V5TE
, FPU_ARCH_VFP
},
17726 {"armv5texp", ARM_ARCH_V5TExP
, FPU_ARCH_VFP
},
17727 {"armv5tej", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP
},
17728 {"armv6", ARM_ARCH_V6
, FPU_ARCH_VFP
},
17729 {"armv6j", ARM_ARCH_V6
, FPU_ARCH_VFP
},
17730 {"armv6k", ARM_ARCH_V6K
, FPU_ARCH_VFP
},
17731 {"armv6z", ARM_ARCH_V6Z
, FPU_ARCH_VFP
},
17732 {"armv6zk", ARM_ARCH_V6ZK
, FPU_ARCH_VFP
},
17733 {"armv6t2", ARM_ARCH_V6T2
, FPU_ARCH_VFP
},
17734 {"armv6kt2", ARM_ARCH_V6KT2
, FPU_ARCH_VFP
},
17735 {"armv6zt2", ARM_ARCH_V6ZT2
, FPU_ARCH_VFP
},
17736 {"armv6zkt2", ARM_ARCH_V6ZKT2
, FPU_ARCH_VFP
},
17737 {"armv7", ARM_ARCH_V7
, FPU_ARCH_VFP
},
17738 {"armv7a", ARM_ARCH_V7A
, FPU_ARCH_VFP
},
17739 {"armv7r", ARM_ARCH_V7R
, FPU_ARCH_VFP
},
17740 {"armv7m", ARM_ARCH_V7M
, FPU_ARCH_VFP
},
17741 {"xscale", ARM_ARCH_XSCALE
, FPU_ARCH_VFP
},
17742 {"iwmmxt", ARM_ARCH_IWMMXT
, FPU_ARCH_VFP
},
17743 {NULL
, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
17746 /* ISA extensions in the co-processor space. */
17747 struct arm_option_cpu_value_table
17750 const arm_feature_set value
;
17753 static const struct arm_option_cpu_value_table arm_extensions
[] =
17755 {"maverick", ARM_FEATURE (0, ARM_CEXT_MAVERICK
)},
17756 {"xscale", ARM_FEATURE (0, ARM_CEXT_XSCALE
)},
17757 {"iwmmxt", ARM_FEATURE (0, ARM_CEXT_IWMMXT
)},
17758 {NULL
, ARM_ARCH_NONE
}
17761 /* This list should, at a minimum, contain all the fpu names
17762 recognized by GCC. */
17763 static const struct arm_option_cpu_value_table arm_fpus
[] =
17765 {"softfpa", FPU_NONE
},
17766 {"fpe", FPU_ARCH_FPE
},
17767 {"fpe2", FPU_ARCH_FPE
},
17768 {"fpe3", FPU_ARCH_FPA
}, /* Third release supports LFM/SFM. */
17769 {"fpa", FPU_ARCH_FPA
},
17770 {"fpa10", FPU_ARCH_FPA
},
17771 {"fpa11", FPU_ARCH_FPA
},
17772 {"arm7500fe", FPU_ARCH_FPA
},
17773 {"softvfp", FPU_ARCH_VFP
},
17774 {"softvfp+vfp", FPU_ARCH_VFP_V2
},
17775 {"vfp", FPU_ARCH_VFP_V2
},
17776 {"vfp9", FPU_ARCH_VFP_V2
},
17777 {"vfp3", FPU_ARCH_VFP_V3
},
17778 {"vfp10", FPU_ARCH_VFP_V2
},
17779 {"vfp10-r0", FPU_ARCH_VFP_V1
},
17780 {"vfpxd", FPU_ARCH_VFP_V1xD
},
17781 {"arm1020t", FPU_ARCH_VFP_V1
},
17782 {"arm1020e", FPU_ARCH_VFP_V2
},
17783 {"arm1136jfs", FPU_ARCH_VFP_V2
},
17784 {"arm1136jf-s", FPU_ARCH_VFP_V2
},
17785 {"maverick", FPU_ARCH_MAVERICK
},
17786 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1
},
17787 {NULL
, ARM_ARCH_NONE
}
17790 struct arm_option_value_table
17796 static const struct arm_option_value_table arm_float_abis
[] =
17798 {"hard", ARM_FLOAT_ABI_HARD
},
17799 {"softfp", ARM_FLOAT_ABI_SOFTFP
},
17800 {"soft", ARM_FLOAT_ABI_SOFT
},
17805 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
17806 static const struct arm_option_value_table arm_eabis
[] =
17808 {"gnu", EF_ARM_EABI_UNKNOWN
},
17809 {"4", EF_ARM_EABI_VER4
},
17810 {"5", EF_ARM_EABI_VER5
},
17815 struct arm_long_option_table
17817 char * option
; /* Substring to match. */
17818 char * help
; /* Help information. */
17819 int (* func
) (char * subopt
); /* Function to decode sub-option. */
17820 char * deprecated
; /* If non-null, print this message. */
17824 arm_parse_extension (char * str
, const arm_feature_set
**opt_p
)
17826 arm_feature_set
*ext_set
= xmalloc (sizeof (arm_feature_set
));
17828 /* Copy the feature set, so that we can modify it. */
17829 *ext_set
= **opt_p
;
17832 while (str
!= NULL
&& *str
!= 0)
17834 const struct arm_option_cpu_value_table
* opt
;
17840 as_bad (_("invalid architectural extension"));
17845 ext
= strchr (str
, '+');
17848 optlen
= ext
- str
;
17850 optlen
= strlen (str
);
17854 as_bad (_("missing architectural extension"));
17858 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
17859 if (strncmp (opt
->name
, str
, optlen
) == 0)
17861 ARM_MERGE_FEATURE_SETS (*ext_set
, *ext_set
, opt
->value
);
17865 if (opt
->name
== NULL
)
17867 as_bad (_("unknown architectural extnsion `%s'"), str
);
17878 arm_parse_cpu (char * str
)
17880 const struct arm_cpu_option_table
* opt
;
17881 char * ext
= strchr (str
, '+');
17885 optlen
= ext
- str
;
17887 optlen
= strlen (str
);
17891 as_bad (_("missing cpu name `%s'"), str
);
17895 for (opt
= arm_cpus
; opt
->name
!= NULL
; opt
++)
17896 if (strncmp (opt
->name
, str
, optlen
) == 0)
17898 mcpu_cpu_opt
= &opt
->value
;
17899 mcpu_fpu_opt
= &opt
->default_fpu
;
17900 if (opt
->canonical_name
)
17901 strcpy(selected_cpu_name
, opt
->canonical_name
);
17905 for (i
= 0; i
< optlen
; i
++)
17906 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
17907 selected_cpu_name
[i
] = 0;
17911 return arm_parse_extension (ext
, &mcpu_cpu_opt
);
17916 as_bad (_("unknown cpu `%s'"), str
);
17921 arm_parse_arch (char * str
)
17923 const struct arm_arch_option_table
*opt
;
17924 char *ext
= strchr (str
, '+');
17928 optlen
= ext
- str
;
17930 optlen
= strlen (str
);
17934 as_bad (_("missing architecture name `%s'"), str
);
17938 for (opt
= arm_archs
; opt
->name
!= NULL
; opt
++)
17939 if (streq (opt
->name
, str
))
17941 march_cpu_opt
= &opt
->value
;
17942 march_fpu_opt
= &opt
->default_fpu
;
17943 strcpy(selected_cpu_name
, opt
->name
);
17946 return arm_parse_extension (ext
, &march_cpu_opt
);
17951 as_bad (_("unknown architecture `%s'\n"), str
);
17956 arm_parse_fpu (char * str
)
17958 const struct arm_option_cpu_value_table
* opt
;
17960 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
17961 if (streq (opt
->name
, str
))
17963 mfpu_opt
= &opt
->value
;
17967 as_bad (_("unknown floating point format `%s'\n"), str
);
17972 arm_parse_float_abi (char * str
)
17974 const struct arm_option_value_table
* opt
;
17976 for (opt
= arm_float_abis
; opt
->name
!= NULL
; opt
++)
17977 if (streq (opt
->name
, str
))
17979 mfloat_abi_opt
= opt
->value
;
17983 as_bad (_("unknown floating point abi `%s'\n"), str
);
17989 arm_parse_eabi (char * str
)
17991 const struct arm_option_value_table
*opt
;
17993 for (opt
= arm_eabis
; opt
->name
!= NULL
; opt
++)
17994 if (streq (opt
->name
, str
))
17996 meabi_flags
= opt
->value
;
17999 as_bad (_("unknown EABI `%s'\n"), str
);
18004 struct arm_long_option_table arm_long_opts
[] =
18006 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
18007 arm_parse_cpu
, NULL
},
18008 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
18009 arm_parse_arch
, NULL
},
18010 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
18011 arm_parse_fpu
, NULL
},
18012 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
18013 arm_parse_float_abi
, NULL
},
18015 {"meabi=", N_("<ver>\t assemble for eabi version <ver>"),
18016 arm_parse_eabi
, NULL
},
18018 {NULL
, NULL
, 0, NULL
}
18022 md_parse_option (int c
, char * arg
)
18024 struct arm_option_table
*opt
;
18025 const struct arm_legacy_option_table
*fopt
;
18026 struct arm_long_option_table
*lopt
;
18032 target_big_endian
= 1;
18038 target_big_endian
= 0;
18043 /* Listing option. Just ignore these, we don't support additional
18048 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
18050 if (c
== opt
->option
[0]
18051 && ((arg
== NULL
&& opt
->option
[1] == 0)
18052 || streq (arg
, opt
->option
+ 1)))
18054 #if WARN_DEPRECATED
18055 /* If the option is deprecated, tell the user. */
18056 if (opt
->deprecated
!= NULL
)
18057 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
18058 arg
? arg
: "", _(opt
->deprecated
));
18061 if (opt
->var
!= NULL
)
18062 *opt
->var
= opt
->value
;
18068 for (fopt
= arm_legacy_opts
; fopt
->option
!= NULL
; fopt
++)
18070 if (c
== fopt
->option
[0]
18071 && ((arg
== NULL
&& fopt
->option
[1] == 0)
18072 || streq (arg
, fopt
->option
+ 1)))
18074 #if WARN_DEPRECATED
18075 /* If the option is deprecated, tell the user. */
18076 if (fopt
->deprecated
!= NULL
)
18077 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
18078 arg
? arg
: "", _(fopt
->deprecated
));
18081 if (fopt
->var
!= NULL
)
18082 *fopt
->var
= &fopt
->value
;
18088 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
18090 /* These options are expected to have an argument. */
18091 if (c
== lopt
->option
[0]
18093 && strncmp (arg
, lopt
->option
+ 1,
18094 strlen (lopt
->option
+ 1)) == 0)
18096 #if WARN_DEPRECATED
18097 /* If the option is deprecated, tell the user. */
18098 if (lopt
->deprecated
!= NULL
)
18099 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
, arg
,
18100 _(lopt
->deprecated
));
	  /* Call the sub-option parser.  */
	  return lopt->func (arg + strlen (lopt->option) - 1);
18115 md_show_usage (FILE * fp
)
18117 struct arm_option_table
*opt
;
18118 struct arm_long_option_table
*lopt
;
18120 fprintf (fp
, _(" ARM-specific assembler options:\n"));
18122 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
18123 if (opt
->help
!= NULL
)
18124 fprintf (fp
, " -%-23s%s\n", opt
->option
, _(opt
->help
));
18126 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
18127 if (lopt
->help
!= NULL
)
18128 fprintf (fp
, " -%s%s\n", lopt
->option
, _(lopt
->help
));
18132 -EB assemble code for a big-endian cpu\n"));
18137 -EL assemble code for a little-endian cpu\n"));
18146 arm_feature_set flags
;
18147 } cpu_arch_ver_table
;
18149 /* Mapping from CPU features to EABI CPU arch values. Table must be sorted
18150 least features first. */
18151 static const cpu_arch_ver_table cpu_arch_ver
[] =
18156 {4, ARM_ARCH_V5TE
},
18157 {5, ARM_ARCH_V5TEJ
},
18161 {9, ARM_ARCH_V6T2
},
18162 {10, ARM_ARCH_V7A
},
18163 {10, ARM_ARCH_V7R
},
18164 {10, ARM_ARCH_V7M
},
18168 /* Set the public EABI object attributes. */
18170 aeabi_set_public_attributes (void)
18173 arm_feature_set flags
;
18174 arm_feature_set tmp
;
18175 const cpu_arch_ver_table
*p
;
18177 /* Choose the architecture based on the capabilities of the requested cpu
18178 (if any) and/or the instructions actually used. */
18179 ARM_MERGE_FEATURE_SETS (flags
, arm_arch_used
, thumb_arch_used
);
18180 ARM_MERGE_FEATURE_SETS (flags
, flags
, *mfpu_opt
);
18181 ARM_MERGE_FEATURE_SETS (flags
, flags
, selected_cpu
);
18185 for (p
= cpu_arch_ver
; p
->val
; p
++)
18187 if (ARM_CPU_HAS_FEATURE (tmp
, p
->flags
))
18190 ARM_CLEAR_FEATURE (tmp
, tmp
, p
->flags
);
18194 /* Tag_CPU_name. */
18195 if (selected_cpu_name
[0])
18199 p
= selected_cpu_name
;
18200 if (strncmp(p
, "armv", 4) == 0)
18205 for (i
= 0; p
[i
]; i
++)
18206 p
[i
] = TOUPPER (p
[i
]);
18208 elf32_arm_add_eabi_attr_string (stdoutput
, 5, p
);
18210 /* Tag_CPU_arch. */
18211 elf32_arm_add_eabi_attr_int (stdoutput
, 6, arch
);
18212 /* Tag_CPU_arch_profile. */
18213 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7a
))
18214 elf32_arm_add_eabi_attr_int (stdoutput
, 7, 'A');
18215 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7r
))
18216 elf32_arm_add_eabi_attr_int (stdoutput
, 7, 'R');
18217 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7m
))
18218 elf32_arm_add_eabi_attr_int (stdoutput
, 7, 'M');
18219 /* Tag_ARM_ISA_use. */
18220 if (ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_full
))
18221 elf32_arm_add_eabi_attr_int (stdoutput
, 8, 1);
18222 /* Tag_THUMB_ISA_use. */
18223 if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_full
))
18224 elf32_arm_add_eabi_attr_int (stdoutput
, 9,
18225 ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_t2
) ? 2 : 1);
18226 /* Tag_VFP_arch. */
18227 if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, fpu_vfp_ext_v3
)
18228 || ARM_CPU_HAS_FEATURE (arm_arch_used
, fpu_vfp_ext_v3
))
18229 elf32_arm_add_eabi_attr_int (stdoutput
, 10, 3);
18230 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, fpu_vfp_ext_v2
)
18231 || ARM_CPU_HAS_FEATURE (arm_arch_used
, fpu_vfp_ext_v2
))
18232 elf32_arm_add_eabi_attr_int (stdoutput
, 10, 2);
18233 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, fpu_vfp_ext_v1
)
18234 || ARM_CPU_HAS_FEATURE (arm_arch_used
, fpu_vfp_ext_v1
)
18235 || ARM_CPU_HAS_FEATURE (thumb_arch_used
, fpu_vfp_ext_v1xd
)
18236 || ARM_CPU_HAS_FEATURE (arm_arch_used
, fpu_vfp_ext_v1xd
))
18237 elf32_arm_add_eabi_attr_int (stdoutput
, 10, 1);
18238 /* Tag_WMMX_arch. */
18239 if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_cext_iwmmxt
)
18240 || ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_cext_iwmmxt
))
18241 elf32_arm_add_eabi_attr_int (stdoutput
, 11, 1);
18242 /* Tag_NEON_arch. */
18243 if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, fpu_neon_ext_v1
)
18244 || ARM_CPU_HAS_FEATURE (arm_arch_used
, fpu_neon_ext_v1
))
18245 elf32_arm_add_eabi_attr_int (stdoutput
, 12, 1);
18248 /* Add the .ARM.attributes section. */
18257 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
18260 aeabi_set_public_attributes ();
18261 size
= elf32_arm_eabi_attr_size (stdoutput
);
18262 s
= subseg_new (".ARM.attributes", 0);
18263 bfd_set_section_flags (stdoutput
, s
, SEC_READONLY
| SEC_DATA
);
18264 addr
= frag_now_fix ();
18265 p
= frag_more (size
);
18266 elf32_arm_set_eabi_attr_contents (stdoutput
, (bfd_byte
*)p
, size
);
18268 #endif /* OBJ_ELF */
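
/* Illustrative note (not part of the original sources): assembling with
   -mcpu=arm926ej-s would typically cause the code above to emit
   Tag_CPU_name "ARM926EJ-S", a Tag_CPU_arch value of 5 (ARMv5TEJ per
   cpu_arch_ver), and a Tag_VFP_arch value of 2 if any VFPv2
   instructions were actually used.  */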
18271 /* Parse a .cpu directive. */
18274 s_arm_cpu (int ignored ATTRIBUTE_UNUSED
)
18276 const struct arm_cpu_option_table
*opt
;
18280 name
= input_line_pointer
;
18281 while (*input_line_pointer
&& !ISSPACE(*input_line_pointer
))
18282 input_line_pointer
++;
18283 saved_char
= *input_line_pointer
;
18284 *input_line_pointer
= 0;
18286 /* Skip the first "all" entry. */
18287 for (opt
= arm_cpus
+ 1; opt
->name
!= NULL
; opt
++)
18288 if (streq (opt
->name
, name
))
18290 mcpu_cpu_opt
= &opt
->value
;
18291 selected_cpu
= opt
->value
;
18292 if (opt
->canonical_name
)
18293 strcpy(selected_cpu_name
, opt
->canonical_name
);
18297 for (i
= 0; opt
->name
[i
]; i
++)
18298 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
18299 selected_cpu_name
[i
] = 0;
18301 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
18302 *input_line_pointer
= saved_char
;
18303 demand_empty_rest_of_line ();
18306 as_bad (_("unknown cpu `%s'"), name
);
18307 *input_line_pointer
= saved_char
;
18308 ignore_rest_of_line ();
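
/* Usage sketch (not from the original sources): a directive such as

	.cpu arm920t

   is handled by s_arm_cpu above: the name is looked up in arm_cpus,
   mcpu_cpu_opt and selected_cpu are updated, the merged feature set is
   stored in cpu_variant, and selected_cpu_name becomes "ARM920T".  */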
18312 /* Parse a .arch directive. */
18315 s_arm_arch (int ignored ATTRIBUTE_UNUSED
)
18317 const struct arm_arch_option_table
*opt
;
18321 name
= input_line_pointer
;
18322 while (*input_line_pointer
&& !ISSPACE(*input_line_pointer
))
18323 input_line_pointer
++;
18324 saved_char
= *input_line_pointer
;
18325 *input_line_pointer
= 0;
18327 /* Skip the first "all" entry. */
18328 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
18329 if (streq (opt
->name
, name
))
18331 mcpu_cpu_opt
= &opt
->value
;
18332 selected_cpu
= opt
->value
;
18333 strcpy(selected_cpu_name
, opt
->name
);
18334 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
18335 *input_line_pointer
= saved_char
;
18336 demand_empty_rest_of_line ();
18340 as_bad (_("unknown architecture `%s'\n"), name
);
18341 *input_line_pointer
= saved_char
;
18342 ignore_rest_of_line ();
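
/* Usage sketch (not from the original sources):

	.arch armv5te

   selects the ARMv5TE entry from arm_archs above and re-merges it with
   the current FPU selection into cpu_variant.  */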
18346 /* Parse a .fpu directive. */
18349 s_arm_fpu (int ignored ATTRIBUTE_UNUSED
)
18351 const struct arm_option_cpu_value_table
*opt
;
18355 name
= input_line_pointer
;
18356 while (*input_line_pointer
&& !ISSPACE(*input_line_pointer
))
18357 input_line_pointer
++;
18358 saved_char
= *input_line_pointer
;
18359 *input_line_pointer
= 0;
18361 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
18362 if (streq (opt
->name
, name
))
18364 mfpu_opt
= &opt
->value
;
18365 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
18366 *input_line_pointer
= saved_char
;
18367 demand_empty_rest_of_line ();
18371 as_bad (_("unknown floating point format `%s'\n"), name
);
18372 *input_line_pointer
= saved_char
;
18373 ignore_rest_of_line ();
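
/* Usage sketch (not from the original sources):

	.fpu vfp

   selects FPU_ARCH_VFP_V2 from arm_fpus above and re-merges it with
   the current CPU selection into cpu_variant.  */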