1 /* aarch64-asm.c -- AArch64 assembler support.
2 Copyright (C) 2012-2024 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
23 #include "libiberty.h"
24 #include "aarch64-asm.h"
29 /* The unnamed arguments consist of the number of fields and information about
30 these fields where the VALUE will be inserted into CODE. MASK can be zero or
31 the base mask of the opcode.
33 N.B. the fields are required to be in such an order than the least signficant
34 field for VALUE comes the first, e.g. the <index> in
35 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
36 is encoded in H:L:M in some cases, the fields H:L:M should be passed in
37 the order of M, L, H. */
40 insert_fields (aarch64_insn
*code
, aarch64_insn value
, aarch64_insn mask
, ...)
43 const aarch64_field
*field
;
44 enum aarch64_field_kind kind
;
48 num
= va_arg (va
, uint32_t);
52 kind
= va_arg (va
, enum aarch64_field_kind
);
53 field
= &fields
[kind
];
54 insert_field (kind
, code
, value
, mask
);
55 value
>>= field
->width
;
60 /* Insert a raw field value VALUE into all fields in SELF->fields after START.
61 The least significant bit goes in the final field. */
64 insert_all_fields_after (const aarch64_operand
*self
, unsigned int start
,
65 aarch64_insn
*code
, aarch64_insn value
)
68 enum aarch64_field_kind kind
;
70 for (i
= ARRAY_SIZE (self
->fields
); i
-- > start
; )
71 if (self
->fields
[i
] != FLD_NIL
)
73 kind
= self
->fields
[i
];
74 insert_field (kind
, code
, value
, 0);
75 value
>>= fields
[kind
].width
;
79 /* Insert a raw field value VALUE into all fields in SELF->fields.
80 The least significant bit goes in the final field. */
83 insert_all_fields (const aarch64_operand
*self
, aarch64_insn
*code
,
86 return insert_all_fields_after (self
, 0, code
, value
);
89 /* Operand inserters. */
93 aarch64_ins_none (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
94 const aarch64_opnd_info
*info ATTRIBUTE_UNUSED
,
95 aarch64_insn
*code ATTRIBUTE_UNUSED
,
96 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
97 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
102 /* Insert register number. */
104 aarch64_ins_regno (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
106 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
107 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
109 int val
= info
->reg
.regno
- get_operand_specific_data (self
);
110 insert_field (self
->fields
[0], code
, val
, 0);
114 /* Insert register number, index and/or other data for SIMD register element
115 operand, e.g. the last source operand in
116 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
118 aarch64_ins_reglane (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
119 aarch64_insn
*code
, const aarch64_inst
*inst
,
120 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
123 insert_field (self
->fields
[0], code
, info
->reglane
.regno
, inst
->opcode
->mask
);
124 /* index and/or type */
125 if (inst
->opcode
->iclass
== asisdone
|| inst
->opcode
->iclass
== asimdins
)
127 int pos
= info
->qualifier
- AARCH64_OPND_QLF_S_B
;
128 if (info
->type
== AARCH64_OPND_En
129 && inst
->opcode
->operands
[0] == AARCH64_OPND_Ed
)
131 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
132 assert (info
->idx
== 1); /* Vn */
133 aarch64_insn value
= info
->reglane
.index
<< pos
;
134 insert_field (FLD_imm4_11
, code
, value
, 0);
138 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
145 aarch64_insn value
= ((info
->reglane
.index
<< 1) | 1) << pos
;
146 insert_field (FLD_imm5
, code
, value
, 0);
149 else if (inst
->opcode
->iclass
== dotproduct
)
151 unsigned reglane_index
= info
->reglane
.index
;
152 switch (info
->qualifier
)
154 case AARCH64_OPND_QLF_S_4B
:
155 case AARCH64_OPND_QLF_S_2H
:
157 assert (reglane_index
< 4);
158 insert_fields (code
, reglane_index
, 0, 2, FLD_L
, FLD_H
);
164 else if (inst
->opcode
->iclass
== cryptosm3
)
166 /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>S[<imm2>]. */
167 unsigned reglane_index
= info
->reglane
.index
;
168 assert (reglane_index
< 4);
169 insert_field (FLD_SM3_imm2
, code
, reglane_index
, 0);
173 /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
174 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
175 unsigned reglane_index
= info
->reglane
.index
;
177 if (inst
->opcode
->op
== OP_FCMLA_ELEM
)
178 /* Complex operand takes two elements. */
181 switch (info
->qualifier
)
183 case AARCH64_OPND_QLF_S_H
:
185 assert (reglane_index
< 8);
186 insert_fields (code
, reglane_index
, 0, 3, FLD_M
, FLD_L
, FLD_H
);
188 case AARCH64_OPND_QLF_S_S
:
190 assert (reglane_index
< 4);
191 insert_fields (code
, reglane_index
, 0, 2, FLD_L
, FLD_H
);
193 case AARCH64_OPND_QLF_S_D
:
195 assert (reglane_index
< 2);
196 insert_field (FLD_H
, code
, reglane_index
, 0);
205 /* Insert regno and len field of a register list operand, e.g. Vn in TBL. */
207 aarch64_ins_reglist (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
209 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
210 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
213 insert_field (self
->fields
[0], code
, info
->reglist
.first_regno
, 0);
215 insert_field (FLD_len
, code
, info
->reglist
.num_regs
- 1, 0);
219 /* Insert Rt and opcode fields for a register list operand, e.g. Vt
220 in AdvSIMD load/store instructions. */
222 aarch64_ins_ldst_reglist (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
223 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
224 const aarch64_inst
*inst
,
225 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
227 aarch64_insn value
= 0;
228 /* Number of elements in each structure to be loaded/stored. */
229 unsigned num
= get_opcode_dependent_value (inst
->opcode
);
232 insert_field (FLD_Rt
, code
, info
->reglist
.first_regno
, 0);
237 switch (info
->reglist
.num_regs
)
239 case 1: value
= 0x7; break;
240 case 2: value
= 0xa; break;
241 case 3: value
= 0x6; break;
242 case 4: value
= 0x2; break;
243 default: return false;
247 value
= info
->reglist
.num_regs
== 4 ? 0x3 : 0x8;
258 insert_field (FLD_opcode
, code
, value
, 0);
263 /* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
264 single structure to all lanes instructions. */
266 aarch64_ins_ldst_reglist_r (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
267 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
268 const aarch64_inst
*inst
,
269 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
272 /* The opcode dependent area stores the number of elements in
273 each structure to be loaded/stored. */
274 int is_ld1r
= get_opcode_dependent_value (inst
->opcode
) == 1;
277 insert_field (FLD_Rt
, code
, info
->reglist
.first_regno
, 0);
279 value
= (aarch64_insn
) 0;
280 if (is_ld1r
&& info
->reglist
.num_regs
== 2)
281 /* OP_LD1R does not have alternating variant, but have "two consecutive"
283 value
= (aarch64_insn
) 1;
284 insert_field (FLD_S
, code
, value
, 0);
289 /* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
290 operand e.g. Vt in AdvSIMD load/store single element instructions. */
292 aarch64_ins_ldst_elemlist (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
293 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
294 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
295 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
297 aarch64_field field
= {0, 0};
298 aarch64_insn QSsize
= 0; /* fields Q:S:size. */
299 aarch64_insn opcodeh2
= 0; /* opcode<2:1> */
301 assert (info
->reglist
.has_index
);
304 insert_field (FLD_Rt
, code
, info
->reglist
.first_regno
, 0);
305 /* Encode the index, opcode<2:1> and size. */
306 switch (info
->qualifier
)
308 case AARCH64_OPND_QLF_S_B
:
309 /* Index encoded in "Q:S:size". */
310 QSsize
= info
->reglist
.index
;
313 case AARCH64_OPND_QLF_S_H
:
314 /* Index encoded in "Q:S:size<1>". */
315 QSsize
= info
->reglist
.index
<< 1;
318 case AARCH64_OPND_QLF_S_S
:
319 /* Index encoded in "Q:S". */
320 QSsize
= info
->reglist
.index
<< 2;
323 case AARCH64_OPND_QLF_S_D
:
324 /* Index encoded in "Q". */
325 QSsize
= info
->reglist
.index
<< 3 | 0x1;
331 insert_fields (code
, QSsize
, 0, 3, FLD_vldst_size
, FLD_S
, FLD_Q
);
332 gen_sub_field (FLD_asisdlso_opcode
, 1, 2, &field
);
333 insert_field_2 (&field
, code
, opcodeh2
, 0);
338 /* Insert fields immh:immb and/or Q for e.g. the shift immediate in
339 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
340 or SSHR <V><d>, <V><n>, #<shift>. */
342 aarch64_ins_advsimd_imm_shift (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
343 const aarch64_opnd_info
*info
,
344 aarch64_insn
*code
, const aarch64_inst
*inst
,
345 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
347 unsigned val
= aarch64_get_qualifier_standard_value (info
->qualifier
);
350 if (inst
->opcode
->iclass
== asimdshf
)
354 0000 x SEE AdvSIMD modified immediate
363 Q
= (val
& 0x1) ? 1 : 0;
364 insert_field (FLD_Q
, code
, Q
, inst
->opcode
->mask
);
368 assert (info
->type
== AARCH64_OPND_IMM_VLSR
369 || info
->type
== AARCH64_OPND_IMM_VLSL
);
371 if (info
->type
== AARCH64_OPND_IMM_VLSR
)
374 0000 SEE AdvSIMD modified immediate
375 0001 (16-UInt(immh:immb))
376 001x (32-UInt(immh:immb))
377 01xx (64-UInt(immh:immb))
378 1xxx (128-UInt(immh:immb)) */
379 imm
= (16 << (unsigned)val
) - info
->imm
.value
;
383 0000 SEE AdvSIMD modified immediate
384 0001 (UInt(immh:immb)-8)
385 001x (UInt(immh:immb)-16)
386 01xx (UInt(immh:immb)-32)
387 1xxx (UInt(immh:immb)-64) */
388 imm
= info
->imm
.value
+ (8 << (unsigned)val
);
389 insert_fields (code
, imm
, 0, 2, FLD_immb
, FLD_immh
);
394 /* Insert fields for e.g. the immediate operands in
395 BFM <Wd>, <Wn>, #<immr>, #<imms>. */
397 aarch64_ins_imm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
399 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
400 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
404 imm
= info
->imm
.value
;
405 if (operand_need_shift_by_two (self
))
407 if (operand_need_shift_by_three (self
))
409 if (operand_need_shift_by_four (self
))
411 insert_all_fields (self
, code
, imm
);
415 /* Insert immediate and its shift amount for e.g. the last operand in
416 MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
418 aarch64_ins_imm_half (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
419 aarch64_insn
*code
, const aarch64_inst
*inst
,
420 aarch64_operand_error
*errors
)
423 aarch64_ins_imm (self
, info
, code
, inst
, errors
);
425 insert_field (FLD_hw
, code
, info
->shifter
.amount
>> 4, 0);
429 /* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
430 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
432 aarch64_ins_advsimd_imm_modified (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
433 const aarch64_opnd_info
*info
,
435 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
436 aarch64_operand_error
*errors
439 enum aarch64_opnd_qualifier opnd0_qualifier
= inst
->operands
[0].qualifier
;
440 uint64_t imm
= info
->imm
.value
;
441 enum aarch64_modifier_kind kind
= info
->shifter
.kind
;
442 int amount
= info
->shifter
.amount
;
443 aarch64_field field
= {0, 0};
445 /* a:b:c:d:e:f:g:h */
446 if (!info
->imm
.is_fp
&& aarch64_get_qualifier_esize (opnd0_qualifier
) == 8)
448 /* Either MOVI <Dd>, #<imm>
449 or MOVI <Vd>.2D, #<imm>.
450 <imm> is a 64-bit immediate
451 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
452 encoded in "a:b:c:d:e:f:g:h". */
453 imm
= aarch64_shrink_expanded_imm8 (imm
);
454 assert ((int)imm
>= 0);
456 insert_fields (code
, imm
, 0, 2, FLD_defgh
, FLD_abc
);
458 if (kind
== AARCH64_MOD_NONE
)
461 /* shift amount partially in cmode */
462 assert (kind
== AARCH64_MOD_LSL
|| kind
== AARCH64_MOD_MSL
);
463 if (kind
== AARCH64_MOD_LSL
)
465 /* AARCH64_MOD_LSL: shift zeros. */
466 int esize
= aarch64_get_qualifier_esize (opnd0_qualifier
);
467 assert (esize
== 4 || esize
== 2 || esize
== 1);
468 /* For 8-bit move immediate, the optional LSL #0 does not require
474 gen_sub_field (FLD_cmode
, 1, 2, &field
); /* per word */
476 gen_sub_field (FLD_cmode
, 1, 1, &field
); /* per halfword */
480 /* AARCH64_MOD_MSL: shift ones. */
482 gen_sub_field (FLD_cmode
, 0, 1, &field
); /* per word */
484 insert_field_2 (&field
, code
, amount
, 0);
489 /* Insert fields for an 8-bit floating-point immediate. */
491 aarch64_ins_fpimm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
493 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
494 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
496 insert_all_fields (self
, code
, info
->imm
.value
);
500 /* Insert 1-bit rotation immediate (#90 or #270). */
502 aarch64_ins_imm_rotate1 (const aarch64_operand
*self
,
503 const aarch64_opnd_info
*info
,
504 aarch64_insn
*code
, const aarch64_inst
*inst
,
505 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
507 uint64_t rot
= (info
->imm
.value
- 90) / 180;
509 insert_field (self
->fields
[0], code
, rot
, inst
->opcode
->mask
);
513 /* Insert 2-bit rotation immediate (#0, #90, #180 or #270). */
515 aarch64_ins_imm_rotate2 (const aarch64_operand
*self
,
516 const aarch64_opnd_info
*info
,
517 aarch64_insn
*code
, const aarch64_inst
*inst
,
518 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
520 uint64_t rot
= info
->imm
.value
/ 90;
522 insert_field (self
->fields
[0], code
, rot
, inst
->opcode
->mask
);
526 /* Insert #<fbits> for the immediate operand in fp fix-point instructions,
527 e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
529 aarch64_ins_fbits (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
531 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
532 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
534 insert_field (self
->fields
[0], code
, 64 - info
->imm
.value
, 0);
538 /* Insert arithmetic immediate for e.g. the last operand in
539 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
541 aarch64_ins_aimm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
542 aarch64_insn
*code
, const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
543 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
546 aarch64_insn value
= info
->shifter
.amount
? 1 : 0;
547 insert_field (self
->fields
[0], code
, value
, 0);
548 /* imm12 (unsigned) */
549 insert_field (self
->fields
[1], code
, info
->imm
.value
, 0);
553 /* Common routine shared by aarch64_ins{,_inv}_limm. INVERT_P says whether
554 the operand should be inverted before encoding. */
556 aarch64_ins_limm_1 (const aarch64_operand
*self
,
557 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
558 const aarch64_inst
*inst
, bool invert_p
,
559 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
563 uint64_t imm
= info
->imm
.value
;
564 int esize
= aarch64_get_qualifier_esize (inst
->operands
[0].qualifier
);
568 /* The constraint check should guarantee that this will work. */
569 res
= aarch64_logical_immediate_p (imm
, esize
, &value
);
571 insert_fields (code
, value
, 0, 3, self
->fields
[2], self
->fields
[1],
576 /* Insert logical/bitmask immediate for e.g. the last operand in
577 ORR <Wd|WSP>, <Wn>, #<imm>. */
579 aarch64_ins_limm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
580 aarch64_insn
*code
, const aarch64_inst
*inst
,
581 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
583 return aarch64_ins_limm_1 (self
, info
, code
, inst
,
584 inst
->opcode
->op
== OP_BIC
, errors
);
587 /* Insert a logical/bitmask immediate for the BIC alias of AND (etc.). */
589 aarch64_ins_inv_limm (const aarch64_operand
*self
,
590 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
591 const aarch64_inst
*inst
,
592 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
594 return aarch64_ins_limm_1 (self
, info
, code
, inst
, true, errors
);
597 /* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
598 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
600 aarch64_ins_ft (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
601 aarch64_insn
*code
, const aarch64_inst
*inst
,
602 aarch64_operand_error
*errors
)
604 aarch64_insn value
= 0;
606 assert (info
->idx
== 0);
609 aarch64_ins_regno (self
, info
, code
, inst
, errors
);
610 if (inst
->opcode
->iclass
== ldstpair_indexed
611 || inst
->opcode
->iclass
== ldstnapair_offs
612 || inst
->opcode
->iclass
== ldstpair_off
613 || inst
->opcode
->iclass
== loadlit
)
616 switch (info
->qualifier
)
618 case AARCH64_OPND_QLF_S_S
: value
= 0; break;
619 case AARCH64_OPND_QLF_S_D
: value
= 1; break;
620 case AARCH64_OPND_QLF_S_Q
: value
= 2; break;
621 default: return false;
623 insert_field (FLD_ldst_size
, code
, value
, 0);
628 value
= aarch64_get_qualifier_standard_value (info
->qualifier
);
629 insert_fields (code
, value
, 0, 2, FLD_ldst_size
, FLD_opc1
);
635 /* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
637 aarch64_ins_addr_simple (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
638 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
639 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
640 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
643 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
647 /* Encode the address operand for e.g.
648 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
650 aarch64_ins_addr_regoff (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
651 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
652 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
653 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
656 enum aarch64_modifier_kind kind
= info
->shifter
.kind
;
659 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
661 insert_field (FLD_Rm
, code
, info
->addr
.offset
.regno
, 0);
663 if (kind
== AARCH64_MOD_LSL
)
664 kind
= AARCH64_MOD_UXTX
; /* Trick to enable the table-driven. */
665 insert_field (FLD_option
, code
, aarch64_get_operand_modifier_value (kind
), 0);
667 if (info
->qualifier
!= AARCH64_OPND_QLF_S_B
)
668 S
= info
->shifter
.amount
!= 0;
670 /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}},
674 Must be #0 if <extend> is explicitly LSL. */
675 S
= info
->shifter
.operator_present
&& info
->shifter
.amount_present
;
676 insert_field (FLD_S
, code
, S
, 0);
681 /* Encode the address operand for e.g.
682 stlur <Xt>, [<Xn|SP>{, <amount>}]. */
684 aarch64_ins_addr_offset (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
685 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
686 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
687 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
690 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
693 int imm
= info
->addr
.offset
.imm
;
694 insert_field (self
->fields
[1], code
, imm
, 0);
697 if (info
->addr
.writeback
)
699 assert (info
->addr
.preind
== 1 && info
->addr
.postind
== 0);
700 insert_field (self
->fields
[2], code
, 1, 0);
705 /* Encode the address operand for e.g.
706 stlur <Xt>, [<Xn|SP>{, <amount>}]. */
708 aarch64_ins_rcpc3_addr_offset (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
709 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
710 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
711 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
714 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
717 int imm
= info
->addr
.offset
.imm
;
718 insert_field (self
->fields
[1], code
, imm
, 0);
723 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!. */
725 aarch64_ins_addr_simm (const aarch64_operand
*self
,
726 const aarch64_opnd_info
*info
,
728 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
729 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
734 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
735 /* simm (imm9 or imm7) */
736 imm
= info
->addr
.offset
.imm
;
737 if (self
->fields
[0] == FLD_imm7
738 || info
->qualifier
== AARCH64_OPND_QLF_imm_tag
)
739 /* scaled immediate in ld/st pair instructions.. */
740 imm
>>= get_logsz (aarch64_get_qualifier_esize (info
->qualifier
));
741 insert_field (self
->fields
[0], code
, imm
, 0);
742 /* pre/post- index */
743 if (info
->addr
.writeback
)
745 assert (inst
->opcode
->iclass
!= ldst_unscaled
746 && inst
->opcode
->iclass
!= ldstnapair_offs
747 && inst
->opcode
->iclass
!= ldstpair_off
748 && inst
->opcode
->iclass
!= ldst_unpriv
);
749 assert (info
->addr
.preind
!= info
->addr
.postind
);
750 if (info
->addr
.preind
)
751 insert_field (self
->fields
[1], code
, 1, 0);
757 /* Encode the address operand, potentially offset by the load/store ammount,
758 e.g. LDIAPP <Xt>, <Xt2> [<Xn|SP>, #<simm>]
759 and STILP <Xt>, <Xt2> [<Xn|SP>], #<simm>.*/
761 aarch64_ins_rcpc3_addr_opt_offset (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
762 const aarch64_opnd_info
*info
,
764 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
765 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
770 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
772 imm
= info
->addr
.offset
.imm
;
774 insert_field (FLD_opc2
, code
, 1, 0);
779 /* Encode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
781 aarch64_ins_addr_simm10 (const aarch64_operand
*self
,
782 const aarch64_opnd_info
*info
,
784 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
785 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
790 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
792 imm
= info
->addr
.offset
.imm
>> 3;
793 insert_field (self
->fields
[1], code
, imm
>> 9, 0);
794 insert_field (self
->fields
[2], code
, imm
, 0);
796 if (info
->addr
.writeback
)
798 assert (info
->addr
.preind
== 1 && info
->addr
.postind
== 0);
799 insert_field (self
->fields
[3], code
, 1, 0);
804 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}]. */
806 aarch64_ins_addr_uimm12 (const aarch64_operand
*self
,
807 const aarch64_opnd_info
*info
,
809 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
810 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
812 int shift
= get_logsz (aarch64_get_qualifier_esize (info
->qualifier
));
815 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
817 insert_field (self
->fields
[1], code
,info
->addr
.offset
.imm
>> shift
, 0);
821 /* Encode the address operand for e.g.
822 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
824 aarch64_ins_simd_addr_post (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
825 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
826 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
827 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
830 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
832 if (info
->addr
.offset
.is_reg
)
833 insert_field (FLD_Rm
, code
, info
->addr
.offset
.regno
, 0);
835 insert_field (FLD_Rm
, code
, 0x1f, 0);
839 /* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
841 aarch64_ins_cond (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
842 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
843 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
844 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
847 insert_field (FLD_cond
, code
, info
->cond
->value
, 0);
851 /* Encode the system register operand for e.g. MRS <Xt>, <systemreg>. */
853 aarch64_ins_sysreg (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
854 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
855 const aarch64_inst
*inst
,
856 aarch64_operand_error
*detail ATTRIBUTE_UNUSED
)
858 /* If a system instruction check if we have any restrictions on which
859 registers it can use. */
860 if (inst
->opcode
->iclass
== ic_system
)
862 uint64_t opcode_flags
863 = inst
->opcode
->flags
& (F_SYS_READ
| F_SYS_WRITE
);
864 uint32_t sysreg_flags
865 = info
->sysreg
.flags
& (F_REG_READ
| F_REG_WRITE
);
867 /* Check to see if it's read-only, else check if it's write only.
868 if it's both or unspecified don't care. */
869 if (opcode_flags
== F_SYS_READ
871 && sysreg_flags
!= F_REG_READ
)
873 detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
874 detail
->error
= _("specified register cannot be read from");
875 detail
->index
= info
->idx
;
876 detail
->non_fatal
= true;
878 else if (opcode_flags
== F_SYS_WRITE
880 && sysreg_flags
!= F_REG_WRITE
)
882 detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
883 detail
->error
= _("specified register cannot be written to");
884 detail
->index
= info
->idx
;
885 detail
->non_fatal
= true;
888 /* op0:op1:CRn:CRm:op2 */
889 insert_fields (code
, info
->sysreg
.value
, inst
->opcode
->mask
, 5,
890 FLD_op2
, FLD_CRm
, FLD_CRn
, FLD_op1
, FLD_op0
);
894 /* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
896 aarch64_ins_pstatefield (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
897 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
898 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
899 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
902 insert_fields (code
, info
->pstatefield
, inst
->opcode
->mask
, 2,
905 /* Extra CRm mask. */
906 if (info
->sysreg
.flags
| F_REG_IN_CRM
)
907 insert_field (FLD_CRm
, code
, PSTATE_DECODE_CRM (info
->sysreg
.flags
), 0);
911 /* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
913 aarch64_ins_sysins_op (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
914 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
915 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
916 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
918 /* op1:CRn:CRm:op2 */
919 insert_fields (code
, info
->sysins_op
->value
, inst
->opcode
->mask
, 4,
920 FLD_op2
, FLD_CRm
, FLD_CRn
, FLD_op1
);
924 /* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
927 aarch64_ins_barrier (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
928 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
929 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
930 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
933 insert_field (FLD_CRm
, code
, info
->barrier
->value
, 0);
937 /* Encode the memory barrier option operand for DSB <option>nXS|#<imm>. */
940 aarch64_ins_barrier_dsb_nxs (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
941 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
942 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
943 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
945 /* For the DSB nXS barrier variant: is a 5-bit unsigned immediate,
946 encoded in CRm<3:2>. */
947 aarch64_insn value
= (info
->barrier
->value
>> 2) - 4;
948 insert_field (FLD_CRm_dsb_nxs
, code
, value
, 0);
952 /* Encode the prefetch operation option operand for e.g.
953 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
956 aarch64_ins_prfop (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
957 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
958 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
959 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
962 insert_field (FLD_Rt
, code
, info
->prfop
->value
, 0);
966 /* Encode the hint number for instructions that alias HINT but take an
970 aarch64_ins_hint (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
971 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
972 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
973 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
976 insert_fields (code
, info
->hint_option
->value
, 0, 2, FLD_op2
, FLD_CRm
);
980 /* Encode the extended register operand for e.g.
981 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
983 aarch64_ins_reg_extended (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
984 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
985 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
986 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
988 enum aarch64_modifier_kind kind
;
991 insert_field (FLD_Rm
, code
, info
->reg
.regno
, 0);
993 kind
= info
->shifter
.kind
;
994 if (kind
== AARCH64_MOD_LSL
)
995 kind
= info
->qualifier
== AARCH64_OPND_QLF_W
996 ? AARCH64_MOD_UXTW
: AARCH64_MOD_UXTX
;
997 insert_field (FLD_option
, code
, aarch64_get_operand_modifier_value (kind
), 0);
999 insert_field (FLD_imm3_10
, code
, info
->shifter
.amount
, 0);
1004 /* Encode the shifted register operand for e.g.
1005 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
1007 aarch64_ins_reg_shifted (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
1008 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1009 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1010 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1013 insert_field (FLD_Rm
, code
, info
->reg
.regno
, 0);
1015 insert_field (FLD_shift
, code
,
1016 aarch64_get_operand_modifier_value (info
->shifter
.kind
), 0);
1018 insert_field (FLD_imm6_10
, code
, info
->shifter
.amount
, 0);
1023 /* Encode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
1024 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
1025 SELF's operand-dependent value. fields[0] specifies the field that
1026 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
1028 aarch64_ins_sve_addr_ri_s4xvl (const aarch64_operand
*self
,
1029 const aarch64_opnd_info
*info
,
1031 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1032 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1034 int factor
= 1 + get_operand_specific_data (self
);
1035 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1036 insert_field (FLD_SVE_imm4
, code
, info
->addr
.offset
.imm
/ factor
, 0);
1040 /* Encode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
1041 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
1042 SELF's operand-dependent value. fields[0] specifies the field that
1043 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
1045 aarch64_ins_sve_addr_ri_s6xvl (const aarch64_operand
*self
,
1046 const aarch64_opnd_info
*info
,
1048 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1049 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1051 int factor
= 1 + get_operand_specific_data (self
);
1052 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1053 insert_field (FLD_SVE_imm6
, code
, info
->addr
.offset
.imm
/ factor
, 0);
1057 /* Encode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
1058 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
1059 SELF's operand-dependent value. fields[0] specifies the field that
1060 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
1061 and imm3 fields, with imm3 being the less-significant part. */
1063 aarch64_ins_sve_addr_ri_s9xvl (const aarch64_operand
*self
,
1064 const aarch64_opnd_info
*info
,
1066 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1067 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1069 int factor
= 1 + get_operand_specific_data (self
);
1070 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1071 insert_fields (code
, info
->addr
.offset
.imm
/ factor
, 0,
1072 2, FLD_imm3_10
, FLD_SVE_imm6
);
1076 /* Encode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
1077 is a 4-bit signed number and where <shift> is SELF's operand-dependent
1078 value. fields[0] specifies the base register field. */
1080 aarch64_ins_sve_addr_ri_s4 (const aarch64_operand
*self
,
1081 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1082 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1083 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1085 int factor
= 1 << get_operand_specific_data (self
);
1086 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1087 insert_field (FLD_SVE_imm4
, code
, info
->addr
.offset
.imm
/ factor
, 0);
1091 /* Encode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1092 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1093 value. fields[0] specifies the base register field. */
1095 aarch64_ins_sve_addr_ri_u6 (const aarch64_operand
*self
,
1096 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1097 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1098 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1100 int factor
= 1 << get_operand_specific_data (self
);
1101 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1102 insert_field (FLD_SVE_imm6
, code
, info
->addr
.offset
.imm
/ factor
, 0);
1106 /* Encode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1107 is SELF's operand-dependent value. fields[0] specifies the base
1108 register field and fields[1] specifies the offset register field. */
1110 aarch64_ins_sve_addr_rr_lsl (const aarch64_operand
*self
,
1111 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1112 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1113 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1115 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1116 insert_field (self
->fields
[1], code
, info
->addr
.offset
.regno
, 0);
1120 /* Encode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1121 <shift> is SELF's operand-dependent value. fields[0] specifies the
1122 base register field, fields[1] specifies the offset register field and
1123 fields[2] is a single-bit field that selects SXTW over UXTW. */
1125 aarch64_ins_sve_addr_rz_xtw (const aarch64_operand
*self
,
1126 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1127 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1128 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1130 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1131 insert_field (self
->fields
[1], code
, info
->addr
.offset
.regno
, 0);
1132 if (info
->shifter
.kind
== AARCH64_MOD_UXTW
)
1133 insert_field (self
->fields
[2], code
, 0, 0);
1135 insert_field (self
->fields
[2], code
, 1, 0);
1139 /* Encode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1140 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1141 fields[0] specifies the base register field. */
1143 aarch64_ins_sve_addr_zi_u5 (const aarch64_operand
*self
,
1144 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1145 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1146 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1148 int factor
= 1 << get_operand_specific_data (self
);
1149 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1150 insert_field (FLD_imm5
, code
, info
->addr
.offset
.imm
/ factor
, 0);
1154 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
1155 where <modifier> is fixed by the instruction and where <msz> is a
1156 2-bit unsigned number. fields[0] specifies the base register field
1157 and fields[1] specifies the offset register field. */
1159 aarch64_ext_sve_addr_zz (const aarch64_operand
*self
,
1160 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1161 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1163 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1164 insert_field (self
->fields
[1], code
, info
->addr
.offset
.regno
, 0);
1165 insert_field (FLD_SVE_msz
, code
, info
->shifter
.amount
, 0);
1169 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
1170 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1171 field and fields[1] specifies the offset register field. */
1173 aarch64_ins_sve_addr_zz_lsl (const aarch64_operand
*self
,
1174 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1175 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1176 aarch64_operand_error
*errors
)
1178 return aarch64_ext_sve_addr_zz (self
, info
, code
, errors
);
1181 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
1182 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1183 field and fields[1] specifies the offset register field. */
1185 aarch64_ins_sve_addr_zz_sxtw (const aarch64_operand
*self
,
1186 const aarch64_opnd_info
*info
,
1188 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1189 aarch64_operand_error
*errors
)
1191 return aarch64_ext_sve_addr_zz (self
, info
, code
, errors
);
1194 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
1195 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1196 field and fields[1] specifies the offset register field. */
1198 aarch64_ins_sve_addr_zz_uxtw (const aarch64_operand
*self
,
1199 const aarch64_opnd_info
*info
,
1201 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1202 aarch64_operand_error
*errors
)
1204 return aarch64_ext_sve_addr_zz (self
, info
, code
, errors
);
1207 /* Encode an SVE ADD/SUB immediate. */
1209 aarch64_ins_sve_aimm (const aarch64_operand
*self
,
1210 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1211 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1212 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1214 if (info
->shifter
.amount
== 8)
1215 insert_all_fields (self
, code
, (info
->imm
.value
& 0xff) | 256);
1216 else if (info
->imm
.value
!= 0 && (info
->imm
.value
& 0xff) == 0)
1217 insert_all_fields (self
, code
, ((info
->imm
.value
/ 256) & 0xff) | 256);
1219 insert_all_fields (self
, code
, info
->imm
.value
& 0xff);
1224 aarch64_ins_sve_aligned_reglist (const aarch64_operand
*self
,
1225 const aarch64_opnd_info
*info
,
1227 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1228 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1230 unsigned int num_regs
= get_operand_specific_data (self
);
1231 unsigned int val
= info
->reglist
.first_regno
;
1232 insert_field (self
->fields
[0], code
, val
/ num_regs
, 0);
1236 /* Encode an SVE CPY/DUP immediate. */
1238 aarch64_ins_sve_asimm (const aarch64_operand
*self
,
1239 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1240 const aarch64_inst
*inst
,
1241 aarch64_operand_error
*errors
)
1243 return aarch64_ins_sve_aimm (self
, info
, code
, inst
, errors
);
1246 /* Encode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1247 array specifies which field to use for Zn. MM is encoded in the
1248 concatenation of imm5 and SVE_tszh, with imm5 being the less
1249 significant part. */
1251 aarch64_ins_sve_index (const aarch64_operand
*self
,
1252 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1253 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1254 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1256 unsigned int esize
= aarch64_get_qualifier_esize (info
->qualifier
);
1257 insert_field (self
->fields
[0], code
, info
->reglane
.regno
, 0);
1258 insert_fields (code
, (info
->reglane
.index
* 2 + 1) * esize
, 0,
1259 2, FLD_imm5
, FLD_SVE_tszh
);
1263 /* Encode Zn.<T>[<imm>], where <imm> is an immediate with range of 0 to one less
1264 than the number of elements in 128 bit, which can encode il:tsz. */
1266 aarch64_ins_sve_index_imm (const aarch64_operand
*self
,
1267 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1268 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1269 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1271 insert_field (self
->fields
[0], code
, info
->reglane
.regno
, 0);
1272 unsigned int esize
= aarch64_get_qualifier_esize (info
->qualifier
);
1273 insert_fields (code
, (info
->reglane
.index
* 2 + 1) * esize
, 0,
1274 2, self
->fields
[1],self
->fields
[2]);
1278 /* Encode a logical/bitmask immediate for the MOV alias of SVE DUPM. */
1280 aarch64_ins_sve_limm_mov (const aarch64_operand
*self
,
1281 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1282 const aarch64_inst
*inst
,
1283 aarch64_operand_error
*errors
)
1285 return aarch64_ins_limm (self
, info
, code
, inst
, errors
);
1288 /* Encode Zn[MM], where Zn occupies the least-significant part of the field
1289 and where MM occupies the most-significant part. The operand-dependent
1290 value specifies the number of bits in Zn. */
1292 aarch64_ins_sve_quad_index (const aarch64_operand
*self
,
1293 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1294 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1295 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1297 unsigned int reg_bits
= get_operand_specific_data (self
);
1298 assert (info
->reglane
.regno
< (1U << reg_bits
));
1299 unsigned int val
= (info
->reglane
.index
<< reg_bits
) + info
->reglane
.regno
;
1300 insert_all_fields (self
, code
, val
);
1304 /* Encode {Zn.<T> - Zm.<T>}. The fields array specifies which field
1307 aarch64_ins_sve_reglist (const aarch64_operand
*self
,
1308 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1309 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1310 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1312 insert_field (self
->fields
[0], code
, info
->reglist
.first_regno
, 0);
1316 /* Encode a strided register list. The first field holds the top bit
1317 (0 or 16) and the second field holds the lower bits. The stride is
1318 16 divided by the list length. */
1320 aarch64_ins_sve_strided_reglist (const aarch64_operand
*self
,
1321 const aarch64_opnd_info
*info
,
1323 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1324 aarch64_operand_error
*errors
1327 unsigned int num_regs
= get_operand_specific_data (self
);
1328 unsigned int mask ATTRIBUTE_UNUSED
= 16 | (16 / num_regs
- 1);
1329 unsigned int val
= info
->reglist
.first_regno
;
1330 assert ((val
& mask
) == val
);
1331 insert_field (self
->fields
[0], code
, val
>> 4, 0);
1332 insert_field (self
->fields
[1], code
, val
& 15, 0);
1336 /* Encode <pattern>{, MUL #<amount>}. The fields array specifies which
1337 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
1340 aarch64_ins_sve_scale (const aarch64_operand
*self
,
1341 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1342 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1343 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1345 insert_all_fields (self
, code
, info
->imm
.value
);
1346 insert_field (FLD_SVE_imm4
, code
, info
->shifter
.amount
- 1, 0);
1350 /* Encode an SVE shift left immediate. */
1352 aarch64_ins_sve_shlimm (const aarch64_operand
*self
,
1353 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1354 const aarch64_inst
*inst
,
1355 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1357 const aarch64_opnd_info
*prev_operand
;
1360 assert (info
->idx
> 0);
1361 prev_operand
= &inst
->operands
[info
->idx
- 1];
1362 esize
= aarch64_get_qualifier_esize (prev_operand
->qualifier
);
1363 insert_all_fields (self
, code
, 8 * esize
+ info
->imm
.value
);
1367 /* Encode an SVE shift right immediate. */
1369 aarch64_ins_sve_shrimm (const aarch64_operand
*self
,
1370 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1371 const aarch64_inst
*inst
,
1372 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1374 const aarch64_opnd_info
*prev_operand
;
1377 unsigned int opnd_backshift
= get_operand_specific_data (self
);
1378 assert (info
->idx
>= (int)opnd_backshift
);
1379 prev_operand
= &inst
->operands
[info
->idx
- opnd_backshift
];
1380 esize
= aarch64_get_qualifier_esize (prev_operand
->qualifier
);
1381 insert_all_fields (self
, code
, 16 * esize
- info
->imm
.value
);
1385 /* Encode a single-bit immediate that selects between #0.5 and #1.0.
1386 The fields array specifies which field to use. */
1388 aarch64_ins_sve_float_half_one (const aarch64_operand
*self
,
1389 const aarch64_opnd_info
*info
,
1391 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1392 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1394 if (info
->imm
.value
== 0x3f000000)
1395 insert_field (self
->fields
[0], code
, 0, 0);
1397 insert_field (self
->fields
[0], code
, 1, 0);
1401 /* Encode a single-bit immediate that selects between #0.5 and #2.0.
1402 The fields array specifies which field to use. */
1404 aarch64_ins_sve_float_half_two (const aarch64_operand
*self
,
1405 const aarch64_opnd_info
*info
,
1407 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1408 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1410 if (info
->imm
.value
== 0x3f000000)
1411 insert_field (self
->fields
[0], code
, 0, 0);
1413 insert_field (self
->fields
[0], code
, 1, 0);
1417 /* Encode a single-bit immediate that selects between #0.0 and #1.0.
1418 The fields array specifies which field to use. */
1420 aarch64_ins_sve_float_zero_one (const aarch64_operand
*self
,
1421 const aarch64_opnd_info
*info
,
1423 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1424 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1426 if (info
->imm
.value
== 0)
1427 insert_field (self
->fields
[0], code
, 0, 0);
1429 insert_field (self
->fields
[0], code
, 1, 0);
1434 aarch64_ins_sme_za_vrs1 (const aarch64_operand
*self
,
1435 const aarch64_opnd_info
*info
,
1437 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1438 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1440 int za_reg
= info
->indexed_za
.regno
;
1441 int regno
= info
->indexed_za
.index
.regno
& 3;
1442 int imm
= info
->indexed_za
.index
.imm
;
1443 int v
= info
->indexed_za
.v
;
1444 int countm1
= info
->indexed_za
.index
.countm1
;
1446 insert_field (self
->fields
[0], code
, v
, 0);
1447 insert_field (self
->fields
[1], code
, regno
, 0);
1448 switch (info
->qualifier
)
1450 case AARCH64_OPND_QLF_S_B
:
1451 insert_field (self
->fields
[2], code
, imm
/ (countm1
+ 1), 0);
1453 case AARCH64_OPND_QLF_S_H
:
1454 case AARCH64_OPND_QLF_S_S
:
1455 insert_field (self
->fields
[2], code
, za_reg
, 0);
1456 insert_field (self
->fields
[3], code
, imm
/ (countm1
+ 1), 0);
1458 case AARCH64_OPND_QLF_S_D
:
1459 insert_field (self
->fields
[2], code
, za_reg
, 0);
1469 aarch64_ins_sme_za_vrs2 (const aarch64_operand
*self
,
1470 const aarch64_opnd_info
*info
,
1472 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1473 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1475 int za_reg
= info
->indexed_za
.regno
;
1476 int regno
= info
->indexed_za
.index
.regno
& 3;
1477 int imm
= info
->indexed_za
.index
.imm
;
1478 int v
= info
->indexed_za
.v
;
1479 int countm1
= info
->indexed_za
.index
.countm1
;
1481 insert_field (self
->fields
[0], code
, v
, 0);
1482 insert_field (self
->fields
[1], code
, regno
, 0);
1483 switch (info
->qualifier
)
1485 case AARCH64_OPND_QLF_S_B
:
1486 insert_field (self
->fields
[2], code
, imm
/ (countm1
+ 1), 0);
1488 case AARCH64_OPND_QLF_S_H
:
1489 insert_field (self
->fields
[2], code
, za_reg
, 0);
1490 insert_field (self
->fields
[3], code
, imm
/ (countm1
+ 1), 0);
1492 case AARCH64_OPND_QLF_S_S
:
1493 case AARCH64_OPND_QLF_S_D
:
1494 insert_field (self
->fields
[2], code
, za_reg
, 0);
1503 /* Encode in SME instruction such as MOVA ZA tile vector register number,
1504 vector indicator, vector selector and immediate. */
1506 aarch64_ins_sme_za_hv_tiles (const aarch64_operand
*self
,
1507 const aarch64_opnd_info
*info
,
1509 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1510 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1514 int fld_v
= info
->indexed_za
.v
;
1515 int fld_rv
= info
->indexed_za
.index
.regno
- 12;
1516 int fld_zan_imm
= info
->indexed_za
.index
.imm
;
1517 int regno
= info
->indexed_za
.regno
;
1519 switch (info
->qualifier
)
1521 case AARCH64_OPND_QLF_S_B
:
1525 case AARCH64_OPND_QLF_S_H
:
1528 fld_zan_imm
|= regno
<< 3;
1530 case AARCH64_OPND_QLF_S_S
:
1533 fld_zan_imm
|= regno
<< 2;
1535 case AARCH64_OPND_QLF_S_D
:
1538 fld_zan_imm
|= regno
<< 1;
1540 case AARCH64_OPND_QLF_S_Q
:
1543 fld_zan_imm
= regno
;
1549 insert_field (self
->fields
[0], code
, fld_size
, 0);
1550 insert_field (self
->fields
[1], code
, fld_q
, 0);
1551 insert_field (self
->fields
[2], code
, fld_v
, 0);
1552 insert_field (self
->fields
[3], code
, fld_rv
, 0);
1553 insert_field (self
->fields
[4], code
, fld_zan_imm
, 0);
1559 aarch64_ins_sme_za_hv_tiles_range (const aarch64_operand
*self
,
1560 const aarch64_opnd_info
*info
,
1562 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1563 aarch64_operand_error
*errors
1566 int ebytes
= aarch64_get_qualifier_esize (info
->qualifier
);
1567 int range_size
= get_opcode_dependent_value (inst
->opcode
);
1568 int fld_v
= info
->indexed_za
.v
;
1569 int fld_rv
= info
->indexed_za
.index
.regno
- 12;
1570 int imm
= info
->indexed_za
.index
.imm
;
1571 int max_value
= 16 / range_size
/ ebytes
;
1576 assert (imm
% range_size
== 0 && (imm
/ range_size
) < max_value
);
1577 int fld_zan_imm
= (info
->indexed_za
.regno
* max_value
) | (imm
/ range_size
);
1578 assert (fld_zan_imm
< (range_size
== 4 && ebytes
< 8 ? 4 : 8));
1580 insert_field (self
->fields
[0], code
, fld_v
, 0);
1581 insert_field (self
->fields
[1], code
, fld_rv
, 0);
1582 insert_field (self
->fields
[2], code
, fld_zan_imm
, 0);
1587 /* Encode in SME instruction ZERO list of up to eight 64-bit element tile names
1588 separated by commas, encoded in the "imm8" field.
1590 For programmer convenience an assembler must also accept the names of
1591 32-bit, 16-bit and 8-bit element tiles which are converted into the
1592 corresponding set of 64-bit element tiles.
1595 aarch64_ins_sme_za_list (const aarch64_operand
*self
,
1596 const aarch64_opnd_info
*info
,
1598 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1599 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1601 int fld_mask
= info
->imm
.value
;
1602 insert_field (self
->fields
[0], code
, fld_mask
, 0);
1607 aarch64_ins_sme_za_array (const aarch64_operand
*self
,
1608 const aarch64_opnd_info
*info
,
1610 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1611 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1613 int regno
= info
->indexed_za
.index
.regno
& 3;
1614 int imm
= info
->indexed_za
.index
.imm
;
1615 int countm1
= info
->indexed_za
.index
.countm1
;
1616 assert (imm
% (countm1
+ 1) == 0);
1617 insert_field (self
->fields
[0], code
, regno
, 0);
1618 insert_field (self
->fields
[1], code
, imm
/ (countm1
+ 1), 0);
1623 aarch64_ins_sme_addr_ri_u4xvl (const aarch64_operand
*self
,
1624 const aarch64_opnd_info
*info
,
1626 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1627 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1629 int regno
= info
->addr
.base_regno
;
1630 int imm
= info
->addr
.offset
.imm
;
1631 insert_field (self
->fields
[0], code
, regno
, 0);
1632 insert_field (self
->fields
[1], code
, imm
, 0);
1636 /* Encode in SMSTART and SMSTOP {SM | ZA } mode. */
1638 aarch64_ins_sme_sm_za (const aarch64_operand
*self
,
1639 const aarch64_opnd_info
*info
,
1641 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1642 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1644 aarch64_insn fld_crm
;
1645 /* Set CRm[3:1] bits. */
1646 if (info
->reg
.regno
== 's')
1647 fld_crm
= 0x02 ; /* SVCRSM. */
1648 else if (info
->reg
.regno
== 'z')
1649 fld_crm
= 0x04; /* SVCRZA. */
1653 insert_field (self
->fields
[0], code
, fld_crm
, 0);
1657 /* Encode source scalable predicate register (Pn), name of the index base
1658 register W12-W15 (Rm), and optional element index, defaulting to 0, in the
1659 range 0 to one less than the number of vector elements in a 128-bit vector
1660 register, encoded in "i1:tszh:tszl".
1663 aarch64_ins_sme_pred_reg_with_index (const aarch64_operand
*self
,
1664 const aarch64_opnd_info
*info
,
1666 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1667 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1669 int fld_pn
= info
->indexed_za
.regno
;
1670 int fld_rm
= info
->indexed_za
.index
.regno
- 12;
1671 int imm
= info
->indexed_za
.index
.imm
;
1672 int fld_i1
, fld_tszh
, fld_tshl
;
1674 insert_field (self
->fields
[0], code
, fld_rm
, 0);
1675 insert_field (self
->fields
[1], code
, fld_pn
, 0);
1677 /* Optional element index, defaulting to 0, in the range 0 to one less than
1678 the number of vector elements in a 128-bit vector register, encoded in
1688 switch (info
->qualifier
)
1690 case AARCH64_OPND_QLF_S_B
:
1691 /* <imm> is 4 bit value. */
1692 fld_i1
= (imm
>> 3) & 0x1;
1693 fld_tszh
= (imm
>> 2) & 0x1;
1694 fld_tshl
= ((imm
<< 1) | 0x1) & 0x7;
1696 case AARCH64_OPND_QLF_S_H
:
1697 /* <imm> is 3 bit value. */
1698 fld_i1
= (imm
>> 2) & 0x1;
1699 fld_tszh
= (imm
>> 1) & 0x1;
1700 fld_tshl
= ((imm
<< 2) | 0x2) & 0x7;
1702 case AARCH64_OPND_QLF_S_S
:
1703 /* <imm> is 2 bit value. */
1704 fld_i1
= (imm
>> 1) & 0x1;
1705 fld_tszh
= imm
& 0x1;
1708 case AARCH64_OPND_QLF_S_D
:
1709 /* <imm> is 1 bit value. */
1718 insert_field (self
->fields
[2], code
, fld_i1
, 0);
1719 insert_field (self
->fields
[3], code
, fld_tszh
, 0);
1720 insert_field (self
->fields
[4], code
, fld_tshl
, 0);
1724 /* Insert X0-X30. Register 31 is unallocated. */
1726 aarch64_ins_x0_to_x30 (const aarch64_operand
*self
,
1727 const aarch64_opnd_info
*info
,
1729 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1730 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1732 assert (info
->reg
.regno
<= 30);
1733 insert_field (self
->fields
[0], code
, info
->reg
.regno
, 0);
1737 /* Insert an indexed register, with the first field being the register
1738 number and the remaining fields being the index. */
1740 aarch64_ins_simple_index (const aarch64_operand
*self
,
1741 const aarch64_opnd_info
*info
,
1743 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1744 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1746 int bias
= get_operand_specific_data (self
);
1747 insert_field (self
->fields
[0], code
, info
->reglane
.regno
- bias
, 0);
1748 insert_all_fields_after (self
, 1, code
, info
->reglane
.index
);
1752 /* Insert a plain shift-right immediate, when there is only a single
1755 aarch64_ins_plain_shrimm (const aarch64_operand
*self
,
1756 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1757 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1758 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1760 unsigned int base
= 1 << get_operand_field_width (self
, 0);
1761 insert_field (self
->fields
[0], code
, base
- info
->imm
.value
, 0);
1765 /* Miscellaneous encoding functions. */
1767 /* Encode size[0], i.e. bit 22, for
1768 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1771 encode_asimd_fcvt (aarch64_inst
*inst
)
1774 aarch64_field field
= {0, 0};
1775 enum aarch64_opnd_qualifier qualifier
= AARCH64_OPND_QLF_NIL
;
1777 switch (inst
->opcode
->op
)
1781 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1782 qualifier
= inst
->operands
[1].qualifier
;
1786 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1787 qualifier
= inst
->operands
[0].qualifier
;
1792 assert (qualifier
== AARCH64_OPND_QLF_V_4S
1793 || qualifier
== AARCH64_OPND_QLF_V_2D
);
1794 value
= (qualifier
== AARCH64_OPND_QLF_V_4S
) ? 0 : 1;
1795 gen_sub_field (FLD_size
, 0, 1, &field
);
1796 insert_field_2 (&field
, &inst
->value
, value
, 0);
1799 /* Encode size[0], i.e. bit 22, for
1800 e.g. FCVTXN <Vb><d>, <Va><n>. */
1803 encode_asisd_fcvtxn (aarch64_inst
*inst
)
1805 aarch64_insn val
= 1;
1806 aarch64_field field
= {0, 0};
1807 assert (inst
->operands
[0].qualifier
== AARCH64_OPND_QLF_S_S
);
1808 gen_sub_field (FLD_size
, 0, 1, &field
);
1809 insert_field_2 (&field
, &inst
->value
, val
, 0);
1812 /* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1814 encode_fcvt (aarch64_inst
*inst
)
1817 const aarch64_field field
= {15, 2};
1820 switch (inst
->operands
[0].qualifier
)
1822 case AARCH64_OPND_QLF_S_S
: val
= 0; break;
1823 case AARCH64_OPND_QLF_S_D
: val
= 1; break;
1824 case AARCH64_OPND_QLF_S_H
: val
= 3; break;
1827 insert_field_2 (&field
, &inst
->value
, val
, 0);
1832 /* Return the index in qualifiers_list that INST is using. Should only
1833 be called once the qualifiers are known to be valid. */
1836 aarch64_get_variant (struct aarch64_inst
*inst
)
1838 int i
, nops
, variant
;
1840 nops
= aarch64_num_of_operands (inst
->opcode
);
1841 for (variant
= 0; variant
< AARCH64_MAX_QLF_SEQ_NUM
; ++variant
)
1843 for (i
= 0; i
< nops
; ++i
)
1844 if (inst
->opcode
->qualifiers_list
[variant
][i
]
1845 != inst
->operands
[i
].qualifier
)
1853 /* Do miscellaneous encodings that are not common enough to be driven by
1857 do_misc_encoding (aarch64_inst
*inst
)
1861 switch (inst
->opcode
->op
)
1870 encode_asimd_fcvt (inst
);
1873 encode_asisd_fcvtxn (inst
);
1878 /* Copy Pn to Pm and Pg. */
1879 value
= extract_field (FLD_SVE_Pn
, inst
->value
, 0);
1880 insert_field (FLD_SVE_Pm
, &inst
->value
, value
, 0);
1881 insert_field (FLD_SVE_Pg4_10
, &inst
->value
, value
, 0);
1884 /* Copy Zd to Zm. */
1885 value
= extract_field (FLD_SVE_Zd
, inst
->value
, 0);
1886 insert_field (FLD_SVE_Zm_16
, &inst
->value
, value
, 0);
1889 /* Fill in the zero immediate. */
1890 insert_fields (&inst
->value
, 1 << aarch64_get_variant (inst
), 0,
1891 2, FLD_imm5
, FLD_SVE_tszh
);
1894 /* Copy Zn to Zm. */
1895 value
= extract_field (FLD_SVE_Zn
, inst
->value
, 0);
1896 insert_field (FLD_SVE_Zm_16
, &inst
->value
, value
, 0);
1901 /* Copy Pd to Pm. */
1902 value
= extract_field (FLD_SVE_Pd
, inst
->value
, 0);
1903 insert_field (FLD_SVE_Pm
, &inst
->value
, value
, 0);
1905 case OP_MOVZS_P_P_P
:
1907 /* Copy Pn to Pm. */
1908 value
= extract_field (FLD_SVE_Pn
, inst
->value
, 0);
1909 insert_field (FLD_SVE_Pm
, &inst
->value
, value
, 0);
1911 case OP_NOTS_P_P_P_Z
:
1912 case OP_NOT_P_P_P_Z
:
1913 /* Copy Pg to Pm. */
1914 value
= extract_field (FLD_SVE_Pg4_10
, inst
->value
, 0);
1915 insert_field (FLD_SVE_Pm
, &inst
->value
, value
, 0);
1921 /* Encode the 'size' and 'Q' field for e.g. SHADD. */
1923 encode_sizeq (aarch64_inst
*inst
)
1926 enum aarch64_field_kind kind
;
1929 /* Get the index of the operand whose information we are going to use
1930 to encode the size and Q fields.
1931 This is deduced from the possible valid qualifier lists. */
1932 idx
= aarch64_select_operand_for_sizeq_field_coding (inst
->opcode
);
1933 DEBUG_TRACE ("idx: %d; qualifier: %s", idx
,
1934 aarch64_get_qualifier_name (inst
->operands
[idx
].qualifier
));
1935 sizeq
= aarch64_get_qualifier_standard_value (inst
->operands
[idx
].qualifier
);
1937 insert_field (FLD_Q
, &inst
->value
, sizeq
& 0x1, inst
->opcode
->mask
);
1939 if (inst
->opcode
->iclass
== asisdlse
1940 || inst
->opcode
->iclass
== asisdlsep
1941 || inst
->opcode
->iclass
== asisdlso
1942 || inst
->opcode
->iclass
== asisdlsop
)
1943 kind
= FLD_vldst_size
;
1946 insert_field (kind
, &inst
->value
, (sizeq
>> 1) & 0x3, inst
->opcode
->mask
);
1949 /* Opcodes that have fields shared by multiple operands are usually flagged
1950 with flags. In this function, we detect such flags and use the
1951 information in one of the related operands to do the encoding. The 'one'
1952 operand is not any operand but one of the operands that has the enough
1953 information for such an encoding. */
1956 do_special_encoding (struct aarch64_inst
*inst
)
1959 aarch64_insn value
= 0;
1961 DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst
->value
);
1963 /* Condition for truly conditional executed instructions, e.g. b.cond. */
1964 if (inst
->opcode
->flags
& F_COND
)
1966 insert_field (FLD_cond2
, &inst
->value
, inst
->cond
->value
, 0);
1968 if (inst
->opcode
->flags
& F_SF
)
1970 idx
= select_operand_for_sf_field_coding (inst
->opcode
);
1971 value
= (inst
->operands
[idx
].qualifier
== AARCH64_OPND_QLF_X
1972 || inst
->operands
[idx
].qualifier
== AARCH64_OPND_QLF_SP
)
1974 insert_field (FLD_sf
, &inst
->value
, value
, 0);
1975 if (inst
->opcode
->flags
& F_N
)
1976 insert_field (FLD_N
, &inst
->value
, value
, inst
->opcode
->mask
);
1978 if (inst
->opcode
->flags
& F_LSE_SZ
)
1980 idx
= select_operand_for_sf_field_coding (inst
->opcode
);
1981 value
= (inst
->operands
[idx
].qualifier
== AARCH64_OPND_QLF_X
1982 || inst
->operands
[idx
].qualifier
== AARCH64_OPND_QLF_SP
)
1984 insert_field (FLD_lse_sz
, &inst
->value
, value
, 0);
1986 if (inst
->opcode
->flags
& F_RCPC3_SIZE
)
1988 switch (inst
->operands
[0].qualifier
)
1990 case AARCH64_OPND_QLF_W
: value
= 2; break;
1991 case AARCH64_OPND_QLF_X
: value
= 3; break;
1992 case AARCH64_OPND_QLF_S_B
: value
= 0; break;
1993 case AARCH64_OPND_QLF_S_H
: value
= 1; break;
1994 case AARCH64_OPND_QLF_S_S
: value
= 2; break;
1995 case AARCH64_OPND_QLF_S_D
: value
= 3; break;
1996 case AARCH64_OPND_QLF_S_Q
: value
= 0; break;
1999 insert_field (FLD_rcpc3_size
, &inst
->value
, value
, 0);
2002 if (inst
->opcode
->flags
& F_SIZEQ
)
2003 encode_sizeq (inst
);
2004 if (inst
->opcode
->flags
& F_FPTYPE
)
2006 idx
= select_operand_for_fptype_field_coding (inst
->opcode
);
2007 switch (inst
->operands
[idx
].qualifier
)
2009 case AARCH64_OPND_QLF_S_S
: value
= 0; break;
2010 case AARCH64_OPND_QLF_S_D
: value
= 1; break;
2011 case AARCH64_OPND_QLF_S_H
: value
= 3; break;
2014 insert_field (FLD_type
, &inst
->value
, value
, 0);
2016 if (inst
->opcode
->flags
& F_SSIZE
)
2018 enum aarch64_opnd_qualifier qualifier
;
2019 idx
= select_operand_for_scalar_size_field_coding (inst
->opcode
);
2020 qualifier
= inst
->operands
[idx
].qualifier
;
2021 assert (qualifier
>= AARCH64_OPND_QLF_S_B
2022 && qualifier
<= AARCH64_OPND_QLF_S_Q
);
2023 value
= aarch64_get_qualifier_standard_value (qualifier
);
2024 insert_field (FLD_size
, &inst
->value
, value
, inst
->opcode
->mask
);
2026 if (inst
->opcode
->flags
& F_T
)
2028 int num
; /* num of consecutive '0's on the right side of imm5<3:0>. */
2029 aarch64_field field
= {0, 0};
2030 enum aarch64_opnd_qualifier qualifier
;
2033 qualifier
= inst
->operands
[idx
].qualifier
;
2034 assert (aarch64_get_operand_class (inst
->opcode
->operands
[0])
2035 == AARCH64_OPND_CLASS_SIMD_REG
2036 && qualifier
>= AARCH64_OPND_QLF_V_8B
2037 && qualifier
<= AARCH64_OPND_QLF_V_2D
);
2048 value
= aarch64_get_qualifier_standard_value (qualifier
);
2049 insert_field (FLD_Q
, &inst
->value
, value
& 0x1, inst
->opcode
->mask
);
2050 num
= (int) value
>> 1;
2051 assert (num
>= 0 && num
<= 3);
2052 gen_sub_field (FLD_imm5
, 0, num
+ 1, &field
);
2053 insert_field_2 (&field
, &inst
->value
, 1 << num
, inst
->opcode
->mask
);
2056 if ((inst
->opcode
->flags
& F_OPD_SIZE
) && inst
->opcode
->iclass
== sve2_urqvs
)
2058 enum aarch64_opnd_qualifier qualifier
[2];
2059 aarch64_insn value1
= 0;
2061 qualifier
[0] = inst
->operands
[idx
].qualifier
;
2062 qualifier
[1] = inst
->operands
[idx
+2].qualifier
;
2063 value
= aarch64_get_qualifier_standard_value (qualifier
[0]);
2064 value1
= aarch64_get_qualifier_standard_value (qualifier
[1]);
2065 assert ((value
>> 1) == value1
);
2066 insert_field (FLD_size
, &inst
->value
, value1
, inst
->opcode
->mask
);
2069 if (inst
->opcode
->flags
& F_GPRSIZE_IN_Q
)
2071 /* Use Rt to encode in the case of e.g.
2072 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
2073 enum aarch64_opnd_qualifier qualifier
;
2074 idx
= aarch64_operand_index (inst
->opcode
->operands
, AARCH64_OPND_Rt
);
2076 /* Otherwise use the result operand, which has to be a integer
2079 assert (idx
== 0 || idx
== 1);
2080 assert (aarch64_get_operand_class (inst
->opcode
->operands
[idx
])
2081 == AARCH64_OPND_CLASS_INT_REG
);
2082 qualifier
= inst
->operands
[idx
].qualifier
;
2083 insert_field (FLD_Q
, &inst
->value
,
2084 aarch64_get_qualifier_standard_value (qualifier
), 0);
2086 if (inst
->opcode
->flags
& F_LDS_SIZE
)
2088 /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
2089 enum aarch64_opnd_qualifier qualifier
;
2090 aarch64_field field
= {0, 0};
2091 assert (aarch64_get_operand_class (inst
->opcode
->operands
[0])
2092 == AARCH64_OPND_CLASS_INT_REG
);
2093 gen_sub_field (FLD_opc
, 0, 1, &field
);
2094 qualifier
= inst
->operands
[0].qualifier
;
2095 insert_field_2 (&field
, &inst
->value
,
2096 1 - aarch64_get_qualifier_standard_value (qualifier
), 0);
2098 /* Miscellaneous encoding as the last step. */
2099 if (inst
->opcode
->flags
& F_MISC
)
2100 do_misc_encoding (inst
);
2102 DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst
->value
);
/* Some instructions (including all SVE ones) use the instruction class
   to describe how a qualifiers_list index is represented in the instruction
   encoding.  If INST is such an instruction, encode the chosen qualifier
   variant.  */

static void
aarch64_encode_variant_using_iclass (struct aarch64_inst *inst)
{
  int variant = 0;
  switch (inst->opcode->iclass)
    {
    case sme_mov:
    case sme_psel:
      /* The variant is encoded as part of the immediate.  */
      break;

    case sme_size_12_bhs:
      /* Two-bit size field, variant index used directly.  */
      insert_field (FLD_SME_size_12, &inst->value,
		    aarch64_get_variant (inst), 0);
      break;

    case sme_size_22:
      insert_field (FLD_SME_size_22, &inst->value,
		    aarch64_get_variant (inst), 0);
      break;

    case sme_size_22_hsd:
      /* The variant list starts at .H, so bias the encoded value by 1.  */
      insert_field (FLD_SME_size_22, &inst->value,
		    aarch64_get_variant (inst) + 1, 0);
      break;

    case sme_size_12_hs:
      insert_field (FLD_SME_size_12, &inst->value,
		    aarch64_get_variant (inst) + 1, 0);
      break;

    case sme_sz_23:
      insert_field (FLD_SME_sz_23, &inst->value,
		    aarch64_get_variant (inst), 0);
      break;

    case sve_cpy:
      /* Variant is split across M (bit 14) and the size field.  */
      insert_fields (&inst->value, aarch64_get_variant (inst),
		     0, 2, FLD_SVE_M_14, FLD_size);
      break;

    case sme_shift:
    case sve_index:
    case sve_shift_pred:
    case sve_shift_unpred:
    case sve_shift_tsz_hsd:
    case sve_shift_tsz_bhsd:
      /* For indices and shift amounts, the variant is encoded as
	 part of the immediate.  */
      break;

    case sve_limm:
    case sme_misc:
      /* For sve_limm, the .B, .H, and .S forms are just a convenience
	 and depend on the immediate.  They don't have a separate
	 encoding.  */
      break;

    case sme_fp_sd:
    case sme_int_sd:
    case sve_misc:
      /* These instructions have only a single variant.  */
      break;

    case sve_movprfx:
      insert_fields (&inst->value, aarch64_get_variant (inst),
		     0, 2, FLD_SVE_M_16, FLD_size);
      break;

    case sve_pred_zm:
      insert_field (FLD_SVE_M_4, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_bhs:
    case sve_size_bhsd:
      insert_field (FLD_size, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_hsd:
      /* MOD 3 For `OP_SVE_Vv_HSD`.  */
      insert_field (FLD_size, &inst->value,
		    aarch64_get_variant (inst) % 3 + 1, 0);
      break;

    case sve_size_bh:
    case sve_size_sd:
      insert_field (FLD_SVE_sz, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_sd2:
      insert_field (FLD_SVE_sz2, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_hsd2:
      insert_field (FLD_SVE_size, &inst->value,
		    aarch64_get_variant (inst) + 1, 0);
      break;

    case sve_size_tsz_bhs:
      /* Encode the variant as a one-hot value split across tszl and sz.  */
      insert_fields (&inst->value,
		     (1 << aarch64_get_variant (inst)),
		     0, 2, FLD_SVE_tszl_19, FLD_SVE_sz);
      break;

    case sve_size_13:
      /* Encoding 2 is reserved; map the second variant onto 3.  */
      variant = aarch64_get_variant (inst) + 1;
      if (variant == 2)
	variant = 3;
      insert_field (FLD_size, &inst->value, variant, 0);
      break;

    default:
      break;
    }
}
/* Converters converting an alias opcode instruction to its real form.  */

/* ROR <Wd>, <Ws>, #<shift>
     is equivalent to:
   EXTR <Wd>, <Ws>, <Ws>, #<shift>.  */

static void
convert_ror_to_extr (aarch64_inst *inst)
{
  /* Move the shift amount into the EXTR immediate slot first, then
     duplicate the source register into the Rm slot; the order matters
     because the second copy overwrites operand 2.  */
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
}
/* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
     is equivalent to:
   USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0.  */

static void
convert_xtl_to_shll (aarch64_inst *inst)
{
  /* The shift-amount operand takes the source vector's qualifier and an
     immediate of zero.  */
  inst->operands[2].qualifier = inst->operands[1].qualifier;
  inst->operands[2].imm.value = 0;
}
2251 LSR <Xd>, <Xn>, #<shift>
2253 UBFM <Xd>, <Xn>, #<shift>, #63. */
2255 convert_sr_to_bfm (aarch64_inst
*inst
)
2257 inst
->operands
[3].imm
.value
=
2258 inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
? 31 : 63;
/* Convert MOV to ORR.  */

static void
convert_mov_to_orr (aarch64_inst *inst)
{
  /* MOV <Vd>.<T>, <Vn>.<T>
       is equivalent to:
     ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>.
     Duplicate the source register into the second ORR source slot.  */
  copy_operand_info (inst, 2, 1);
}
2271 /* When <imms> >= <immr>, the instruction written:
2272 SBFX <Xd>, <Xn>, #<lsb>, #<width>
2274 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
2277 convert_bfx_to_bfm (aarch64_inst
*inst
)
2281 /* Convert the operand. */
2282 lsb
= inst
->operands
[2].imm
.value
;
2283 width
= inst
->operands
[3].imm
.value
;
2284 inst
->operands
[2].imm
.value
= lsb
;
2285 inst
->operands
[3].imm
.value
= lsb
+ width
- 1;
/* When <imms> < <immr>, the instruction written:
   SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
     is equivalent to:
   SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1).  */

static void
convert_bfi_to_bfm (aarch64_inst *inst)
{
  int64_t lsb, width;

  /* Convert the operand.  */
  lsb = inst->operands[2].imm.value;
  width = inst->operands[3].imm.value;
  if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
    {
      /* W form: <immr> is the negated lsb modulo the 32-bit width.  */
      inst->operands[2].imm.value = (32 - lsb) & 0x1f;
      inst->operands[3].imm.value = width - 1;
    }
  else
    {
      /* X form: <immr> is the negated lsb modulo the 64-bit width.  */
      inst->operands[2].imm.value = (64 - lsb) & 0x3f;
      inst->operands[3].imm.value = width - 1;
    }
}
/* The instruction written:
   BFC <Xd>, #<lsb>, #<width>
     is equivalent to:
   BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1).  */

static void
convert_bfc_to_bfm (aarch64_inst *inst)
{
  int64_t lsb, width;

  /* Shift the operands up by one slot to open a hole for the source
     register, then insert XZR (register number 0x1f) there.  The copies
     must run from the highest index down so nothing is overwritten.  */
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
  copy_operand_info (inst, 1, 0);
  inst->operands[1].reg.regno = 0x1f;

  /* Convert the immediate operand.  */
  lsb = inst->operands[2].imm.value;
  width = inst->operands[3].imm.value;
  if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
    {
      /* W form: <immr> is the negated lsb modulo 32.  */
      inst->operands[2].imm.value = (32 - lsb) & 0x1f;
      inst->operands[3].imm.value = width - 1;
    }
  else
    {
      /* X form: <immr> is the negated lsb modulo 64.  */
      inst->operands[2].imm.value = (64 - lsb) & 0x3f;
      inst->operands[3].imm.value = width - 1;
    }
}
2344 /* The instruction written:
2345 LSL <Xd>, <Xn>, #<shift>
2347 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
2350 convert_lsl_to_ubfm (aarch64_inst
*inst
)
2352 int64_t shift
= inst
->operands
[2].imm
.value
;
2354 if (inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
)
2356 inst
->operands
[2].imm
.value
= (32 - shift
) & 0x1f;
2357 inst
->operands
[3].imm
.value
= 31 - shift
;
2361 inst
->operands
[2].imm
.value
= (64 - shift
) & 0x3f;
2362 inst
->operands
[3].imm
.value
= 63 - shift
;
/* CINC <Wd>, <Wn>, <cond>
     is equivalent to:
   CSINC <Wd>, <Wn>, <Wn>, invert(<cond>).  */

static void
convert_to_csel (aarch64_inst *inst)
{
  /* Move the condition into slot 3 first, then duplicate the source
     register into the Rm slot (order matters: the second copy overwrites
     operand 2).  Finally invert the condition for the CSxxx form.  */
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
  inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
}
/* CSET <Wd>, <cond>
     is equivalent to:
   CSINC <Wd>, WZR, WZR, invert(<cond>).  */

static void
convert_cset_to_csinc (aarch64_inst *inst)
{
  /* Spread the two CSET operands across the four CSINC slots, then turn
     both source slots into WZR/XZR (register number 0x1f) and invert the
     condition.  */
  copy_operand_info (inst, 3, 1);
  copy_operand_info (inst, 2, 0);
  copy_operand_info (inst, 1, 0);
  inst->operands[1].reg.regno = 0x1f;
  inst->operands[2].reg.regno = 0x1f;
  inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
}
/* MOV <Wd>, #<imm>
     is equivalent to:
   MOVZ <Wd>, #<imm16>, LSL #<shift>.  */

static void
convert_mov_to_movewide (aarch64_inst *inst)
{
  int is32;
  uint32_t shift_amount;
  uint64_t value = ~(uint64_t)0;

  switch (inst->opcode->op)
    {
    case OP_MOV_IMM_WIDE:
      /* MOVZ alias: encode the immediate as-is.  */
      value = inst->operands[1].imm.value;
      break;
    case OP_MOV_IMM_WIDEN:
      /* MOVN alias: encode the bitwise inverse of the immediate.  */
      value = ~inst->operands[1].imm.value;
      break;
    default:
      return;
    }
  inst->operands[1].type = AARCH64_OPND_HALF;
  is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
  if (! aarch64_wide_constant_p (value, is32, &shift_amount))
    /* The constraint check should have guaranteed this wouldn't happen.  */
    return;
  /* Reduce the value to the 16-bit chunk and record the LSL amount.  */
  value >>= shift_amount;
  value &= 0xffff;
  inst->operands[1].imm.value = value;
  inst->operands[1].shifter.kind = AARCH64_MOD_LSL;
  inst->operands[1].shifter.amount = shift_amount;
}
/* MOV <Wd>, #<imm>
     is equivalent to:
   ORR <Wd>, WZR, #<imm>.  */

static void
convert_mov_to_movebitmask (aarch64_inst *inst)
{
  /* Move the bitmask immediate into slot 2 and insert WZR/XZR
     (register number 0x1f) as the ORR source register.  */
  copy_operand_info (inst, 2, 1);
  inst->operands[1].reg.regno = 0x1f;
  inst->operands[1].skip = 0;
}
/* Some alias opcodes are assembled by being converted to their real-form.  */

static void
convert_to_real (aarch64_inst *inst, const aarch64_opcode *real)
{
  const aarch64_opcode *alias = inst->opcode;

  /* Aliases without F_CONV need no operand transformation; just swap in
     the real opcode.  */
  if ((alias->flags & F_CONV) == 0)
    goto convert_to_real_return;

  switch (alias->op)
    {
    case OP_ASR_IMM:
    case OP_LSR_IMM:
      convert_sr_to_bfm (inst);
      break;
    case OP_LSL_IMM:
      convert_lsl_to_ubfm (inst);
      break;
    case OP_CINC:
    case OP_CINV:
    case OP_CNEG:
      convert_to_csel (inst);
      break;
    case OP_CSET:
    case OP_CSETM:
      convert_cset_to_csinc (inst);
      break;
    case OP_SBFX:
    case OP_UBFX:
      convert_bfx_to_bfm (inst);
      break;
    case OP_SBFIZ:
    case OP_UBFIZ:
      convert_bfi_to_bfm (inst);
      break;
    case OP_BFC:
      convert_bfc_to_bfm (inst);
      break;
    case OP_MOV_V:
      convert_mov_to_orr (inst);
      break;
    case OP_MOV_IMM_WIDE:
    case OP_MOV_IMM_WIDEN:
      convert_mov_to_movewide (inst);
      break;
    case OP_MOV_IMM_LOG:
      convert_mov_to_movebitmask (inst);
      break;
    case OP_ROR_IMM:
      convert_ror_to_extr (inst);
      break;
    case OP_SXTL:
    case OP_SXTL2:
    case OP_UXTL:
    case OP_UXTL2:
      convert_xtl_to_shll (inst);
      break;
    default:
      break;
    }

 convert_to_real_return:
  aarch64_replace_opcode (inst, real);
}
/* Encode *INST_ORI of the opcode code OPCODE.
   Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
   matched operand qualifier sequence in *QLF_SEQ.  */

bool
aarch64_opcode_encode (const aarch64_opcode *opcode,
		       const aarch64_inst *inst_ori, aarch64_insn *code,
		       aarch64_opnd_qualifier_t *qlf_seq,
		       aarch64_operand_error *mismatch_detail,
		       aarch64_instr_sequence* insn_sequence)
{
  int i;
  const aarch64_opcode *aliased;
  aarch64_inst copy, *inst;

  DEBUG_TRACE ("enter with %s", opcode->name);

  /* Create a copy of *INST_ORI, so that we can do any change we want.  */
  copy = *inst_ori;
  inst = &copy;

  assert (inst->opcode == NULL || inst->opcode == opcode);
  if (inst->opcode == NULL)
    inst->opcode = opcode;

  /* Constrain the operands.
     After passing this, the encoding is guaranteed to succeed.  */
  if (aarch64_match_operands_constraint (inst, mismatch_detail) == 0)
    {
      DEBUG_TRACE ("FAIL since operand constraint not met");
      return false;
    }

  /* Get the base value.
     Note: this has to be before the aliasing handling below in order to
     get the base value from the alias opcode before we move on to the
     aliased opcode for encoding.  */
  inst->value = opcode->opcode;

  /* No need to do anything else if the opcode does not have any operand.  */
  if (aarch64_num_of_operands (opcode) == 0)
    goto encoding_exit;

  /* Assign operand indexes and check types.  Also put the matched
     operand qualifiers in *QLF_SEQ to return.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      assert (opcode->operands[i] == inst->operands[i].type);
      inst->operands[i].idx = i;
      if (qlf_seq != NULL)
	*qlf_seq = inst->operands[i].qualifier;
    }

  aliased = aarch64_find_real_opcode (opcode);
  /* If the opcode is an alias and it does not ask for direct encoding by
     itself, the instruction will be transformed to the form of real opcode
     and the encoding will be carried out using the rules for the aliased
     opcode.  */
  if (aliased != NULL && (opcode->flags & F_CONV))
    {
      DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
		   aliased->name, opcode->name);
      /* Convert the operands to the form of the real opcode.  */
      convert_to_real (inst, aliased);
    }

  aarch64_opnd_info *info = inst->operands;

  /* Call the inserter of each operand.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++info)
    {
      const aarch64_operand *opnd;
      enum aarch64_opnd type = opcode->operands[i];
      if (type == AARCH64_OPND_NIL)
	break;
      if (info->skip)
	{
	  DEBUG_TRACE ("skip the incomplete operand %d", i);
	  continue;
	}
      opnd = &aarch64_operands[type];
      if (operand_has_inserter (opnd)
	  && !aarch64_insert_operand (opnd, info, &inst->value, inst,
				      mismatch_detail))
	return false;
    }

  /* Call opcode encoders indicated by flags.  */
  if (opcode_has_special_coder (opcode))
    do_special_encoding (inst);

  /* Possibly use the instruction class to encode the chosen qualifier
     variant.  */
  aarch64_encode_variant_using_iclass (inst);

  /* Run a verifier if the instruction has one set.  */
  if (opcode->verifier)
    {
      enum err_type result = opcode->verifier (inst, *code, 0, true,
					       mismatch_detail, insn_sequence);
      switch (result)
	{
	case ERR_UND:
	case ERR_UNP:
	case ERR_NYI:
	  return false;
	default:
	  break;
	}
    }

  /* Always run constrain verifiers, this is needed because constrains need to
     maintain a global state.  Regardless if the instruction has the flag set
     or not.  */
  enum err_type result = verify_constraints (inst, *code, 0, true,
					     mismatch_detail, insn_sequence);
  switch (result)
    {
    case ERR_UND:
    case ERR_UNP:
    case ERR_NYI:
      return false;
    default:
      break;
    }

 encoding_exit:
  DEBUG_TRACE ("exit with %s", opcode->name);

  *code = inst->value;

  return true;
}