1 /* aarch64-asm.c -- AArch64 assembler support.
2 Copyright (C) 2012-2023 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
23 #include "libiberty.h"
24 #include "aarch64-asm.h"
29 /* The unnamed arguments consist of the number of fields and information about
30 these fields where the VALUE will be inserted into CODE. MASK can be zero or
31 the base mask of the opcode.
33 N.B. the fields are required to be in such an order that the least significant
34 field for VALUE comes the first, e.g. the <index> in
35 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
36 is encoded in H:L:M in some cases, the fields H:L:M should be passed in
37 the order of M, L, H. */
40 insert_fields (aarch64_insn
*code
, aarch64_insn value
, aarch64_insn mask
, ...)
43 const aarch64_field
*field
;
44 enum aarch64_field_kind kind
;
48 num
= va_arg (va
, uint32_t);
52 kind
= va_arg (va
, enum aarch64_field_kind
);
53 field
= &fields
[kind
];
54 insert_field (kind
, code
, value
, mask
);
55 value
>>= field
->width
;
60 /* Insert a raw field value VALUE into all fields in SELF->fields after START.
61 The least significant bit goes in the final field. */
64 insert_all_fields_after (const aarch64_operand
*self
, unsigned int start
,
65 aarch64_insn
*code
, aarch64_insn value
)
68 enum aarch64_field_kind kind
;
70 for (i
= ARRAY_SIZE (self
->fields
); i
-- > start
; )
71 if (self
->fields
[i
] != FLD_NIL
)
73 kind
= self
->fields
[i
];
74 insert_field (kind
, code
, value
, 0);
75 value
>>= fields
[kind
].width
;
79 /* Insert a raw field value VALUE into all fields in SELF->fields.
80 The least significant bit goes in the final field. */
83 insert_all_fields (const aarch64_operand
*self
, aarch64_insn
*code
,
86 return insert_all_fields_after (self
, 0, code
, value
);
89 /* Operand inserters. */
93 aarch64_ins_none (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
94 const aarch64_opnd_info
*info ATTRIBUTE_UNUSED
,
95 aarch64_insn
*code ATTRIBUTE_UNUSED
,
96 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
97 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
102 /* Insert register number. */
104 aarch64_ins_regno (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
106 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
107 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
109 int val
= info
->reg
.regno
- get_operand_specific_data (self
);
110 insert_field (self
->fields
[0], code
, val
, 0);
114 /* Insert register number, index and/or other data for SIMD register element
115 operand, e.g. the last source operand in
116 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
118 aarch64_ins_reglane (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
119 aarch64_insn
*code
, const aarch64_inst
*inst
,
120 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
123 insert_field (self
->fields
[0], code
, info
->reglane
.regno
, inst
->opcode
->mask
);
124 /* index and/or type */
125 if (inst
->opcode
->iclass
== asisdone
|| inst
->opcode
->iclass
== asimdins
)
127 int pos
= info
->qualifier
- AARCH64_OPND_QLF_S_B
;
128 if (info
->type
== AARCH64_OPND_En
129 && inst
->opcode
->operands
[0] == AARCH64_OPND_Ed
)
131 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
132 assert (info
->idx
== 1); /* Vn */
133 aarch64_insn value
= info
->reglane
.index
<< pos
;
134 insert_field (FLD_imm4_11
, code
, value
, 0);
138 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
145 aarch64_insn value
= ((info
->reglane
.index
<< 1) | 1) << pos
;
146 insert_field (FLD_imm5
, code
, value
, 0);
149 else if (inst
->opcode
->iclass
== dotproduct
)
151 unsigned reglane_index
= info
->reglane
.index
;
152 switch (info
->qualifier
)
154 case AARCH64_OPND_QLF_S_4B
:
155 case AARCH64_OPND_QLF_S_2H
:
157 assert (reglane_index
< 4);
158 insert_fields (code
, reglane_index
, 0, 2, FLD_L
, FLD_H
);
164 else if (inst
->opcode
->iclass
== cryptosm3
)
166 /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>S[<imm2>]. */
167 unsigned reglane_index
= info
->reglane
.index
;
168 assert (reglane_index
< 4);
169 insert_field (FLD_SM3_imm2
, code
, reglane_index
, 0);
173 /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
174 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
175 unsigned reglane_index
= info
->reglane
.index
;
177 if (inst
->opcode
->op
== OP_FCMLA_ELEM
)
178 /* Complex operand takes two elements. */
181 switch (info
->qualifier
)
183 case AARCH64_OPND_QLF_S_H
:
185 assert (reglane_index
< 8);
186 insert_fields (code
, reglane_index
, 0, 3, FLD_M
, FLD_L
, FLD_H
);
188 case AARCH64_OPND_QLF_S_S
:
190 assert (reglane_index
< 4);
191 insert_fields (code
, reglane_index
, 0, 2, FLD_L
, FLD_H
);
193 case AARCH64_OPND_QLF_S_D
:
195 assert (reglane_index
< 2);
196 insert_field (FLD_H
, code
, reglane_index
, 0);
205 /* Insert regno and len field of a register list operand, e.g. Vn in TBL. */
207 aarch64_ins_reglist (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
209 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
210 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
213 insert_field (self
->fields
[0], code
, info
->reglist
.first_regno
, 0);
215 insert_field (FLD_len
, code
, info
->reglist
.num_regs
- 1, 0);
219 /* Insert Rt and opcode fields for a register list operand, e.g. Vt
220 in AdvSIMD load/store instructions. */
222 aarch64_ins_ldst_reglist (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
223 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
224 const aarch64_inst
*inst
,
225 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
227 aarch64_insn value
= 0;
228 /* Number of elements in each structure to be loaded/stored. */
229 unsigned num
= get_opcode_dependent_value (inst
->opcode
);
232 insert_field (FLD_Rt
, code
, info
->reglist
.first_regno
, 0);
237 switch (info
->reglist
.num_regs
)
239 case 1: value
= 0x7; break;
240 case 2: value
= 0xa; break;
241 case 3: value
= 0x6; break;
242 case 4: value
= 0x2; break;
243 default: return false;
247 value
= info
->reglist
.num_regs
== 4 ? 0x3 : 0x8;
258 insert_field (FLD_opcode
, code
, value
, 0);
263 /* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
264 single structure to all lanes instructions. */
266 aarch64_ins_ldst_reglist_r (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
267 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
268 const aarch64_inst
*inst
,
269 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
272 /* The opcode dependent area stores the number of elements in
273 each structure to be loaded/stored. */
274 int is_ld1r
= get_opcode_dependent_value (inst
->opcode
) == 1;
277 insert_field (FLD_Rt
, code
, info
->reglist
.first_regno
, 0);
279 value
= (aarch64_insn
) 0;
280 if (is_ld1r
&& info
->reglist
.num_regs
== 2)
281 /* OP_LD1R does not have alternating variant, but have "two consecutive"
283 value
= (aarch64_insn
) 1;
284 insert_field (FLD_S
, code
, value
, 0);
289 /* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
290 operand e.g. Vt in AdvSIMD load/store single element instructions. */
292 aarch64_ins_ldst_elemlist (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
293 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
294 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
295 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
297 aarch64_field field
= {0, 0};
298 aarch64_insn QSsize
= 0; /* fields Q:S:size. */
299 aarch64_insn opcodeh2
= 0; /* opcode<2:1> */
301 assert (info
->reglist
.has_index
);
304 insert_field (FLD_Rt
, code
, info
->reglist
.first_regno
, 0);
305 /* Encode the index, opcode<2:1> and size. */
306 switch (info
->qualifier
)
308 case AARCH64_OPND_QLF_S_B
:
309 /* Index encoded in "Q:S:size". */
310 QSsize
= info
->reglist
.index
;
313 case AARCH64_OPND_QLF_S_H
:
314 /* Index encoded in "Q:S:size<1>". */
315 QSsize
= info
->reglist
.index
<< 1;
318 case AARCH64_OPND_QLF_S_S
:
319 /* Index encoded in "Q:S". */
320 QSsize
= info
->reglist
.index
<< 2;
323 case AARCH64_OPND_QLF_S_D
:
324 /* Index encoded in "Q". */
325 QSsize
= info
->reglist
.index
<< 3 | 0x1;
331 insert_fields (code
, QSsize
, 0, 3, FLD_vldst_size
, FLD_S
, FLD_Q
);
332 gen_sub_field (FLD_asisdlso_opcode
, 1, 2, &field
);
333 insert_field_2 (&field
, code
, opcodeh2
, 0);
338 /* Insert fields immh:immb and/or Q for e.g. the shift immediate in
339 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
340 or SSHR <V><d>, <V><n>, #<shift>. */
342 aarch64_ins_advsimd_imm_shift (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
343 const aarch64_opnd_info
*info
,
344 aarch64_insn
*code
, const aarch64_inst
*inst
,
345 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
347 unsigned val
= aarch64_get_qualifier_standard_value (info
->qualifier
);
350 if (inst
->opcode
->iclass
== asimdshf
)
354 0000 x SEE AdvSIMD modified immediate
363 Q
= (val
& 0x1) ? 1 : 0;
364 insert_field (FLD_Q
, code
, Q
, inst
->opcode
->mask
);
368 assert (info
->type
== AARCH64_OPND_IMM_VLSR
369 || info
->type
== AARCH64_OPND_IMM_VLSL
);
371 if (info
->type
== AARCH64_OPND_IMM_VLSR
)
374 0000 SEE AdvSIMD modified immediate
375 0001 (16-UInt(immh:immb))
376 001x (32-UInt(immh:immb))
377 01xx (64-UInt(immh:immb))
378 1xxx (128-UInt(immh:immb)) */
379 imm
= (16 << (unsigned)val
) - info
->imm
.value
;
383 0000 SEE AdvSIMD modified immediate
384 0001 (UInt(immh:immb)-8)
385 001x (UInt(immh:immb)-16)
386 01xx (UInt(immh:immb)-32)
387 1xxx (UInt(immh:immb)-64) */
388 imm
= info
->imm
.value
+ (8 << (unsigned)val
);
389 insert_fields (code
, imm
, 0, 2, FLD_immb
, FLD_immh
);
394 /* Insert fields for e.g. the immediate operands in
395 BFM <Wd>, <Wn>, #<immr>, #<imms>. */
397 aarch64_ins_imm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
399 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
400 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
404 imm
= info
->imm
.value
;
405 if (operand_need_shift_by_two (self
))
407 if (operand_need_shift_by_three (self
))
409 if (operand_need_shift_by_four (self
))
411 insert_all_fields (self
, code
, imm
);
415 /* Insert immediate and its shift amount for e.g. the last operand in
416 MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
418 aarch64_ins_imm_half (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
419 aarch64_insn
*code
, const aarch64_inst
*inst
,
420 aarch64_operand_error
*errors
)
423 aarch64_ins_imm (self
, info
, code
, inst
, errors
);
425 insert_field (FLD_hw
, code
, info
->shifter
.amount
>> 4, 0);
429 /* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
430 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
432 aarch64_ins_advsimd_imm_modified (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
433 const aarch64_opnd_info
*info
,
435 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
436 aarch64_operand_error
*errors
439 enum aarch64_opnd_qualifier opnd0_qualifier
= inst
->operands
[0].qualifier
;
440 uint64_t imm
= info
->imm
.value
;
441 enum aarch64_modifier_kind kind
= info
->shifter
.kind
;
442 int amount
= info
->shifter
.amount
;
443 aarch64_field field
= {0, 0};
445 /* a:b:c:d:e:f:g:h */
446 if (!info
->imm
.is_fp
&& aarch64_get_qualifier_esize (opnd0_qualifier
) == 8)
448 /* Either MOVI <Dd>, #<imm>
449 or MOVI <Vd>.2D, #<imm>.
450 <imm> is a 64-bit immediate
451 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
452 encoded in "a:b:c:d:e:f:g:h". */
453 imm
= aarch64_shrink_expanded_imm8 (imm
);
454 assert ((int)imm
>= 0);
456 insert_fields (code
, imm
, 0, 2, FLD_defgh
, FLD_abc
);
458 if (kind
== AARCH64_MOD_NONE
)
461 /* shift amount partially in cmode */
462 assert (kind
== AARCH64_MOD_LSL
|| kind
== AARCH64_MOD_MSL
);
463 if (kind
== AARCH64_MOD_LSL
)
465 /* AARCH64_MOD_LSL: shift zeros. */
466 int esize
= aarch64_get_qualifier_esize (opnd0_qualifier
);
467 assert (esize
== 4 || esize
== 2 || esize
== 1);
468 /* For 8-bit move immediate, the optional LSL #0 does not require
474 gen_sub_field (FLD_cmode
, 1, 2, &field
); /* per word */
476 gen_sub_field (FLD_cmode
, 1, 1, &field
); /* per halfword */
480 /* AARCH64_MOD_MSL: shift ones. */
482 gen_sub_field (FLD_cmode
, 0, 1, &field
); /* per word */
484 insert_field_2 (&field
, code
, amount
, 0);
489 /* Insert fields for an 8-bit floating-point immediate. */
491 aarch64_ins_fpimm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
493 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
494 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
496 insert_all_fields (self
, code
, info
->imm
.value
);
500 /* Insert 1-bit rotation immediate (#90 or #270). */
502 aarch64_ins_imm_rotate1 (const aarch64_operand
*self
,
503 const aarch64_opnd_info
*info
,
504 aarch64_insn
*code
, const aarch64_inst
*inst
,
505 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
507 uint64_t rot
= (info
->imm
.value
- 90) / 180;
509 insert_field (self
->fields
[0], code
, rot
, inst
->opcode
->mask
);
513 /* Insert 2-bit rotation immediate (#0, #90, #180 or #270). */
515 aarch64_ins_imm_rotate2 (const aarch64_operand
*self
,
516 const aarch64_opnd_info
*info
,
517 aarch64_insn
*code
, const aarch64_inst
*inst
,
518 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
520 uint64_t rot
= info
->imm
.value
/ 90;
522 insert_field (self
->fields
[0], code
, rot
, inst
->opcode
->mask
);
526 /* Insert #<fbits> for the immediate operand in fp fix-point instructions,
527 e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
529 aarch64_ins_fbits (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
531 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
532 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
534 insert_field (self
->fields
[0], code
, 64 - info
->imm
.value
, 0);
538 /* Insert arithmetic immediate for e.g. the last operand in
539 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
541 aarch64_ins_aimm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
542 aarch64_insn
*code
, const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
543 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
546 aarch64_insn value
= info
->shifter
.amount
? 1 : 0;
547 insert_field (self
->fields
[0], code
, value
, 0);
548 /* imm12 (unsigned) */
549 insert_field (self
->fields
[1], code
, info
->imm
.value
, 0);
553 /* Common routine shared by aarch64_ins{,_inv}_limm. INVERT_P says whether
554 the operand should be inverted before encoding. */
556 aarch64_ins_limm_1 (const aarch64_operand
*self
,
557 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
558 const aarch64_inst
*inst
, bool invert_p
,
559 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
563 uint64_t imm
= info
->imm
.value
;
564 int esize
= aarch64_get_qualifier_esize (inst
->operands
[0].qualifier
);
568 /* The constraint check should guarantee that this will work. */
569 res
= aarch64_logical_immediate_p (imm
, esize
, &value
);
571 insert_fields (code
, value
, 0, 3, self
->fields
[2], self
->fields
[1],
576 /* Insert logical/bitmask immediate for e.g. the last operand in
577 ORR <Wd|WSP>, <Wn>, #<imm>. */
579 aarch64_ins_limm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
580 aarch64_insn
*code
, const aarch64_inst
*inst
,
581 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
583 return aarch64_ins_limm_1 (self
, info
, code
, inst
,
584 inst
->opcode
->op
== OP_BIC
, errors
);
587 /* Insert a logical/bitmask immediate for the BIC alias of AND (etc.). */
589 aarch64_ins_inv_limm (const aarch64_operand
*self
,
590 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
591 const aarch64_inst
*inst
,
592 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
594 return aarch64_ins_limm_1 (self
, info
, code
, inst
, true, errors
);
597 /* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
598 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
600 aarch64_ins_ft (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
601 aarch64_insn
*code
, const aarch64_inst
*inst
,
602 aarch64_operand_error
*errors
)
604 aarch64_insn value
= 0;
606 assert (info
->idx
== 0);
609 aarch64_ins_regno (self
, info
, code
, inst
, errors
);
610 if (inst
->opcode
->iclass
== ldstpair_indexed
611 || inst
->opcode
->iclass
== ldstnapair_offs
612 || inst
->opcode
->iclass
== ldstpair_off
613 || inst
->opcode
->iclass
== loadlit
)
616 switch (info
->qualifier
)
618 case AARCH64_OPND_QLF_S_S
: value
= 0; break;
619 case AARCH64_OPND_QLF_S_D
: value
= 1; break;
620 case AARCH64_OPND_QLF_S_Q
: value
= 2; break;
621 default: return false;
623 insert_field (FLD_ldst_size
, code
, value
, 0);
628 value
= aarch64_get_qualifier_standard_value (info
->qualifier
);
629 insert_fields (code
, value
, 0, 2, FLD_ldst_size
, FLD_opc1
);
635 /* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
637 aarch64_ins_addr_simple (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
638 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
639 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
640 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
643 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
647 /* Encode the address operand for e.g.
648 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
650 aarch64_ins_addr_regoff (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
651 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
652 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
653 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
656 enum aarch64_modifier_kind kind
= info
->shifter
.kind
;
659 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
661 insert_field (FLD_Rm
, code
, info
->addr
.offset
.regno
, 0);
663 if (kind
== AARCH64_MOD_LSL
)
664 kind
= AARCH64_MOD_UXTX
; /* Trick to enable the table-driven. */
665 insert_field (FLD_option
, code
, aarch64_get_operand_modifier_value (kind
), 0);
667 if (info
->qualifier
!= AARCH64_OPND_QLF_S_B
)
668 S
= info
->shifter
.amount
!= 0;
670 /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}},
674 Must be #0 if <extend> is explicitly LSL. */
675 S
= info
->shifter
.operator_present
&& info
->shifter
.amount_present
;
676 insert_field (FLD_S
, code
, S
, 0);
681 /* Encode the address operand for e.g.
682 stlur <Xt>, [<Xn|SP>{, <amount>}]. */
684 aarch64_ins_addr_offset (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
685 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
686 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
687 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
690 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
693 int imm
= info
->addr
.offset
.imm
;
694 insert_field (self
->fields
[1], code
, imm
, 0);
697 if (info
->addr
.writeback
)
699 assert (info
->addr
.preind
== 1 && info
->addr
.postind
== 0);
700 insert_field (self
->fields
[2], code
, 1, 0);
705 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!. */
707 aarch64_ins_addr_simm (const aarch64_operand
*self
,
708 const aarch64_opnd_info
*info
,
710 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
711 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
716 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
717 /* simm (imm9 or imm7) */
718 imm
= info
->addr
.offset
.imm
;
719 if (self
->fields
[0] == FLD_imm7
720 || info
->qualifier
== AARCH64_OPND_QLF_imm_tag
)
721 /* scaled immediate in ld/st pair instructions.. */
722 imm
>>= get_logsz (aarch64_get_qualifier_esize (info
->qualifier
));
723 insert_field (self
->fields
[0], code
, imm
, 0);
724 /* pre/post- index */
725 if (info
->addr
.writeback
)
727 assert (inst
->opcode
->iclass
!= ldst_unscaled
728 && inst
->opcode
->iclass
!= ldstnapair_offs
729 && inst
->opcode
->iclass
!= ldstpair_off
730 && inst
->opcode
->iclass
!= ldst_unpriv
);
731 assert (info
->addr
.preind
!= info
->addr
.postind
);
732 if (info
->addr
.preind
)
733 insert_field (self
->fields
[1], code
, 1, 0);
739 /* Encode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
741 aarch64_ins_addr_simm10 (const aarch64_operand
*self
,
742 const aarch64_opnd_info
*info
,
744 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
745 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
750 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
752 imm
= info
->addr
.offset
.imm
>> 3;
753 insert_field (self
->fields
[1], code
, imm
>> 9, 0);
754 insert_field (self
->fields
[2], code
, imm
, 0);
756 if (info
->addr
.writeback
)
758 assert (info
->addr
.preind
== 1 && info
->addr
.postind
== 0);
759 insert_field (self
->fields
[3], code
, 1, 0);
764 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}]. */
766 aarch64_ins_addr_uimm12 (const aarch64_operand
*self
,
767 const aarch64_opnd_info
*info
,
769 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
770 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
772 int shift
= get_logsz (aarch64_get_qualifier_esize (info
->qualifier
));
775 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
777 insert_field (self
->fields
[1], code
,info
->addr
.offset
.imm
>> shift
, 0);
781 /* Encode the address operand for e.g.
782 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
784 aarch64_ins_simd_addr_post (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
785 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
786 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
787 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
790 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
792 if (info
->addr
.offset
.is_reg
)
793 insert_field (FLD_Rm
, code
, info
->addr
.offset
.regno
, 0);
795 insert_field (FLD_Rm
, code
, 0x1f, 0);
799 /* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
801 aarch64_ins_cond (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
802 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
803 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
804 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
807 insert_field (FLD_cond
, code
, info
->cond
->value
, 0);
811 /* Encode the system register operand for e.g. MRS <Xt>, <systemreg>. */
813 aarch64_ins_sysreg (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
814 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
815 const aarch64_inst
*inst
,
816 aarch64_operand_error
*detail ATTRIBUTE_UNUSED
)
818 /* If a system instruction check if we have any restrictions on which
819 registers it can use. */
820 if (inst
->opcode
->iclass
== ic_system
)
822 uint64_t opcode_flags
823 = inst
->opcode
->flags
& (F_SYS_READ
| F_SYS_WRITE
);
824 uint32_t sysreg_flags
825 = info
->sysreg
.flags
& (F_REG_READ
| F_REG_WRITE
);
827 /* Check to see if it's read-only, else check if it's write only.
828 if it's both or unspecified don't care. */
829 if (opcode_flags
== F_SYS_READ
831 && sysreg_flags
!= F_REG_READ
)
833 detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
834 detail
->error
= _("specified register cannot be read from");
835 detail
->index
= info
->idx
;
836 detail
->non_fatal
= true;
838 else if (opcode_flags
== F_SYS_WRITE
840 && sysreg_flags
!= F_REG_WRITE
)
842 detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
843 detail
->error
= _("specified register cannot be written to");
844 detail
->index
= info
->idx
;
845 detail
->non_fatal
= true;
848 /* op0:op1:CRn:CRm:op2 */
849 insert_fields (code
, info
->sysreg
.value
, inst
->opcode
->mask
, 5,
850 FLD_op2
, FLD_CRm
, FLD_CRn
, FLD_op1
, FLD_op0
);
854 /* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
856 aarch64_ins_pstatefield (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
857 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
858 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
859 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
862 insert_fields (code
, info
->pstatefield
, inst
->opcode
->mask
, 2,
865 /* Extra CRm mask. */
866 if (info
->sysreg
.flags
| F_REG_IN_CRM
)
867 insert_field (FLD_CRm
, code
, PSTATE_DECODE_CRM (info
->sysreg
.flags
), 0);
871 /* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
873 aarch64_ins_sysins_op (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
874 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
875 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
876 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
878 /* op1:CRn:CRm:op2 */
879 insert_fields (code
, info
->sysins_op
->value
, inst
->opcode
->mask
, 4,
880 FLD_op2
, FLD_CRm
, FLD_CRn
, FLD_op1
);
884 /* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
887 aarch64_ins_barrier (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
888 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
889 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
890 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
893 insert_field (FLD_CRm
, code
, info
->barrier
->value
, 0);
897 /* Encode the memory barrier option operand for DSB <option>nXS|#<imm>. */
900 aarch64_ins_barrier_dsb_nxs (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
901 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
902 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
903 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
905 /* For the DSB nXS barrier variant: is a 5-bit unsigned immediate,
906 encoded in CRm<3:2>. */
907 aarch64_insn value
= (info
->barrier
->value
>> 2) - 4;
908 insert_field (FLD_CRm_dsb_nxs
, code
, value
, 0);
912 /* Encode the prefetch operation option operand for e.g.
913 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
916 aarch64_ins_prfop (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
917 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
918 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
919 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
922 insert_field (FLD_Rt
, code
, info
->prfop
->value
, 0);
926 /* Encode the hint number for instructions that alias HINT but take an
930 aarch64_ins_hint (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
931 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
932 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
933 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
936 insert_fields (code
, info
->hint_option
->value
, 0, 2, FLD_op2
, FLD_CRm
);
940 /* Encode the extended register operand for e.g.
941 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
943 aarch64_ins_reg_extended (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
944 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
945 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
946 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
948 enum aarch64_modifier_kind kind
;
951 insert_field (FLD_Rm
, code
, info
->reg
.regno
, 0);
953 kind
= info
->shifter
.kind
;
954 if (kind
== AARCH64_MOD_LSL
)
955 kind
= info
->qualifier
== AARCH64_OPND_QLF_W
956 ? AARCH64_MOD_UXTW
: AARCH64_MOD_UXTX
;
957 insert_field (FLD_option
, code
, aarch64_get_operand_modifier_value (kind
), 0);
959 insert_field (FLD_imm3_10
, code
, info
->shifter
.amount
, 0);
964 /* Encode the shifted register operand for e.g.
965 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
967 aarch64_ins_reg_shifted (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
968 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
969 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
970 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
973 insert_field (FLD_Rm
, code
, info
->reg
.regno
, 0);
975 insert_field (FLD_shift
, code
,
976 aarch64_get_operand_modifier_value (info
->shifter
.kind
), 0);
978 insert_field (FLD_imm6_10
, code
, info
->shifter
.amount
, 0);
983 /* Encode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
984 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
985 SELF's operand-dependent value. fields[0] specifies the field that
986 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
988 aarch64_ins_sve_addr_ri_s4xvl (const aarch64_operand
*self
,
989 const aarch64_opnd_info
*info
,
991 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
992 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
994 int factor
= 1 + get_operand_specific_data (self
);
995 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
996 insert_field (FLD_SVE_imm4
, code
, info
->addr
.offset
.imm
/ factor
, 0);
1000 /* Encode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
1001 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
1002 SELF's operand-dependent value. fields[0] specifies the field that
1003 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
1005 aarch64_ins_sve_addr_ri_s6xvl (const aarch64_operand
*self
,
1006 const aarch64_opnd_info
*info
,
1008 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1009 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1011 int factor
= 1 + get_operand_specific_data (self
);
1012 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1013 insert_field (FLD_SVE_imm6
, code
, info
->addr
.offset
.imm
/ factor
, 0);
1017 /* Encode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
1018 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
1019 SELF's operand-dependent value. fields[0] specifies the field that
1020 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
1021 and imm3 fields, with imm3 being the less-significant part. */
1023 aarch64_ins_sve_addr_ri_s9xvl (const aarch64_operand
*self
,
1024 const aarch64_opnd_info
*info
,
1026 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1027 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1029 int factor
= 1 + get_operand_specific_data (self
);
1030 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1031 insert_fields (code
, info
->addr
.offset
.imm
/ factor
, 0,
1032 2, FLD_imm3_10
, FLD_SVE_imm6
);
1036 /* Encode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
1037 is a 4-bit signed number and where <shift> is SELF's operand-dependent
1038 value. fields[0] specifies the base register field. */
1040 aarch64_ins_sve_addr_ri_s4 (const aarch64_operand
*self
,
1041 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1042 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1043 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1045 int factor
= 1 << get_operand_specific_data (self
);
1046 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1047 insert_field (FLD_SVE_imm4
, code
, info
->addr
.offset
.imm
/ factor
, 0);
1051 /* Encode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1052 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1053 value. fields[0] specifies the base register field. */
1055 aarch64_ins_sve_addr_ri_u6 (const aarch64_operand
*self
,
1056 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1057 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1058 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1060 int factor
= 1 << get_operand_specific_data (self
);
1061 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1062 insert_field (FLD_SVE_imm6
, code
, info
->addr
.offset
.imm
/ factor
, 0);
1066 /* Encode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1067 is SELF's operand-dependent value. fields[0] specifies the base
1068 register field and fields[1] specifies the offset register field. */
1070 aarch64_ins_sve_addr_rr_lsl (const aarch64_operand
*self
,
1071 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1072 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1073 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1075 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1076 insert_field (self
->fields
[1], code
, info
->addr
.offset
.regno
, 0);
1080 /* Encode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1081 <shift> is SELF's operand-dependent value. fields[0] specifies the
1082 base register field, fields[1] specifies the offset register field and
1083 fields[2] is a single-bit field that selects SXTW over UXTW. */
1085 aarch64_ins_sve_addr_rz_xtw (const aarch64_operand
*self
,
1086 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1087 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1088 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1090 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1091 insert_field (self
->fields
[1], code
, info
->addr
.offset
.regno
, 0);
1092 if (info
->shifter
.kind
== AARCH64_MOD_UXTW
)
1093 insert_field (self
->fields
[2], code
, 0, 0);
1095 insert_field (self
->fields
[2], code
, 1, 0);
1099 /* Encode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1100 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1101 fields[0] specifies the base register field. */
1103 aarch64_ins_sve_addr_zi_u5 (const aarch64_operand
*self
,
1104 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1105 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1106 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1108 int factor
= 1 << get_operand_specific_data (self
);
1109 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1110 insert_field (FLD_imm5
, code
, info
->addr
.offset
.imm
/ factor
, 0);
1114 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
1115 where <modifier> is fixed by the instruction and where <msz> is a
1116 2-bit unsigned number. fields[0] specifies the base register field
1117 and fields[1] specifies the offset register field. */
1119 aarch64_ext_sve_addr_zz (const aarch64_operand
*self
,
1120 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1121 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1123 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1124 insert_field (self
->fields
[1], code
, info
->addr
.offset
.regno
, 0);
1125 insert_field (FLD_SVE_msz
, code
, info
->shifter
.amount
, 0);
1129 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
1130 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1131 field and fields[1] specifies the offset register field. */
1133 aarch64_ins_sve_addr_zz_lsl (const aarch64_operand
*self
,
1134 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1135 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1136 aarch64_operand_error
*errors
)
1138 return aarch64_ext_sve_addr_zz (self
, info
, code
, errors
);
1141 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
1142 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1143 field and fields[1] specifies the offset register field. */
1145 aarch64_ins_sve_addr_zz_sxtw (const aarch64_operand
*self
,
1146 const aarch64_opnd_info
*info
,
1148 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1149 aarch64_operand_error
*errors
)
1151 return aarch64_ext_sve_addr_zz (self
, info
, code
, errors
);
1154 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
1155 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1156 field and fields[1] specifies the offset register field. */
1158 aarch64_ins_sve_addr_zz_uxtw (const aarch64_operand
*self
,
1159 const aarch64_opnd_info
*info
,
1161 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1162 aarch64_operand_error
*errors
)
1164 return aarch64_ext_sve_addr_zz (self
, info
, code
, errors
);
1167 /* Encode an SVE ADD/SUB immediate. */
1169 aarch64_ins_sve_aimm (const aarch64_operand
*self
,
1170 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1171 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1172 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1174 if (info
->shifter
.amount
== 8)
1175 insert_all_fields (self
, code
, (info
->imm
.value
& 0xff) | 256);
1176 else if (info
->imm
.value
!= 0 && (info
->imm
.value
& 0xff) == 0)
1177 insert_all_fields (self
, code
, ((info
->imm
.value
/ 256) & 0xff) | 256);
1179 insert_all_fields (self
, code
, info
->imm
.value
& 0xff);
1184 aarch64_ins_sve_aligned_reglist (const aarch64_operand
*self
,
1185 const aarch64_opnd_info
*info
,
1187 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1188 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1190 unsigned int num_regs
= get_operand_specific_data (self
);
1191 unsigned int val
= info
->reglist
.first_regno
;
1192 insert_field (self
->fields
[0], code
, val
/ num_regs
, 0);
1196 /* Encode an SVE CPY/DUP immediate. */
1198 aarch64_ins_sve_asimm (const aarch64_operand
*self
,
1199 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1200 const aarch64_inst
*inst
,
1201 aarch64_operand_error
*errors
)
1203 return aarch64_ins_sve_aimm (self
, info
, code
, inst
, errors
);
1206 /* Encode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1207 array specifies which field to use for Zn. MM is encoded in the
1208 concatenation of imm5 and SVE_tszh, with imm5 being the less
1209 significant part. */
1211 aarch64_ins_sve_index (const aarch64_operand
*self
,
1212 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1213 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1214 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1216 unsigned int esize
= aarch64_get_qualifier_esize (info
->qualifier
);
1217 insert_field (self
->fields
[0], code
, info
->reglane
.regno
, 0);
1218 insert_fields (code
, (info
->reglane
.index
* 2 + 1) * esize
, 0,
1219 2, FLD_imm5
, FLD_SVE_tszh
);
1223 /* Encode a logical/bitmask immediate for the MOV alias of SVE DUPM. */
1225 aarch64_ins_sve_limm_mov (const aarch64_operand
*self
,
1226 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1227 const aarch64_inst
*inst
,
1228 aarch64_operand_error
*errors
)
1230 return aarch64_ins_limm (self
, info
, code
, inst
, errors
);
1233 /* Encode Zn[MM], where Zn occupies the least-significant part of the field
1234 and where MM occupies the most-significant part. The operand-dependent
1235 value specifies the number of bits in Zn. */
1237 aarch64_ins_sve_quad_index (const aarch64_operand
*self
,
1238 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1239 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1240 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1242 unsigned int reg_bits
= get_operand_specific_data (self
);
1243 assert (info
->reglane
.regno
< (1U << reg_bits
));
1244 unsigned int val
= (info
->reglane
.index
<< reg_bits
) + info
->reglane
.regno
;
1245 insert_all_fields (self
, code
, val
);
1249 /* Encode {Zn.<T> - Zm.<T>}. The fields array specifies which field
1252 aarch64_ins_sve_reglist (const aarch64_operand
*self
,
1253 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1254 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1255 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1257 insert_field (self
->fields
[0], code
, info
->reglist
.first_regno
, 0);
1261 /* Encode a strided register list. The first field holds the top bit
1262 (0 or 16) and the second field holds the lower bits. The stride is
1263 16 divided by the list length. */
1265 aarch64_ins_sve_strided_reglist (const aarch64_operand
*self
,
1266 const aarch64_opnd_info
*info
,
1268 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1269 aarch64_operand_error
*errors
1272 unsigned int num_regs
= get_operand_specific_data (self
);
1273 unsigned int mask
= 16 | (16 / num_regs
- 1);
1274 unsigned int val
= info
->reglist
.first_regno
;
1275 assert ((val
& mask
) == val
);
1276 insert_field (self
->fields
[0], code
, val
>> 4, 0);
1277 insert_field (self
->fields
[1], code
, val
& 15, 0);
1281 /* Encode <pattern>{, MUL #<amount>}. The fields array specifies which
1282 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
1285 aarch64_ins_sve_scale (const aarch64_operand
*self
,
1286 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1287 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1288 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1290 insert_all_fields (self
, code
, info
->imm
.value
);
1291 insert_field (FLD_SVE_imm4
, code
, info
->shifter
.amount
- 1, 0);
1295 /* Encode an SVE shift left immediate. */
1297 aarch64_ins_sve_shlimm (const aarch64_operand
*self
,
1298 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1299 const aarch64_inst
*inst
,
1300 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1302 const aarch64_opnd_info
*prev_operand
;
1305 assert (info
->idx
> 0);
1306 prev_operand
= &inst
->operands
[info
->idx
- 1];
1307 esize
= aarch64_get_qualifier_esize (prev_operand
->qualifier
);
1308 insert_all_fields (self
, code
, 8 * esize
+ info
->imm
.value
);
1312 /* Encode an SVE shift right immediate. */
1314 aarch64_ins_sve_shrimm (const aarch64_operand
*self
,
1315 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1316 const aarch64_inst
*inst
,
1317 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1319 const aarch64_opnd_info
*prev_operand
;
1322 unsigned int opnd_backshift
= get_operand_specific_data (self
);
1323 assert (info
->idx
>= (int)opnd_backshift
);
1324 prev_operand
= &inst
->operands
[info
->idx
- opnd_backshift
];
1325 esize
= aarch64_get_qualifier_esize (prev_operand
->qualifier
);
1326 insert_all_fields (self
, code
, 16 * esize
- info
->imm
.value
);
1330 /* Encode a single-bit immediate that selects between #0.5 and #1.0.
1331 The fields array specifies which field to use. */
1333 aarch64_ins_sve_float_half_one (const aarch64_operand
*self
,
1334 const aarch64_opnd_info
*info
,
1336 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1337 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1339 if (info
->imm
.value
== 0x3f000000)
1340 insert_field (self
->fields
[0], code
, 0, 0);
1342 insert_field (self
->fields
[0], code
, 1, 0);
1346 /* Encode a single-bit immediate that selects between #0.5 and #2.0.
1347 The fields array specifies which field to use. */
1349 aarch64_ins_sve_float_half_two (const aarch64_operand
*self
,
1350 const aarch64_opnd_info
*info
,
1352 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1353 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1355 if (info
->imm
.value
== 0x3f000000)
1356 insert_field (self
->fields
[0], code
, 0, 0);
1358 insert_field (self
->fields
[0], code
, 1, 0);
1362 /* Encode a single-bit immediate that selects between #0.0 and #1.0.
1363 The fields array specifies which field to use. */
1365 aarch64_ins_sve_float_zero_one (const aarch64_operand
*self
,
1366 const aarch64_opnd_info
*info
,
1368 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1369 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1371 if (info
->imm
.value
== 0)
1372 insert_field (self
->fields
[0], code
, 0, 0);
1374 insert_field (self
->fields
[0], code
, 1, 0);
1378 /* Encode in SME instruction such as MOVA ZA tile vector register number,
1379 vector indicator, vector selector and immediate. */
1381 aarch64_ins_sme_za_hv_tiles (const aarch64_operand
*self
,
1382 const aarch64_opnd_info
*info
,
1384 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1385 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1389 int fld_v
= info
->indexed_za
.v
;
1390 int fld_rv
= info
->indexed_za
.index
.regno
- 12;
1391 int fld_zan_imm
= info
->indexed_za
.index
.imm
;
1392 int regno
= info
->indexed_za
.regno
;
1394 switch (info
->qualifier
)
1396 case AARCH64_OPND_QLF_S_B
:
1400 case AARCH64_OPND_QLF_S_H
:
1403 fld_zan_imm
|= regno
<< 3;
1405 case AARCH64_OPND_QLF_S_S
:
1408 fld_zan_imm
|= regno
<< 2;
1410 case AARCH64_OPND_QLF_S_D
:
1413 fld_zan_imm
|= regno
<< 1;
1415 case AARCH64_OPND_QLF_S_Q
:
1418 fld_zan_imm
= regno
;
1424 insert_field (self
->fields
[0], code
, fld_size
, 0);
1425 insert_field (self
->fields
[1], code
, fld_q
, 0);
1426 insert_field (self
->fields
[2], code
, fld_v
, 0);
1427 insert_field (self
->fields
[3], code
, fld_rv
, 0);
1428 insert_field (self
->fields
[4], code
, fld_zan_imm
, 0);
1434 aarch64_ins_sme_za_hv_tiles_range (const aarch64_operand
*self
,
1435 const aarch64_opnd_info
*info
,
1437 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1438 aarch64_operand_error
*errors
1441 int ebytes
= aarch64_get_qualifier_esize (info
->qualifier
);
1442 int range_size
= get_opcode_dependent_value (inst
->opcode
);
1443 int fld_v
= info
->indexed_za
.v
;
1444 int fld_rv
= info
->indexed_za
.index
.regno
- 12;
1445 int imm
= info
->indexed_za
.index
.imm
;
1446 int max_value
= 16 / range_size
/ ebytes
;
1451 assert (imm
% range_size
== 0 && (imm
/ range_size
) < max_value
);
1452 int fld_zan_imm
= (info
->indexed_za
.regno
* max_value
) | (imm
/ range_size
);
1453 assert (fld_zan_imm
< (range_size
== 4 && ebytes
< 8 ? 4 : 8));
1455 insert_field (self
->fields
[0], code
, fld_v
, 0);
1456 insert_field (self
->fields
[1], code
, fld_rv
, 0);
1457 insert_field (self
->fields
[2], code
, fld_zan_imm
, 0);
1462 /* Encode in SME instruction ZERO list of up to eight 64-bit element tile names
1463 separated by commas, encoded in the "imm8" field.
1465 For programmer convenience an assembler must also accept the names of
1466 32-bit, 16-bit and 8-bit element tiles which are converted into the
1467 corresponding set of 64-bit element tiles.
1470 aarch64_ins_sme_za_list (const aarch64_operand
*self
,
1471 const aarch64_opnd_info
*info
,
1473 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1474 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1476 int fld_mask
= info
->imm
.value
;
1477 insert_field (self
->fields
[0], code
, fld_mask
, 0);
1482 aarch64_ins_sme_za_array (const aarch64_operand
*self
,
1483 const aarch64_opnd_info
*info
,
1485 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1486 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1488 int regno
= info
->indexed_za
.index
.regno
& 3;
1489 int imm
= info
->indexed_za
.index
.imm
;
1490 int countm1
= info
->indexed_za
.index
.countm1
;
1491 assert (imm
% (countm1
+ 1) == 0);
1492 insert_field (self
->fields
[0], code
, regno
, 0);
1493 insert_field (self
->fields
[1], code
, imm
/ (countm1
+ 1), 0);
1498 aarch64_ins_sme_addr_ri_u4xvl (const aarch64_operand
*self
,
1499 const aarch64_opnd_info
*info
,
1501 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1502 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1504 int regno
= info
->addr
.base_regno
;
1505 int imm
= info
->addr
.offset
.imm
;
1506 insert_field (self
->fields
[0], code
, regno
, 0);
1507 insert_field (self
->fields
[1], code
, imm
, 0);
1511 /* Encode in SMSTART and SMSTOP {SM | ZA } mode. */
1513 aarch64_ins_sme_sm_za (const aarch64_operand
*self
,
1514 const aarch64_opnd_info
*info
,
1516 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1517 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1519 aarch64_insn fld_crm
;
1520 /* Set CRm[3:1] bits. */
1521 if (info
->reg
.regno
== 's')
1522 fld_crm
= 0x02 ; /* SVCRSM. */
1523 else if (info
->reg
.regno
== 'z')
1524 fld_crm
= 0x04; /* SVCRZA. */
1528 insert_field (self
->fields
[0], code
, fld_crm
, 0);
1532 /* Encode source scalable predicate register (Pn), name of the index base
1533 register W12-W15 (Rm), and optional element index, defaulting to 0, in the
1534 range 0 to one less than the number of vector elements in a 128-bit vector
1535 register, encoded in "i1:tszh:tszl".
1538 aarch64_ins_sme_pred_reg_with_index (const aarch64_operand
*self
,
1539 const aarch64_opnd_info
*info
,
1541 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1542 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1544 int fld_pn
= info
->indexed_za
.regno
;
1545 int fld_rm
= info
->indexed_za
.index
.regno
- 12;
1546 int imm
= info
->indexed_za
.index
.imm
;
1547 int fld_i1
, fld_tszh
, fld_tshl
;
1549 insert_field (self
->fields
[0], code
, fld_rm
, 0);
1550 insert_field (self
->fields
[1], code
, fld_pn
, 0);
1552 /* Optional element index, defaulting to 0, in the range 0 to one less than
1553 the number of vector elements in a 128-bit vector register, encoded in
1563 switch (info
->qualifier
)
1565 case AARCH64_OPND_QLF_S_B
:
1566 /* <imm> is 4 bit value. */
1567 fld_i1
= (imm
>> 3) & 0x1;
1568 fld_tszh
= (imm
>> 2) & 0x1;
1569 fld_tshl
= ((imm
<< 1) | 0x1) & 0x7;
1571 case AARCH64_OPND_QLF_S_H
:
1572 /* <imm> is 3 bit value. */
1573 fld_i1
= (imm
>> 2) & 0x1;
1574 fld_tszh
= (imm
>> 1) & 0x1;
1575 fld_tshl
= ((imm
<< 2) | 0x2) & 0x7;
1577 case AARCH64_OPND_QLF_S_S
:
1578 /* <imm> is 2 bit value. */
1579 fld_i1
= (imm
>> 1) & 0x1;
1580 fld_tszh
= imm
& 0x1;
1583 case AARCH64_OPND_QLF_S_D
:
1584 /* <imm> is 1 bit value. */
1593 insert_field (self
->fields
[2], code
, fld_i1
, 0);
1594 insert_field (self
->fields
[3], code
, fld_tszh
, 0);
1595 insert_field (self
->fields
[4], code
, fld_tshl
, 0);
1599 /* Insert X0-X30. Register 31 is unallocated. */
1601 aarch64_ins_x0_to_x30 (const aarch64_operand
*self
,
1602 const aarch64_opnd_info
*info
,
1604 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1605 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1607 assert (info
->reg
.regno
<= 30);
1608 insert_field (self
->fields
[0], code
, info
->reg
.regno
, 0);
1612 /* Insert an indexed register, with the first field being the register
1613 number and the remaining fields being the index. */
1615 aarch64_ins_simple_index (const aarch64_operand
*self
,
1616 const aarch64_opnd_info
*info
,
1618 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1619 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1621 int bias
= get_operand_specific_data (self
);
1622 insert_field (self
->fields
[0], code
, info
->reglane
.regno
- bias
, 0);
1623 insert_all_fields_after (self
, 1, code
, info
->reglane
.index
);
1627 /* Insert a plain shift-right immediate, when there is only a single
1630 aarch64_ins_plain_shrimm (const aarch64_operand
*self
,
1631 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1632 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1633 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1635 unsigned int base
= 1 << get_operand_field_width (self
, 0);
1636 insert_field (self
->fields
[0], code
, base
- info
->imm
.value
, 0);
1640 /* Miscellaneous encoding functions. */
1642 /* Encode size[0], i.e. bit 22, for
1643 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1646 encode_asimd_fcvt (aarch64_inst
*inst
)
1649 aarch64_field field
= {0, 0};
1650 enum aarch64_opnd_qualifier qualifier
= AARCH64_OPND_QLF_NIL
;
1652 switch (inst
->opcode
->op
)
1656 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1657 qualifier
= inst
->operands
[1].qualifier
;
1661 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1662 qualifier
= inst
->operands
[0].qualifier
;
1667 assert (qualifier
== AARCH64_OPND_QLF_V_4S
1668 || qualifier
== AARCH64_OPND_QLF_V_2D
);
1669 value
= (qualifier
== AARCH64_OPND_QLF_V_4S
) ? 0 : 1;
1670 gen_sub_field (FLD_size
, 0, 1, &field
);
1671 insert_field_2 (&field
, &inst
->value
, value
, 0);
1674 /* Encode size[0], i.e. bit 22, for
1675 e.g. FCVTXN <Vb><d>, <Va><n>. */
1678 encode_asisd_fcvtxn (aarch64_inst
*inst
)
1680 aarch64_insn val
= 1;
1681 aarch64_field field
= {0, 0};
1682 assert (inst
->operands
[0].qualifier
== AARCH64_OPND_QLF_S_S
);
1683 gen_sub_field (FLD_size
, 0, 1, &field
);
1684 insert_field_2 (&field
, &inst
->value
, val
, 0);
1687 /* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1689 encode_fcvt (aarch64_inst
*inst
)
1692 const aarch64_field field
= {15, 2};
1695 switch (inst
->operands
[0].qualifier
)
1697 case AARCH64_OPND_QLF_S_S
: val
= 0; break;
1698 case AARCH64_OPND_QLF_S_D
: val
= 1; break;
1699 case AARCH64_OPND_QLF_S_H
: val
= 3; break;
1702 insert_field_2 (&field
, &inst
->value
, val
, 0);
1707 /* Return the index in qualifiers_list that INST is using. Should only
1708 be called once the qualifiers are known to be valid. */
1711 aarch64_get_variant (struct aarch64_inst
*inst
)
1713 int i
, nops
, variant
;
1715 nops
= aarch64_num_of_operands (inst
->opcode
);
1716 for (variant
= 0; variant
< AARCH64_MAX_QLF_SEQ_NUM
; ++variant
)
1718 for (i
= 0; i
< nops
; ++i
)
1719 if (inst
->opcode
->qualifiers_list
[variant
][i
]
1720 != inst
->operands
[i
].qualifier
)
1728 /* Do miscellaneous encodings that are not common enough to be driven by
1732 do_misc_encoding (aarch64_inst
*inst
)
1736 switch (inst
->opcode
->op
)
1745 encode_asimd_fcvt (inst
);
1748 encode_asisd_fcvtxn (inst
);
1753 /* Copy Pn to Pm and Pg. */
1754 value
= extract_field (FLD_SVE_Pn
, inst
->value
, 0);
1755 insert_field (FLD_SVE_Pm
, &inst
->value
, value
, 0);
1756 insert_field (FLD_SVE_Pg4_10
, &inst
->value
, value
, 0);
1759 /* Copy Zd to Zm. */
1760 value
= extract_field (FLD_SVE_Zd
, inst
->value
, 0);
1761 insert_field (FLD_SVE_Zm_16
, &inst
->value
, value
, 0);
1764 /* Fill in the zero immediate. */
1765 insert_fields (&inst
->value
, 1 << aarch64_get_variant (inst
), 0,
1766 2, FLD_imm5
, FLD_SVE_tszh
);
1769 /* Copy Zn to Zm. */
1770 value
= extract_field (FLD_SVE_Zn
, inst
->value
, 0);
1771 insert_field (FLD_SVE_Zm_16
, &inst
->value
, value
, 0);
1776 /* Copy Pd to Pm. */
1777 value
= extract_field (FLD_SVE_Pd
, inst
->value
, 0);
1778 insert_field (FLD_SVE_Pm
, &inst
->value
, value
, 0);
1780 case OP_MOVZS_P_P_P
:
1782 /* Copy Pn to Pm. */
1783 value
= extract_field (FLD_SVE_Pn
, inst
->value
, 0);
1784 insert_field (FLD_SVE_Pm
, &inst
->value
, value
, 0);
1786 case OP_NOTS_P_P_P_Z
:
1787 case OP_NOT_P_P_P_Z
:
1788 /* Copy Pg to Pm. */
1789 value
= extract_field (FLD_SVE_Pg4_10
, inst
->value
, 0);
1790 insert_field (FLD_SVE_Pm
, &inst
->value
, value
, 0);
1796 /* Encode the 'size' and 'Q' field for e.g. SHADD. */
1798 encode_sizeq (aarch64_inst
*inst
)
1801 enum aarch64_field_kind kind
;
1804 /* Get the index of the operand whose information we are going to use
1805 to encode the size and Q fields.
1806 This is deduced from the possible valid qualifier lists. */
1807 idx
= aarch64_select_operand_for_sizeq_field_coding (inst
->opcode
);
1808 DEBUG_TRACE ("idx: %d; qualifier: %s", idx
,
1809 aarch64_get_qualifier_name (inst
->operands
[idx
].qualifier
));
1810 sizeq
= aarch64_get_qualifier_standard_value (inst
->operands
[idx
].qualifier
);
1812 insert_field (FLD_Q
, &inst
->value
, sizeq
& 0x1, inst
->opcode
->mask
);
1814 if (inst
->opcode
->iclass
== asisdlse
1815 || inst
->opcode
->iclass
== asisdlsep
1816 || inst
->opcode
->iclass
== asisdlso
1817 || inst
->opcode
->iclass
== asisdlsop
)
1818 kind
= FLD_vldst_size
;
1821 insert_field (kind
, &inst
->value
, (sizeq
>> 1) & 0x3, inst
->opcode
->mask
);
1824 /* Opcodes that have fields shared by multiple operands are usually flagged
1825 with flags. In this function, we detect such flags and use the
1826 information in one of the related operands to do the encoding. The 'one'
1827 operand is not any operand but one of the operands that has the enough
1828 information for such an encoding. */
1831 do_special_encoding (struct aarch64_inst
*inst
)
1834 aarch64_insn value
= 0;
1836 DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst
->value
);
1838 /* Condition for truly conditional executed instructions, e.g. b.cond. */
1839 if (inst
->opcode
->flags
& F_COND
)
1841 insert_field (FLD_cond2
, &inst
->value
, inst
->cond
->value
, 0);
1843 if (inst
->opcode
->flags
& F_SF
)
1845 idx
= select_operand_for_sf_field_coding (inst
->opcode
);
1846 value
= (inst
->operands
[idx
].qualifier
== AARCH64_OPND_QLF_X
1847 || inst
->operands
[idx
].qualifier
== AARCH64_OPND_QLF_SP
)
1849 insert_field (FLD_sf
, &inst
->value
, value
, 0);
1850 if (inst
->opcode
->flags
& F_N
)
1851 insert_field (FLD_N
, &inst
->value
, value
, inst
->opcode
->mask
);
1853 if (inst
->opcode
->flags
& F_LSE_SZ
)
1855 idx
= select_operand_for_sf_field_coding (inst
->opcode
);
1856 value
= (inst
->operands
[idx
].qualifier
== AARCH64_OPND_QLF_X
1857 || inst
->operands
[idx
].qualifier
== AARCH64_OPND_QLF_SP
)
1859 insert_field (FLD_lse_sz
, &inst
->value
, value
, 0);
1861 if (inst
->opcode
->flags
& F_SIZEQ
)
1862 encode_sizeq (inst
);
1863 if (inst
->opcode
->flags
& F_FPTYPE
)
1865 idx
= select_operand_for_fptype_field_coding (inst
->opcode
);
1866 switch (inst
->operands
[idx
].qualifier
)
1868 case AARCH64_OPND_QLF_S_S
: value
= 0; break;
1869 case AARCH64_OPND_QLF_S_D
: value
= 1; break;
1870 case AARCH64_OPND_QLF_S_H
: value
= 3; break;
1873 insert_field (FLD_type
, &inst
->value
, value
, 0);
1875 if (inst
->opcode
->flags
& F_SSIZE
)
1877 enum aarch64_opnd_qualifier qualifier
;
1878 idx
= select_operand_for_scalar_size_field_coding (inst
->opcode
);
1879 qualifier
= inst
->operands
[idx
].qualifier
;
1880 assert (qualifier
>= AARCH64_OPND_QLF_S_B
1881 && qualifier
<= AARCH64_OPND_QLF_S_Q
);
1882 value
= aarch64_get_qualifier_standard_value (qualifier
);
1883 insert_field (FLD_size
, &inst
->value
, value
, inst
->opcode
->mask
);
1885 if (inst
->opcode
->flags
& F_T
)
1887 int num
; /* num of consecutive '0's on the right side of imm5<3:0>. */
1888 aarch64_field field
= {0, 0};
1889 enum aarch64_opnd_qualifier qualifier
;
1892 qualifier
= inst
->operands
[idx
].qualifier
;
1893 assert (aarch64_get_operand_class (inst
->opcode
->operands
[0])
1894 == AARCH64_OPND_CLASS_SIMD_REG
1895 && qualifier
>= AARCH64_OPND_QLF_V_8B
1896 && qualifier
<= AARCH64_OPND_QLF_V_2D
);
1907 value
= aarch64_get_qualifier_standard_value (qualifier
);
1908 insert_field (FLD_Q
, &inst
->value
, value
& 0x1, inst
->opcode
->mask
);
1909 num
= (int) value
>> 1;
1910 assert (num
>= 0 && num
<= 3);
1911 gen_sub_field (FLD_imm5
, 0, num
+ 1, &field
);
1912 insert_field_2 (&field
, &inst
->value
, 1 << num
, inst
->opcode
->mask
);
1914 if (inst
->opcode
->flags
& F_GPRSIZE_IN_Q
)
1916 /* Use Rt to encode in the case of e.g.
1917 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
1918 enum aarch64_opnd_qualifier qualifier
;
1919 idx
= aarch64_operand_index (inst
->opcode
->operands
, AARCH64_OPND_Rt
);
1921 /* Otherwise use the result operand, which has to be a integer
1924 assert (idx
== 0 || idx
== 1);
1925 assert (aarch64_get_operand_class (inst
->opcode
->operands
[idx
])
1926 == AARCH64_OPND_CLASS_INT_REG
);
1927 qualifier
= inst
->operands
[idx
].qualifier
;
1928 insert_field (FLD_Q
, &inst
->value
,
1929 aarch64_get_qualifier_standard_value (qualifier
), 0);
1931 if (inst
->opcode
->flags
& F_LDS_SIZE
)
1933 /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1934 enum aarch64_opnd_qualifier qualifier
;
1935 aarch64_field field
= {0, 0};
1936 assert (aarch64_get_operand_class (inst
->opcode
->operands
[0])
1937 == AARCH64_OPND_CLASS_INT_REG
);
1938 gen_sub_field (FLD_opc
, 0, 1, &field
);
1939 qualifier
= inst
->operands
[0].qualifier
;
1940 insert_field_2 (&field
, &inst
->value
,
1941 1 - aarch64_get_qualifier_standard_value (qualifier
), 0);
1943 /* Miscellaneous encoding as the last step. */
1944 if (inst
->opcode
->flags
& F_MISC
)
1945 do_misc_encoding (inst
);
1947 DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst
->value
);
1950 /* Some instructions (including all SVE ones) use the instruction class
1951 to describe how a qualifiers_list index is represented in the instruction
1952 encoding. If INST is such an instruction, encode the chosen qualifier
1956 aarch64_encode_variant_using_iclass (struct aarch64_inst
*inst
)
1959 switch (inst
->opcode
->iclass
)
1963 /* The variant is encoded as part of the immediate. */
1966 case sme_size_12_bhs
:
1967 insert_field (FLD_SME_size_12
, &inst
->value
,
1968 aarch64_get_variant (inst
), 0);
1972 insert_field (FLD_SME_size_22
, &inst
->value
,
1973 aarch64_get_variant (inst
), 0);
1976 case sme_size_22_hsd
:
1977 insert_field (FLD_SME_size_22
, &inst
->value
,
1978 aarch64_get_variant (inst
) + 1, 0);
1981 case sme_size_12_hs
:
1982 insert_field (FLD_SME_size_12
, &inst
->value
,
1983 aarch64_get_variant (inst
) + 1, 0);
1987 insert_field (FLD_SME_sz_23
, &inst
->value
,
1988 aarch64_get_variant (inst
), 0);
1992 insert_fields (&inst
->value
, aarch64_get_variant (inst
),
1993 0, 2, FLD_SVE_M_14
, FLD_size
);
1998 case sve_shift_pred
:
1999 case sve_shift_unpred
:
2000 case sve_shift_tsz_hsd
:
2001 case sve_shift_tsz_bhsd
:
2002 /* For indices and shift amounts, the variant is encoded as
2003 part of the immediate. */
2008 /* For sve_limm, the .B, .H, and .S forms are just a convenience
2009 and depend on the immediate. They don't have a separate
2015 /* These instructions have only a single variant. */
2019 insert_fields (&inst
->value
, aarch64_get_variant (inst
),
2020 0, 2, FLD_SVE_M_16
, FLD_size
);
2024 insert_field (FLD_SVE_M_4
, &inst
->value
, aarch64_get_variant (inst
), 0);
2029 insert_field (FLD_size
, &inst
->value
, aarch64_get_variant (inst
), 0);
2033 /* MOD 3 For `OP_SVE_Vv_HSD`. */
2034 insert_field (FLD_size
, &inst
->value
, aarch64_get_variant (inst
) % 3 + 1, 0);
2041 insert_field (FLD_SVE_sz
, &inst
->value
, aarch64_get_variant (inst
), 0);
2045 insert_field (FLD_SVE_sz2
, &inst
->value
, aarch64_get_variant (inst
), 0);
2049 insert_field (FLD_SVE_size
, &inst
->value
,
2050 aarch64_get_variant (inst
) + 1, 0);
2053 case sve_size_tsz_bhs
:
2054 insert_fields (&inst
->value
,
2055 (1 << aarch64_get_variant (inst
)),
2056 0, 2, FLD_SVE_tszl_19
, FLD_SVE_sz
);
2060 variant
= aarch64_get_variant (inst
) + 1;
2063 insert_field (FLD_size
, &inst
->value
, variant
, 0);
2071 /* Converters converting an alias opcode instruction to its real form. */
2073 /* ROR <Wd>, <Ws>, #<shift>
2075 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
2077 convert_ror_to_extr (aarch64_inst
*inst
)
2079 copy_operand_info (inst
, 3, 2);
2080 copy_operand_info (inst
, 2, 1);
2083 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
2085 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
2087 convert_xtl_to_shll (aarch64_inst
*inst
)
2089 inst
->operands
[2].qualifier
= inst
->operands
[1].qualifier
;
2090 inst
->operands
[2].imm
.value
= 0;
2094 LSR <Xd>, <Xn>, #<shift>
2096 UBFM <Xd>, <Xn>, #<shift>, #63. */
2098 convert_sr_to_bfm (aarch64_inst
*inst
)
2100 inst
->operands
[3].imm
.value
=
2101 inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
? 31 : 63;
2104 /* Convert MOV to ORR. */
2106 convert_mov_to_orr (aarch64_inst
*inst
)
2108 /* MOV <Vd>.<T>, <Vn>.<T>
2110 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
2111 copy_operand_info (inst
, 2, 1);
2114 /* When <imms> >= <immr>, the instruction written:
2115 SBFX <Xd>, <Xn>, #<lsb>, #<width>
2117 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
2120 convert_bfx_to_bfm (aarch64_inst
*inst
)
2124 /* Convert the operand. */
2125 lsb
= inst
->operands
[2].imm
.value
;
2126 width
= inst
->operands
[3].imm
.value
;
2127 inst
->operands
[2].imm
.value
= lsb
;
2128 inst
->operands
[3].imm
.value
= lsb
+ width
- 1;
2131 /* When <imms> < <immr>, the instruction written:
2132 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
2134 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
2137 convert_bfi_to_bfm (aarch64_inst
*inst
)
2141 /* Convert the operand. */
2142 lsb
= inst
->operands
[2].imm
.value
;
2143 width
= inst
->operands
[3].imm
.value
;
2144 if (inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
)
2146 inst
->operands
[2].imm
.value
= (32 - lsb
) & 0x1f;
2147 inst
->operands
[3].imm
.value
= width
- 1;
2151 inst
->operands
[2].imm
.value
= (64 - lsb
) & 0x3f;
2152 inst
->operands
[3].imm
.value
= width
- 1;
2156 /* The instruction written:
2157 BFC <Xd>, #<lsb>, #<width>
2159 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
2162 convert_bfc_to_bfm (aarch64_inst
*inst
)
2167 copy_operand_info (inst
, 3, 2);
2168 copy_operand_info (inst
, 2, 1);
2169 copy_operand_info (inst
, 1, 0);
2170 inst
->operands
[1].reg
.regno
= 0x1f;
2172 /* Convert the immediate operand. */
2173 lsb
= inst
->operands
[2].imm
.value
;
2174 width
= inst
->operands
[3].imm
.value
;
2175 if (inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
)
2177 inst
->operands
[2].imm
.value
= (32 - lsb
) & 0x1f;
2178 inst
->operands
[3].imm
.value
= width
- 1;
2182 inst
->operands
[2].imm
.value
= (64 - lsb
) & 0x3f;
2183 inst
->operands
[3].imm
.value
= width
- 1;
2187 /* The instruction written:
2188 LSL <Xd>, <Xn>, #<shift>
2190 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
2193 convert_lsl_to_ubfm (aarch64_inst
*inst
)
2195 int64_t shift
= inst
->operands
[2].imm
.value
;
2197 if (inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
)
2199 inst
->operands
[2].imm
.value
= (32 - shift
) & 0x1f;
2200 inst
->operands
[3].imm
.value
= 31 - shift
;
2204 inst
->operands
[2].imm
.value
= (64 - shift
) & 0x3f;
2205 inst
->operands
[3].imm
.value
= 63 - shift
;
2209 /* CINC <Wd>, <Wn>, <cond>
2211 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>). */
2214 convert_to_csel (aarch64_inst
*inst
)
2216 copy_operand_info (inst
, 3, 2);
2217 copy_operand_info (inst
, 2, 1);
2218 inst
->operands
[3].cond
= get_inverted_cond (inst
->operands
[3].cond
);
2221 /* CSET <Wd>, <cond>
2223 CSINC <Wd>, WZR, WZR, invert(<cond>). */
2226 convert_cset_to_csinc (aarch64_inst
*inst
)
2228 copy_operand_info (inst
, 3, 1);
2229 copy_operand_info (inst
, 2, 0);
2230 copy_operand_info (inst
, 1, 0);
2231 inst
->operands
[1].reg
.regno
= 0x1f;
2232 inst
->operands
[2].reg
.regno
= 0x1f;
2233 inst
->operands
[3].cond
= get_inverted_cond (inst
->operands
[3].cond
);
2238 MOVZ <Wd>, #<imm16>, LSL #<shift>. */
2241 convert_mov_to_movewide (aarch64_inst
*inst
)
2244 uint32_t shift_amount
;
2245 uint64_t value
= ~(uint64_t)0;
2247 switch (inst
->opcode
->op
)
2249 case OP_MOV_IMM_WIDE
:
2250 value
= inst
->operands
[1].imm
.value
;
2252 case OP_MOV_IMM_WIDEN
:
2253 value
= ~inst
->operands
[1].imm
.value
;
2258 inst
->operands
[1].type
= AARCH64_OPND_HALF
;
2259 is32
= inst
->operands
[0].qualifier
== AARCH64_OPND_QLF_W
;
2260 if (! aarch64_wide_constant_p (value
, is32
, &shift_amount
))
2261 /* The constraint check should have guaranteed this wouldn't happen. */
2263 value
>>= shift_amount
;
2265 inst
->operands
[1].imm
.value
= value
;
2266 inst
->operands
[1].shifter
.kind
= AARCH64_MOD_LSL
;
2267 inst
->operands
[1].shifter
.amount
= shift_amount
;
2272 ORR <Wd>, WZR, #<imm>. */
2275 convert_mov_to_movebitmask (aarch64_inst
*inst
)
2277 copy_operand_info (inst
, 2, 1);
2278 inst
->operands
[1].reg
.regno
= 0x1f;
2279 inst
->operands
[1].skip
= 0;
2282 /* Some alias opcodes are assembled by being converted to their real-form. */
2285 convert_to_real (aarch64_inst
*inst
, const aarch64_opcode
*real
)
2287 const aarch64_opcode
*alias
= inst
->opcode
;
2289 if ((alias
->flags
& F_CONV
) == 0)
2290 goto convert_to_real_return
;
2296 convert_sr_to_bfm (inst
);
2299 convert_lsl_to_ubfm (inst
);
2304 convert_to_csel (inst
);
2308 convert_cset_to_csinc (inst
);
2313 convert_bfx_to_bfm (inst
);
2318 convert_bfi_to_bfm (inst
);
2321 convert_bfc_to_bfm (inst
);
2324 convert_mov_to_orr (inst
);
2326 case OP_MOV_IMM_WIDE
:
2327 case OP_MOV_IMM_WIDEN
:
2328 convert_mov_to_movewide (inst
);
2330 case OP_MOV_IMM_LOG
:
2331 convert_mov_to_movebitmask (inst
);
2334 convert_ror_to_extr (inst
);
2340 convert_xtl_to_shll (inst
);
2346 convert_to_real_return
:
2347 aarch64_replace_opcode (inst
, real
);
2350 /* Encode *INST_ORI of the opcode code OPCODE.
2351 Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
2352 matched operand qualifier sequence in *QLF_SEQ. */
2355 aarch64_opcode_encode (const aarch64_opcode
*opcode
,
2356 const aarch64_inst
*inst_ori
, aarch64_insn
*code
,
2357 aarch64_opnd_qualifier_t
*qlf_seq
,
2358 aarch64_operand_error
*mismatch_detail
,
2359 aarch64_instr_sequence
* insn_sequence
)
2362 const aarch64_opcode
*aliased
;
2363 aarch64_inst copy
, *inst
;
2365 DEBUG_TRACE ("enter with %s", opcode
->name
);
2367 /* Create a copy of *INST_ORI, so that we can do any change we want. */
2371 assert (inst
->opcode
== NULL
|| inst
->opcode
== opcode
);
2372 if (inst
->opcode
== NULL
)
2373 inst
->opcode
= opcode
;
2375 /* Constrain the operands.
2376 After passing this, the encoding is guaranteed to succeed. */
2377 if (aarch64_match_operands_constraint (inst
, mismatch_detail
) == 0)
2379 DEBUG_TRACE ("FAIL since operand constraint not met");
2383 /* Get the base value.
2384 Note: this has to be before the aliasing handling below in order to
2385 get the base value from the alias opcode before we move on to the
2386 aliased opcode for encoding. */
2387 inst
->value
= opcode
->opcode
;
2389 /* No need to do anything else if the opcode does not have any operand. */
2390 if (aarch64_num_of_operands (opcode
) == 0)
2393 /* Assign operand indexes and check types. Also put the matched
2394 operand qualifiers in *QLF_SEQ to return. */
2395 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2397 assert (opcode
->operands
[i
] == inst
->operands
[i
].type
);
2398 inst
->operands
[i
].idx
= i
;
2399 if (qlf_seq
!= NULL
)
2400 *qlf_seq
= inst
->operands
[i
].qualifier
;
2403 aliased
= aarch64_find_real_opcode (opcode
);
2404 /* If the opcode is an alias and it does not ask for direct encoding by
2405 itself, the instruction will be transformed to the form of real opcode
2406 and the encoding will be carried out using the rules for the aliased
2408 if (aliased
!= NULL
&& (opcode
->flags
& F_CONV
))
2410 DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
2411 aliased
->name
, opcode
->name
);
2412 /* Convert the operands to the form of the real opcode. */
2413 convert_to_real (inst
, aliased
);
2417 aarch64_opnd_info
*info
= inst
->operands
;
2419 /* Call the inserter of each operand. */
2420 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
, ++info
)
2422 const aarch64_operand
*opnd
;
2423 enum aarch64_opnd type
= opcode
->operands
[i
];
2424 if (type
== AARCH64_OPND_NIL
)
2428 DEBUG_TRACE ("skip the incomplete operand %d", i
);
2431 opnd
= &aarch64_operands
[type
];
2432 if (operand_has_inserter (opnd
)
2433 && !aarch64_insert_operand (opnd
, info
, &inst
->value
, inst
,
2438 /* Call opcode encoders indicated by flags. */
2439 if (opcode_has_special_coder (opcode
))
2440 do_special_encoding (inst
);
2442 /* Possibly use the instruction class to encode the chosen qualifier
2444 aarch64_encode_variant_using_iclass (inst
);
2446 /* Run a verifier if the instruction has one set. */
2447 if (opcode
->verifier
)
2449 enum err_type result
= opcode
->verifier (inst
, *code
, 0, true,
2450 mismatch_detail
, insn_sequence
);
2462 /* Always run constrain verifiers, this is needed because constrains need to
2463 maintain a global state. Regardless if the instruction has the flag set
2465 enum err_type result
= verify_constraints (inst
, *code
, 0, true,
2466 mismatch_detail
, insn_sequence
);
2479 DEBUG_TRACE ("exit with %s", opcode
->name
);
2481 *code
= inst
->value
;