Add generated source files and fix thinko in aarch64-asm.c
[binutils-gdb.git] / opcodes / aarch64-asm.c
blob4d0b13e4c5aa33b8192a298990f8ce6ca3cd8ae8
1 /* aarch64-asm.c -- AArch64 assembler support.
2 Copyright (C) 2012-2024 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
21 #include "sysdep.h"
22 #include <stdarg.h>
23 #include "libiberty.h"
24 #include "aarch64-asm.h"
25 #include "opintl.h"
27 /* Utilities. */
29 /* The unnamed arguments consist of the number of fields and information about
30 these fields where the VALUE will be inserted into CODE. MASK can be zero or
31 the base mask of the opcode.
33    N.B. the fields are required to be in such an order that the least significant
34 field for VALUE comes the first, e.g. the <index> in
35 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
36 is encoded in H:L:M in some cases, the fields H:L:M should be passed in
37 the order of M, L, H. */
static inline void
insert_fields (aarch64_insn *code, aarch64_insn value, aarch64_insn mask, ...)
{
  uint32_t num;
  const aarch64_field *field;
  enum aarch64_field_kind kind;
  va_list va;

  va_start (va, mask);
  num = va_arg (va, uint32_t);
  /* No caller ever passes more than five fields at once.  */
  assert (num <= 5);
  while (num--)
    {
      /* Insert the least significant chunk of VALUE into the current
	 field, then shift it out so the next (more significant) field
	 receives the remaining bits.  */
      kind = va_arg (va, enum aarch64_field_kind);
      field = &fields[kind];
      insert_field (kind, code, value, mask);
      value >>= field->width;
    }
  va_end (va);
}
60 /* Insert a raw field value VALUE into all fields in SELF->fields after START.
61 The least significant bit goes in the final field. */
static void
insert_all_fields_after (const aarch64_operand *self, unsigned int start,
			 aarch64_insn *code, aarch64_insn value)
{
  unsigned int i;
  enum aarch64_field_kind kind;

  /* Walk the field list backwards so the least significant bits of VALUE
     land in the final field, per the contract in the header comment.  */
  for (i = ARRAY_SIZE (self->fields); i-- > start; )
    if (self->fields[i] != FLD_NIL)
      {
	kind = self->fields[i];
	insert_field (kind, code, value, 0);
	value >>= fields[kind].width;
      }
}
79 /* Insert a raw field value VALUE into all fields in SELF->fields.
80 The least significant bit goes in the final field. */
82 static void
83 insert_all_fields (const aarch64_operand *self, aarch64_insn *code,
84 aarch64_insn value)
86 return insert_all_fields_after (self, 0, code, value);
89 /* Operand inserters. */
91 /* Insert nothing. */
bool
aarch64_ins_none (const aarch64_operand *self ATTRIBUTE_UNUSED,
		  const aarch64_opnd_info *info ATTRIBUTE_UNUSED,
		  aarch64_insn *code ATTRIBUTE_UNUSED,
		  const aarch64_inst *inst ATTRIBUTE_UNUSED,
		  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* The operand contributes no bits to the encoding; always succeeds.  */
  return true;
}
102 /* Insert register number. */
103 bool
104 aarch64_ins_regno (const aarch64_operand *self, const aarch64_opnd_info *info,
105 aarch64_insn *code,
106 const aarch64_inst *inst ATTRIBUTE_UNUSED,
107 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
109 int val = info->reg.regno - get_operand_specific_data (self);
110 insert_field (self->fields[0], code, val, 0);
111 return true;
114 /* Insert register number, index and/or other data for SIMD register element
115 operand, e.g. the last source operand in
116 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
bool
aarch64_ins_reglane (const aarch64_operand *self, const aarch64_opnd_info *info,
		     aarch64_insn *code, const aarch64_inst *inst,
		     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* regno */
  insert_field (self->fields[0], code, info->reglane.regno, inst->opcode->mask);
  /* index and/or type */
  if (inst->opcode->iclass == asisdone || inst->opcode->iclass == asimdins)
    {
      /* POS is the bit position implied by the element size qualifier
	 (S_B -> 0, S_H -> 1, ...).  */
      int pos = info->qualifier - AARCH64_OPND_QLF_S_B;
      if (info->type == AARCH64_OPND_En
	  && inst->opcode->operands[0] == AARCH64_OPND_Ed)
	{
	  /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>].  */
	  assert (info->idx == 1);	/* Vn */
	  aarch64_insn value = info->reglane.index << pos;
	  insert_field (FLD_imm4_11, code, value, 0);
	}
      else
	{
	  /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
	     imm5<3:0>	<V>
	     0000	RESERVED
	     xxx1	B
	     xx10	H
	     x100	S
	     1000	D  */
	  aarch64_insn value = ((info->reglane.index << 1) | 1) << pos;
	  insert_field (FLD_imm5, code, value, 0);
	}
    }
  else if (inst->opcode->iclass == dotproduct)
    {
      unsigned reglane_index = info->reglane.index;
      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_4B:
	case AARCH64_OPND_QLF_S_2H:
	  /* L:H */
	  assert (reglane_index < 4);
	  insert_fields (code, reglane_index, 0, 2, FLD_L, FLD_H);
	  break;
	default:
	  return false;
	}
    }
  else if (inst->opcode->iclass == cryptosm3)
    {
      /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>S[<imm2>].  */
      unsigned reglane_index = info->reglane.index;
      assert (reglane_index < 4);
      insert_field (FLD_SM3_imm2, code, reglane_index, 0);
    }
  else
    {
      /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
	 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].  */
      unsigned reglane_index = info->reglane.index;

      if (inst->opcode->op == OP_FCMLA_ELEM)
	/* Complex operand takes two elements.  */
	reglane_index *= 2;

      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_H:
	  /* H:L:M */
	  assert (reglane_index < 8);
	  insert_fields (code, reglane_index, 0, 3, FLD_M, FLD_L, FLD_H);
	  break;
	case AARCH64_OPND_QLF_S_S:
	  /* H:L */
	  assert (reglane_index < 4);
	  insert_fields (code, reglane_index, 0, 2, FLD_L, FLD_H);
	  break;
	case AARCH64_OPND_QLF_S_D:
	  /* H */
	  assert (reglane_index < 2);
	  insert_field (FLD_H, code, reglane_index, 0);
	  break;
	default:
	  return false;
	}
    }
  return true;
}
205 /* Insert regno and len field of a register list operand, e.g. Vn in TBL. */
206 bool
207 aarch64_ins_reglist (const aarch64_operand *self, const aarch64_opnd_info *info,
208 aarch64_insn *code,
209 const aarch64_inst *inst ATTRIBUTE_UNUSED,
210 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
212 /* R */
213 insert_field (self->fields[0], code, info->reglist.first_regno, 0);
214 /* len */
215 insert_field (FLD_len, code, info->reglist.num_regs - 1, 0);
216 return true;
219 /* Insert Rt and opcode fields for a register list operand, e.g. Vt
220 in AdvSIMD load/store instructions. */
bool
aarch64_ins_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
			  const aarch64_opnd_info *info, aarch64_insn *code,
			  const aarch64_inst *inst,
			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn value = 0;
  /* Number of elements in each structure to be loaded/stored.  */
  unsigned num = get_opcode_dependent_value (inst->opcode);

  /* Rt */
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* opcode */
  switch (num)
    {
    case 1:
      /* LD1/ST1: the opcode value also distinguishes how many registers
	 the list spans.  */
      switch (info->reglist.num_regs)
	{
	case 1: value = 0x7; break;
	case 2: value = 0xa; break;
	case 3: value = 0x6; break;
	case 4: value = 0x2; break;
	default: return false;
	}
      break;
    case 2:
      /* LD2/ST2 and the 2x2 variants.  */
      value = info->reglist.num_regs == 4 ? 0x3 : 0x8;
      break;
    case 3:
      value = 0x4;
      break;
    case 4:
      value = 0x0;
      break;
    default:
      return false;
    }
  insert_field (FLD_opcode, code, value, 0);

  return true;
}
263 /* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
264 single structure to all lanes instructions. */
bool
aarch64_ins_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
			    const aarch64_opnd_info *info, aarch64_insn *code,
			    const aarch64_inst *inst,
			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn value;
  /* The opcode dependent area stores the number of elements in
     each structure to be loaded/stored.  */
  int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;

  /* Rt */
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* S */
  value = (aarch64_insn) 0;
  if (is_ld1r && info->reglist.num_regs == 2)
    /* OP_LD1R does not have alternating variant, but have "two consecutive"
       instead.  */
    value = (aarch64_insn) 1;
  insert_field (FLD_S, code, value, 0);

  return true;
}
289 /* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
290 operand e.g. Vt in AdvSIMD load/store single element instructions. */
bool
aarch64_ins_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
			   const aarch64_opnd_info *info, aarch64_insn *code,
			   const aarch64_inst *inst ATTRIBUTE_UNUSED,
			   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_field field = {0, 0};
  aarch64_insn QSsize = 0;	/* fields Q:S:size.  */
  aarch64_insn opcodeh2 = 0;	/* opcode<2:1> */

  assert (info->reglist.has_index);

  /* Rt */
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* Encode the index, opcode<2:1> and size.  The wider the element, the
     fewer bits of the index fit in Q:S:size and the more fixed bits the
     encoding carries.  */
  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      /* Index encoded in "Q:S:size".  */
      QSsize = info->reglist.index;
      opcodeh2 = 0x0;
      break;
    case AARCH64_OPND_QLF_S_H:
      /* Index encoded in "Q:S:size<1>".  */
      QSsize = info->reglist.index << 1;
      opcodeh2 = 0x1;
      break;
    case AARCH64_OPND_QLF_S_S:
      /* Index encoded in "Q:S".  */
      QSsize = info->reglist.index << 2;
      opcodeh2 = 0x2;
      break;
    case AARCH64_OPND_QLF_S_D:
      /* Index encoded in "Q".  */
      QSsize = info->reglist.index << 3 | 0x1;
      opcodeh2 = 0x2;
      break;
    default:
      return false;
    }
  insert_fields (code, QSsize, 0, 3, FLD_vldst_size, FLD_S, FLD_Q);
  /* opcode<2:1> lives in a sub-field of the asisdlso opcode field.  */
  gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
  insert_field_2 (&field, code, opcodeh2, 0);

  return true;
}
338 /* Insert fields immh:immb and/or Q for e.g. the shift immediate in
339 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
340 or SSHR <V><d>, <V><n>, #<shift>. */
bool
aarch64_ins_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
			       const aarch64_opnd_info *info,
			       aarch64_insn *code, const aarch64_inst *inst,
			       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  unsigned val = aarch64_get_qualifier_standard_value (info->qualifier);
  aarch64_insn Q, imm;

  if (inst->opcode->iclass == asimdshf)
    {
      /* Q
	 immh	Q	<T>
	 0000	x	SEE AdvSIMD modified immediate
	 0001	0	8B
	 0001	1	16B
	 001x	0	4H
	 001x	1	8H
	 01xx	0	2S
	 01xx	1	4S
	 1xxx	0	RESERVED
	 1xxx	1	2D  */
      /* The low bit of the standard qualifier value is the Q bit; the
	 remaining bits select the element size below.  */
      Q = (val & 0x1) ? 1 : 0;
      insert_field (FLD_Q, code, Q, inst->opcode->mask);
      val >>= 1;
    }

  assert (info->type == AARCH64_OPND_IMM_VLSR
	  || info->type == AARCH64_OPND_IMM_VLSL);

  if (info->type == AARCH64_OPND_IMM_VLSR)
    /* immh:immb
       immh	<shift>
       0000	SEE AdvSIMD modified immediate
       0001	(16-UInt(immh:immb))
       001x	(32-UInt(immh:immb))
       01xx	(64-UInt(immh:immb))
       1xxx	(128-UInt(immh:immb))  */
    imm = (16 << (unsigned)val) - info->imm.value;
  else
    /* immh:immb
       immh	<shift>
       0000	SEE AdvSIMD modified immediate
       0001	(UInt(immh:immb)-8)
       001x	(UInt(immh:immb)-16)
       01xx	(UInt(immh:immb)-32)
       1xxx	(UInt(immh:immb)-64)  */
    imm = info->imm.value + (8 << (unsigned)val);
  insert_fields (code, imm, 0, 2, FLD_immb, FLD_immh);

  return true;
}
394 /* Insert fields for e.g. the immediate operands in
395 BFM <Wd>, <Wn>, #<immr>, #<imms>. */
396 bool
397 aarch64_ins_imm (const aarch64_operand *self, const aarch64_opnd_info *info,
398 aarch64_insn *code,
399 const aarch64_inst *inst ATTRIBUTE_UNUSED,
400 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
402 int64_t imm;
404 imm = info->imm.value;
405 if (operand_need_shift_by_two (self))
406 imm >>= 2;
407 if (operand_need_shift_by_three (self))
408 imm >>= 3;
409 if (operand_need_shift_by_four (self))
410 imm >>= 4;
411 insert_all_fields (self, code, imm);
412 return true;
415 /* Insert immediate and its shift amount for e.g. the last operand in
416 MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
bool
aarch64_ins_imm_half (const aarch64_operand *self, const aarch64_opnd_info *info,
		      aarch64_insn *code, const aarch64_inst *inst,
		      aarch64_operand_error *errors)
{
  /* imm16 */
  aarch64_ins_imm (self, info, code, inst, errors);
  /* hw: the LSL amount is a multiple of 16, encoded as amount / 16.  */
  insert_field (FLD_hw, code, info->shifter.amount >> 4, 0);
  return true;
}
429 /* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
430 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
bool
aarch64_ins_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
				  const aarch64_opnd_info *info,
				  aarch64_insn *code,
				  const aarch64_inst *inst ATTRIBUTE_UNUSED,
				  aarch64_operand_error *errors
				  ATTRIBUTE_UNUSED)
{
  enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
  uint64_t imm = info->imm.value;
  enum aarch64_modifier_kind kind = info->shifter.kind;
  int amount = info->shifter.amount;
  aarch64_field field = {0, 0};

  /* a:b:c:d:e:f:g:h */
  if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
    {
      /* Either MOVI <Dd>, #<imm>
	 or     MOVI <Vd>.2D, #<imm>.
	 <imm> is a 64-bit immediate
	 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
	 encoded in "a:b:c:d:e:f:g:h".  */
      imm = aarch64_shrink_expanded_imm8 (imm);
      /* Constraint checking should have rejected non-shrinkable values;
	 a negative result here would indicate shrinking failed.  */
      assert ((int)imm >= 0);
    }
  insert_fields (code, imm, 0, 2, FLD_defgh, FLD_abc);

  if (kind == AARCH64_MOD_NONE)
    return true;

  /* shift amount partially in cmode */
  assert (kind == AARCH64_MOD_LSL || kind == AARCH64_MOD_MSL);
  if (kind == AARCH64_MOD_LSL)
    {
      /* AARCH64_MOD_LSL: shift zeros.  */
      int esize = aarch64_get_qualifier_esize (opnd0_qualifier);
      assert (esize == 4 || esize == 2 || esize == 1);
      /* For 8-bit move immediate, the optional LSL #0 does not require
	 encoding.  */
      if (esize == 1)
	return true;
      /* Amounts are multiples of 8; the cmode sub-field stores amount/8.  */
      amount >>= 3;
      if (esize == 4)
	gen_sub_field (FLD_cmode, 1, 2, &field);	/* per word */
      else
	gen_sub_field (FLD_cmode, 1, 1, &field);	/* per halfword */
    }
  else
    {
      /* AARCH64_MOD_MSL: shift ones.  */
      amount >>= 4;
      gen_sub_field (FLD_cmode, 0, 1, &field);		/* per word */
    }
  insert_field_2 (&field, code, amount, 0);

  return true;
}
489 /* Insert fields for an 8-bit floating-point immediate. */
490 bool
491 aarch64_ins_fpimm (const aarch64_operand *self, const aarch64_opnd_info *info,
492 aarch64_insn *code,
493 const aarch64_inst *inst ATTRIBUTE_UNUSED,
494 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
496 insert_all_fields (self, code, info->imm.value);
497 return true;
500 /* Insert 1-bit rotation immediate (#90 or #270). */
501 bool
502 aarch64_ins_imm_rotate1 (const aarch64_operand *self,
503 const aarch64_opnd_info *info,
504 aarch64_insn *code, const aarch64_inst *inst,
505 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
507 uint64_t rot = (info->imm.value - 90) / 180;
508 assert (rot < 2U);
509 insert_field (self->fields[0], code, rot, inst->opcode->mask);
510 return true;
513 /* Insert 2-bit rotation immediate (#0, #90, #180 or #270). */
514 bool
515 aarch64_ins_imm_rotate2 (const aarch64_operand *self,
516 const aarch64_opnd_info *info,
517 aarch64_insn *code, const aarch64_inst *inst,
518 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
520 uint64_t rot = info->imm.value / 90;
521 assert (rot < 4U);
522 insert_field (self->fields[0], code, rot, inst->opcode->mask);
523 return true;
526 /* Insert #<fbits> for the immediate operand in fp fix-point instructions,
527 e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
bool
aarch64_ins_fbits (const aarch64_operand *self, const aarch64_opnd_info *info,
		   aarch64_insn *code,
		   const aarch64_inst *inst ATTRIBUTE_UNUSED,
		   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* <fbits> is encoded in the scale field as 64 - <fbits>.  */
  insert_field (self->fields[0], code, 64 - info->imm.value, 0);
  return true;
}
538 /* Insert arithmetic immediate for e.g. the last operand in
539 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
540 bool
541 aarch64_ins_aimm (const aarch64_operand *self, const aarch64_opnd_info *info,
542 aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
543 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
545 /* shift */
546 aarch64_insn value = info->shifter.amount ? 1 : 0;
547 insert_field (self->fields[0], code, value, 0);
548 /* imm12 (unsigned) */
549 insert_field (self->fields[1], code, info->imm.value, 0);
550 return true;
553 /* Common routine shared by aarch64_ins{,_inv}_limm. INVERT_P says whether
554 the operand should be inverted before encoding. */
static bool
aarch64_ins_limm_1 (const aarch64_operand *self,
		    const aarch64_opnd_info *info, aarch64_insn *code,
		    const aarch64_inst *inst, bool invert_p,
		    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  bool res;
  aarch64_insn value;
  uint64_t imm = info->imm.value;
  int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);

  if (invert_p)
    imm = ~imm;
  /* The constraint check should guarantee that this will work.  */
  res = aarch64_logical_immediate_p (imm, esize, &value);
  if (res)
    /* The encoded N:immr:imms value is spread over fields[2..0], least
       significant field first.  */
    insert_fields (code, value, 0, 3, self->fields[2], self->fields[1],
		   self->fields[0]);
  return res;
}
576 /* Insert logical/bitmask immediate for e.g. the last operand in
577 ORR <Wd|WSP>, <Wn>, #<imm>. */
bool
aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
		  aarch64_insn *code, const aarch64_inst *inst,
		  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* BIC is encoded as AND with the immediate inverted.  */
  return aarch64_ins_limm_1 (self, info, code, inst,
			     inst->opcode->op == OP_BIC, errors);
}
587 /* Insert a logical/bitmask immediate for the BIC alias of AND (etc.). */
bool
aarch64_ins_inv_limm (const aarch64_operand *self,
		      const aarch64_opnd_info *info, aarch64_insn *code,
		      const aarch64_inst *inst,
		      aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Always invert: the operand is the bitwise NOT of the encoded value.  */
  return aarch64_ins_limm_1 (self, info, code, inst, true, errors);
}
597 /* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
598 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
bool
aarch64_ins_ft (const aarch64_operand *self, const aarch64_opnd_info *info,
		aarch64_insn *code, const aarch64_inst *inst,
		aarch64_operand_error *errors)
{
  aarch64_insn value = 0;

  assert (info->idx == 0);

  /* Rt */
  aarch64_ins_regno (self, info, code, inst, errors);
  if (inst->opcode->iclass == ldstpair_indexed
      || inst->opcode->iclass == ldstnapair_offs
      || inst->opcode->iclass == ldstpair_off
      || inst->opcode->iclass == loadlit)
    {
      /* size: pair/literal forms use a 2-bit size field.  */
      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_S: value = 0; break;
	case AARCH64_OPND_QLF_S_D: value = 1; break;
	case AARCH64_OPND_QLF_S_Q: value = 2; break;
	default: return false;
	}
      insert_field (FLD_ldst_size, code, value, 0);
    }
  else
    {
      /* opc[1]:size */
      value = aarch64_get_qualifier_standard_value (info->qualifier);
      insert_fields (code, value, 0, 2, FLD_ldst_size, FLD_opc1);
    }

  return true;
}
635 /* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
bool
aarch64_ins_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
			 const aarch64_opnd_info *info, aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Rn: only the base register is encoded; the optional #0 offset
     contributes nothing.  */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  return true;
}
647 /* Encode the address operand for e.g.
648 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
bool
aarch64_ins_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
			 const aarch64_opnd_info *info, aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn S;
  enum aarch64_modifier_kind kind = info->shifter.kind;

  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* Rm */
  insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
  /* option */
  if (kind == AARCH64_MOD_LSL)
    kind = AARCH64_MOD_UXTX;	/* Trick to enable the table-driven.  */
  insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
  /* S */
  if (info->qualifier != AARCH64_OPND_QLF_S_B)
    S = info->shifter.amount != 0;
  else
    /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}},
       S	<amount>
       0	[absent]
       1	#0
       Must be #0 if <extend> is explicitly LSL.  */
    S = info->shifter.operator_present && info->shifter.amount_present;
  insert_field (FLD_S, code, S, 0);

  return true;
}
681 /* Encode the address operand for e.g.
682 stlur <Xt>, [<Xn|SP>{, <amount>}]. */
bool
aarch64_ins_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
			 const aarch64_opnd_info *info, aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Rn */
  insert_field (self->fields[0], code, info->addr.base_regno, 0);

  /* simm9 */
  int imm = info->addr.offset.imm;
  insert_field (self->fields[1], code, imm, 0);

  /* writeback */
  if (info->addr.writeback)
    {
      /* Only pre-indexed writeback is valid for this address form.  */
      assert (info->addr.preind == 1 && info->addr.postind == 0);
      insert_field (self->fields[2], code, 1, 0);
    }
  return true;
}
705 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!. */
bool
aarch64_ins_addr_simm (const aarch64_operand *self,
		       const aarch64_opnd_info *info,
		       aarch64_insn *code,
		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int imm;

  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* simm (imm9 or imm7) */
  imm = info->addr.offset.imm;
  if (self->fields[0] == FLD_imm7
      || info->qualifier == AARCH64_OPND_QLF_imm_tag)
    /* scaled immediate in ld/st pair instructions..  */
    imm >>= get_logsz (aarch64_get_qualifier_esize (info->qualifier));
  insert_field (self->fields[0], code, imm, 0);
  /* pre/post- index */
  if (info->addr.writeback)
    {
      /* These instruction classes never take writeback.  */
      assert (inst->opcode->iclass != ldst_unscaled
	      && inst->opcode->iclass != ldstnapair_offs
	      && inst->opcode->iclass != ldstpair_off
	      && inst->opcode->iclass != ldst_unpriv);
      assert (info->addr.preind != info->addr.postind);
      if (info->addr.preind)
	insert_field (self->fields[1], code, 1, 0);
    }

  return true;
}
739 /* Encode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
bool
aarch64_ins_addr_simm10 (const aarch64_operand *self,
			 const aarch64_opnd_info *info,
			 aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int imm;

  /* Rn */
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  /* simm10: the offset is a multiple of 8, split into a high bit
     (fields[1]) and the low nine bits (fields[2]).  */
  imm = info->addr.offset.imm >> 3;
  insert_field (self->fields[1], code, imm >> 9, 0);
  insert_field (self->fields[2], code, imm, 0);
  /* writeback */
  if (info->addr.writeback)
    {
      assert (info->addr.preind == 1 && info->addr.postind == 0);
      insert_field (self->fields[3], code, 1, 0);
    }
  return true;
}
764 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}]. */
bool
aarch64_ins_addr_uimm12 (const aarch64_operand *self,
			 const aarch64_opnd_info *info,
			 aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* The offset is scaled by the access size implied by the qualifier.  */
  int shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));

  /* Rn */
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  /* uimm12 */
  insert_field (self->fields[1], code, info->addr.offset.imm >> shift, 0);
  return true;
}
781 /* Encode the address operand for e.g.
782 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
783 bool
784 aarch64_ins_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
785 const aarch64_opnd_info *info, aarch64_insn *code,
786 const aarch64_inst *inst ATTRIBUTE_UNUSED,
787 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
789 /* Rn */
790 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
791 /* Rm | #<amount> */
792 if (info->addr.offset.is_reg)
793 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
794 else
795 insert_field (FLD_Rm, code, 0x1f, 0);
796 return true;
799 /* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
bool
aarch64_ins_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
		  const aarch64_opnd_info *info, aarch64_insn *code,
		  const aarch64_inst *inst ATTRIBUTE_UNUSED,
		  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* cond */
  insert_field (FLD_cond, code, info->cond->value, 0);
  return true;
}
811 /* Encode the system register operand for e.g. MRS <Xt>, <systemreg>. */
812 bool
813 aarch64_ins_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
814 const aarch64_opnd_info *info, aarch64_insn *code,
815 const aarch64_inst *inst,
816 aarch64_operand_error *detail ATTRIBUTE_UNUSED)
818 /* If a system instruction check if we have any restrictions on which
819 registers it can use. */
820 if (inst->opcode->iclass == ic_system)
822 uint64_t opcode_flags
823 = inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE);
824 uint32_t sysreg_flags
825 = info->sysreg.flags & (F_REG_READ | F_REG_WRITE);
827 /* Check to see if it's read-only, else check if it's write only.
828 if it's both or unspecified don't care. */
829 if (opcode_flags == F_SYS_READ
830 && sysreg_flags
831 && sysreg_flags != F_REG_READ)
833 detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
834 detail->error = _("specified register cannot be read from");
835 detail->index = info->idx;
836 detail->non_fatal = true;
838 else if (opcode_flags == F_SYS_WRITE
839 && sysreg_flags
840 && sysreg_flags != F_REG_WRITE)
842 detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
843 detail->error = _("specified register cannot be written to");
844 detail->index = info->idx;
845 detail->non_fatal = true;
848 /* op0:op1:CRn:CRm:op2 */
849 insert_fields (code, info->sysreg.value, inst->opcode->mask, 5,
850 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1, FLD_op0);
851 return true;
854 /* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
855 bool
856 aarch64_ins_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
857 const aarch64_opnd_info *info, aarch64_insn *code,
858 const aarch64_inst *inst ATTRIBUTE_UNUSED,
859 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
861 /* op1:op2 */
862 insert_fields (code, info->pstatefield, inst->opcode->mask, 2,
863 FLD_op2, FLD_op1);
865 /* Extra CRm mask. */
866 if (info->sysreg.flags | F_REG_IN_CRM)
867 insert_field (FLD_CRm, code, PSTATE_DECODE_CRM (info->sysreg.flags), 0);
868 return true;
871 /* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
bool
aarch64_ins_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
		       const aarch64_opnd_info *info, aarch64_insn *code,
		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* op1:CRn:CRm:op2 — least significant field (op2) first.  */
  insert_fields (code, info->sysins_op->value, inst->opcode->mask, 4,
		 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1);
  return true;
}
884 /* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
bool
aarch64_ins_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
		     const aarch64_opnd_info *info, aarch64_insn *code,
		     const aarch64_inst *inst ATTRIBUTE_UNUSED,
		     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* CRm holds the 4-bit barrier option value.  */
  insert_field (FLD_CRm, code, info->barrier->value, 0);
  return true;
}
897 /* Encode the memory barrier option operand for DSB <option>nXS|#<imm>. */
bool
aarch64_ins_barrier_dsb_nxs (const aarch64_operand *self ATTRIBUTE_UNUSED,
			     const aarch64_opnd_info *info, aarch64_insn *code,
			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
			     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* For the DSB nXS barrier variant: is a 5-bit unsigned immediate,
     encoded in CRm<3:2>.  The option values are 16/20/24/28, which map
     to 0..3 via (value >> 2) - 4.  */
  aarch64_insn value = (info->barrier->value >> 2) - 4;
  insert_field (FLD_CRm_dsb_nxs, code, value, 0);
  return true;
}
912 /* Encode the prefetch operation option operand for e.g.
913 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
bool
aarch64_ins_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
		   const aarch64_opnd_info *info, aarch64_insn *code,
		   const aarch64_inst *inst ATTRIBUTE_UNUSED,
		   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* prfop in Rt: the prefetch operation reuses the Rt register field.  */
  insert_field (FLD_Rt, code, info->prfop->value, 0);
  return true;
}
926 /* Encode the hint number for instructions that alias HINT but take an
927 operand. */
bool
aarch64_ins_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
		  const aarch64_opnd_info *info, aarch64_insn *code,
		  const aarch64_inst *inst ATTRIBUTE_UNUSED,
		  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* CRm:op2 — least significant field (op2) first.  */
  insert_fields (code, info->hint_option->value, 0, 2, FLD_op2, FLD_CRm);
  return true;
}
940 /* Encode the extended register operand for e.g.
941 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
942 bool
943 aarch64_ins_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
944 const aarch64_opnd_info *info, aarch64_insn *code,
945 const aarch64_inst *inst ATTRIBUTE_UNUSED,
946 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
948 enum aarch64_modifier_kind kind;
950 /* Rm */
951 insert_field (FLD_Rm, code, info->reg.regno, 0);
952 /* option */
953 kind = info->shifter.kind;
954 if (kind == AARCH64_MOD_LSL)
955 kind = info->qualifier == AARCH64_OPND_QLF_W
956 ? AARCH64_MOD_UXTW : AARCH64_MOD_UXTX;
957 insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
958 /* imm3 */
959 insert_field (FLD_imm3_10, code, info->shifter.amount, 0);
961 return true;
964 /* Encode the shifted register operand for e.g.
965 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
966 bool
967 aarch64_ins_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
968 const aarch64_opnd_info *info, aarch64_insn *code,
969 const aarch64_inst *inst ATTRIBUTE_UNUSED,
970 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
972 /* Rm */
973 insert_field (FLD_Rm, code, info->reg.regno, 0);
974 /* shift */
975 insert_field (FLD_shift, code,
976 aarch64_get_operand_modifier_value (info->shifter.kind), 0);
977 /* imm6 */
978 insert_field (FLD_imm6_10, code, info->shifter.amount, 0);
980 return true;
983 /* Encode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
984 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
985 SELF's operand-dependent value. fields[0] specifies the field that
986 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
987 bool
988 aarch64_ins_sve_addr_ri_s4xvl (const aarch64_operand *self,
989 const aarch64_opnd_info *info,
990 aarch64_insn *code,
991 const aarch64_inst *inst ATTRIBUTE_UNUSED,
992 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
994 int factor = 1 + get_operand_specific_data (self);
995 insert_field (self->fields[0], code, info->addr.base_regno, 0);
996 insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
997 return true;
/* Encode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
   where <simm6> is a 6-bit signed value and where <factor> is 1 plus
   SELF's operand-dependent value.  fields[0] specifies the field that
   holds <base>.  <simm6> is encoded in the SVE_imm6 field.  */
bool
aarch64_ins_sve_addr_ri_s6xvl (const aarch64_operand *self,
			       const aarch64_opnd_info *info,
			       aarch64_insn *code,
			       const aarch64_inst *inst ATTRIBUTE_UNUSED,
			       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* The stored offset is a multiple of FACTOR; scale down for encoding.  */
  int factor = 1 + get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
  return true;
}
/* Encode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
   where <simm9> is a 9-bit signed value and where <factor> is 1 plus
   SELF's operand-dependent value.  fields[0] specifies the field that
   holds <base>.  <simm9> is encoded in the concatenation of the SVE_imm6
   and imm3 fields, with imm3 being the less-significant part.  */
bool
aarch64_ins_sve_addr_ri_s9xvl (const aarch64_operand *self,
			       const aarch64_opnd_info *info,
			       aarch64_insn *code,
			       const aarch64_inst *inst ATTRIBUTE_UNUSED,
			       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int factor = 1 + get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  /* insert_fields wants the least-significant field (imm3) first.  */
  insert_fields (code, info->addr.offset.imm / factor, 0,
		 2, FLD_imm3_10, FLD_SVE_imm6);
  return true;
}
/* Encode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
   is a 4-bit signed number and where <shift> is SELF's operand-dependent
   value.  fields[0] specifies the base register field.  */
bool
aarch64_ins_sve_addr_ri_s4 (const aarch64_operand *self,
			    const aarch64_opnd_info *info, aarch64_insn *code,
			    const aarch64_inst *inst ATTRIBUTE_UNUSED,
			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* The offset is a multiple of 1 << <shift>; undo the scaling to encode.  */
  int factor = 1 << get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
  return true;
}
/* Encode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
   is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
   value.  fields[0] specifies the base register field.  */
bool
aarch64_ins_sve_addr_ri_u6 (const aarch64_operand *self,
			    const aarch64_opnd_info *info, aarch64_insn *code,
			    const aarch64_inst *inst ATTRIBUTE_UNUSED,
			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* The offset is a multiple of 1 << <shift>; undo the scaling to encode.  */
  int factor = 1 << get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
  return true;
}
/* Encode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
   is SELF's operand-dependent value.  fields[0] specifies the base
   register field and fields[1] specifies the offset register field.
   The shift amount itself is implied by the opcode, so only the two
   register numbers are inserted here.  */
bool
aarch64_ins_sve_addr_rr_lsl (const aarch64_operand *self,
			     const aarch64_opnd_info *info, aarch64_insn *code,
			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
			     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (self->fields[1], code, info->addr.offset.regno, 0);
  return true;
}
/* Encode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
   <shift> is SELF's operand-dependent value.  fields[0] specifies the
   base register field, fields[1] specifies the offset register field and
   fields[2] is a single-bit field that selects SXTW over UXTW.  */
bool
aarch64_ins_sve_addr_rz_xtw (const aarch64_operand *self,
			     const aarch64_opnd_info *info, aarch64_insn *code,
			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
			     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (self->fields[1], code, info->addr.offset.regno, 0);
  /* 0 selects UXTW, 1 selects SXTW.  */
  if (info->shifter.kind == AARCH64_MOD_UXTW)
    insert_field (self->fields[2], code, 0, 0);
  else
    insert_field (self->fields[2], code, 1, 0);
  return true;
}
/* Encode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
   5-bit unsigned number and where <shift> is SELF's operand-dependent value.
   fields[0] specifies the base register field.  */
bool
aarch64_ins_sve_addr_zi_u5 (const aarch64_operand *self,
			    const aarch64_opnd_info *info, aarch64_insn *code,
			    const aarch64_inst *inst ATTRIBUTE_UNUSED,
			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* The offset is a multiple of 1 << <shift>; undo the scaling to encode.  */
  int factor = 1 << get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (FLD_imm5, code, info->addr.offset.imm / factor, 0);
  return true;
}
/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
   where <modifier> is fixed by the instruction and where <msz> is a
   2-bit unsigned number.  fields[0] specifies the base register field
   and fields[1] specifies the offset register field.

   NOTE(review): despite the "ext" in the name this is an *insert*
   helper; the name is kept since the three wrappers below call it.  */
static bool
aarch64_ext_sve_addr_zz (const aarch64_operand *self,
			 const aarch64_opnd_info *info, aarch64_insn *code,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (self->fields[1], code, info->addr.offset.regno, 0);
  /* The shift amount goes in the SVE msz field.  */
  insert_field (FLD_SVE_msz, code, info->shifter.amount, 0);
  return true;
}
/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  Thin wrapper
   around the shared Z+Z address helper.  */
bool
aarch64_ins_sve_addr_zz_lsl (const aarch64_operand *self,
			     const aarch64_opnd_info *info, aarch64_insn *code,
			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
			     aarch64_operand_error *errors)
{
  return aarch64_ext_sve_addr_zz (self, info, code, errors);
}
/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  Thin wrapper
   around the shared Z+Z address helper.  */
bool
aarch64_ins_sve_addr_zz_sxtw (const aarch64_operand *self,
			      const aarch64_opnd_info *info,
			      aarch64_insn *code,
			      const aarch64_inst *inst ATTRIBUTE_UNUSED,
			      aarch64_operand_error *errors)
{
  return aarch64_ext_sve_addr_zz (self, info, code, errors);
}
/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  Thin wrapper
   around the shared Z+Z address helper.  */
bool
aarch64_ins_sve_addr_zz_uxtw (const aarch64_operand *self,
			      const aarch64_opnd_info *info,
			      aarch64_insn *code,
			      const aarch64_inst *inst ATTRIBUTE_UNUSED,
			      aarch64_operand_error *errors)
{
  return aarch64_ext_sve_addr_zz (self, info, code, errors);
}
/* Encode an SVE ADD/SUB immediate.  The encoded value is sh:imm8,
   where bit 8 (the "| 256" below) selects the "<imm8>, LSL #8" form.  */
bool
aarch64_ins_sve_aimm (const aarch64_operand *self,
		      const aarch64_opnd_info *info, aarch64_insn *code,
		      const aarch64_inst *inst ATTRIBUTE_UNUSED,
		      aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  if (info->shifter.amount == 8)
    /* Explicit LSL #8: encode the low byte with the shift bit set.  */
    insert_all_fields (self, code, (info->imm.value & 0xff) | 256);
  else if (info->imm.value != 0 && (info->imm.value & 0xff) == 0)
    /* A nonzero multiple of 256 with no explicit shift: use the
       shifted form with the value scaled down by 256.  */
    insert_all_fields (self, code, ((info->imm.value / 256) & 0xff) | 256);
  else
    /* Plain unshifted 8-bit immediate.  */
    insert_all_fields (self, code, info->imm.value & 0xff);
  return true;
}
/* Encode a register list whose first register number is a multiple of the
   list length; only first_regno / num_regs is stored in fields[0].  */
bool
aarch64_ins_sve_aligned_reglist (const aarch64_operand *self,
				 const aarch64_opnd_info *info,
				 aarch64_insn *code,
				 const aarch64_inst *inst ATTRIBUTE_UNUSED,
				 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Number of registers in the list, from the operand-specific data.  */
  unsigned int num_regs = get_operand_specific_data (self);
  unsigned int val = info->reglist.first_regno;
  insert_field (self->fields[0], code, val / num_regs, 0);
  return true;
}
/* Encode an SVE CPY/DUP immediate.  Shares the sh:imm8 encoding used by
   the ADD/SUB immediate, so simply forward to that routine.  */
bool
aarch64_ins_sve_asimm (const aarch64_operand *self,
		       const aarch64_opnd_info *info, aarch64_insn *code,
		       const aarch64_inst *inst,
		       aarch64_operand_error *errors)
{
  return aarch64_ins_sve_aimm (self, info, code, inst, errors);
}
/* Encode Zn[MM], where MM has a 7-bit triangular encoding.  The fields
   array specifies which field to use for Zn.  MM is encoded in the
   concatenation of imm5 and SVE_tszh, with imm5 being the less
   significant part.  */
bool
aarch64_ins_sve_index (const aarch64_operand *self,
		       const aarch64_opnd_info *info, aarch64_insn *code,
		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  unsigned int esize = aarch64_get_qualifier_esize (info->qualifier);
  insert_field (self->fields[0], code, info->reglane.regno, 0);
  /* (index * 2 + 1) * esize places a marker 1-bit determined by the
     element size below the index bits (the triangular encoding);
     least-significant field (imm5) is passed first.  */
  insert_fields (code, (info->reglane.index * 2 + 1) * esize, 0,
		 2, FLD_imm5, FLD_SVE_tszh);
  return true;
}
/* Encode Zn.<T>[<imm>], where <imm> is an immediate with range of 0 to one less
   than the number of elements in 128 bit, which can encode il:tsz.  */
bool
aarch64_ins_sve_index_imm (const aarch64_operand *self,
			   const aarch64_opnd_info *info, aarch64_insn *code,
			   const aarch64_inst *inst ATTRIBUTE_UNUSED,
			   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, info->reglane.regno, 0);
  unsigned int esize = aarch64_get_qualifier_esize (info->qualifier);
  /* Same triangular encoding as aarch64_ins_sve_index, but the two index
     fields come from the operand description (fields[1] is the
     less-significant part).  */
  insert_fields (code, (info->reglane.index * 2 + 1) * esize, 0,
		 2, self->fields[1], self->fields[2]);
  return true;
}
/* Encode a logical/bitmask immediate for the MOV alias of SVE DUPM.
   Uses the standard logical-immediate encoder.  */
bool
aarch64_ins_sve_limm_mov (const aarch64_operand *self,
			  const aarch64_opnd_info *info, aarch64_insn *code,
			  const aarch64_inst *inst,
			  aarch64_operand_error *errors)
{
  return aarch64_ins_limm (self, info, code, inst, errors);
}
/* Encode Zn[MM], where Zn occupies the least-significant part of the field
   and where MM occupies the most-significant part.  The operand-dependent
   value specifies the number of bits in Zn.  */
bool
aarch64_ins_sve_quad_index (const aarch64_operand *self,
			    const aarch64_opnd_info *info, aarch64_insn *code,
			    const aarch64_inst *inst ATTRIBUTE_UNUSED,
			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  unsigned int reg_bits = get_operand_specific_data (self);
  /* The register number must fit in its allotted bits.  */
  assert (info->reglane.regno < (1U << reg_bits));
  unsigned int val = (info->reglane.index << reg_bits) + info->reglane.regno;
  insert_all_fields (self, code, val);
  return true;
}
/* Encode {Zn.<T> - Zm.<T>}.  The fields array specifies which field
   to use for Zn.  Only the first register number is encoded; the list
   length is implied by the opcode.  */
bool
aarch64_ins_sve_reglist (const aarch64_operand *self,
			 const aarch64_opnd_info *info, aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, info->reglist.first_regno, 0);
  return true;
}
/* Encode a strided register list.  The first field holds the top bit
   (0 or 16) and the second field holds the lower bits.  The stride is
   16 divided by the list length.  */
bool
aarch64_ins_sve_strided_reglist (const aarch64_operand *self,
				 const aarch64_opnd_info *info,
				 aarch64_insn *code,
				 const aarch64_inst *inst ATTRIBUTE_UNUSED,
				 aarch64_operand_error *errors
				   ATTRIBUTE_UNUSED)
{
  unsigned int num_regs = get_operand_specific_data (self);
  /* Bits that a valid first register may use: bit 4 plus the low bits
     allowed by the stride.  */
  unsigned int mask = 16 | (16 / num_regs - 1);
  unsigned int val = info->reglist.first_regno;
  assert ((val & mask) == val);
  /* fields[0] gets the top bit, fields[1] the low four bits.  */
  insert_field (self->fields[0], code, val >> 4, 0);
  insert_field (self->fields[1], code, val & 15, 0);
  return true;
}
/* Encode <pattern>{, MUL #<amount>}.  The fields array specifies which
   fields to use for <pattern>.  <amount> - 1 is encoded in the SVE_imm4
   field.  */
bool
aarch64_ins_sve_scale (const aarch64_operand *self,
		       const aarch64_opnd_info *info, aarch64_insn *code,
		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_all_fields (self, code, info->imm.value);
  /* MUL amount is biased by one so that 1..16 fits in four bits.  */
  insert_field (FLD_SVE_imm4, code, info->shifter.amount - 1, 0);
  return true;
}
/* Encode an SVE shift left immediate.  The shift is encoded relative to
   the element size, which is taken from the previous operand's
   qualifier: value = 8 * esize + amount.  */
bool
aarch64_ins_sve_shlimm (const aarch64_operand *self,
			const aarch64_opnd_info *info, aarch64_insn *code,
			const aarch64_inst *inst,
			aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  const aarch64_opnd_info *prev_operand;
  unsigned int esize;

  /* This operand cannot be first: it needs a predecessor to supply the
     element size.  */
  assert (info->idx > 0);
  prev_operand = &inst->operands[info->idx - 1];
  esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
  insert_all_fields (self, code, 8 * esize + info->imm.value);
  return true;
}
/* Encode an SVE shift right immediate.  The shift is encoded relative to
   the element size of an earlier operand: value = 16 * esize - amount.
   The operand-specific data says how many operands back to look for
   that element size.  */
bool
aarch64_ins_sve_shrimm (const aarch64_operand *self,
			const aarch64_opnd_info *info, aarch64_insn *code,
			const aarch64_inst *inst,
			aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  const aarch64_opnd_info *prev_operand;
  unsigned int esize;

  /* Distance (in operands) back to the one carrying the qualifier.  */
  unsigned int opnd_backshift = get_operand_specific_data (self);
  assert (info->idx >= (int) opnd_backshift);
  prev_operand = &inst->operands[info->idx - opnd_backshift];
  esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
  insert_all_fields (self, code, 16 * esize - info->imm.value);
  return true;
}
1345 /* Encode a single-bit immediate that selects between #0.5 and #1.0.
1346 The fields array specifies which field to use. */
1347 bool
1348 aarch64_ins_sve_float_half_one (const aarch64_operand *self,
1349 const aarch64_opnd_info *info,
1350 aarch64_insn *code,
1351 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1352 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1354 if (info->imm.value == 0x3f000000)
1355 insert_field (self->fields[0], code, 0, 0);
1356 else
1357 insert_field (self->fields[0], code, 1, 0);
1358 return true;
1361 /* Encode a single-bit immediate that selects between #0.5 and #2.0.
1362 The fields array specifies which field to use. */
1363 bool
1364 aarch64_ins_sve_float_half_two (const aarch64_operand *self,
1365 const aarch64_opnd_info *info,
1366 aarch64_insn *code,
1367 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1368 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1370 if (info->imm.value == 0x3f000000)
1371 insert_field (self->fields[0], code, 0, 0);
1372 else
1373 insert_field (self->fields[0], code, 1, 0);
1374 return true;
1377 /* Encode a single-bit immediate that selects between #0.0 and #1.0.
1378 The fields array specifies which field to use. */
1379 bool
1380 aarch64_ins_sve_float_zero_one (const aarch64_operand *self,
1381 const aarch64_opnd_info *info,
1382 aarch64_insn *code,
1383 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1384 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1386 if (info->imm.value == 0)
1387 insert_field (self->fields[0], code, 0, 0);
1388 else
1389 insert_field (self->fields[0], code, 1, 0);
1390 return true;
/* Encode an SME ZA vector-range select operand (variant 1): the V
   (horizontal/vertical) bit, the index register, and a ZA/offset
   encoding that depends on the element-size qualifier.  */
bool
aarch64_ins_sme_za_vrs1 (const aarch64_operand *self,
			 const aarch64_opnd_info *info,
			 aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int za_reg = info->indexed_za.regno;
  /* Only the low two bits of the index register are encoded —
     presumably the register is constrained to a four-register window;
     TODO confirm against the operand's parser.  */
  int regno = info->indexed_za.index.regno & 3;
  int imm = info->indexed_za.index.imm;
  int v = info->indexed_za.v;
  int countm1 = info->indexed_za.index.countm1;

  insert_field (self->fields[0], code, v, 0);
  insert_field (self->fields[1], code, regno, 0);
  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      /* Byte: no tile number, just the scaled offset.  */
      insert_field (self->fields[2], code, imm / (countm1 + 1), 0);
      break;
    case AARCH64_OPND_QLF_S_H:
    case AARCH64_OPND_QLF_S_S:
      /* Halfword/word: tile number plus scaled offset.  */
      insert_field (self->fields[2], code, za_reg, 0);
      insert_field (self->fields[3], code, imm / (countm1 + 1), 0);
      break;
    case AARCH64_OPND_QLF_S_D:
      /* Doubleword: tile number only.  */
      insert_field (self->fields[2], code, za_reg, 0);
      break;
    default:
      return false;
    }

  return true;
}
/* Encode an SME ZA vector-range select operand (variant 2).  Like
   aarch64_ins_sme_za_vrs1 but with a different qualifier split: here
   only the byte and halfword forms carry a scaled offset.  */
bool
aarch64_ins_sme_za_vrs2 (const aarch64_operand *self,
			 const aarch64_opnd_info *info,
			 aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int za_reg = info->indexed_za.regno;
  /* Only the low two bits of the index register are encoded —
     presumably the register is constrained to a four-register window;
     TODO confirm against the operand's parser.  */
  int regno = info->indexed_za.index.regno & 3;
  int imm = info->indexed_za.index.imm;
  int v = info->indexed_za.v;
  int countm1 = info->indexed_za.index.countm1;

  insert_field (self->fields[0], code, v, 0);
  insert_field (self->fields[1], code, regno, 0);
  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      /* Byte: no tile number, just the scaled offset.  */
      insert_field (self->fields[2], code, imm / (countm1 + 1), 0);
      break;
    case AARCH64_OPND_QLF_S_H:
      /* Halfword: tile number plus scaled offset.  */
      insert_field (self->fields[2], code, za_reg, 0);
      insert_field (self->fields[3], code, imm / (countm1 + 1), 0);
      break;
    case AARCH64_OPND_QLF_S_S:
    case AARCH64_OPND_QLF_S_D:
      /* Word/doubleword: tile number only.  */
      insert_field (self->fields[2], code, za_reg, 0);
      break;
    default:
      return false;
    }

  return true;
}
/* Encode in SME instruction such as MOVA ZA tile vector register number,
   vector indicator, vector selector and immediate.  The tile number and
   immediate share one field (zan_imm), packed differently per element
   size.  */
bool
aarch64_ins_sme_za_hv_tiles (const aarch64_operand *self,
			     const aarch64_opnd_info *info,
			     aarch64_insn *code,
			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
			     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int fld_size;
  int fld_q;
  int fld_v = info->indexed_za.v;
  /* Index register is W12-W15, encoded as a two-bit offset from W12.  */
  int fld_rv = info->indexed_za.index.regno - 12;
  int fld_zan_imm = info->indexed_za.index.imm;
  int regno = info->indexed_za.regno;

  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      /* Byte: single tile ZA0, so zan_imm is the immediate alone.  */
      fld_size = 0;
      fld_q = 0;
      break;
    case AARCH64_OPND_QLF_S_H:
      /* Halfword: 1 tile bit above a 3-bit immediate.  */
      fld_size = 1;
      fld_q = 0;
      fld_zan_imm |= regno << 3;
      break;
    case AARCH64_OPND_QLF_S_S:
      /* Word: 2 tile bits above a 2-bit immediate.  */
      fld_size = 2;
      fld_q = 0;
      fld_zan_imm |= regno << 2;
      break;
    case AARCH64_OPND_QLF_S_D:
      /* Doubleword: 3 tile bits above a 1-bit immediate.  */
      fld_size = 3;
      fld_q = 0;
      fld_zan_imm |= regno << 1;
      break;
    case AARCH64_OPND_QLF_S_Q:
      /* Quadword: tile number only, flagged by the Q bit.  */
      fld_size = 3;
      fld_q = 1;
      fld_zan_imm = regno;
      break;
    default:
      return false;
    }

  insert_field (self->fields[0], code, fld_size, 0);
  insert_field (self->fields[1], code, fld_q, 0);
  insert_field (self->fields[2], code, fld_v, 0);
  insert_field (self->fields[3], code, fld_rv, 0);
  insert_field (self->fields[4], code, fld_zan_imm, 0);

  return true;
}
/* Encode an SME ZA tile slice *range* (e.g. MOVA variants operating on
   several consecutive slices): V bit, index register W12-W15, and a
   combined tile-number/immediate field.  */
bool
aarch64_ins_sme_za_hv_tiles_range (const aarch64_operand *self,
				   const aarch64_opnd_info *info,
				   aarch64_insn *code,
				   const aarch64_inst *inst ATTRIBUTE_UNUSED,
				   aarch64_operand_error *errors
				     ATTRIBUTE_UNUSED)
{
  int ebytes = aarch64_get_qualifier_esize (info->qualifier);
  /* Number of slices in the range, from the opcode.  */
  int range_size = get_opcode_dependent_value (inst->opcode);
  int fld_v = info->indexed_za.v;
  int fld_rv = info->indexed_za.index.regno - 12;
  int imm = info->indexed_za.index.imm;
  int max_value = 16 / range_size / ebytes;

  if (max_value == 0)
    max_value = 1;

  /* The immediate must be an aligned multiple of the range size.  */
  assert (imm % range_size == 0 && (imm / range_size) < max_value);
  int fld_zan_imm = (info->indexed_za.regno * max_value) | (imm / range_size);
  assert (fld_zan_imm < (range_size == 4 && ebytes < 8 ? 4 : 8));

  insert_field (self->fields[0], code, fld_v, 0);
  insert_field (self->fields[1], code, fld_rv, 0);
  insert_field (self->fields[2], code, fld_zan_imm, 0);

  return true;
}
/* Encode in SME instruction ZERO list of up to eight 64-bit element tile names
   separated by commas, encoded in the "imm8" field.

   For programmer convenience an assembler must also accept the names of
   32-bit, 16-bit and 8-bit element tiles which are converted into the
   corresponding set of 64-bit element tiles.  By the time we get here the
   list has already been reduced to an 8-bit mask in imm.value.  */
bool
aarch64_ins_sme_za_list (const aarch64_operand *self,
			 const aarch64_opnd_info *info,
			 aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int fld_mask = info->imm.value;
  insert_field (self->fields[0], code, fld_mask, 0);
  return true;
}
/* Encode an SME ZA array operand: index register plus a scaled
   immediate offset.  */
bool
aarch64_ins_sme_za_array (const aarch64_operand *self,
			  const aarch64_opnd_info *info,
			  aarch64_insn *code,
			  const aarch64_inst *inst ATTRIBUTE_UNUSED,
			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Only the low two bits of the index register are encoded —
     presumably the register is constrained to a four-register window;
     TODO confirm against the operand's parser.  */
  int regno = info->indexed_za.index.regno & 3;
  int imm = info->indexed_za.index.imm;
  int countm1 = info->indexed_za.index.countm1;
  /* The immediate must be a multiple of the vector-group count.  */
  assert (imm % (countm1 + 1) == 0);
  insert_field (self->fields[0], code, regno, 0);
  insert_field (self->fields[1], code, imm / (countm1 + 1), 0);
  return true;
}
/* Encode an SME address [<base>, #<imm>, MUL VL]: base register in
   fields[0], unscaled immediate in fields[1].  */
bool
aarch64_ins_sme_addr_ri_u4xvl (const aarch64_operand *self,
			       const aarch64_opnd_info *info,
			       aarch64_insn *code,
			       const aarch64_inst *inst ATTRIBUTE_UNUSED,
			       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int regno = info->addr.base_regno;
  int imm = info->addr.offset.imm;
  insert_field (self->fields[0], code, regno, 0);
  insert_field (self->fields[1], code, imm, 0);
  return true;
}
/* Encode in SMSTART and SMSTOP {SM | ZA } mode.  Note that for this
   operand reg.regno holds the character 's' or 'z', not a register
   number.  */
bool
aarch64_ins_sme_sm_za (const aarch64_operand *self,
		       const aarch64_opnd_info *info,
		       aarch64_insn *code,
		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn fld_crm;
  /* Set CRm[3:1] bits.  */
  if (info->reg.regno == 's')
    fld_crm = 0x02 ; /* SVCRSM.  */
  else if (info->reg.regno == 'z')
    fld_crm = 0x04; /* SVCRZA.  */
  else
    return false;

  insert_field (self->fields[0], code, fld_crm, 0);
  return true;
}
/* Encode source scalable predicate register (Pn), name of the index base
   register W12-W15 (Rm), and optional element index, defaulting to 0, in the
   range 0 to one less than the number of vector elements in a 128-bit vector
   register, encoded in "i1:tszh:tszl".  */
bool
aarch64_ins_sme_pred_reg_with_index (const aarch64_operand *self,
				     const aarch64_opnd_info *info,
				     aarch64_insn *code,
				     const aarch64_inst *inst ATTRIBUTE_UNUSED,
				     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int fld_pn = info->indexed_za.regno;
  /* Rm is encoded as an offset from W12.  */
  int fld_rm = info->indexed_za.index.regno - 12;
  int imm = info->indexed_za.index.imm;
  int fld_i1, fld_tszh, fld_tshl;

  insert_field (self->fields[0], code, fld_rm, 0);
  insert_field (self->fields[1], code, fld_pn, 0);

  /* Optional element index, defaulting to 0, in the range 0 to one less than
     the number of vector elements in a 128-bit vector register, encoded in
     "i1:tszh:tszl".

     i1  tszh  tszl  <T>
     0   0     000   RESERVED
     x   x     xx1   B
     x   x     x10   H
     x   x     100   S
     x   1     000   D  */
  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      /* <imm> is 4 bit value.  */
      fld_i1 = (imm >> 3) & 0x1;
      fld_tszh = (imm >> 2) & 0x1;
      fld_tshl = ((imm << 1) | 0x1) & 0x7;
      break;
    case AARCH64_OPND_QLF_S_H:
      /* <imm> is 3 bit value.  */
      fld_i1 = (imm >> 2) & 0x1;
      fld_tszh = (imm >> 1) & 0x1;
      fld_tshl = ((imm << 2) | 0x2) & 0x7;
      break;
    case AARCH64_OPND_QLF_S_S:
      /* <imm> is 2 bit value.  */
      fld_i1 = (imm >> 1) & 0x1;
      fld_tszh = imm & 0x1;
      fld_tshl = 0x4;
      break;
    case AARCH64_OPND_QLF_S_D:
      /* <imm> is 1 bit value.  */
      fld_i1 = imm & 0x1;
      fld_tszh = 0x1;
      fld_tshl = 0x0;
      break;
    default:
      return false;
    }

  insert_field (self->fields[2], code, fld_i1, 0);
  insert_field (self->fields[3], code, fld_tszh, 0);
  insert_field (self->fields[4], code, fld_tshl, 0);
  return true;
}
/* Insert X0-X30.  Register 31 is unallocated.  */
bool
aarch64_ins_x0_to_x30 (const aarch64_operand *self,
		       const aarch64_opnd_info *info,
		       aarch64_insn *code,
		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* 31 would normally mean XZR/SP; it is invalid for this operand.  */
  assert (info->reg.regno <= 30);
  insert_field (self->fields[0], code, info->reg.regno, 0);
  return true;
}
/* Insert an indexed register, with the first field being the register
   number and the remaining fields being the index.  The register number
   is stored with an operand-dependent bias subtracted.  */
bool
aarch64_ins_simple_index (const aarch64_operand *self,
			  const aarch64_opnd_info *info,
			  aarch64_insn *code,
			  const aarch64_inst *inst ATTRIBUTE_UNUSED,
			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int bias = get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->reglane.regno - bias, 0);
  /* The index spans every field after the first.  */
  insert_all_fields_after (self, 1, code, info->reglane.index);
  return true;
}
/* Insert a plain shift-right immediate, when there is only a single
   element size.  The amount is encoded as (2^width - amount).  */
bool
aarch64_ins_plain_shrimm (const aarch64_operand *self,
			  const aarch64_opnd_info *info, aarch64_insn *code,
			  const aarch64_inst *inst ATTRIBUTE_UNUSED,
			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  unsigned int base = 1 << get_operand_field_width (self, 0);
  insert_field (self->fields[0], code, base - info->imm.value, 0);
  return true;
}
/* Miscellaneous encoding functions.  */

/* Encode size[0], i.e. bit 22, for
     e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.
   The bit is derived from whichever operand carries the wider
   arrangement: the source for FCVTN, the destination for FCVTL.  */
static void
encode_asimd_fcvt (aarch64_inst *inst)
{
  aarch64_insn value;
  aarch64_field field = {0, 0};
  enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_NIL;

  switch (inst->opcode->op)
    {
    case OP_FCVTN:
    case OP_FCVTN2:
      /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */
      qualifier = inst->operands[1].qualifier;
      break;
    case OP_FCVTL:
    case OP_FCVTL2:
      /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>.  */
      qualifier = inst->operands[0].qualifier;
      break;
    default:
      return;
    }
  assert (qualifier == AARCH64_OPND_QLF_V_4S
	  || qualifier == AARCH64_OPND_QLF_V_2D);
  /* 4S -> size[0] = 0, 2D -> size[0] = 1.  */
  value = (qualifier == AARCH64_OPND_QLF_V_4S) ? 0 : 1;
  gen_sub_field (FLD_size, 0, 1, &field);
  insert_field_2 (&field, &inst->value, value, 0);
}
/* Encode size[0], i.e. bit 22, for
     e.g. FCVTXN <Vb><d>, <Va><n>.
   The bit is always 1 here; the only valid destination size is S.  */
static void
encode_asisd_fcvtxn (aarch64_inst *inst)
{
  aarch64_insn val = 1;
  aarch64_field field = {0, 0};
  assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_S_S);
  gen_sub_field (FLD_size, 0, 1, &field);
  insert_field_2 (&field, &inst->value, val, 0);
}
/* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>.
   The two-bit field at bit 15 encodes the destination size.  */
static void
encode_fcvt (aarch64_inst *inst)
{
  aarch64_insn val;
  const aarch64_field field = {15, 2};

  /* opc dstsize */
  switch (inst->operands[0].qualifier)
    {
    case AARCH64_OPND_QLF_S_S: val = 0; break;
    case AARCH64_OPND_QLF_S_D: val = 1; break;
    case AARCH64_OPND_QLF_S_H: val = 3; break;
    default: abort ();
    }
  insert_field_2 (&field, &inst->value, val, 0);

  return;
}
/* Return the index in qualifiers_list that INST is using.  Should only
   be called once the qualifiers are known to be valid (the function
   aborts if no entry matches).  */
static int
aarch64_get_variant (struct aarch64_inst *inst)
{
  int i, nops, variant;

  nops = aarch64_num_of_operands (inst->opcode);
  for (variant = 0; variant < AARCH64_MAX_QLF_SEQ_NUM; ++variant)
    {
      /* A variant matches if every operand's qualifier agrees.  */
      for (i = 0; i < nops; ++i)
	if (inst->opcode->qualifiers_list[variant][i]
	    != inst->operands[i].qualifier)
	  break;
      if (i == nops)
	return variant;
    }
  abort ();
}
/* Do miscellaneous encodings that are not common enough to be driven by
   flags.  Mostly this copies a register field to a second location for
   two-operand aliases of three-operand SVE instructions.  */
static void
do_misc_encoding (aarch64_inst *inst)
{
  unsigned int value;

  switch (inst->opcode->op)
    {
    case OP_FCVT:
      encode_fcvt (inst);
      break;
    case OP_FCVTN:
    case OP_FCVTN2:
    case OP_FCVTL:
    case OP_FCVTL2:
      encode_asimd_fcvt (inst);
      break;
    case OP_FCVTXN_S:
      encode_asisd_fcvtxn (inst);
      break;
    case OP_MOV_P_P:
    case OP_MOV_PN_PN:
    case OP_MOVS_P_P:
      /* Copy Pn to Pm and Pg.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      insert_field (FLD_SVE_Pg4_10, &inst->value, value, 0);
      break;
    case OP_MOV_Z_P_Z:
      /* Copy Zd to Zm.  */
      value = extract_field (FLD_SVE_Zd, inst->value, 0);
      insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
      break;
    case OP_MOV_Z_V:
      /* Fill in the zero immediate.  */
      insert_fields (&inst->value, 1 << aarch64_get_variant (inst), 0,
		     2, FLD_imm5, FLD_SVE_tszh);
      break;
    case OP_MOV_Z_Z:
      /* Copy Zn to Zm.  */
      value = extract_field (FLD_SVE_Zn, inst->value, 0);
      insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
      break;
    case OP_MOV_Z_Zi:
      /* Nothing extra to do for the indexed form.  */
      break;
    case OP_MOVM_P_P_P:
      /* Copy Pd to Pm.  */
      value = extract_field (FLD_SVE_Pd, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    case OP_MOVZS_P_P_P:
    case OP_MOVZ_P_P_P:
      /* Copy Pn to Pm.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    case OP_NOTS_P_P_P_Z:
    case OP_NOT_P_P_P_Z:
      /* Copy Pg to Pm.  */
      value = extract_field (FLD_SVE_Pg4_10, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    default: break;
    }
}
/* Encode the 'size' and 'Q' field for e.g. SHADD.  The standard
   qualifier value packs Q in bit 0 and size in bits 2:1.  */
static void
encode_sizeq (aarch64_inst *inst)
{
  aarch64_insn sizeq;
  enum aarch64_field_kind kind;
  int idx;

  /* Get the index of the operand whose information we are going to use
     to encode the size and Q fields.
     This is deduced from the possible valid qualifier lists.  */
  idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
  DEBUG_TRACE ("idx: %d; qualifier: %s", idx,
	       aarch64_get_qualifier_name (inst->operands[idx].qualifier));
  sizeq = aarch64_get_qualifier_standard_value (inst->operands[idx].qualifier);
  /* Q */
  insert_field (FLD_Q, &inst->value, sizeq & 0x1, inst->opcode->mask);
  /* size: the load/store iclasses use a different field position.  */
  if (inst->opcode->iclass == asisdlse
      || inst->opcode->iclass == asisdlsep
      || inst->opcode->iclass == asisdlso
      || inst->opcode->iclass == asisdlsop)
    kind = FLD_vldst_size;
  else
    kind = FLD_size;
  insert_field (kind, &inst->value, (sizeq >> 1) & 0x3, inst->opcode->mask);
}
1909 /* Opcodes that have fields shared by multiple operands are usually flagged
1910 with flags. In this function, we detect such flags and use the
1911 information in one of the related operands to do the encoding. The 'one'
1912 operand is not any operand but one of the operands that has the enough
1913 information for such an encoding. */
1915 static void
1916 do_special_encoding (struct aarch64_inst *inst)
1918 int idx;
1919 aarch64_insn value = 0;
1921 DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst->value);
1923 /* Condition for truly conditional executed instructions, e.g. b.cond. */
1924 if (inst->opcode->flags & F_COND)
1926 insert_field (FLD_cond2, &inst->value, inst->cond->value, 0);
1928 if (inst->opcode->flags & F_SF)
1930 idx = select_operand_for_sf_field_coding (inst->opcode);
1931 value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
1932 || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
1933 ? 1 : 0;
1934 insert_field (FLD_sf, &inst->value, value, 0);
1935 if (inst->opcode->flags & F_N)
1936 insert_field (FLD_N, &inst->value, value, inst->opcode->mask);
1938 if (inst->opcode->flags & F_LSE_SZ)
1940 idx = select_operand_for_sf_field_coding (inst->opcode);
1941 value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
1942 || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
1943 ? 1 : 0;
1944 insert_field (FLD_lse_sz, &inst->value, value, 0);
1946 if (inst->opcode->flags & F_SIZEQ)
1947 encode_sizeq (inst);
1948 if (inst->opcode->flags & F_FPTYPE)
1950 idx = select_operand_for_fptype_field_coding (inst->opcode);
1951 switch (inst->operands[idx].qualifier)
1953 case AARCH64_OPND_QLF_S_S: value = 0; break;
1954 case AARCH64_OPND_QLF_S_D: value = 1; break;
1955 case AARCH64_OPND_QLF_S_H: value = 3; break;
1956 default: return;
1958 insert_field (FLD_type, &inst->value, value, 0);
1960 if (inst->opcode->flags & F_SSIZE)
1962 enum aarch64_opnd_qualifier qualifier;
1963 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
1964 qualifier = inst->operands[idx].qualifier;
1965 assert (qualifier >= AARCH64_OPND_QLF_S_B
1966 && qualifier <= AARCH64_OPND_QLF_S_Q);
1967 value = aarch64_get_qualifier_standard_value (qualifier);
1968 insert_field (FLD_size, &inst->value, value, inst->opcode->mask);
1970 if (inst->opcode->flags & F_T)
1972 int num; /* num of consecutive '0's on the right side of imm5<3:0>. */
1973 aarch64_field field = {0, 0};
1974 enum aarch64_opnd_qualifier qualifier;
1976 idx = 0;
1977 qualifier = inst->operands[idx].qualifier;
1978 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1979 == AARCH64_OPND_CLASS_SIMD_REG
1980 && qualifier >= AARCH64_OPND_QLF_V_8B
1981 && qualifier <= AARCH64_OPND_QLF_V_2D);
1982 /* imm5<3:0> q <t>
1983 0000 x reserved
1984 xxx1 0 8b
1985 xxx1 1 16b
1986 xx10 0 4h
1987 xx10 1 8h
1988 x100 0 2s
1989 x100 1 4s
1990 1000 0 reserved
1991 1000 1 2d */
1992 value = aarch64_get_qualifier_standard_value (qualifier);
1993 insert_field (FLD_Q, &inst->value, value & 0x1, inst->opcode->mask);
1994 num = (int) value >> 1;
1995 assert (num >= 0 && num <= 3);
1996 gen_sub_field (FLD_imm5, 0, num + 1, &field);
1997 insert_field_2 (&field, &inst->value, 1 << num, inst->opcode->mask);
2000 if ((inst->opcode->flags & F_OPD_SIZE) && inst->opcode->iclass == sve2_urqvs)
2002 enum aarch64_opnd_qualifier qualifier[2];
2003 aarch64_insn value1 = 0;
2004 idx = 0;
2005 qualifier[0] = inst->operands[idx].qualifier;
2006 qualifier[1] = inst->operands[idx+2].qualifier;
2007 value = aarch64_get_qualifier_standard_value (qualifier[0]);
2008 value1 = aarch64_get_qualifier_standard_value (qualifier[1]);
2009 assert ((value >> 1) == value1);
2010 insert_field (FLD_size, &inst->value, value1, inst->opcode->mask);
2013 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
2015 /* Use Rt to encode in the case of e.g.
2016 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
2017 enum aarch64_opnd_qualifier qualifier;
2018 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
2019 if (idx == -1)
2020 /* Otherwise use the result operand, which has to be a integer
2021 register. */
2022 idx = 0;
2023 assert (idx == 0 || idx == 1);
2024 assert (aarch64_get_operand_class (inst->opcode->operands[idx])
2025 == AARCH64_OPND_CLASS_INT_REG);
2026 qualifier = inst->operands[idx].qualifier;
2027 insert_field (FLD_Q, &inst->value,
2028 aarch64_get_qualifier_standard_value (qualifier), 0);
2030 if (inst->opcode->flags & F_LDS_SIZE)
2032 /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
2033 enum aarch64_opnd_qualifier qualifier;
2034 aarch64_field field = {0, 0};
2035 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2036 == AARCH64_OPND_CLASS_INT_REG);
2037 gen_sub_field (FLD_opc, 0, 1, &field);
2038 qualifier = inst->operands[0].qualifier;
2039 insert_field_2 (&field, &inst->value,
2040 1 - aarch64_get_qualifier_standard_value (qualifier), 0);
2042 /* Miscellaneous encoding as the last step. */
2043 if (inst->opcode->flags & F_MISC)
2044 do_misc_encoding (inst);
2046 DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst->value);
2049 /* Some instructions (including all SVE ones) use the instruction class
2050 to describe how a qualifiers_list index is represented in the instruction
2051 encoding. If INST is such an instruction, encode the chosen qualifier
2052 variant. */
2054 static void
2055 aarch64_encode_variant_using_iclass (struct aarch64_inst *inst)
2057 int variant = 0;
2058 switch (inst->opcode->iclass)
2060 case sme_mov:
2061 case sme_psel:
2062 /* The variant is encoded as part of the immediate. */
2063 break;
2065 case sme_size_12_bhs:
2066 insert_field (FLD_SME_size_12, &inst->value,
2067 aarch64_get_variant (inst), 0);
2068 break;
2070 case sme_size_22:
2071 insert_field (FLD_SME_size_22, &inst->value,
2072 aarch64_get_variant (inst), 0);
2073 break;
2075 case sme_size_22_hsd:
2076 insert_field (FLD_SME_size_22, &inst->value,
2077 aarch64_get_variant (inst) + 1, 0);
2078 break;
2080 case sme_size_12_hs:
2081 insert_field (FLD_SME_size_12, &inst->value,
2082 aarch64_get_variant (inst) + 1, 0);
2083 break;
2085 case sme_sz_23:
2086 insert_field (FLD_SME_sz_23, &inst->value,
2087 aarch64_get_variant (inst), 0);
2088 break;
2090 case sve_cpy:
2091 insert_fields (&inst->value, aarch64_get_variant (inst),
2092 0, 2, FLD_SVE_M_14, FLD_size);
2093 break;
2095 case sme_shift:
2096 case sve_index:
2097 case sve_index1:
2098 case sve_shift_pred:
2099 case sve_shift_unpred:
2100 case sve_shift_tsz_hsd:
2101 case sve_shift_tsz_bhsd:
2102 /* For indices and shift amounts, the variant is encoded as
2103 part of the immediate. */
2104 break;
2106 case sve_limm:
2107 case sme2_mov:
2108 /* For sve_limm, the .B, .H, and .S forms are just a convenience
2109 and depend on the immediate. They don't have a separate
2110 encoding. */
2111 break;
2113 case sme_misc:
2114 case sme2_movaz:
2115 case sve_misc:
2116 /* These instructions have only a single variant. */
2117 break;
2119 case sve_movprfx:
2120 insert_fields (&inst->value, aarch64_get_variant (inst),
2121 0, 2, FLD_SVE_M_16, FLD_size);
2122 break;
2124 case sve_pred_zm:
2125 insert_field (FLD_SVE_M_4, &inst->value, aarch64_get_variant (inst), 0);
2126 break;
2128 case sve_size_bhs:
2129 case sve_size_bhsd:
2130 insert_field (FLD_size, &inst->value, aarch64_get_variant (inst), 0);
2131 break;
2133 case sve_size_hsd:
2134 /* MOD 3 For `OP_SVE_Vv_HSD`. */
2135 insert_field (FLD_size, &inst->value, aarch64_get_variant (inst) % 3 + 1, 0);
2136 break;
2138 case sme_fp_sd:
2139 case sme_int_sd:
2140 case sve_size_bh:
2141 case sve_size_sd:
2142 insert_field (FLD_SVE_sz, &inst->value, aarch64_get_variant (inst), 0);
2143 break;
2145 case sve_size_sd2:
2146 insert_field (FLD_SVE_sz2, &inst->value, aarch64_get_variant (inst), 0);
2147 break;
2149 case sve_size_hsd2:
2150 insert_field (FLD_SVE_size, &inst->value,
2151 aarch64_get_variant (inst) + 1, 0);
2152 break;
2154 case sve_size_tsz_bhs:
2155 insert_fields (&inst->value,
2156 (1 << aarch64_get_variant (inst)),
2157 0, 2, FLD_SVE_tszl_19, FLD_SVE_sz);
2158 break;
2160 case sve_size_13:
2161 variant = aarch64_get_variant (inst) + 1;
2162 if (variant == 2)
2163 variant = 3;
2164 insert_field (FLD_size, &inst->value, variant, 0);
2165 break;
2167 default:
2168 break;
2172 /* Converters converting an alias opcode instruction to its real form. */
2174 /* ROR <Wd>, <Ws>, #<shift>
2175 is equivalent to:
2176 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
2177 static void
2178 convert_ror_to_extr (aarch64_inst *inst)
2180 copy_operand_info (inst, 3, 2);
2181 copy_operand_info (inst, 2, 1);
2184 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
2185 is equivalent to:
2186 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
2187 static void
2188 convert_xtl_to_shll (aarch64_inst *inst)
2190 inst->operands[2].qualifier = inst->operands[1].qualifier;
2191 inst->operands[2].imm.value = 0;
2194 /* Convert
2195 LSR <Xd>, <Xn>, #<shift>
2197 UBFM <Xd>, <Xn>, #<shift>, #63. */
2198 static void
2199 convert_sr_to_bfm (aarch64_inst *inst)
2201 inst->operands[3].imm.value =
2202 inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2205 /* Convert MOV to ORR. */
2206 static void
2207 convert_mov_to_orr (aarch64_inst *inst)
2209 /* MOV <Vd>.<T>, <Vn>.<T>
2210 is equivalent to:
2211 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
2212 copy_operand_info (inst, 2, 1);
2215 /* When <imms> >= <immr>, the instruction written:
2216 SBFX <Xd>, <Xn>, #<lsb>, #<width>
2217 is equivalent to:
2218 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
2220 static void
2221 convert_bfx_to_bfm (aarch64_inst *inst)
2223 int64_t lsb, width;
2225 /* Convert the operand. */
2226 lsb = inst->operands[2].imm.value;
2227 width = inst->operands[3].imm.value;
2228 inst->operands[2].imm.value = lsb;
2229 inst->operands[3].imm.value = lsb + width - 1;
2232 /* When <imms> < <immr>, the instruction written:
2233 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
2234 is equivalent to:
2235 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
2237 static void
2238 convert_bfi_to_bfm (aarch64_inst *inst)
2240 int64_t lsb, width;
2242 /* Convert the operand. */
2243 lsb = inst->operands[2].imm.value;
2244 width = inst->operands[3].imm.value;
2245 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
2247 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
2248 inst->operands[3].imm.value = width - 1;
2250 else
2252 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
2253 inst->operands[3].imm.value = width - 1;
2257 /* The instruction written:
2258 BFC <Xd>, #<lsb>, #<width>
2259 is equivalent to:
2260 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
2262 static void
2263 convert_bfc_to_bfm (aarch64_inst *inst)
2265 int64_t lsb, width;
2267 /* Insert XZR. */
2268 copy_operand_info (inst, 3, 2);
2269 copy_operand_info (inst, 2, 1);
2270 copy_operand_info (inst, 1, 0);
2271 inst->operands[1].reg.regno = 0x1f;
2273 /* Convert the immediate operand. */
2274 lsb = inst->operands[2].imm.value;
2275 width = inst->operands[3].imm.value;
2276 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
2278 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
2279 inst->operands[3].imm.value = width - 1;
2281 else
2283 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
2284 inst->operands[3].imm.value = width - 1;
2288 /* The instruction written:
2289 LSL <Xd>, <Xn>, #<shift>
2290 is equivalent to:
2291 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
2293 static void
2294 convert_lsl_to_ubfm (aarch64_inst *inst)
2296 int64_t shift = inst->operands[2].imm.value;
2298 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
2300 inst->operands[2].imm.value = (32 - shift) & 0x1f;
2301 inst->operands[3].imm.value = 31 - shift;
2303 else
2305 inst->operands[2].imm.value = (64 - shift) & 0x3f;
2306 inst->operands[3].imm.value = 63 - shift;
2310 /* CINC <Wd>, <Wn>, <cond>
2311 is equivalent to:
2312 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>). */
2314 static void
2315 convert_to_csel (aarch64_inst *inst)
2317 copy_operand_info (inst, 3, 2);
2318 copy_operand_info (inst, 2, 1);
2319 inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
2322 /* CSET <Wd>, <cond>
2323 is equivalent to:
2324 CSINC <Wd>, WZR, WZR, invert(<cond>). */
2326 static void
2327 convert_cset_to_csinc (aarch64_inst *inst)
2329 copy_operand_info (inst, 3, 1);
2330 copy_operand_info (inst, 2, 0);
2331 copy_operand_info (inst, 1, 0);
2332 inst->operands[1].reg.regno = 0x1f;
2333 inst->operands[2].reg.regno = 0x1f;
2334 inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
2337 /* MOV <Wd>, #<imm>
2338 is equivalent to:
2339 MOVZ <Wd>, #<imm16>, LSL #<shift>. */
2341 static void
2342 convert_mov_to_movewide (aarch64_inst *inst)
2344 int is32;
2345 uint32_t shift_amount;
2346 uint64_t value = ~(uint64_t)0;
2348 switch (inst->opcode->op)
2350 case OP_MOV_IMM_WIDE:
2351 value = inst->operands[1].imm.value;
2352 break;
2353 case OP_MOV_IMM_WIDEN:
2354 value = ~inst->operands[1].imm.value;
2355 break;
2356 default:
2357 return;
2359 inst->operands[1].type = AARCH64_OPND_HALF;
2360 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2361 if (! aarch64_wide_constant_p (value, is32, &shift_amount))
2362 /* The constraint check should have guaranteed this wouldn't happen. */
2363 return;
2364 value >>= shift_amount;
2365 value &= 0xffff;
2366 inst->operands[1].imm.value = value;
2367 inst->operands[1].shifter.kind = AARCH64_MOD_LSL;
2368 inst->operands[1].shifter.amount = shift_amount;
2371 /* MOV <Wd>, #<imm>
2372 is equivalent to:
2373 ORR <Wd>, WZR, #<imm>. */
2375 static void
2376 convert_mov_to_movebitmask (aarch64_inst *inst)
2378 copy_operand_info (inst, 2, 1);
2379 inst->operands[1].reg.regno = 0x1f;
2380 inst->operands[1].skip = 0;
2383 /* Some alias opcodes are assembled by being converted to their real-form. */
2385 static void
2386 convert_to_real (aarch64_inst *inst, const aarch64_opcode *real)
2388 const aarch64_opcode *alias = inst->opcode;
2390 if ((alias->flags & F_CONV) == 0)
2391 goto convert_to_real_return;
2393 switch (alias->op)
2395 case OP_ASR_IMM:
2396 case OP_LSR_IMM:
2397 convert_sr_to_bfm (inst);
2398 break;
2399 case OP_LSL_IMM:
2400 convert_lsl_to_ubfm (inst);
2401 break;
2402 case OP_CINC:
2403 case OP_CINV:
2404 case OP_CNEG:
2405 convert_to_csel (inst);
2406 break;
2407 case OP_CSET:
2408 case OP_CSETM:
2409 convert_cset_to_csinc (inst);
2410 break;
2411 case OP_UBFX:
2412 case OP_BFXIL:
2413 case OP_SBFX:
2414 convert_bfx_to_bfm (inst);
2415 break;
2416 case OP_SBFIZ:
2417 case OP_BFI:
2418 case OP_UBFIZ:
2419 convert_bfi_to_bfm (inst);
2420 break;
2421 case OP_BFC:
2422 convert_bfc_to_bfm (inst);
2423 break;
2424 case OP_MOV_V:
2425 convert_mov_to_orr (inst);
2426 break;
2427 case OP_MOV_IMM_WIDE:
2428 case OP_MOV_IMM_WIDEN:
2429 convert_mov_to_movewide (inst);
2430 break;
2431 case OP_MOV_IMM_LOG:
2432 convert_mov_to_movebitmask (inst);
2433 break;
2434 case OP_ROR_IMM:
2435 convert_ror_to_extr (inst);
2436 break;
2437 case OP_SXTL:
2438 case OP_SXTL2:
2439 case OP_UXTL:
2440 case OP_UXTL2:
2441 convert_xtl_to_shll (inst);
2442 break;
2443 default:
2444 break;
2447 convert_to_real_return:
2448 aarch64_replace_opcode (inst, real);
2451 /* Encode *INST_ORI of the opcode code OPCODE.
2452 Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
2453 matched operand qualifier sequence in *QLF_SEQ. */
2455 bool
2456 aarch64_opcode_encode (const aarch64_opcode *opcode,
2457 const aarch64_inst *inst_ori, aarch64_insn *code,
2458 aarch64_opnd_qualifier_t *qlf_seq,
2459 aarch64_operand_error *mismatch_detail,
2460 aarch64_instr_sequence* insn_sequence)
2462 int i;
2463 const aarch64_opcode *aliased;
2464 aarch64_inst copy, *inst;
2466 DEBUG_TRACE ("enter with %s", opcode->name);
2468 /* Create a copy of *INST_ORI, so that we can do any change we want. */
2469 copy = *inst_ori;
2470 inst = &copy;
2472 assert (inst->opcode == NULL || inst->opcode == opcode);
2473 if (inst->opcode == NULL)
2474 inst->opcode = opcode;
2476 /* Constrain the operands.
2477 After passing this, the encoding is guaranteed to succeed. */
2478 if (aarch64_match_operands_constraint (inst, mismatch_detail) == 0)
2480 DEBUG_TRACE ("FAIL since operand constraint not met");
2481 return 0;
2484 /* Get the base value.
2485 Note: this has to be before the aliasing handling below in order to
2486 get the base value from the alias opcode before we move on to the
2487 aliased opcode for encoding. */
2488 inst->value = opcode->opcode;
2490 /* No need to do anything else if the opcode does not have any operand. */
2491 if (aarch64_num_of_operands (opcode) == 0)
2492 goto encoding_exit;
2494 /* Assign operand indexes and check types. Also put the matched
2495 operand qualifiers in *QLF_SEQ to return. */
2496 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2498 assert (opcode->operands[i] == inst->operands[i].type);
2499 inst->operands[i].idx = i;
2500 if (qlf_seq != NULL)
2501 *qlf_seq = inst->operands[i].qualifier;
2504 aliased = aarch64_find_real_opcode (opcode);
2505 /* If the opcode is an alias and it does not ask for direct encoding by
2506 itself, the instruction will be transformed to the form of real opcode
2507 and the encoding will be carried out using the rules for the aliased
2508 opcode. */
2509 if (aliased != NULL && (opcode->flags & F_CONV))
2511 DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
2512 aliased->name, opcode->name);
2513 /* Convert the operands to the form of the real opcode. */
2514 convert_to_real (inst, aliased);
2515 opcode = aliased;
2518 aarch64_opnd_info *info = inst->operands;
2520 /* Call the inserter of each operand. */
2521 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++info)
2523 const aarch64_operand *opnd;
2524 enum aarch64_opnd type = opcode->operands[i];
2525 if (type == AARCH64_OPND_NIL)
2526 break;
2527 if (info->skip)
2529 DEBUG_TRACE ("skip the incomplete operand %d", i);
2530 continue;
2532 opnd = &aarch64_operands[type];
2533 if (operand_has_inserter (opnd)
2534 && !aarch64_insert_operand (opnd, info, &inst->value, inst,
2535 mismatch_detail))
2536 return false;
2539 /* Call opcode encoders indicated by flags. */
2540 if (opcode_has_special_coder (opcode))
2541 do_special_encoding (inst);
2543 /* Possibly use the instruction class to encode the chosen qualifier
2544 variant. */
2545 aarch64_encode_variant_using_iclass (inst);
2547 /* Run a verifier if the instruction has one set. */
2548 if (opcode->verifier)
2550 enum err_type result = opcode->verifier (inst, *code, 0, true,
2551 mismatch_detail, insn_sequence);
2552 switch (result)
2554 case ERR_UND:
2555 case ERR_UNP:
2556 case ERR_NYI:
2557 return false;
2558 default:
2559 break;
2563 /* Always run constrain verifiers, this is needed because constrains need to
2564 maintain a global state. Regardless if the instruction has the flag set
2565 or not. */
2566 enum err_type result = verify_constraints (inst, *code, 0, true,
2567 mismatch_detail, insn_sequence);
2568 switch (result)
2570 case ERR_UND:
2571 case ERR_UNP:
2572 case ERR_NYI:
2573 return false;
2574 default:
2575 break;
2579 encoding_exit:
2580 DEBUG_TRACE ("exit with %s", opcode->name);
2582 *code = inst->value;
2584 return true;