Fix: ld: Test case pr28158 fails on x86_64-linux-musl when index is > 19
[binutils-gdb.git] / opcodes / aarch64-asm.c
blob0025cb6f80c7d43980fa68c7318b65faaf35f152
1 /* aarch64-asm.c -- AArch64 assembler support.
2 Copyright (C) 2012-2023 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
21 #include "sysdep.h"
22 #include <stdarg.h>
23 #include "libiberty.h"
24 #include "aarch64-asm.h"
25 #include "opintl.h"
27 /* Utilities. */
29 /* The unnamed arguments consist of the number of fields and information about
30 these fields where the VALUE will be inserted into CODE. MASK can be zero or
31 the base mask of the opcode.
33 N.B. the fields are required to be in such an order than the least signficant
34 field for VALUE comes the first, e.g. the <index> in
35 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
36 is encoded in H:L:M in some cases, the fields H:L:M should be passed in
37 the order of M, L, H. */
39 static inline void
40 insert_fields (aarch64_insn *code, aarch64_insn value, aarch64_insn mask, ...)
42 uint32_t num;
43 const aarch64_field *field;
44 enum aarch64_field_kind kind;
45 va_list va;
47 va_start (va, mask);
48 num = va_arg (va, uint32_t);
49 assert (num <= 5);
50 while (num--)
52 kind = va_arg (va, enum aarch64_field_kind);
53 field = &fields[kind];
54 insert_field (kind, code, value, mask);
55 value >>= field->width;
57 va_end (va);
60 /* Insert a raw field value VALUE into all fields in SELF->fields after START.
61 The least significant bit goes in the final field. */
63 static void
64 insert_all_fields_after (const aarch64_operand *self, unsigned int start,
65 aarch64_insn *code, aarch64_insn value)
67 unsigned int i;
68 enum aarch64_field_kind kind;
70 for (i = ARRAY_SIZE (self->fields); i-- > start; )
71 if (self->fields[i] != FLD_NIL)
73 kind = self->fields[i];
74 insert_field (kind, code, value, 0);
75 value >>= fields[kind].width;
79 /* Insert a raw field value VALUE into all fields in SELF->fields.
80 The least significant bit goes in the final field. */
82 static void
83 insert_all_fields (const aarch64_operand *self, aarch64_insn *code,
84 aarch64_insn value)
86 return insert_all_fields_after (self, 0, code, value);
89 /* Operand inserters. */
91 /* Insert nothing. */
92 bool
93 aarch64_ins_none (const aarch64_operand *self ATTRIBUTE_UNUSED,
94 const aarch64_opnd_info *info ATTRIBUTE_UNUSED,
95 aarch64_insn *code ATTRIBUTE_UNUSED,
96 const aarch64_inst *inst ATTRIBUTE_UNUSED,
97 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
99 return true;
102 /* Insert register number. */
103 bool
104 aarch64_ins_regno (const aarch64_operand *self, const aarch64_opnd_info *info,
105 aarch64_insn *code,
106 const aarch64_inst *inst ATTRIBUTE_UNUSED,
107 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
109 int val = info->reg.regno - get_operand_specific_data (self);
110 insert_field (self->fields[0], code, val, 0);
111 return true;
114 /* Insert register number, index and/or other data for SIMD register element
115 operand, e.g. the last source operand in
116 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
117 bool
118 aarch64_ins_reglane (const aarch64_operand *self, const aarch64_opnd_info *info,
119 aarch64_insn *code, const aarch64_inst *inst,
120 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
122 /* regno */
123 insert_field (self->fields[0], code, info->reglane.regno, inst->opcode->mask);
124 /* index and/or type */
125 if (inst->opcode->iclass == asisdone || inst->opcode->iclass == asimdins)
127 int pos = info->qualifier - AARCH64_OPND_QLF_S_B;
128 if (info->type == AARCH64_OPND_En
129 && inst->opcode->operands[0] == AARCH64_OPND_Ed)
131 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
132 assert (info->idx == 1); /* Vn */
133 aarch64_insn value = info->reglane.index << pos;
134 insert_field (FLD_imm4_11, code, value, 0);
136 else
138 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
139 imm5<3:0> <V>
140 0000 RESERVED
141 xxx1 B
142 xx10 H
143 x100 S
144 1000 D */
145 aarch64_insn value = ((info->reglane.index << 1) | 1) << pos;
146 insert_field (FLD_imm5, code, value, 0);
149 else if (inst->opcode->iclass == dotproduct)
151 unsigned reglane_index = info->reglane.index;
152 switch (info->qualifier)
154 case AARCH64_OPND_QLF_S_4B:
155 case AARCH64_OPND_QLF_S_2H:
156 /* L:H */
157 assert (reglane_index < 4);
158 insert_fields (code, reglane_index, 0, 2, FLD_L, FLD_H);
159 break;
160 default:
161 return false;
164 else if (inst->opcode->iclass == cryptosm3)
166 /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>S[<imm2>]. */
167 unsigned reglane_index = info->reglane.index;
168 assert (reglane_index < 4);
169 insert_field (FLD_SM3_imm2, code, reglane_index, 0);
171 else
173 /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
174 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
175 unsigned reglane_index = info->reglane.index;
177 if (inst->opcode->op == OP_FCMLA_ELEM)
178 /* Complex operand takes two elements. */
179 reglane_index *= 2;
181 switch (info->qualifier)
183 case AARCH64_OPND_QLF_S_H:
184 /* H:L:M */
185 assert (reglane_index < 8);
186 insert_fields (code, reglane_index, 0, 3, FLD_M, FLD_L, FLD_H);
187 break;
188 case AARCH64_OPND_QLF_S_S:
189 /* H:L */
190 assert (reglane_index < 4);
191 insert_fields (code, reglane_index, 0, 2, FLD_L, FLD_H);
192 break;
193 case AARCH64_OPND_QLF_S_D:
194 /* H */
195 assert (reglane_index < 2);
196 insert_field (FLD_H, code, reglane_index, 0);
197 break;
198 default:
199 return false;
202 return true;
205 /* Insert regno and len field of a register list operand, e.g. Vn in TBL. */
206 bool
207 aarch64_ins_reglist (const aarch64_operand *self, const aarch64_opnd_info *info,
208 aarch64_insn *code,
209 const aarch64_inst *inst ATTRIBUTE_UNUSED,
210 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
212 /* R */
213 insert_field (self->fields[0], code, info->reglist.first_regno, 0);
214 /* len */
215 insert_field (FLD_len, code, info->reglist.num_regs - 1, 0);
216 return true;
219 /* Insert Rt and opcode fields for a register list operand, e.g. Vt
220 in AdvSIMD load/store instructions. */
221 bool
222 aarch64_ins_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
223 const aarch64_opnd_info *info, aarch64_insn *code,
224 const aarch64_inst *inst,
225 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
227 aarch64_insn value = 0;
228 /* Number of elements in each structure to be loaded/stored. */
229 unsigned num = get_opcode_dependent_value (inst->opcode);
231 /* Rt */
232 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
233 /* opcode */
234 switch (num)
236 case 1:
237 switch (info->reglist.num_regs)
239 case 1: value = 0x7; break;
240 case 2: value = 0xa; break;
241 case 3: value = 0x6; break;
242 case 4: value = 0x2; break;
243 default: return false;
245 break;
246 case 2:
247 value = info->reglist.num_regs == 4 ? 0x3 : 0x8;
248 break;
249 case 3:
250 value = 0x4;
251 break;
252 case 4:
253 value = 0x0;
254 break;
255 default:
256 return false;
258 insert_field (FLD_opcode, code, value, 0);
260 return true;
263 /* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
264 single structure to all lanes instructions. */
265 bool
266 aarch64_ins_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
267 const aarch64_opnd_info *info, aarch64_insn *code,
268 const aarch64_inst *inst,
269 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
271 aarch64_insn value;
272 /* The opcode dependent area stores the number of elements in
273 each structure to be loaded/stored. */
274 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
276 /* Rt */
277 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
278 /* S */
279 value = (aarch64_insn) 0;
280 if (is_ld1r && info->reglist.num_regs == 2)
281 /* OP_LD1R does not have alternating variant, but have "two consecutive"
282 instead. */
283 value = (aarch64_insn) 1;
284 insert_field (FLD_S, code, value, 0);
286 return true;
289 /* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
290 operand e.g. Vt in AdvSIMD load/store single element instructions. */
291 bool
292 aarch64_ins_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
293 const aarch64_opnd_info *info, aarch64_insn *code,
294 const aarch64_inst *inst ATTRIBUTE_UNUSED,
295 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
297 aarch64_field field = {0, 0};
298 aarch64_insn QSsize = 0; /* fields Q:S:size. */
299 aarch64_insn opcodeh2 = 0; /* opcode<2:1> */
301 assert (info->reglist.has_index);
303 /* Rt */
304 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
305 /* Encode the index, opcode<2:1> and size. */
306 switch (info->qualifier)
308 case AARCH64_OPND_QLF_S_B:
309 /* Index encoded in "Q:S:size". */
310 QSsize = info->reglist.index;
311 opcodeh2 = 0x0;
312 break;
313 case AARCH64_OPND_QLF_S_H:
314 /* Index encoded in "Q:S:size<1>". */
315 QSsize = info->reglist.index << 1;
316 opcodeh2 = 0x1;
317 break;
318 case AARCH64_OPND_QLF_S_S:
319 /* Index encoded in "Q:S". */
320 QSsize = info->reglist.index << 2;
321 opcodeh2 = 0x2;
322 break;
323 case AARCH64_OPND_QLF_S_D:
324 /* Index encoded in "Q". */
325 QSsize = info->reglist.index << 3 | 0x1;
326 opcodeh2 = 0x2;
327 break;
328 default:
329 return false;
331 insert_fields (code, QSsize, 0, 3, FLD_vldst_size, FLD_S, FLD_Q);
332 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
333 insert_field_2 (&field, code, opcodeh2, 0);
335 return true;
338 /* Insert fields immh:immb and/or Q for e.g. the shift immediate in
339 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
340 or SSHR <V><d>, <V><n>, #<shift>. */
341 bool
342 aarch64_ins_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
343 const aarch64_opnd_info *info,
344 aarch64_insn *code, const aarch64_inst *inst,
345 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
347 unsigned val = aarch64_get_qualifier_standard_value (info->qualifier);
348 aarch64_insn Q, imm;
350 if (inst->opcode->iclass == asimdshf)
352 /* Q
353 immh Q <T>
354 0000 x SEE AdvSIMD modified immediate
355 0001 0 8B
356 0001 1 16B
357 001x 0 4H
358 001x 1 8H
359 01xx 0 2S
360 01xx 1 4S
361 1xxx 0 RESERVED
362 1xxx 1 2D */
363 Q = (val & 0x1) ? 1 : 0;
364 insert_field (FLD_Q, code, Q, inst->opcode->mask);
365 val >>= 1;
368 assert (info->type == AARCH64_OPND_IMM_VLSR
369 || info->type == AARCH64_OPND_IMM_VLSL);
371 if (info->type == AARCH64_OPND_IMM_VLSR)
372 /* immh:immb
373 immh <shift>
374 0000 SEE AdvSIMD modified immediate
375 0001 (16-UInt(immh:immb))
376 001x (32-UInt(immh:immb))
377 01xx (64-UInt(immh:immb))
378 1xxx (128-UInt(immh:immb)) */
379 imm = (16 << (unsigned)val) - info->imm.value;
380 else
381 /* immh:immb
382 immh <shift>
383 0000 SEE AdvSIMD modified immediate
384 0001 (UInt(immh:immb)-8)
385 001x (UInt(immh:immb)-16)
386 01xx (UInt(immh:immb)-32)
387 1xxx (UInt(immh:immb)-64) */
388 imm = info->imm.value + (8 << (unsigned)val);
389 insert_fields (code, imm, 0, 2, FLD_immb, FLD_immh);
391 return true;
394 /* Insert fields for e.g. the immediate operands in
395 BFM <Wd>, <Wn>, #<immr>, #<imms>. */
396 bool
397 aarch64_ins_imm (const aarch64_operand *self, const aarch64_opnd_info *info,
398 aarch64_insn *code,
399 const aarch64_inst *inst ATTRIBUTE_UNUSED,
400 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
402 int64_t imm;
404 imm = info->imm.value;
405 if (operand_need_shift_by_two (self))
406 imm >>= 2;
407 if (operand_need_shift_by_three (self))
408 imm >>= 3;
409 if (operand_need_shift_by_four (self))
410 imm >>= 4;
411 insert_all_fields (self, code, imm);
412 return true;
415 /* Insert immediate and its shift amount for e.g. the last operand in
416 MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
417 bool
418 aarch64_ins_imm_half (const aarch64_operand *self, const aarch64_opnd_info *info,
419 aarch64_insn *code, const aarch64_inst *inst,
420 aarch64_operand_error *errors)
422 /* imm16 */
423 aarch64_ins_imm (self, info, code, inst, errors);
424 /* hw */
425 insert_field (FLD_hw, code, info->shifter.amount >> 4, 0);
426 return true;
429 /* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
430 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
431 bool
432 aarch64_ins_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
433 const aarch64_opnd_info *info,
434 aarch64_insn *code,
435 const aarch64_inst *inst ATTRIBUTE_UNUSED,
436 aarch64_operand_error *errors
437 ATTRIBUTE_UNUSED)
439 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
440 uint64_t imm = info->imm.value;
441 enum aarch64_modifier_kind kind = info->shifter.kind;
442 int amount = info->shifter.amount;
443 aarch64_field field = {0, 0};
445 /* a:b:c:d:e:f:g:h */
446 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
448 /* Either MOVI <Dd>, #<imm>
449 or MOVI <Vd>.2D, #<imm>.
450 <imm> is a 64-bit immediate
451 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
452 encoded in "a:b:c:d:e:f:g:h". */
453 imm = aarch64_shrink_expanded_imm8 (imm);
454 assert ((int)imm >= 0);
456 insert_fields (code, imm, 0, 2, FLD_defgh, FLD_abc);
458 if (kind == AARCH64_MOD_NONE)
459 return true;
461 /* shift amount partially in cmode */
462 assert (kind == AARCH64_MOD_LSL || kind == AARCH64_MOD_MSL);
463 if (kind == AARCH64_MOD_LSL)
465 /* AARCH64_MOD_LSL: shift zeros. */
466 int esize = aarch64_get_qualifier_esize (opnd0_qualifier);
467 assert (esize == 4 || esize == 2 || esize == 1);
468 /* For 8-bit move immediate, the optional LSL #0 does not require
469 encoding. */
470 if (esize == 1)
471 return true;
472 amount >>= 3;
473 if (esize == 4)
474 gen_sub_field (FLD_cmode, 1, 2, &field); /* per word */
475 else
476 gen_sub_field (FLD_cmode, 1, 1, &field); /* per halfword */
478 else
480 /* AARCH64_MOD_MSL: shift ones. */
481 amount >>= 4;
482 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
484 insert_field_2 (&field, code, amount, 0);
486 return true;
489 /* Insert fields for an 8-bit floating-point immediate. */
490 bool
491 aarch64_ins_fpimm (const aarch64_operand *self, const aarch64_opnd_info *info,
492 aarch64_insn *code,
493 const aarch64_inst *inst ATTRIBUTE_UNUSED,
494 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
496 insert_all_fields (self, code, info->imm.value);
497 return true;
500 /* Insert 1-bit rotation immediate (#90 or #270). */
501 bool
502 aarch64_ins_imm_rotate1 (const aarch64_operand *self,
503 const aarch64_opnd_info *info,
504 aarch64_insn *code, const aarch64_inst *inst,
505 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
507 uint64_t rot = (info->imm.value - 90) / 180;
508 assert (rot < 2U);
509 insert_field (self->fields[0], code, rot, inst->opcode->mask);
510 return true;
513 /* Insert 2-bit rotation immediate (#0, #90, #180 or #270). */
514 bool
515 aarch64_ins_imm_rotate2 (const aarch64_operand *self,
516 const aarch64_opnd_info *info,
517 aarch64_insn *code, const aarch64_inst *inst,
518 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
520 uint64_t rot = info->imm.value / 90;
521 assert (rot < 4U);
522 insert_field (self->fields[0], code, rot, inst->opcode->mask);
523 return true;
526 /* Insert #<fbits> for the immediate operand in fp fix-point instructions,
527 e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
528 bool
529 aarch64_ins_fbits (const aarch64_operand *self, const aarch64_opnd_info *info,
530 aarch64_insn *code,
531 const aarch64_inst *inst ATTRIBUTE_UNUSED,
532 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
534 insert_field (self->fields[0], code, 64 - info->imm.value, 0);
535 return true;
538 /* Insert arithmetic immediate for e.g. the last operand in
539 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
540 bool
541 aarch64_ins_aimm (const aarch64_operand *self, const aarch64_opnd_info *info,
542 aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
543 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
545 /* shift */
546 aarch64_insn value = info->shifter.amount ? 1 : 0;
547 insert_field (self->fields[0], code, value, 0);
548 /* imm12 (unsigned) */
549 insert_field (self->fields[1], code, info->imm.value, 0);
550 return true;
553 /* Common routine shared by aarch64_ins{,_inv}_limm. INVERT_P says whether
554 the operand should be inverted before encoding. */
555 static bool
556 aarch64_ins_limm_1 (const aarch64_operand *self,
557 const aarch64_opnd_info *info, aarch64_insn *code,
558 const aarch64_inst *inst, bool invert_p,
559 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
561 bool res;
562 aarch64_insn value;
563 uint64_t imm = info->imm.value;
564 int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
566 if (invert_p)
567 imm = ~imm;
568 /* The constraint check should guarantee that this will work. */
569 res = aarch64_logical_immediate_p (imm, esize, &value);
570 if (res)
571 insert_fields (code, value, 0, 3, self->fields[2], self->fields[1],
572 self->fields[0]);
573 return res;
576 /* Insert logical/bitmask immediate for e.g. the last operand in
577 ORR <Wd|WSP>, <Wn>, #<imm>. */
578 bool
579 aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
580 aarch64_insn *code, const aarch64_inst *inst,
581 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
583 return aarch64_ins_limm_1 (self, info, code, inst,
584 inst->opcode->op == OP_BIC, errors);
587 /* Insert a logical/bitmask immediate for the BIC alias of AND (etc.). */
588 bool
589 aarch64_ins_inv_limm (const aarch64_operand *self,
590 const aarch64_opnd_info *info, aarch64_insn *code,
591 const aarch64_inst *inst,
592 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
594 return aarch64_ins_limm_1 (self, info, code, inst, true, errors);
597 /* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
598 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
599 bool
600 aarch64_ins_ft (const aarch64_operand *self, const aarch64_opnd_info *info,
601 aarch64_insn *code, const aarch64_inst *inst,
602 aarch64_operand_error *errors)
604 aarch64_insn value = 0;
606 assert (info->idx == 0);
608 /* Rt */
609 aarch64_ins_regno (self, info, code, inst, errors);
610 if (inst->opcode->iclass == ldstpair_indexed
611 || inst->opcode->iclass == ldstnapair_offs
612 || inst->opcode->iclass == ldstpair_off
613 || inst->opcode->iclass == loadlit)
615 /* size */
616 switch (info->qualifier)
618 case AARCH64_OPND_QLF_S_S: value = 0; break;
619 case AARCH64_OPND_QLF_S_D: value = 1; break;
620 case AARCH64_OPND_QLF_S_Q: value = 2; break;
621 default: return false;
623 insert_field (FLD_ldst_size, code, value, 0);
625 else
627 /* opc[1]:size */
628 value = aarch64_get_qualifier_standard_value (info->qualifier);
629 insert_fields (code, value, 0, 2, FLD_ldst_size, FLD_opc1);
632 return true;
635 /* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
636 bool
637 aarch64_ins_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
638 const aarch64_opnd_info *info, aarch64_insn *code,
639 const aarch64_inst *inst ATTRIBUTE_UNUSED,
640 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
642 /* Rn */
643 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
644 return true;
647 /* Encode the address operand for e.g.
648 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
649 bool
650 aarch64_ins_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
651 const aarch64_opnd_info *info, aarch64_insn *code,
652 const aarch64_inst *inst ATTRIBUTE_UNUSED,
653 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
655 aarch64_insn S;
656 enum aarch64_modifier_kind kind = info->shifter.kind;
658 /* Rn */
659 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
660 /* Rm */
661 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
662 /* option */
663 if (kind == AARCH64_MOD_LSL)
664 kind = AARCH64_MOD_UXTX; /* Trick to enable the table-driven. */
665 insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
666 /* S */
667 if (info->qualifier != AARCH64_OPND_QLF_S_B)
668 S = info->shifter.amount != 0;
669 else
670 /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}},
671 S <amount>
672 0 [absent]
673 1 #0
674 Must be #0 if <extend> is explicitly LSL. */
675 S = info->shifter.operator_present && info->shifter.amount_present;
676 insert_field (FLD_S, code, S, 0);
678 return true;
681 /* Encode the address operand for e.g.
682 stlur <Xt>, [<Xn|SP>{, <amount>}]. */
683 bool
684 aarch64_ins_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
685 const aarch64_opnd_info *info, aarch64_insn *code,
686 const aarch64_inst *inst ATTRIBUTE_UNUSED,
687 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
689 /* Rn */
690 insert_field (self->fields[0], code, info->addr.base_regno, 0);
692 /* simm9 */
693 int imm = info->addr.offset.imm;
694 insert_field (self->fields[1], code, imm, 0);
696 /* writeback */
697 if (info->addr.writeback)
699 assert (info->addr.preind == 1 && info->addr.postind == 0);
700 insert_field (self->fields[2], code, 1, 0);
702 return true;
705 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!. */
706 bool
707 aarch64_ins_addr_simm (const aarch64_operand *self,
708 const aarch64_opnd_info *info,
709 aarch64_insn *code,
710 const aarch64_inst *inst ATTRIBUTE_UNUSED,
711 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
713 int imm;
715 /* Rn */
716 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
717 /* simm (imm9 or imm7) */
718 imm = info->addr.offset.imm;
719 if (self->fields[0] == FLD_imm7
720 || info->qualifier == AARCH64_OPND_QLF_imm_tag)
721 /* scaled immediate in ld/st pair instructions.. */
722 imm >>= get_logsz (aarch64_get_qualifier_esize (info->qualifier));
723 insert_field (self->fields[0], code, imm, 0);
724 /* pre/post- index */
725 if (info->addr.writeback)
727 assert (inst->opcode->iclass != ldst_unscaled
728 && inst->opcode->iclass != ldstnapair_offs
729 && inst->opcode->iclass != ldstpair_off
730 && inst->opcode->iclass != ldst_unpriv);
731 assert (info->addr.preind != info->addr.postind);
732 if (info->addr.preind)
733 insert_field (self->fields[1], code, 1, 0);
736 return true;
739 /* Encode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
740 bool
741 aarch64_ins_addr_simm10 (const aarch64_operand *self,
742 const aarch64_opnd_info *info,
743 aarch64_insn *code,
744 const aarch64_inst *inst ATTRIBUTE_UNUSED,
745 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
747 int imm;
749 /* Rn */
750 insert_field (self->fields[0], code, info->addr.base_regno, 0);
751 /* simm10 */
752 imm = info->addr.offset.imm >> 3;
753 insert_field (self->fields[1], code, imm >> 9, 0);
754 insert_field (self->fields[2], code, imm, 0);
755 /* writeback */
756 if (info->addr.writeback)
758 assert (info->addr.preind == 1 && info->addr.postind == 0);
759 insert_field (self->fields[3], code, 1, 0);
761 return true;
764 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}]. */
765 bool
766 aarch64_ins_addr_uimm12 (const aarch64_operand *self,
767 const aarch64_opnd_info *info,
768 aarch64_insn *code,
769 const aarch64_inst *inst ATTRIBUTE_UNUSED,
770 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
772 int shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
774 /* Rn */
775 insert_field (self->fields[0], code, info->addr.base_regno, 0);
776 /* uimm12 */
777 insert_field (self->fields[1], code,info->addr.offset.imm >> shift, 0);
778 return true;
781 /* Encode the address operand for e.g.
782 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
783 bool
784 aarch64_ins_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
785 const aarch64_opnd_info *info, aarch64_insn *code,
786 const aarch64_inst *inst ATTRIBUTE_UNUSED,
787 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
789 /* Rn */
790 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
791 /* Rm | #<amount> */
792 if (info->addr.offset.is_reg)
793 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
794 else
795 insert_field (FLD_Rm, code, 0x1f, 0);
796 return true;
799 /* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
800 bool
801 aarch64_ins_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
802 const aarch64_opnd_info *info, aarch64_insn *code,
803 const aarch64_inst *inst ATTRIBUTE_UNUSED,
804 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
806 /* cond */
807 insert_field (FLD_cond, code, info->cond->value, 0);
808 return true;
811 /* Encode the system register operand for e.g. MRS <Xt>, <systemreg>. */
812 bool
813 aarch64_ins_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
814 const aarch64_opnd_info *info, aarch64_insn *code,
815 const aarch64_inst *inst,
816 aarch64_operand_error *detail ATTRIBUTE_UNUSED)
818 /* If a system instruction check if we have any restrictions on which
819 registers it can use. */
820 if (inst->opcode->iclass == ic_system)
822 uint64_t opcode_flags
823 = inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE);
824 uint32_t sysreg_flags
825 = info->sysreg.flags & (F_REG_READ | F_REG_WRITE);
827 /* Check to see if it's read-only, else check if it's write only.
828 if it's both or unspecified don't care. */
829 if (opcode_flags == F_SYS_READ
830 && sysreg_flags
831 && sysreg_flags != F_REG_READ)
833 detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
834 detail->error = _("specified register cannot be read from");
835 detail->index = info->idx;
836 detail->non_fatal = true;
838 else if (opcode_flags == F_SYS_WRITE
839 && sysreg_flags
840 && sysreg_flags != F_REG_WRITE)
842 detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
843 detail->error = _("specified register cannot be written to");
844 detail->index = info->idx;
845 detail->non_fatal = true;
848 /* op0:op1:CRn:CRm:op2 */
849 insert_fields (code, info->sysreg.value, inst->opcode->mask, 5,
850 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1, FLD_op0);
851 return true;
854 /* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
855 bool
856 aarch64_ins_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
857 const aarch64_opnd_info *info, aarch64_insn *code,
858 const aarch64_inst *inst ATTRIBUTE_UNUSED,
859 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
861 /* op1:op2 */
862 insert_fields (code, info->pstatefield, inst->opcode->mask, 2,
863 FLD_op2, FLD_op1);
865 /* Extra CRm mask. */
866 if (info->sysreg.flags | F_REG_IN_CRM)
867 insert_field (FLD_CRm, code, PSTATE_DECODE_CRM (info->sysreg.flags), 0);
868 return true;
871 /* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
872 bool
873 aarch64_ins_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
874 const aarch64_opnd_info *info, aarch64_insn *code,
875 const aarch64_inst *inst ATTRIBUTE_UNUSED,
876 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
878 /* op1:CRn:CRm:op2 */
879 insert_fields (code, info->sysins_op->value, inst->opcode->mask, 4,
880 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1);
881 return true;
884 /* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
886 bool
887 aarch64_ins_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
888 const aarch64_opnd_info *info, aarch64_insn *code,
889 const aarch64_inst *inst ATTRIBUTE_UNUSED,
890 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
892 /* CRm */
893 insert_field (FLD_CRm, code, info->barrier->value, 0);
894 return true;
897 /* Encode the memory barrier option operand for DSB <option>nXS|#<imm>. */
899 bool
900 aarch64_ins_barrier_dsb_nxs (const aarch64_operand *self ATTRIBUTE_UNUSED,
901 const aarch64_opnd_info *info, aarch64_insn *code,
902 const aarch64_inst *inst ATTRIBUTE_UNUSED,
903 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
905 /* For the DSB nXS barrier variant: is a 5-bit unsigned immediate,
906 encoded in CRm<3:2>. */
907 aarch64_insn value = (info->barrier->value >> 2) - 4;
908 insert_field (FLD_CRm_dsb_nxs, code, value, 0);
909 return true;
912 /* Encode the prefetch operation option operand for e.g.
913 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
915 bool
916 aarch64_ins_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
917 const aarch64_opnd_info *info, aarch64_insn *code,
918 const aarch64_inst *inst ATTRIBUTE_UNUSED,
919 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
921 /* prfop in Rt */
922 insert_field (FLD_Rt, code, info->prfop->value, 0);
923 return true;
926 /* Encode the hint number for instructions that alias HINT but take an
927 operand. */
929 bool
930 aarch64_ins_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
931 const aarch64_opnd_info *info, aarch64_insn *code,
932 const aarch64_inst *inst ATTRIBUTE_UNUSED,
933 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
935 /* CRm:op2. */
936 insert_fields (code, info->hint_option->value, 0, 2, FLD_op2, FLD_CRm);
937 return true;
940 /* Encode the extended register operand for e.g.
941 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
942 bool
943 aarch64_ins_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
944 const aarch64_opnd_info *info, aarch64_insn *code,
945 const aarch64_inst *inst ATTRIBUTE_UNUSED,
946 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
948 enum aarch64_modifier_kind kind;
950 /* Rm */
951 insert_field (FLD_Rm, code, info->reg.regno, 0);
952 /* option */
953 kind = info->shifter.kind;
954 if (kind == AARCH64_MOD_LSL)
955 kind = info->qualifier == AARCH64_OPND_QLF_W
956 ? AARCH64_MOD_UXTW : AARCH64_MOD_UXTX;
957 insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
958 /* imm3 */
959 insert_field (FLD_imm3_10, code, info->shifter.amount, 0);
961 return true;
964 /* Encode the shifted register operand for e.g.
965 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
966 bool
967 aarch64_ins_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
968 const aarch64_opnd_info *info, aarch64_insn *code,
969 const aarch64_inst *inst ATTRIBUTE_UNUSED,
970 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
972 /* Rm */
973 insert_field (FLD_Rm, code, info->reg.regno, 0);
974 /* shift */
975 insert_field (FLD_shift, code,
976 aarch64_get_operand_modifier_value (info->shifter.kind), 0);
977 /* imm6 */
978 insert_field (FLD_imm6_10, code, info->shifter.amount, 0);
980 return true;
983 /* Encode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
984 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
985 SELF's operand-dependent value. fields[0] specifies the field that
986 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
987 bool
988 aarch64_ins_sve_addr_ri_s4xvl (const aarch64_operand *self,
989 const aarch64_opnd_info *info,
990 aarch64_insn *code,
991 const aarch64_inst *inst ATTRIBUTE_UNUSED,
992 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
994 int factor = 1 + get_operand_specific_data (self);
995 insert_field (self->fields[0], code, info->addr.base_regno, 0);
996 insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
997 return true;
1000 /* Encode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
1001 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
1002 SELF's operand-dependent value. fields[0] specifies the field that
1003 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
1004 bool
1005 aarch64_ins_sve_addr_ri_s6xvl (const aarch64_operand *self,
1006 const aarch64_opnd_info *info,
1007 aarch64_insn *code,
1008 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1009 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1011 int factor = 1 + get_operand_specific_data (self);
1012 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1013 insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
1014 return true;
1017 /* Encode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
1018 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
1019 SELF's operand-dependent value. fields[0] specifies the field that
1020 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
1021 and imm3 fields, with imm3 being the less-significant part. */
1022 bool
1023 aarch64_ins_sve_addr_ri_s9xvl (const aarch64_operand *self,
1024 const aarch64_opnd_info *info,
1025 aarch64_insn *code,
1026 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1027 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1029 int factor = 1 + get_operand_specific_data (self);
1030 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1031 insert_fields (code, info->addr.offset.imm / factor, 0,
1032 2, FLD_imm3_10, FLD_SVE_imm6);
1033 return true;
1036 /* Encode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
1037 is a 4-bit signed number and where <shift> is SELF's operand-dependent
1038 value. fields[0] specifies the base register field. */
1039 bool
1040 aarch64_ins_sve_addr_ri_s4 (const aarch64_operand *self,
1041 const aarch64_opnd_info *info, aarch64_insn *code,
1042 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1043 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1045 int factor = 1 << get_operand_specific_data (self);
1046 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1047 insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
1048 return true;
1051 /* Encode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1052 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1053 value. fields[0] specifies the base register field. */
1054 bool
1055 aarch64_ins_sve_addr_ri_u6 (const aarch64_operand *self,
1056 const aarch64_opnd_info *info, aarch64_insn *code,
1057 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1058 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1060 int factor = 1 << get_operand_specific_data (self);
1061 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1062 insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
1063 return true;
1066 /* Encode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1067 is SELF's operand-dependent value. fields[0] specifies the base
1068 register field and fields[1] specifies the offset register field. */
1069 bool
1070 aarch64_ins_sve_addr_rr_lsl (const aarch64_operand *self,
1071 const aarch64_opnd_info *info, aarch64_insn *code,
1072 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1073 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1075 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1076 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
1077 return true;
1080 /* Encode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1081 <shift> is SELF's operand-dependent value. fields[0] specifies the
1082 base register field, fields[1] specifies the offset register field and
1083 fields[2] is a single-bit field that selects SXTW over UXTW. */
1084 bool
1085 aarch64_ins_sve_addr_rz_xtw (const aarch64_operand *self,
1086 const aarch64_opnd_info *info, aarch64_insn *code,
1087 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1088 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1090 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1091 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
1092 if (info->shifter.kind == AARCH64_MOD_UXTW)
1093 insert_field (self->fields[2], code, 0, 0);
1094 else
1095 insert_field (self->fields[2], code, 1, 0);
1096 return true;
1099 /* Encode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1100 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1101 fields[0] specifies the base register field. */
1102 bool
1103 aarch64_ins_sve_addr_zi_u5 (const aarch64_operand *self,
1104 const aarch64_opnd_info *info, aarch64_insn *code,
1105 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1106 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1108 int factor = 1 << get_operand_specific_data (self);
1109 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1110 insert_field (FLD_imm5, code, info->addr.offset.imm / factor, 0);
1111 return true;
/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
   where <modifier> is fixed by the instruction and where <msz> is a
   2-bit unsigned number.  fields[0] specifies the base register field
   and fields[1] specifies the offset register field.

   NOTE(review): despite the "ext" in its name this is an insertion
   (assembly-side) helper, shared by the LSL/SXTW/UXTW variants below.  */
static bool
aarch64_ext_sve_addr_zz (const aarch64_operand *self,
			 const aarch64_opnd_info *info, aarch64_insn *code,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (self->fields[1], code, info->addr.offset.regno, 0);
  /* msz: the element-size shift applied to the offset vector.  */
  insert_field (FLD_SVE_msz, code, info->shifter.amount, 0);
  return true;
}
/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
bool
aarch64_ins_sve_addr_zz_lsl (const aarch64_operand *self,
			     const aarch64_opnd_info *info, aarch64_insn *code,
			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
			     aarch64_operand_error *errors)
{
  /* All three ZZ addressing forms share the same field layout; the
     modifier itself is part of the opcode, not of this operand.  */
  return aarch64_ext_sve_addr_zz (self, info, code, errors);
}
/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
bool
aarch64_ins_sve_addr_zz_sxtw (const aarch64_operand *self,
			      const aarch64_opnd_info *info,
			      aarch64_insn *code,
			      const aarch64_inst *inst ATTRIBUTE_UNUSED,
			      aarch64_operand_error *errors)
{
  /* SXTW is implied by the opcode; only the shared fields are encoded.  */
  return aarch64_ext_sve_addr_zz (self, info, code, errors);
}
/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
bool
aarch64_ins_sve_addr_zz_uxtw (const aarch64_operand *self,
			      const aarch64_opnd_info *info,
			      aarch64_insn *code,
			      const aarch64_inst *inst ATTRIBUTE_UNUSED,
			      aarch64_operand_error *errors)
{
  /* UXTW is implied by the opcode; only the shared fields are encoded.  */
  return aarch64_ext_sve_addr_zz (self, info, code, errors);
}
/* Encode an SVE ADD/SUB immediate.  The encoded value is sh:imm8,
   where setting bit 8 (the 256 below) selects an LSL #8 of the 8-bit
   immediate.  */
bool
aarch64_ins_sve_aimm (const aarch64_operand *self,
		      const aarch64_opnd_info *info, aarch64_insn *code,
		      const aarch64_inst *inst ATTRIBUTE_UNUSED,
		      aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  if (info->shifter.amount == 8)
    /* Explicit "#<imm>, LSL #8": keep the low byte and set the shift bit.  */
    insert_all_fields (self, code, (info->imm.value & 0xff) | 256);
  else if (info->imm.value != 0 && (info->imm.value & 0xff) == 0)
    /* A nonzero multiple of 256 with no explicit shift: fold it into
       LSL #8 form.  */
    insert_all_fields (self, code, ((info->imm.value / 256) & 0xff) | 256);
  else
    /* Plain unshifted 8-bit immediate.  */
    insert_all_fields (self, code, info->imm.value & 0xff);
  return true;
}
1183 bool
1184 aarch64_ins_sve_aligned_reglist (const aarch64_operand *self,
1185 const aarch64_opnd_info *info,
1186 aarch64_insn *code,
1187 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1188 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1190 unsigned int num_regs = get_operand_specific_data (self);
1191 unsigned int val = info->reglist.first_regno;
1192 insert_field (self->fields[0], code, val / num_regs, 0);
1193 return true;
/* Encode an SVE CPY/DUP immediate.  */
bool
aarch64_ins_sve_asimm (const aarch64_operand *self,
		       const aarch64_opnd_info *info, aarch64_insn *code,
		       const aarch64_inst *inst,
		       aarch64_operand_error *errors)
{
  /* The signed CPY/DUP immediate uses the same sh:imm8 layout as the
     ADD/SUB immediate, so reuse that encoder.  */
  return aarch64_ins_sve_aimm (self, info, code, inst, errors);
}
/* Encode Zn[MM], where MM has a 7-bit triangular encoding.  The fields
   array specifies which field to use for Zn.  MM is encoded in the
   concatenation of imm5 and SVE_tszh, with imm5 being the less
   significant part.  */
bool
aarch64_ins_sve_index (const aarch64_operand *self,
		       const aarch64_opnd_info *info, aarch64_insn *code,
		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Element size in bytes, taken from the operand qualifier.  */
  unsigned int esize = aarch64_get_qualifier_esize (info->qualifier);

  insert_field (self->fields[0], code, info->reglane.regno, 0);
  /* Triangular encoding: (index * 2 + 1) * esize places a single set
     bit marking the element size, with the index in the bits above it.  */
  insert_fields (code, (info->reglane.index * 2 + 1) * esize, 0,
		 2, FLD_imm5, FLD_SVE_tszh);
  return true;
}
/* Encode a logical/bitmask immediate for the MOV alias of SVE DUPM.  */
bool
aarch64_ins_sve_limm_mov (const aarch64_operand *self,
			  const aarch64_opnd_info *info, aarch64_insn *code,
			  const aarch64_inst *inst,
			  aarch64_operand_error *errors)
{
  /* The MOV alias uses the standard logical-immediate encoder.  */
  return aarch64_ins_limm (self, info, code, inst, errors);
}
1233 /* Encode Zn[MM], where Zn occupies the least-significant part of the field
1234 and where MM occupies the most-significant part. The operand-dependent
1235 value specifies the number of bits in Zn. */
1236 bool
1237 aarch64_ins_sve_quad_index (const aarch64_operand *self,
1238 const aarch64_opnd_info *info, aarch64_insn *code,
1239 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1240 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1242 unsigned int reg_bits = get_operand_specific_data (self);
1243 assert (info->reglane.regno < (1U << reg_bits));
1244 unsigned int val = (info->reglane.index << reg_bits) + info->reglane.regno;
1245 insert_all_fields (self, code, val);
1246 return true;
/* Encode {Zn.<T> - Zm.<T>}.  The fields array specifies which field
   to use for Zn.  */
bool
aarch64_ins_sve_reglist (const aarch64_operand *self,
			 const aarch64_opnd_info *info, aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Only the first register is encoded; the list length is implied by
     the operand type.  */
  insert_field (self->fields[0], code, info->reglist.first_regno, 0);
  return true;
}
/* Encode a strided register list.  The first field holds the top bit
   (0 or 16) and the second field holds the lower bits.  The stride is
   16 divided by the list length.  */
bool
aarch64_ins_sve_strided_reglist (const aarch64_operand *self,
				 const aarch64_opnd_info *info,
				 aarch64_insn *code,
				 const aarch64_inst *inst ATTRIBUTE_UNUSED,
				 aarch64_operand_error *errors
				 ATTRIBUTE_UNUSED)
{
  unsigned int num_regs = get_operand_specific_data (self);
  /* Bits a valid first register may use: bit 4 plus the low bits below
     the stride (e.g. for a 4-register list, 16 | 3).  */
  unsigned int mask = 16 | (16 / num_regs - 1);
  unsigned int val = info->reglist.first_regno;

  assert ((val & mask) == val);
  /* Split into the top bit (0 or 16) and the remaining low bits.  */
  insert_field (self->fields[0], code, val >> 4, 0);
  insert_field (self->fields[1], code, val & 15, 0);
  return true;
}
/* Encode <pattern>{, MUL #<amount>}.  The fields array specifies which
   fields to use for <pattern>.  <amount> - 1 is encoded in the SVE_imm4
   field.  */
bool
aarch64_ins_sve_scale (const aarch64_operand *self,
		       const aarch64_opnd_info *info, aarch64_insn *code,
		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* <pattern> */
  insert_all_fields (self, code, info->imm.value);
  /* MUL #<amount> is biased by one so a multiplier of 1 encodes as 0.  */
  insert_field (FLD_SVE_imm4, code, info->shifter.amount - 1, 0);
  return true;
}
/* Encode an SVE shift left immediate.  */
bool
aarch64_ins_sve_shlimm (const aarch64_operand *self,
			const aarch64_opnd_info *info, aarch64_insn *code,
			const aarch64_inst *inst,
			aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  const aarch64_opnd_info *prev_operand;
  unsigned int esize;

  /* The element size comes from the qualifier of the preceding
     (register) operand, not from the immediate operand itself.  */
  assert (info->idx > 0);
  prev_operand = &inst->operands[info->idx - 1];
  esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
  /* tsz:imm3 form: a left shift of AMOUNT is encoded as
     8 * esize + AMOUNT.  */
  insert_all_fields (self, code, 8 * esize + info->imm.value);
  return true;
}
/* Encode an SVE shift right immediate.  */
bool
aarch64_ins_sve_shrimm (const aarch64_operand *self,
			const aarch64_opnd_info *info, aarch64_insn *code,
			const aarch64_inst *inst,
			aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  const aarch64_opnd_info *prev_operand;
  unsigned int esize;

  /* The operand-specific data says how many operands back the register
     whose qualifier determines the element size sits.  */
  unsigned int opnd_backshift = get_operand_specific_data (self);

  assert (info->idx >= (int)opnd_backshift);
  prev_operand = &inst->operands[info->idx - opnd_backshift];
  esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
  /* tsz:imm3 form: a right shift of AMOUNT is encoded as
     16 * esize - AMOUNT.  */
  insert_all_fields (self, code, 16 * esize - info->imm.value);
  return true;
}
1330 /* Encode a single-bit immediate that selects between #0.5 and #1.0.
1331 The fields array specifies which field to use. */
1332 bool
1333 aarch64_ins_sve_float_half_one (const aarch64_operand *self,
1334 const aarch64_opnd_info *info,
1335 aarch64_insn *code,
1336 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1337 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1339 if (info->imm.value == 0x3f000000)
1340 insert_field (self->fields[0], code, 0, 0);
1341 else
1342 insert_field (self->fields[0], code, 1, 0);
1343 return true;
1346 /* Encode a single-bit immediate that selects between #0.5 and #2.0.
1347 The fields array specifies which field to use. */
1348 bool
1349 aarch64_ins_sve_float_half_two (const aarch64_operand *self,
1350 const aarch64_opnd_info *info,
1351 aarch64_insn *code,
1352 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1353 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1355 if (info->imm.value == 0x3f000000)
1356 insert_field (self->fields[0], code, 0, 0);
1357 else
1358 insert_field (self->fields[0], code, 1, 0);
1359 return true;
1362 /* Encode a single-bit immediate that selects between #0.0 and #1.0.
1363 The fields array specifies which field to use. */
1364 bool
1365 aarch64_ins_sve_float_zero_one (const aarch64_operand *self,
1366 const aarch64_opnd_info *info,
1367 aarch64_insn *code,
1368 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1369 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1371 if (info->imm.value == 0)
1372 insert_field (self->fields[0], code, 0, 0);
1373 else
1374 insert_field (self->fields[0], code, 1, 0);
1375 return true;
/* Encode in SME instruction such as MOVA ZA tile vector register number,
   vector indicator, vector selector and immediate.  */
bool
aarch64_ins_sme_za_hv_tiles (const aarch64_operand *self,
			     const aarch64_opnd_info *info,
			     aarch64_insn *code,
			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
			     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int fld_size;
  int fld_q;
  int fld_v = info->indexed_za.v;	/* Vector (H/V) indicator.  */
  /* The index base register is encoded as an offset from W12.  */
  int fld_rv = info->indexed_za.index.regno - 12;
  int fld_zan_imm = info->indexed_za.index.imm;
  int regno = info->indexed_za.regno;

  /* The tile number and the slice index share the zan_imm field: wider
     elements need fewer index bits, so the tile number shifts further
     down as the element size grows.  */
  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      /* Byte elements: the whole field is the index; tile is always ZA0.  */
      fld_size = 0;
      fld_q = 0;
      break;
    case AARCH64_OPND_QLF_S_H:
      fld_size = 1;
      fld_q = 0;
      fld_zan_imm |= regno << 3;
      break;
    case AARCH64_OPND_QLF_S_S:
      fld_size = 2;
      fld_q = 0;
      fld_zan_imm |= regno << 2;
      break;
    case AARCH64_OPND_QLF_S_D:
      fld_size = 3;
      fld_q = 0;
      fld_zan_imm |= regno << 1;
      break;
    case AARCH64_OPND_QLF_S_Q:
      /* Quadword elements: distinguished from .D by the Q bit; the
	 field is the tile number alone (no index bits remain).  */
      fld_size = 3;
      fld_q = 1;
      fld_zan_imm = regno;
      break;
    default:
      return false;
    }

  insert_field (self->fields[0], code, fld_size, 0);
  insert_field (self->fields[1], code, fld_q, 0);
  insert_field (self->fields[2], code, fld_v, 0);
  insert_field (self->fields[3], code, fld_rv, 0);
  insert_field (self->fields[4], code, fld_zan_imm, 0);

  return true;
}
1433 bool
1434 aarch64_ins_sme_za_hv_tiles_range (const aarch64_operand *self,
1435 const aarch64_opnd_info *info,
1436 aarch64_insn *code,
1437 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1438 aarch64_operand_error *errors
1439 ATTRIBUTE_UNUSED)
1441 int ebytes = aarch64_get_qualifier_esize (info->qualifier);
1442 int range_size = get_opcode_dependent_value (inst->opcode);
1443 int fld_v = info->indexed_za.v;
1444 int fld_rv = info->indexed_za.index.regno - 12;
1445 int imm = info->indexed_za.index.imm;
1446 int max_value = 16 / range_size / ebytes;
1448 if (max_value == 0)
1449 max_value = 1;
1451 assert (imm % range_size == 0 && (imm / range_size) < max_value);
1452 int fld_zan_imm = (info->indexed_za.regno * max_value) | (imm / range_size);
1453 assert (fld_zan_imm < (range_size == 4 && ebytes < 8 ? 4 : 8));
1455 insert_field (self->fields[0], code, fld_v, 0);
1456 insert_field (self->fields[1], code, fld_rv, 0);
1457 insert_field (self->fields[2], code, fld_zan_imm, 0);
1459 return true;
/* Encode in SME instruction ZERO list of up to eight 64-bit element tile names
   separated by commas, encoded in the "imm8" field.

   For programmer convenience an assembler must also accept the names of
   32-bit, 16-bit and 8-bit element tiles which are converted into the
   corresponding set of 64-bit element tiles.  */
bool
aarch64_ins_sme_za_list (const aarch64_operand *self,
			 const aarch64_opnd_info *info,
			 aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* The tile list has already been folded into a bit-mask immediate by
     the parser; insert it verbatim.  */
  int fld_mask = info->imm.value;
  insert_field (self->fields[0], code, fld_mask, 0);
  return true;
}
/* Encode a ZA array vector access: index base register plus immediate
   offset, e.g. ZA[<Wv>, <imm>].  */
bool
aarch64_ins_sme_za_array (const aarch64_operand *self,
			  const aarch64_opnd_info *info,
			  aarch64_insn *code,
			  const aarch64_inst *inst ATTRIBUTE_UNUSED,
			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Only the low two bits of the index register number are encoded;
     the register base is implied by the operand type.  */
  int regno = info->indexed_za.index.regno & 3;
  int imm = info->indexed_za.index.imm;
  int countm1 = info->indexed_za.index.countm1;

  /* For multi-vector forms the immediate must be a multiple of the
     vector count; the field stores the scaled-down value.  */
  assert (imm % (countm1 + 1) == 0);
  insert_field (self->fields[0], code, regno, 0);
  insert_field (self->fields[1], code, imm / (countm1 + 1), 0);
  return true;
}
1497 bool
1498 aarch64_ins_sme_addr_ri_u4xvl (const aarch64_operand *self,
1499 const aarch64_opnd_info *info,
1500 aarch64_insn *code,
1501 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1502 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1504 int regno = info->addr.base_regno;
1505 int imm = info->addr.offset.imm;
1506 insert_field (self->fields[0], code, regno, 0);
1507 insert_field (self->fields[1], code, imm, 0);
1508 return true;
1511 /* Encode in SMSTART and SMSTOP {SM | ZA } mode. */
1512 bool
1513 aarch64_ins_sme_sm_za (const aarch64_operand *self,
1514 const aarch64_opnd_info *info,
1515 aarch64_insn *code,
1516 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1517 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1519 aarch64_insn fld_crm;
1520 /* Set CRm[3:1] bits. */
1521 if (info->reg.regno == 's')
1522 fld_crm = 0x02 ; /* SVCRSM. */
1523 else if (info->reg.regno == 'z')
1524 fld_crm = 0x04; /* SVCRZA. */
1525 else
1526 return false;
1528 insert_field (self->fields[0], code, fld_crm, 0);
1529 return true;
1532 /* Encode source scalable predicate register (Pn), name of the index base
1533 register W12-W15 (Rm), and optional element index, defaulting to 0, in the
1534 range 0 to one less than the number of vector elements in a 128-bit vector
1535 register, encoded in "i1:tszh:tszl".
1537 bool
1538 aarch64_ins_sme_pred_reg_with_index (const aarch64_operand *self,
1539 const aarch64_opnd_info *info,
1540 aarch64_insn *code,
1541 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1542 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1544 int fld_pn = info->indexed_za.regno;
1545 int fld_rm = info->indexed_za.index.regno - 12;
1546 int imm = info->indexed_za.index.imm;
1547 int fld_i1, fld_tszh, fld_tshl;
1549 insert_field (self->fields[0], code, fld_rm, 0);
1550 insert_field (self->fields[1], code, fld_pn, 0);
1552 /* Optional element index, defaulting to 0, in the range 0 to one less than
1553 the number of vector elements in a 128-bit vector register, encoded in
1554 "i1:tszh:tszl".
1556 i1 tszh tszl <T>
1557 0 0 000 RESERVED
1558 x x xx1 B
1559 x x x10 H
1560 x x 100 S
1561 x 1 000 D
1563 switch (info->qualifier)
1565 case AARCH64_OPND_QLF_S_B:
1566 /* <imm> is 4 bit value. */
1567 fld_i1 = (imm >> 3) & 0x1;
1568 fld_tszh = (imm >> 2) & 0x1;
1569 fld_tshl = ((imm << 1) | 0x1) & 0x7;
1570 break;
1571 case AARCH64_OPND_QLF_S_H:
1572 /* <imm> is 3 bit value. */
1573 fld_i1 = (imm >> 2) & 0x1;
1574 fld_tszh = (imm >> 1) & 0x1;
1575 fld_tshl = ((imm << 2) | 0x2) & 0x7;
1576 break;
1577 case AARCH64_OPND_QLF_S_S:
1578 /* <imm> is 2 bit value. */
1579 fld_i1 = (imm >> 1) & 0x1;
1580 fld_tszh = imm & 0x1;
1581 fld_tshl = 0x4;
1582 break;
1583 case AARCH64_OPND_QLF_S_D:
1584 /* <imm> is 1 bit value. */
1585 fld_i1 = imm & 0x1;
1586 fld_tszh = 0x1;
1587 fld_tshl = 0x0;
1588 break;
1589 default:
1590 return false;
1593 insert_field (self->fields[2], code, fld_i1, 0);
1594 insert_field (self->fields[3], code, fld_tszh, 0);
1595 insert_field (self->fields[4], code, fld_tshl, 0);
1596 return true;
/* Insert X0-X30.  Register 31 is unallocated.  */
bool
aarch64_ins_x0_to_x30 (const aarch64_operand *self,
		       const aarch64_opnd_info *info,
		       aarch64_insn *code,
		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* XZR/SP (31) must have been rejected during operand checking.  */
  assert (info->reg.regno <= 30);
  insert_field (self->fields[0], code, info->reg.regno, 0);
  return true;
}
/* Insert an indexed register, with the first field being the register
   number and the remaining fields being the index.  */
bool
aarch64_ins_simple_index (const aarch64_operand *self,
			  const aarch64_opnd_info *info,
			  aarch64_insn *code,
			  const aarch64_inst *inst ATTRIBUTE_UNUSED,
			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Some operands number their registers from a nonzero base; the
     operand-specific data holds that bias.  */
  int bias = get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->reglane.regno - bias, 0);
  insert_all_fields_after (self, 1, code, info->reglane.index);
  return true;
}
/* Insert a plain shift-right immediate, when there is only a single
   element size.  */
bool
aarch64_ins_plain_shrimm (const aarch64_operand *self,
			  const aarch64_opnd_info *info, aarch64_insn *code,
			  const aarch64_inst *inst ATTRIBUTE_UNUSED,
			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* A shift of BASE (= 2^field_width) encodes as 0, BASE - 1 as 1, etc.  */
  unsigned int base = 1 << get_operand_field_width (self, 0);
  insert_field (self->fields[0], code, base - info->imm.value, 0);
  return true;
}
/* Miscellaneous encoding functions.  */

/* Encode size[0], i.e. bit 22, for
   e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */

static void
encode_asimd_fcvt (aarch64_inst *inst)
{
  aarch64_insn value;
  aarch64_field field = {0, 0};
  enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_NIL;

  /* Pick the operand with the wide (<Ta>) arrangement: the source for
     FCVTN, the destination for FCVTL.  */
  switch (inst->opcode->op)
    {
    case OP_FCVTN:
    case OP_FCVTN2:
      /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */
      qualifier = inst->operands[1].qualifier;
      break;
    case OP_FCVTL:
    case OP_FCVTL2:
      /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>.  */
      qualifier = inst->operands[0].qualifier;
      break;
    default:
      return;
    }
  assert (qualifier == AARCH64_OPND_QLF_V_4S
	  || qualifier == AARCH64_OPND_QLF_V_2D);
  /* size[0] is 0 for .4S (single) and 1 for .2D (double).  */
  value = (qualifier == AARCH64_OPND_QLF_V_4S) ? 0 : 1;
  gen_sub_field (FLD_size, 0, 1, &field);
  insert_field_2 (&field, &inst->value, value, 0);
}
1674 /* Encode size[0], i.e. bit 22, for
1675 e.g. FCVTXN <Vb><d>, <Va><n>. */
1677 static void
1678 encode_asisd_fcvtxn (aarch64_inst *inst)
1680 aarch64_insn val = 1;
1681 aarch64_field field = {0, 0};
1682 assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_S_S);
1683 gen_sub_field (FLD_size, 0, 1, &field);
1684 insert_field_2 (&field, &inst->value, val, 0);
1687 /* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1688 static void
1689 encode_fcvt (aarch64_inst *inst)
1691 aarch64_insn val;
1692 const aarch64_field field = {15, 2};
1694 /* opc dstsize */
1695 switch (inst->operands[0].qualifier)
1697 case AARCH64_OPND_QLF_S_S: val = 0; break;
1698 case AARCH64_OPND_QLF_S_D: val = 1; break;
1699 case AARCH64_OPND_QLF_S_H: val = 3; break;
1700 default: abort ();
1702 insert_field_2 (&field, &inst->value, val, 0);
1704 return;
1707 /* Return the index in qualifiers_list that INST is using. Should only
1708 be called once the qualifiers are known to be valid. */
1710 static int
1711 aarch64_get_variant (struct aarch64_inst *inst)
1713 int i, nops, variant;
1715 nops = aarch64_num_of_operands (inst->opcode);
1716 for (variant = 0; variant < AARCH64_MAX_QLF_SEQ_NUM; ++variant)
1718 for (i = 0; i < nops; ++i)
1719 if (inst->opcode->qualifiers_list[variant][i]
1720 != inst->operands[i].qualifier)
1721 break;
1722 if (i == nops)
1723 return variant;
1725 abort ();
/* Do miscellaneous encodings that are not common enough to be driven by
   flags.  */

static void
do_misc_encoding (aarch64_inst *inst)
{
  unsigned int value;

  switch (inst->opcode->op)
    {
    case OP_FCVT:
      encode_fcvt (inst);
      break;
    case OP_FCVTN:
    case OP_FCVTN2:
    case OP_FCVTL:
    case OP_FCVTL2:
      encode_asimd_fcvt (inst);
      break;
    case OP_FCVTXN_S:
      encode_asisd_fcvtxn (inst);
      break;
    case OP_MOV_P_P:
    case OP_MOV_PN_PN:
    case OP_MOVS_P_P:
      /* These MOV aliases of predicate instructions repeat the source
	 register in two fields.  Copy Pn to Pm and Pg.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      insert_field (FLD_SVE_Pg4_10, &inst->value, value, 0);
      break;
    case OP_MOV_Z_P_Z:
      /* Copy Zd to Zm.  */
      value = extract_field (FLD_SVE_Zd, inst->value, 0);
      insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
      break;
    case OP_MOV_Z_V:
      /* Fill in the zero immediate (index 0 in the triangular imm5:tszh
	 encoding for the chosen element size).  */
      insert_fields (&inst->value, 1 << aarch64_get_variant (inst), 0,
		     2, FLD_imm5, FLD_SVE_tszh);
      break;
    case OP_MOV_Z_Z:
      /* Copy Zn to Zm.  */
      value = extract_field (FLD_SVE_Zn, inst->value, 0);
      insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
      break;
    case OP_MOV_Z_Zi:
      break;
    case OP_MOVM_P_P_P:
      /* Copy Pd to Pm.  */
      value = extract_field (FLD_SVE_Pd, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    case OP_MOVZS_P_P_P:
    case OP_MOVZ_P_P_P:
      /* Copy Pn to Pm.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    case OP_NOTS_P_P_P_Z:
    case OP_NOT_P_P_P_Z:
      /* Copy Pg to Pm.  */
      value = extract_field (FLD_SVE_Pg4_10, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    default: break;
    }
}
/* Encode the 'size' and 'Q' field for e.g. SHADD.  */
static void
encode_sizeq (aarch64_inst *inst)
{
  aarch64_insn sizeq;
  enum aarch64_field_kind kind;
  int idx;

  /* Get the index of the operand whose information we are going to use
     to encode the size and Q fields.
     This is deduced from the possible valid qualifier lists.  */
  idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
  DEBUG_TRACE ("idx: %d; qualifier: %s", idx,
	       aarch64_get_qualifier_name (inst->operands[idx].qualifier));
  /* The standard qualifier value packs Q in bit 0 and size in bits 2:1.  */
  sizeq = aarch64_get_qualifier_standard_value (inst->operands[idx].qualifier);
  /* Q */
  insert_field (FLD_Q, &inst->value, sizeq & 0x1, inst->opcode->mask);
  /* size: the AdvSIMD load/store classes keep it in a different field.  */
  if (inst->opcode->iclass == asisdlse
      || inst->opcode->iclass == asisdlsep
      || inst->opcode->iclass == asisdlso
      || inst->opcode->iclass == asisdlsop)
    kind = FLD_vldst_size;
  else
    kind = FLD_size;
  insert_field (kind, &inst->value, (sizeq >> 1) & 0x3, inst->opcode->mask);
}
/* Opcodes that have fields shared by multiple operands are usually flagged
   with flags.  In this function, we detect such flags and use the
   information in one of the related operands to do the encoding.  The 'one'
   operand is not any operand but one of the operands that has the enough
   information for such an encoding.  */

static void
do_special_encoding (struct aarch64_inst *inst)
{
  int idx;
  aarch64_insn value = 0;

  DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst->value);

  /* Condition for truly conditional executed instructions, e.g. b.cond.  */
  if (inst->opcode->flags & F_COND)
    {
      insert_field (FLD_cond2, &inst->value, inst->cond->value, 0);
    }
  if (inst->opcode->flags & F_SF)
    {
      /* sf is 1 for a 64-bit (X/SP) operand, 0 for 32-bit.  */
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
	       || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
	? 1 : 0;
      insert_field (FLD_sf, &inst->value, value, 0);
      /* Some opcodes keep an N bit in step with sf.  */
      if (inst->opcode->flags & F_N)
	insert_field (FLD_N, &inst->value, value, inst->opcode->mask);
    }
  if (inst->opcode->flags & F_LSE_SZ)
    {
      /* Same 32/64-bit selection as F_SF, but in the LSE size field.  */
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
	       || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
	? 1 : 0;
      insert_field (FLD_lse_sz, &inst->value, value, 0);
    }
  if (inst->opcode->flags & F_SIZEQ)
    encode_sizeq (inst);
  if (inst->opcode->flags & F_FPTYPE)
    {
      idx = select_operand_for_fptype_field_coding (inst->opcode);
      switch (inst->operands[idx].qualifier)
	{
	case AARCH64_OPND_QLF_S_S: value = 0; break;
	case AARCH64_OPND_QLF_S_D: value = 1; break;
	case AARCH64_OPND_QLF_S_H: value = 3; break;
	default: return;
	}
      insert_field (FLD_type, &inst->value, value, 0);
    }
  if (inst->opcode->flags & F_SSIZE)
    {
      /* Scalar element size, taken from the selected operand.  */
      enum aarch64_opnd_qualifier qualifier;
      idx = select_operand_for_scalar_size_field_coding (inst->opcode);
      qualifier = inst->operands[idx].qualifier;
      assert (qualifier >= AARCH64_OPND_QLF_S_B
	      && qualifier <= AARCH64_OPND_QLF_S_Q);
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_size, &inst->value, value, inst->opcode->mask);
    }
  if (inst->opcode->flags & F_T)
    {
      int num;	/* num of consecutive '0's on the right side of imm5<3:0>.  */
      aarch64_field field = {0, 0};
      enum aarch64_opnd_qualifier qualifier;

      idx = 0;
      qualifier = inst->operands[idx].qualifier;
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
	      == AARCH64_OPND_CLASS_SIMD_REG
	      && qualifier >= AARCH64_OPND_QLF_V_8B
	      && qualifier <= AARCH64_OPND_QLF_V_2D);
      /* imm5<3:0>	q	<t>
	 0000		x	reserved
	 xxx1		0	8b
	 xxx1		1	16b
	 xx10		0	4h
	 xx10		1	8h
	 x100		0	2s
	 x100		1	4s
	 1000		0	reserved
	 1000		1	2d  */
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_Q, &inst->value, value & 0x1, inst->opcode->mask);
      num = (int) value >> 1;
      assert (num >= 0 && num <= 3);
      /* Place a single one-bit at position NUM of imm5<3:0>.  */
      gen_sub_field (FLD_imm5, 0, num + 1, &field);
      insert_field_2 (&field, &inst->value, 1 << num, inst->opcode->mask);
    }
  if (inst->opcode->flags & F_GPRSIZE_IN_Q)
    {
      /* Use Rt to encode in the case of e.g.
	 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}].  */
      enum aarch64_opnd_qualifier qualifier;
      idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
      if (idx == -1)
	/* Otherwise use the result operand, which has to be a integer
	   register.  */
	idx = 0;
      assert (idx == 0 || idx == 1);
      assert (aarch64_get_operand_class (inst->opcode->operands[idx])
	      == AARCH64_OPND_CLASS_INT_REG);
      qualifier = inst->operands[idx].qualifier;
      insert_field (FLD_Q, &inst->value,
		    aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  if (inst->opcode->flags & F_LDS_SIZE)
    {
      /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
      enum aarch64_opnd_qualifier qualifier;
      aarch64_field field = {0, 0};
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
	      == AARCH64_OPND_CLASS_INT_REG);
      gen_sub_field (FLD_opc, 0, 1, &field);
      qualifier = inst->operands[0].qualifier;
      /* opc<0> is the inverse of the 32/64-bit selection.  */
      insert_field_2 (&field, &inst->value,
		      1 - aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  /* Miscellaneous encoding as the last step.  */
  if (inst->opcode->flags & F_MISC)
    do_misc_encoding (inst);

  DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst->value);
}
1950 /* Some instructions (including all SVE ones) use the instruction class
1951 to describe how a qualifiers_list index is represented in the instruction
1952 encoding. If INST is such an instruction, encode the chosen qualifier
1953 variant. */
1955 static void
1956 aarch64_encode_variant_using_iclass (struct aarch64_inst *inst)
1958 int variant = 0;
1959 switch (inst->opcode->iclass)
1961 case sme_mov:
1962 case sme_psel:
1963 /* The variant is encoded as part of the immediate. */
1964 break;
1966 case sme_size_12_bhs:
1967 insert_field (FLD_SME_size_12, &inst->value,
1968 aarch64_get_variant (inst), 0);
1969 break;
1971 case sme_size_22:
1972 insert_field (FLD_SME_size_22, &inst->value,
1973 aarch64_get_variant (inst), 0);
1974 break;
1976 case sme_size_22_hsd:
1977 insert_field (FLD_SME_size_22, &inst->value,
1978 aarch64_get_variant (inst) + 1, 0);
1979 break;
1981 case sme_size_12_hs:
1982 insert_field (FLD_SME_size_12, &inst->value,
1983 aarch64_get_variant (inst) + 1, 0);
1984 break;
1986 case sme_sz_23:
1987 insert_field (FLD_SME_sz_23, &inst->value,
1988 aarch64_get_variant (inst), 0);
1989 break;
1991 case sve_cpy:
1992 insert_fields (&inst->value, aarch64_get_variant (inst),
1993 0, 2, FLD_SVE_M_14, FLD_size);
1994 break;
1996 case sme_shift:
1997 case sve_index:
1998 case sve_shift_pred:
1999 case sve_shift_unpred:
2000 case sve_shift_tsz_hsd:
2001 case sve_shift_tsz_bhsd:
2002 /* For indices and shift amounts, the variant is encoded as
2003 part of the immediate. */
2004 break;
2006 case sve_limm:
2007 case sme2_mov:
2008 /* For sve_limm, the .B, .H, and .S forms are just a convenience
2009 and depend on the immediate. They don't have a separate
2010 encoding. */
2011 break;
2013 case sme_misc:
2014 case sve_misc:
2015 /* These instructions have only a single variant. */
2016 break;
2018 case sve_movprfx:
2019 insert_fields (&inst->value, aarch64_get_variant (inst),
2020 0, 2, FLD_SVE_M_16, FLD_size);
2021 break;
2023 case sve_pred_zm:
2024 insert_field (FLD_SVE_M_4, &inst->value, aarch64_get_variant (inst), 0);
2025 break;
2027 case sve_size_bhs:
2028 case sve_size_bhsd:
2029 insert_field (FLD_size, &inst->value, aarch64_get_variant (inst), 0);
2030 break;
2032 case sve_size_hsd:
2033 /* MOD 3 For `OP_SVE_Vv_HSD`. */
2034 insert_field (FLD_size, &inst->value, aarch64_get_variant (inst) % 3 + 1, 0);
2035 break;
2037 case sme_fp_sd:
2038 case sme_int_sd:
2039 case sve_size_bh:
2040 case sve_size_sd:
2041 insert_field (FLD_SVE_sz, &inst->value, aarch64_get_variant (inst), 0);
2042 break;
2044 case sve_size_sd2:
2045 insert_field (FLD_SVE_sz2, &inst->value, aarch64_get_variant (inst), 0);
2046 break;
2048 case sve_size_hsd2:
2049 insert_field (FLD_SVE_size, &inst->value,
2050 aarch64_get_variant (inst) + 1, 0);
2051 break;
2053 case sve_size_tsz_bhs:
2054 insert_fields (&inst->value,
2055 (1 << aarch64_get_variant (inst)),
2056 0, 2, FLD_SVE_tszl_19, FLD_SVE_sz);
2057 break;
2059 case sve_size_13:
2060 variant = aarch64_get_variant (inst) + 1;
2061 if (variant == 2)
2062 variant = 3;
2063 insert_field (FLD_size, &inst->value, variant, 0);
2064 break;
2066 default:
2067 break;
2071 /* Converters converting an alias opcode instruction to its real form. */
2073 /* ROR <Wd>, <Ws>, #<shift>
2074 is equivalent to:
2075 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
2076 static void
2077 convert_ror_to_extr (aarch64_inst *inst)
2079 copy_operand_info (inst, 3, 2);
2080 copy_operand_info (inst, 2, 1);
2083 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
2084 is equivalent to:
2085 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
2086 static void
2087 convert_xtl_to_shll (aarch64_inst *inst)
2089 inst->operands[2].qualifier = inst->operands[1].qualifier;
2090 inst->operands[2].imm.value = 0;
2093 /* Convert
2094 LSR <Xd>, <Xn>, #<shift>
2096 UBFM <Xd>, <Xn>, #<shift>, #63. */
2097 static void
2098 convert_sr_to_bfm (aarch64_inst *inst)
2100 inst->operands[3].imm.value =
2101 inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2104 /* Convert MOV to ORR. */
2105 static void
2106 convert_mov_to_orr (aarch64_inst *inst)
2108 /* MOV <Vd>.<T>, <Vn>.<T>
2109 is equivalent to:
2110 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
2111 copy_operand_info (inst, 2, 1);
2114 /* When <imms> >= <immr>, the instruction written:
2115 SBFX <Xd>, <Xn>, #<lsb>, #<width>
2116 is equivalent to:
2117 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
2119 static void
2120 convert_bfx_to_bfm (aarch64_inst *inst)
2122 int64_t lsb, width;
2124 /* Convert the operand. */
2125 lsb = inst->operands[2].imm.value;
2126 width = inst->operands[3].imm.value;
2127 inst->operands[2].imm.value = lsb;
2128 inst->operands[3].imm.value = lsb + width - 1;
2131 /* When <imms> < <immr>, the instruction written:
2132 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
2133 is equivalent to:
2134 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
2136 static void
2137 convert_bfi_to_bfm (aarch64_inst *inst)
2139 int64_t lsb, width;
2141 /* Convert the operand. */
2142 lsb = inst->operands[2].imm.value;
2143 width = inst->operands[3].imm.value;
2144 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
2146 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
2147 inst->operands[3].imm.value = width - 1;
2149 else
2151 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
2152 inst->operands[3].imm.value = width - 1;
2156 /* The instruction written:
2157 BFC <Xd>, #<lsb>, #<width>
2158 is equivalent to:
2159 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
2161 static void
2162 convert_bfc_to_bfm (aarch64_inst *inst)
2164 int64_t lsb, width;
2166 /* Insert XZR. */
2167 copy_operand_info (inst, 3, 2);
2168 copy_operand_info (inst, 2, 1);
2169 copy_operand_info (inst, 1, 0);
2170 inst->operands[1].reg.regno = 0x1f;
2172 /* Convert the immediate operand. */
2173 lsb = inst->operands[2].imm.value;
2174 width = inst->operands[3].imm.value;
2175 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
2177 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
2178 inst->operands[3].imm.value = width - 1;
2180 else
2182 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
2183 inst->operands[3].imm.value = width - 1;
2187 /* The instruction written:
2188 LSL <Xd>, <Xn>, #<shift>
2189 is equivalent to:
2190 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
2192 static void
2193 convert_lsl_to_ubfm (aarch64_inst *inst)
2195 int64_t shift = inst->operands[2].imm.value;
2197 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
2199 inst->operands[2].imm.value = (32 - shift) & 0x1f;
2200 inst->operands[3].imm.value = 31 - shift;
2202 else
2204 inst->operands[2].imm.value = (64 - shift) & 0x3f;
2205 inst->operands[3].imm.value = 63 - shift;
2209 /* CINC <Wd>, <Wn>, <cond>
2210 is equivalent to:
2211 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>). */
2213 static void
2214 convert_to_csel (aarch64_inst *inst)
2216 copy_operand_info (inst, 3, 2);
2217 copy_operand_info (inst, 2, 1);
2218 inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
2221 /* CSET <Wd>, <cond>
2222 is equivalent to:
2223 CSINC <Wd>, WZR, WZR, invert(<cond>). */
2225 static void
2226 convert_cset_to_csinc (aarch64_inst *inst)
2228 copy_operand_info (inst, 3, 1);
2229 copy_operand_info (inst, 2, 0);
2230 copy_operand_info (inst, 1, 0);
2231 inst->operands[1].reg.regno = 0x1f;
2232 inst->operands[2].reg.regno = 0x1f;
2233 inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
2236 /* MOV <Wd>, #<imm>
2237 is equivalent to:
2238 MOVZ <Wd>, #<imm16>, LSL #<shift>. */
2240 static void
2241 convert_mov_to_movewide (aarch64_inst *inst)
2243 int is32;
2244 uint32_t shift_amount;
2245 uint64_t value = ~(uint64_t)0;
2247 switch (inst->opcode->op)
2249 case OP_MOV_IMM_WIDE:
2250 value = inst->operands[1].imm.value;
2251 break;
2252 case OP_MOV_IMM_WIDEN:
2253 value = ~inst->operands[1].imm.value;
2254 break;
2255 default:
2256 return;
2258 inst->operands[1].type = AARCH64_OPND_HALF;
2259 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2260 if (! aarch64_wide_constant_p (value, is32, &shift_amount))
2261 /* The constraint check should have guaranteed this wouldn't happen. */
2262 return;
2263 value >>= shift_amount;
2264 value &= 0xffff;
2265 inst->operands[1].imm.value = value;
2266 inst->operands[1].shifter.kind = AARCH64_MOD_LSL;
2267 inst->operands[1].shifter.amount = shift_amount;
2270 /* MOV <Wd>, #<imm>
2271 is equivalent to:
2272 ORR <Wd>, WZR, #<imm>. */
2274 static void
2275 convert_mov_to_movebitmask (aarch64_inst *inst)
2277 copy_operand_info (inst, 2, 1);
2278 inst->operands[1].reg.regno = 0x1f;
2279 inst->operands[1].skip = 0;
2282 /* Some alias opcodes are assembled by being converted to their real-form. */
2284 static void
2285 convert_to_real (aarch64_inst *inst, const aarch64_opcode *real)
2287 const aarch64_opcode *alias = inst->opcode;
2289 if ((alias->flags & F_CONV) == 0)
2290 goto convert_to_real_return;
2292 switch (alias->op)
2294 case OP_ASR_IMM:
2295 case OP_LSR_IMM:
2296 convert_sr_to_bfm (inst);
2297 break;
2298 case OP_LSL_IMM:
2299 convert_lsl_to_ubfm (inst);
2300 break;
2301 case OP_CINC:
2302 case OP_CINV:
2303 case OP_CNEG:
2304 convert_to_csel (inst);
2305 break;
2306 case OP_CSET:
2307 case OP_CSETM:
2308 convert_cset_to_csinc (inst);
2309 break;
2310 case OP_UBFX:
2311 case OP_BFXIL:
2312 case OP_SBFX:
2313 convert_bfx_to_bfm (inst);
2314 break;
2315 case OP_SBFIZ:
2316 case OP_BFI:
2317 case OP_UBFIZ:
2318 convert_bfi_to_bfm (inst);
2319 break;
2320 case OP_BFC:
2321 convert_bfc_to_bfm (inst);
2322 break;
2323 case OP_MOV_V:
2324 convert_mov_to_orr (inst);
2325 break;
2326 case OP_MOV_IMM_WIDE:
2327 case OP_MOV_IMM_WIDEN:
2328 convert_mov_to_movewide (inst);
2329 break;
2330 case OP_MOV_IMM_LOG:
2331 convert_mov_to_movebitmask (inst);
2332 break;
2333 case OP_ROR_IMM:
2334 convert_ror_to_extr (inst);
2335 break;
2336 case OP_SXTL:
2337 case OP_SXTL2:
2338 case OP_UXTL:
2339 case OP_UXTL2:
2340 convert_xtl_to_shll (inst);
2341 break;
2342 default:
2343 break;
2346 convert_to_real_return:
2347 aarch64_replace_opcode (inst, real);
2350 /* Encode *INST_ORI of the opcode code OPCODE.
2351 Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
2352 matched operand qualifier sequence in *QLF_SEQ. */
2354 bool
2355 aarch64_opcode_encode (const aarch64_opcode *opcode,
2356 const aarch64_inst *inst_ori, aarch64_insn *code,
2357 aarch64_opnd_qualifier_t *qlf_seq,
2358 aarch64_operand_error *mismatch_detail,
2359 aarch64_instr_sequence* insn_sequence)
2361 int i;
2362 const aarch64_opcode *aliased;
2363 aarch64_inst copy, *inst;
2365 DEBUG_TRACE ("enter with %s", opcode->name);
2367 /* Create a copy of *INST_ORI, so that we can do any change we want. */
2368 copy = *inst_ori;
2369 inst = &copy;
2371 assert (inst->opcode == NULL || inst->opcode == opcode);
2372 if (inst->opcode == NULL)
2373 inst->opcode = opcode;
2375 /* Constrain the operands.
2376 After passing this, the encoding is guaranteed to succeed. */
2377 if (aarch64_match_operands_constraint (inst, mismatch_detail) == 0)
2379 DEBUG_TRACE ("FAIL since operand constraint not met");
2380 return 0;
2383 /* Get the base value.
2384 Note: this has to be before the aliasing handling below in order to
2385 get the base value from the alias opcode before we move on to the
2386 aliased opcode for encoding. */
2387 inst->value = opcode->opcode;
2389 /* No need to do anything else if the opcode does not have any operand. */
2390 if (aarch64_num_of_operands (opcode) == 0)
2391 goto encoding_exit;
2393 /* Assign operand indexes and check types. Also put the matched
2394 operand qualifiers in *QLF_SEQ to return. */
2395 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2397 assert (opcode->operands[i] == inst->operands[i].type);
2398 inst->operands[i].idx = i;
2399 if (qlf_seq != NULL)
2400 *qlf_seq = inst->operands[i].qualifier;
2403 aliased = aarch64_find_real_opcode (opcode);
2404 /* If the opcode is an alias and it does not ask for direct encoding by
2405 itself, the instruction will be transformed to the form of real opcode
2406 and the encoding will be carried out using the rules for the aliased
2407 opcode. */
2408 if (aliased != NULL && (opcode->flags & F_CONV))
2410 DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
2411 aliased->name, opcode->name);
2412 /* Convert the operands to the form of the real opcode. */
2413 convert_to_real (inst, aliased);
2414 opcode = aliased;
2417 aarch64_opnd_info *info = inst->operands;
2419 /* Call the inserter of each operand. */
2420 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++info)
2422 const aarch64_operand *opnd;
2423 enum aarch64_opnd type = opcode->operands[i];
2424 if (type == AARCH64_OPND_NIL)
2425 break;
2426 if (info->skip)
2428 DEBUG_TRACE ("skip the incomplete operand %d", i);
2429 continue;
2431 opnd = &aarch64_operands[type];
2432 if (operand_has_inserter (opnd)
2433 && !aarch64_insert_operand (opnd, info, &inst->value, inst,
2434 mismatch_detail))
2435 return false;
2438 /* Call opcode encoders indicated by flags. */
2439 if (opcode_has_special_coder (opcode))
2440 do_special_encoding (inst);
2442 /* Possibly use the instruction class to encode the chosen qualifier
2443 variant. */
2444 aarch64_encode_variant_using_iclass (inst);
2446 /* Run a verifier if the instruction has one set. */
2447 if (opcode->verifier)
2449 enum err_type result = opcode->verifier (inst, *code, 0, true,
2450 mismatch_detail, insn_sequence);
2451 switch (result)
2453 case ERR_UND:
2454 case ERR_UNP:
2455 case ERR_NYI:
2456 return false;
2457 default:
2458 break;
2462 /* Always run constrain verifiers, this is needed because constrains need to
2463 maintain a global state. Regardless if the instruction has the flag set
2464 or not. */
2465 enum err_type result = verify_constraints (inst, *code, 0, true,
2466 mismatch_detail, insn_sequence);
2467 switch (result)
2469 case ERR_UND:
2470 case ERR_UNP:
2471 case ERR_NYI:
2472 return false;
2473 default:
2474 break;
2478 encoding_exit:
2479 DEBUG_TRACE ("exit with %s", opcode->name);
2481 *code = inst->value;
2483 return true;