1 /* aarch64-dis.c -- AArch64 disassembler.
2 Copyright (C) 2009-2024 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
21 #include "sysdep.h"
22 #include <stdint.h>
23 #include "disassemble.h"
24 #include "libiberty.h"
25 #include "opintl.h"
26 #include "aarch64-dis.h"
27 #include "elf-bfd.h"
28 #include "safe-ctype.h"
29 #include "obstack.h"
31 #define obstack_chunk_alloc xmalloc
32 #define obstack_chunk_free free
34 #define INSNLEN 4
36 /* This character is used to encode style information within the output
37 buffers. See get_style_text and print_operands for more details. */
38 #define STYLE_MARKER_CHAR '\002'
40 /* Cached mapping symbol state. */
41 enum map_type
43 MAP_INSN,
44 MAP_DATA
47 static aarch64_feature_set arch_variant; /* See select_aarch64_variant. */
48 static enum map_type last_type;
49 static int last_mapping_sym = -1;
50 static bfd_vma last_stop_offset = 0;
51 static bfd_vma last_mapping_addr = 0;
53 /* Other options */
54 static int no_aliases = 0;	/* If set, disassemble as the most general instruction.  */
55 static int no_notes = 1;	/* If set, do not print disassembly notes in the
56 				   output as comments.  */
58 /* Currently active instruction sequence. */
59 static aarch64_instr_sequence insn_sequence;
61 static void
62 set_default_aarch64_dis_options (struct disassemble_info *info ATTRIBUTE_UNUSED)
66 static void
67 parse_aarch64_dis_option (const char *option, unsigned int len ATTRIBUTE_UNUSED)
69 /* Try to match options that are simple flags */
70 if (startswith (option, "no-aliases"))
72 no_aliases = 1;
73 return;
76 if (startswith (option, "aliases"))
78 no_aliases = 0;
79 return;
82 if (startswith (option, "no-notes"))
84 no_notes = 1;
85 return;
88 if (startswith (option, "notes"))
90 no_notes = 0;
91 return;
94 #ifdef DEBUG_AARCH64
95 if (startswith (option, "debug_dump"))
97 debug_dump = 1;
98 return;
100 #endif /* DEBUG_AARCH64 */
102 /* Invalid option. */
103 opcodes_error_handler (_("unrecognised disassembler option: %s"), option);
106 static void
107 parse_aarch64_dis_options (const char *options)
109 const char *option_end;
111 if (options == NULL)
112 return;
114 while (*options != '\0')
116 /* Skip empty options. */
117 if (*options == ',')
119 options++;
120 continue;
123 /* We know that *options is neither NUL nor a comma.  */
124 option_end = options + 1;
125 while (*option_end != ',' && *option_end != '\0')
126 option_end++;
128 parse_aarch64_dis_option (options, option_end - options);
130 /* Go on to the next one. If option_end points to a comma, it
131 will be skipped above. */
132 options = option_end;
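/* Illustrative note (not part of the upstream source): these options arrive
   via the disassembler's option string, e.g. an invocation such as
       objdump -d -M no-aliases,no-notes foo.o
   hands "no-aliases,no-notes" to parse_aarch64_dis_options, which splits it
   at the commas and dispatches each piece to parse_aarch64_dis_option.  */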
136 /* Functions doing the instruction disassembling. */
138 /* The unnamed arguments consist of the number of fields and information about
139 these fields where the VALUE will be extracted from CODE and returned.
140 MASK can be zero or the base mask of the opcode.
142 N.B. the fields are required to be in such an order that the most significant
143 field for VALUE comes first, e.g. the <index> in
144 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
145 is encoded in H:L:M in some cases, the fields H:L:M should be passed in
146 the order of H, L, M. */
148 aarch64_insn
149 extract_fields (aarch64_insn code, aarch64_insn mask, ...)
151 uint32_t num;
152 const aarch64_field *field;
153 enum aarch64_field_kind kind;
154 va_list va;
156 va_start (va, mask);
157 num = va_arg (va, uint32_t);
158 assert (num <= 5);
159 aarch64_insn value = 0x0;
160 while (num--)
162 kind = va_arg (va, enum aarch64_field_kind);
163 field = &fields[kind];
164 value <<= field->width;
165 value |= extract_field (kind, code, mask);
167 va_end (va);
168 return value;
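/* Worked example (illustrative): for an index encoded in H:L:M, a call such
   as extract_fields (code, 0, 3, FLD_H, FLD_L, FLD_M) shifts each successive
   field in below the previous ones, so H=1, L=0, M=1 yields 0b101 (5).  */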
171 /* Extract the value of all fields in SELF->fields after START from
172 instruction CODE. The least significant bit comes from the final field. */
174 static aarch64_insn
175 extract_all_fields_after (const aarch64_operand *self, unsigned int start,
176 aarch64_insn code)
178 aarch64_insn value;
179 unsigned int i;
180 enum aarch64_field_kind kind;
182 value = 0;
183 for (i = start;
184 i < ARRAY_SIZE (self->fields) && self->fields[i] != FLD_NIL; ++i)
186 kind = self->fields[i];
187 value <<= fields[kind].width;
188 value |= extract_field (kind, code, 0);
190 return value;
193 /* Extract the value of all fields in SELF->fields from instruction CODE.
194 The least significant bit comes from the final field. */
196 static aarch64_insn
197 extract_all_fields (const aarch64_operand *self, aarch64_insn code)
199 return extract_all_fields_after (self, 0, code);
202 /* Sign-extend bit I of VALUE. */
203 static inline uint64_t
204 sign_extend (aarch64_insn value, unsigned i)
206 uint64_t ret, sign;
208 assert (i < 32);
209 ret = value;
210 sign = (uint64_t) 1 << i;
211 return ((ret & (sign + sign - 1)) ^ sign) - sign;
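/* Worked example (illustrative): sign_extend (0x1f0, 8) treats bit 8 as the
   sign bit, so 0x1f0 (0b1_1111_0000) becomes the 64-bit value -0x10.  */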
214 /* N.B. the following inline helper functions create a dependency on the
215 order of operand qualifier enumerators. */
217 /* Given VALUE, return qualifier for a general purpose register. */
218 static inline enum aarch64_opnd_qualifier
219 get_greg_qualifier_from_value (aarch64_insn value)
221 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_W + value;
222 assert (value <= 0x1
223 && aarch64_get_qualifier_standard_value (qualifier) == value);
224 return qualifier;
227 /* Given VALUE, return qualifier for a vector register. This does not support
228 decoding instructions that accept the 2H vector type. */
230 static inline enum aarch64_opnd_qualifier
231 get_vreg_qualifier_from_value (aarch64_insn value)
233 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_V_8B + value;
235 /* Instructions using vector type 2H should not call this function. Skip over
236 the 2H qualifier. */
237 if (qualifier >= AARCH64_OPND_QLF_V_2H)
238 qualifier += 1;
240 assert (value <= 0x8
241 && aarch64_get_qualifier_standard_value (qualifier) == value);
242 return qualifier;
245 /* Given VALUE, return qualifier for an FP or AdvSIMD scalar register. */
246 static inline enum aarch64_opnd_qualifier
247 get_sreg_qualifier_from_value (aarch64_insn value)
249 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_S_B + value;
251 assert (value <= 0x4
252 && aarch64_get_qualifier_standard_value (qualifier) == value);
253 return qualifier;
256 /* Given the instruction in *INST, which is probably half way through being
257 decoded, return the qualifier that our caller should expect for operand
258 I if we can establish it; otherwise return
259 AARCH64_OPND_QLF_NIL.  */
261 static aarch64_opnd_qualifier_t
262 get_expected_qualifier (const aarch64_inst *inst, int i)
264 aarch64_opnd_qualifier_seq_t qualifiers;
265 /* Should not be called if the qualifier is known. */
266 assert (inst->operands[i].qualifier == AARCH64_OPND_QLF_NIL);
267 int invalid_count;
268 if (aarch64_find_best_match (inst, inst->opcode->qualifiers_list,
269 i, qualifiers, &invalid_count))
270 return qualifiers[i];
271 else
272 return AARCH64_OPND_QLF_NIL;
275 /* Operand extractors. */
277 bool
278 aarch64_ext_none (const aarch64_operand *self ATTRIBUTE_UNUSED,
279 aarch64_opnd_info *info ATTRIBUTE_UNUSED,
280 const aarch64_insn code ATTRIBUTE_UNUSED,
281 const aarch64_inst *inst ATTRIBUTE_UNUSED,
282 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
284 return true;
287 bool
288 aarch64_ext_regno (const aarch64_operand *self, aarch64_opnd_info *info,
289 const aarch64_insn code,
290 const aarch64_inst *inst ATTRIBUTE_UNUSED,
291 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
293 info->reg.regno = (extract_field (self->fields[0], code, 0)
294 + get_operand_specific_data (self));
295 return true;
298 bool
299 aarch64_ext_regno_pair (const aarch64_operand *self ATTRIBUTE_UNUSED, aarch64_opnd_info *info,
300 const aarch64_insn code ATTRIBUTE_UNUSED,
301 const aarch64_inst *inst ATTRIBUTE_UNUSED,
302 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
304 assert (info->idx == 1
305 || info->idx == 2
306 || info->idx == 3
307 || info->idx == 5);
309 unsigned prev_regno = inst->operands[info->idx - 1].reg.regno;
310 info->reg.regno = (prev_regno == 0x1f) ? 0x1f
311 : prev_regno + 1;
312 return true;
315 /* e.g. IC <ic_op>{, <Xt>}. */
316 bool
317 aarch64_ext_regrt_sysins (const aarch64_operand *self, aarch64_opnd_info *info,
318 const aarch64_insn code,
319 const aarch64_inst *inst ATTRIBUTE_UNUSED,
320 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
322 info->reg.regno = extract_field (self->fields[0], code, 0);
323 assert (info->idx == 1
324 && (aarch64_get_operand_class (inst->operands[0].type)
325 == AARCH64_OPND_CLASS_SYSTEM));
326 /* This will make the constraint checking happy and more importantly will
327 help the disassembler determine whether this operand is optional or
328 not. */
329 info->present = aarch64_sys_ins_reg_has_xt (inst->operands[0].sysins_op);
331 return true;
334 /* e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
335 bool
336 aarch64_ext_reglane (const aarch64_operand *self, aarch64_opnd_info *info,
337 const aarch64_insn code,
338 const aarch64_inst *inst ATTRIBUTE_UNUSED,
339 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
341 /* regno */
342 info->reglane.regno = extract_field (self->fields[0], code,
343 inst->opcode->mask);
345 /* Index and/or type. */
346 if (inst->opcode->iclass == asisdone
347 || inst->opcode->iclass == asimdins)
349 if (info->type == AARCH64_OPND_En
350 && inst->opcode->operands[0] == AARCH64_OPND_Ed)
352 unsigned shift;
353 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
354 assert (info->idx == 1); /* Vn */
355 aarch64_insn value = extract_field (FLD_imm4_11, code, 0);
356 /* Depend on AARCH64_OPND_Ed to determine the qualifier. */
357 info->qualifier = get_expected_qualifier (inst, info->idx);
358 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
359 info->reglane.index = value >> shift;
361 else
363 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
364 imm5<3:0> <V>
365 0000 RESERVED
366 xxx1 B
367 xx10 H
368 x100 S
369 1000 D */
370 int pos = -1;
371 aarch64_insn value = extract_field (FLD_imm5, code, 0);
372 while (++pos <= 3 && (value & 0x1) == 0)
373 value >>= 1;
374 if (pos > 3)
375 return false;
376 info->qualifier = get_sreg_qualifier_from_value (pos);
377 info->reglane.index = (unsigned) (value >> 1);
380 else if (inst->opcode->iclass == dotproduct)
382 /* Need information in other operand(s) to help decoding. */
383 info->qualifier = get_expected_qualifier (inst, info->idx);
384 switch (info->qualifier)
386 case AARCH64_OPND_QLF_S_4B:
387 case AARCH64_OPND_QLF_S_2H:
388 /* L:H */
389 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
390 info->reglane.regno &= 0x1f;
391 break;
392 default:
393 return false;
396 else if (inst->opcode->iclass == cryptosm3)
398 /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>S[<imm2>]. */
399 info->reglane.index = extract_field (FLD_SM3_imm2, code, 0);
401 else
403 /* Index only for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
404 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
406 /* Need information in other operand(s) to help decoding. */
407 info->qualifier = get_expected_qualifier (inst, info->idx);
408 switch (info->qualifier)
410 case AARCH64_OPND_QLF_S_H:
411 if (info->type == AARCH64_OPND_Em16)
413 /* h:l:m */
414 info->reglane.index = extract_fields (code, 0, 3, FLD_H, FLD_L,
415 FLD_M);
416 info->reglane.regno &= 0xf;
418 else
420 /* h:l */
421 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
423 break;
424 case AARCH64_OPND_QLF_S_S:
425 /* h:l */
426 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
427 break;
428 case AARCH64_OPND_QLF_S_D:
429 /* H */
430 info->reglane.index = extract_field (FLD_H, code, 0);
431 break;
432 default:
433 return false;
436 if (inst->opcode->op == OP_FCMLA_ELEM
437 && info->qualifier != AARCH64_OPND_QLF_S_H)
439 /* Complex operand takes two elements. */
440 if (info->reglane.index & 1)
441 return false;
442 info->reglane.index /= 2;
446 return true;
449 bool
450 aarch64_ext_reglist (const aarch64_operand *self, aarch64_opnd_info *info,
451 const aarch64_insn code,
452 const aarch64_inst *inst ATTRIBUTE_UNUSED,
453 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
455 /* R */
456 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
457 /* len */
458 info->reglist.num_regs = extract_field (FLD_len, code, 0) + 1;
459 info->reglist.stride = 1;
460 return true;
463 /* Decode Rt and opcode fields of Vt in AdvSIMD load/store instructions. */
464 bool
465 aarch64_ext_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
466 aarch64_opnd_info *info, const aarch64_insn code,
467 const aarch64_inst *inst,
468 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
470 aarch64_insn value;
471 /* Number of elements in each structure to be loaded/stored. */
472 unsigned expected_num = get_opcode_dependent_value (inst->opcode);
474 struct
476 unsigned is_reserved;
477 unsigned num_regs;
478 unsigned num_elements;
479 } data [] =
480 { {0, 4, 4},
481 {1, 4, 4},
482 {0, 4, 1},
483 {0, 4, 2},
484 {0, 3, 3},
485 {1, 3, 3},
486 {0, 3, 1},
487 {0, 1, 1},
488 {0, 2, 2},
489 {1, 2, 2},
490 {0, 2, 1},
493 /* Rt */
494 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
495 /* opcode */
496 value = extract_field (FLD_opcode, code, 0);
497 /* PR 21595: Check for a bogus value. */
498 if (value >= ARRAY_SIZE (data))
499 return false;
500 if (expected_num != data[value].num_elements || data[value].is_reserved)
501 return false;
502 info->reglist.num_regs = data[value].num_regs;
503 info->reglist.stride = 1;
505 return true;
508 /* Decode Rt and S fields of Vt in AdvSIMD load single structure to all
509 lanes instructions. */
510 bool
511 aarch64_ext_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
512 aarch64_opnd_info *info, const aarch64_insn code,
513 const aarch64_inst *inst,
514 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
516 aarch64_insn value;
518 /* Rt */
519 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
520 /* S */
521 value = extract_field (FLD_S, code, 0);
523 /* Number of registers is equal to the number of elements in
524 each structure to be loaded/stored. */
525 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
526 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
528 /* Except when it is LD1R. */
529 if (info->reglist.num_regs == 1 && value == (aarch64_insn) 1)
530 info->reglist.num_regs = 2;
532 info->reglist.stride = 1;
533 return true;
536 /* Decode Q, opcode<2:1>, S, size and Rt fields of Vt in AdvSIMD
537 load/store single element instructions. */
538 bool
539 aarch64_ext_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
540 aarch64_opnd_info *info, const aarch64_insn code,
541 const aarch64_inst *inst ATTRIBUTE_UNUSED,
542 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
544 aarch64_field field = {0, 0};
545 aarch64_insn QSsize; /* fields Q:S:size. */
546 aarch64_insn opcodeh2; /* opcode<2:1> */
548 /* Rt */
549 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
551 /* Decode the index, opcode<2:1> and size. */
552 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
553 opcodeh2 = extract_field_2 (&field, code, 0);
554 QSsize = extract_fields (code, 0, 3, FLD_Q, FLD_S, FLD_vldst_size);
555 switch (opcodeh2)
557 case 0x0:
558 info->qualifier = AARCH64_OPND_QLF_S_B;
559 /* Index encoded in "Q:S:size". */
560 info->reglist.index = QSsize;
561 break;
562 case 0x1:
563 if (QSsize & 0x1)
564 /* UND. */
565 return false;
566 info->qualifier = AARCH64_OPND_QLF_S_H;
567 /* Index encoded in "Q:S:size<1>". */
568 info->reglist.index = QSsize >> 1;
569 break;
570 case 0x2:
571 if ((QSsize >> 1) & 0x1)
572 /* UND. */
573 return false;
574 if ((QSsize & 0x1) == 0)
576 info->qualifier = AARCH64_OPND_QLF_S_S;
577 /* Index encoded in "Q:S". */
578 info->reglist.index = QSsize >> 2;
580 else
582 if (extract_field (FLD_S, code, 0))
583 /* UND */
584 return false;
585 info->qualifier = AARCH64_OPND_QLF_S_D;
586 /* Index encoded in "Q". */
587 info->reglist.index = QSsize >> 3;
589 break;
590 default:
591 return false;
594 info->reglist.has_index = 1;
595 info->reglist.num_regs = 0;
596 info->reglist.stride = 1;
597 /* Number of registers is equal to the number of elements in
598 each structure to be loaded/stored. */
599 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
600 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
602 return true;
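/* Worked example (values chosen for illustration): with opcode<2:1> = 1 and
   Q:S:size = 0b1010 the element type is H, size<0> is required to be 0, and
   the lane index is Q:S:size<1> = 0b101 = 5, as in LD1 {Vt.H}[5], [Xn].  */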
605 /* Decode fields immh:immb and/or Q for e.g.
606 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
607 or SSHR <V><d>, <V><n>, #<shift>. */
609 bool
610 aarch64_ext_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
611 aarch64_opnd_info *info, const aarch64_insn code,
612 const aarch64_inst *inst,
613 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
615 int pos;
616 aarch64_insn Q, imm, immh;
617 enum aarch64_insn_class iclass = inst->opcode->iclass;
619 immh = extract_field (FLD_immh, code, 0);
620 if (immh == 0)
621 return false;
622 imm = extract_fields (code, 0, 2, FLD_immh, FLD_immb);
623 pos = 4;
624 /* Get highest set bit in immh. */
625 while (--pos >= 0 && (immh & 0x8) == 0)
626 immh <<= 1;
628 assert ((iclass == asimdshf || iclass == asisdshf)
629 && (info->type == AARCH64_OPND_IMM_VLSR
630 || info->type == AARCH64_OPND_IMM_VLSL));
632 if (iclass == asimdshf)
634 Q = extract_field (FLD_Q, code, 0);
635 /* immh Q <T>
636 0000 x SEE AdvSIMD modified immediate
637 0001 0 8B
638 0001 1 16B
639 001x 0 4H
640 001x 1 8H
641 01xx 0 2S
642 01xx 1 4S
643 1xxx 0 RESERVED
644 1xxx 1 2D */
645 info->qualifier =
646 get_vreg_qualifier_from_value ((pos << 1) | (int) Q);
648 else
649 info->qualifier = get_sreg_qualifier_from_value (pos);
651 if (info->type == AARCH64_OPND_IMM_VLSR)
652 /* immh <shift>
653 0000 SEE AdvSIMD modified immediate
654 0001 (16-UInt(immh:immb))
655 001x (32-UInt(immh:immb))
656 01xx (64-UInt(immh:immb))
657 1xxx (128-UInt(immh:immb)) */
658 info->imm.value = (16 << pos) - imm;
659 else
660 /* immh:immb
661 immh <shift>
662 0000 SEE AdvSIMD modified immediate
663 0001 (UInt(immh:immb)-8)
664 001x (UInt(immh:immb)-16)
665 01xx (UInt(immh:immb)-32)
666 1xxx (UInt(immh:immb)-64) */
667 info->imm.value = imm - (8 << pos);
669 return true;
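/* Worked example (illustrative): for an IMM_VLSR operand with
   immh:immb = 0001:010, the highest set bit of immh gives pos = 0, so the
   element size is 8 bits and the decoded shift is (16 << 0) - 0b0001010 = 6,
   i.e. a right shift of #6 on byte elements.  */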
672 /* Decode the shift immediate for e.g. shll (imm).  */
673 bool
674 aarch64_ext_shll_imm (const aarch64_operand *self ATTRIBUTE_UNUSED,
675 aarch64_opnd_info *info, const aarch64_insn code,
676 const aarch64_inst *inst ATTRIBUTE_UNUSED,
677 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
679 int64_t imm;
680 aarch64_insn val;
681 val = extract_field (FLD_size, code, 0);
682 switch (val)
684 case 0: imm = 8; break;
685 case 1: imm = 16; break;
686 case 2: imm = 32; break;
687 default: return false;
689 info->imm.value = imm;
690 return true;
693 /* Decode imm for e.g. BFM <Wd>, <Wn>, #<immr>, #<imms>.
694 value in the field(s) will be extracted as unsigned immediate value. */
695 bool
696 aarch64_ext_imm (const aarch64_operand *self, aarch64_opnd_info *info,
697 const aarch64_insn code,
698 const aarch64_inst *inst,
699 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
701 uint64_t imm;
703 imm = extract_all_fields (self, code);
705 if (operand_need_sign_extension (self))
706 imm = sign_extend (imm, get_operand_fields_width (self) - 1);
708 if (operand_need_shift_by_two (self))
709 imm <<= 2;
710 else if (operand_need_shift_by_three (self))
711 imm <<= 3;
712 else if (operand_need_shift_by_four (self))
713 imm <<= 4;
715 if (info->type == AARCH64_OPND_ADDR_ADRP)
716 imm <<= 12;
718 if (inst->operands[0].type == AARCH64_OPND_PSTATEFIELD
719 && inst->operands[0].sysreg.flags & F_IMM_IN_CRM)
720 imm &= PSTATE_DECODE_CRM_IMM (inst->operands[0].sysreg.flags);
722 info->imm.value = imm;
723 return true;
726 /* Decode imm and its shifter for e.g. MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
727 bool
728 aarch64_ext_imm_half (const aarch64_operand *self, aarch64_opnd_info *info,
729 const aarch64_insn code,
730 const aarch64_inst *inst ATTRIBUTE_UNUSED,
731 aarch64_operand_error *errors)
733 aarch64_ext_imm (self, info, code, inst, errors);
734 info->shifter.kind = AARCH64_MOD_LSL;
735 info->shifter.amount = extract_field (FLD_hw, code, 0) << 4;
736 return true;
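/* Example (illustrative): for MOVZ <Xd>, #<imm16>, LSL #32 the hw field is 2,
   so the decoded shifter amount is 2 << 4 = 32.  */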
739 /* Decode cmode and "a:b:c:d:e:f:g:h" for e.g.
740 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
741 bool
742 aarch64_ext_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
743 aarch64_opnd_info *info,
744 const aarch64_insn code,
745 const aarch64_inst *inst ATTRIBUTE_UNUSED,
746 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
748 uint64_t imm;
749 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
750 aarch64_field field = {0, 0};
752 assert (info->idx == 1);
754 if (info->type == AARCH64_OPND_SIMD_FPIMM)
755 info->imm.is_fp = 1;
757 /* a:b:c:d:e:f:g:h */
758 imm = extract_fields (code, 0, 2, FLD_abc, FLD_defgh);
759 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
761 /* Either MOVI <Dd>, #<imm>
762 or MOVI <Vd>.2D, #<imm>.
763 <imm> is a 64-bit immediate
764 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh',
765 encoded in "a:b:c:d:e:f:g:h". */
766 int i;
767 unsigned abcdefgh = imm;
768 for (imm = 0ull, i = 0; i < 8; i++)
769 if (((abcdefgh >> i) & 0x1) != 0)
770 imm |= 0xffull << (8 * i);
772 info->imm.value = imm;
774 /* cmode */
775 info->qualifier = get_expected_qualifier (inst, info->idx);
776 switch (info->qualifier)
778 case AARCH64_OPND_QLF_NIL:
779 /* no shift */
780 info->shifter.kind = AARCH64_MOD_NONE;
781 return true;
782 case AARCH64_OPND_QLF_LSL:
783 /* shift zeros */
784 info->shifter.kind = AARCH64_MOD_LSL;
785 switch (aarch64_get_qualifier_esize (opnd0_qualifier))
787 case 4: gen_sub_field (FLD_cmode, 1, 2, &field); break; /* per word */
788 case 2: gen_sub_field (FLD_cmode, 1, 1, &field); break; /* per half */
789 case 1: gen_sub_field (FLD_cmode, 1, 0, &field); break; /* per byte */
790 default: return false;
792 /* 00: 0; 01: 8; 10:16; 11:24. */
793 info->shifter.amount = extract_field_2 (&field, code, 0) << 3;
794 break;
795 case AARCH64_OPND_QLF_MSL:
796 /* shift ones */
797 info->shifter.kind = AARCH64_MOD_MSL;
798 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
799 info->shifter.amount = extract_field_2 (&field, code, 0) ? 16 : 8;
800 break;
801 default:
802 return false;
805 return true;
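/* Worked example (illustrative): for MOVI <Vd>.2D each of the bits a..h is
   expanded to a whole byte, so a:b:c:d:e:f:g:h = 0b10000001 becomes the
   64-bit immediate 0xff000000000000ff.  */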
808 /* Decode an 8-bit floating-point immediate. */
809 bool
810 aarch64_ext_fpimm (const aarch64_operand *self, aarch64_opnd_info *info,
811 const aarch64_insn code,
812 const aarch64_inst *inst ATTRIBUTE_UNUSED,
813 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
815 info->imm.value = extract_all_fields (self, code);
816 info->imm.is_fp = 1;
817 return true;
820 /* Decode a 1-bit rotate immediate (#90 or #270). */
821 bool
822 aarch64_ext_imm_rotate1 (const aarch64_operand *self, aarch64_opnd_info *info,
823 const aarch64_insn code,
824 const aarch64_inst *inst ATTRIBUTE_UNUSED,
825 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
827 uint64_t rot = extract_field (self->fields[0], code, 0);
828 assert (rot < 2U);
829 info->imm.value = rot * 180 + 90;
830 return true;
833 /* Decode a 2-bit rotate immediate (#0, #90, #180 or #270). */
834 bool
835 aarch64_ext_imm_rotate2 (const aarch64_operand *self, aarch64_opnd_info *info,
836 const aarch64_insn code,
837 const aarch64_inst *inst ATTRIBUTE_UNUSED,
838 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
840 uint64_t rot = extract_field (self->fields[0], code, 0);
841 assert (rot < 4U);
842 info->imm.value = rot * 90;
843 return true;
846 /* Decode scale for e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
847 bool
848 aarch64_ext_fbits (const aarch64_operand *self ATTRIBUTE_UNUSED,
849 aarch64_opnd_info *info, const aarch64_insn code,
850 const aarch64_inst *inst ATTRIBUTE_UNUSED,
851 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
853 info->imm.value = 64 - extract_field (FLD_scale, code, 0);
854 return true;
857 /* Decode arithmetic immediate for e.g.
858 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
859 bool
860 aarch64_ext_aimm (const aarch64_operand *self ATTRIBUTE_UNUSED,
861 aarch64_opnd_info *info, const aarch64_insn code,
862 const aarch64_inst *inst ATTRIBUTE_UNUSED,
863 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
865 aarch64_insn value;
867 info->shifter.kind = AARCH64_MOD_LSL;
868 /* shift */
869 value = extract_field (FLD_shift, code, 0);
870 if (value >= 2)
871 return false;
872 info->shifter.amount = value ? 12 : 0;
873 /* imm12 (unsigned) */
874 info->imm.value = extract_field (FLD_imm12, code, 0);
876 return true;
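/* Example (illustrative): for SUBS <Xd>, <Xn|SP>, #0x123, LSL #12 the shift
   field is 1 and imm12 is 0x123, decoded as imm.value = 0x123 with an
   LSL #12 shifter.  */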
879 /* Return true if VALUE is a valid logical immediate encoding, storing the
880 decoded value in *RESULT if so. ESIZE is the number of bytes in the
881 decoded immediate. */
882 static bool
883 decode_limm (uint32_t esize, aarch64_insn value, int64_t *result)
885 uint64_t imm, mask;
886 uint32_t N, R, S;
887 unsigned simd_size;
889 /* value is N:immr:imms. */
890 S = value & 0x3f;
891 R = (value >> 6) & 0x3f;
892 N = (value >> 12) & 0x1;
894 /* The immediate value is S+1 consecutive bits set to 1, left-rotated by
895 SIMDsize - R (in other words, right-rotated by R), then replicated.  */
896 if (N != 0)
898 simd_size = 64;
899 mask = 0xffffffffffffffffull;
901 else
903 switch (S)
905 case 0x00 ... 0x1f: /* 0xxxxx */ simd_size = 32; break;
906 case 0x20 ... 0x2f: /* 10xxxx */ simd_size = 16; S &= 0xf; break;
907 case 0x30 ... 0x37: /* 110xxx */ simd_size = 8; S &= 0x7; break;
908 case 0x38 ... 0x3b: /* 1110xx */ simd_size = 4; S &= 0x3; break;
909 case 0x3c ... 0x3d: /* 11110x */ simd_size = 2; S &= 0x1; break;
910 default: return false;
912 mask = (1ull << simd_size) - 1;
913 /* Top bits are IGNORED. */
914 R &= simd_size - 1;
917 if (simd_size > esize * 8)
918 return false;
920 /* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected. */
921 if (S == simd_size - 1)
922 return false;
923 /* S+1 consecutive bits to 1. */
924 /* NOTE: S can't be 63 due to detection above. */
925 imm = (1ull << (S + 1)) - 1;
926 /* Rotate to the left by simd_size - R. */
927 if (R != 0)
928 imm = ((imm << (simd_size - R)) & mask) | (imm >> R);
929 /* Replicate the value according to SIMD size. */
930 switch (simd_size)
932 case 2: imm = (imm << 2) | imm;
933 /* Fall through. */
934 case 4: imm = (imm << 4) | imm;
935 /* Fall through. */
936 case 8: imm = (imm << 8) | imm;
937 /* Fall through. */
938 case 16: imm = (imm << 16) | imm;
939 /* Fall through. */
940 case 32: imm = (imm << 32) | imm;
941 /* Fall through. */
942 case 64: break;
943 default: return false;
946 *result = imm & ~((uint64_t) -1 << (esize * 4) << (esize * 4));
948 return true;
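/* Worked example (illustrative): N:immr:imms = 0:000000:000111 gives
   N = 0, R = 0, S = 7, hence a 32-bit element containing eight consecutive
   ones with no rotation; decode_limm (4, 0x7, &result) therefore stores
   0xff, as used by ORR <Wd|WSP>, <Wn>, #0xff.  */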
951 /* Decode a logical immediate for e.g. ORR <Wd|WSP>, <Wn>, #<imm>. */
952 bool
953 aarch64_ext_limm (const aarch64_operand *self,
954 aarch64_opnd_info *info, const aarch64_insn code,
955 const aarch64_inst *inst,
956 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
958 uint32_t esize;
959 aarch64_insn value;
961 value = extract_fields (code, 0, 3, self->fields[0], self->fields[1],
962 self->fields[2]);
963 esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
964 return decode_limm (esize, value, &info->imm.value);
967 /* Decode a logical immediate for the BIC alias of AND (etc.). */
968 bool
969 aarch64_ext_inv_limm (const aarch64_operand *self,
970 aarch64_opnd_info *info, const aarch64_insn code,
971 const aarch64_inst *inst,
972 aarch64_operand_error *errors)
974 if (!aarch64_ext_limm (self, info, code, inst, errors))
975 return false;
976 info->imm.value = ~info->imm.value;
977 return true;
980 /* Decode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
981 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
982 bool
983 aarch64_ext_ft (const aarch64_operand *self ATTRIBUTE_UNUSED,
984 aarch64_opnd_info *info,
985 const aarch64_insn code, const aarch64_inst *inst,
986 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
988 aarch64_insn value;
990 /* Rt */
991 info->reg.regno = extract_field (FLD_Rt, code, 0);
993 /* size */
994 value = extract_field (FLD_ldst_size, code, 0);
995 if (inst->opcode->iclass == ldstpair_indexed
996 || inst->opcode->iclass == ldstnapair_offs
997 || inst->opcode->iclass == ldstpair_off
998 || inst->opcode->iclass == loadlit)
1000 enum aarch64_opnd_qualifier qualifier;
1001 switch (value)
1003 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
1004 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
1005 case 2: qualifier = AARCH64_OPND_QLF_S_Q; break;
1006 default: return false;
1008 info->qualifier = qualifier;
1010 else
1012 /* opc1:size */
1013 value = extract_fields (code, 0, 2, FLD_opc1, FLD_ldst_size);
1014 if (value > 0x4)
1015 return false;
1016 info->qualifier = get_sreg_qualifier_from_value (value);
1019 return true;
1022 /* Decode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
1023 bool
1024 aarch64_ext_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
1025 aarch64_opnd_info *info,
1026 aarch64_insn code,
1027 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1028 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1030 /* Rn */
1031 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1032 return true;
1035 /* Decode the address operand for RCPC3 instructions with an optional
1036 load/store datasize offset, e.g. STILP <Xs>, <Xt>, [<Xn|SP>{,#-16}]! and
1037 LDIAPP <Xs>, <Xt>, [<Xn|SP>]{,#-16}.  */
1038 bool
1039 aarch64_ext_rcpc3_addr_opt_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
1040 aarch64_opnd_info *info,
1041 aarch64_insn code,
1042 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1043 aarch64_operand_error *err ATTRIBUTE_UNUSED)
1045 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1046 if (!extract_field (FLD_opc2, code, 0))
1048 info->addr.writeback = 1;
1050 enum aarch64_opnd type;
1051 for (int i = 0; i < AARCH64_MAX_OPND_NUM; i++)
1053 aarch64_opnd_info opnd = info[i];
1054 type = opnd.type;
1055 if (aarch64_operands[type].op_class == AARCH64_OPND_CLASS_ADDRESS)
1056 break;
1059 assert (aarch64_operands[type].op_class == AARCH64_OPND_CLASS_ADDRESS);
1060 int offset = calc_ldst_datasize (inst->operands);
1062 switch (type)
1064 case AARCH64_OPND_RCPC3_ADDR_OPT_PREIND_WB:
1065 case AARCH64_OPND_RCPC3_ADDR_PREIND_WB:
1066 info->addr.offset.imm = -offset;
1067 info->addr.preind = 1;
1068 break;
1069 case AARCH64_OPND_RCPC3_ADDR_OPT_POSTIND:
1070 case AARCH64_OPND_RCPC3_ADDR_POSTIND:
1071 info->addr.offset.imm = offset;
1072 info->addr.postind = 1;
1073 break;
1074 default:
1075 return false;
1078 return true;
1081 bool
1082 aarch64_ext_rcpc3_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
1083 aarch64_opnd_info *info,
1084 aarch64_insn code,
1085 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1086 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1088 info->qualifier = get_expected_qualifier (inst, info->idx);
1090 /* Rn */
1091 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1093 /* simm9 */
1094 aarch64_insn imm = extract_fields (code, 0, 1, self->fields[1]);
1095 info->addr.offset.imm = sign_extend (imm, 8);
1096 return true;
1099 /* Decode the address operand for e.g.
1100 stlur <Xt>, [<Xn|SP>{, <amount>}]. */
1101 bool
1102 aarch64_ext_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
1103 aarch64_opnd_info *info,
1104 aarch64_insn code, const aarch64_inst *inst,
1105 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1107 info->qualifier = get_expected_qualifier (inst, info->idx);
1109 /* Rn */
1110 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1112 /* simm9 */
1113 aarch64_insn imm = extract_fields (code, 0, 1, self->fields[1]);
1114 info->addr.offset.imm = sign_extend (imm, 8);
1115 if (extract_field (self->fields[2], code, 0) == 1) {
1116 info->addr.writeback = 1;
1117 info->addr.preind = 1;
1119 return true;
1122 /* Decode the address operand for e.g.
1123 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1124 bool
1125 aarch64_ext_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
1126 aarch64_opnd_info *info,
1127 aarch64_insn code, const aarch64_inst *inst,
1128 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1130 aarch64_insn S, value;
1132 /* Rn */
1133 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1134 /* Rm */
1135 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
1136 /* option */
1137 value = extract_field (FLD_option, code, 0);
1138 info->shifter.kind =
1139 aarch64_get_operand_modifier_from_value (value, true /* extend_p */);
1140 /* Fix-up the shifter kind; although the table-driven approach is
1141 efficient, it is slightly inflexible, thus needing this fix-up. */
1142 if (info->shifter.kind == AARCH64_MOD_UXTX)
1143 info->shifter.kind = AARCH64_MOD_LSL;
1144 /* S */
1145 S = extract_field (FLD_S, code, 0);
1146 if (S == 0)
1148 info->shifter.amount = 0;
1149 info->shifter.amount_present = 0;
1151 else
1153 int size;
1154 /* Need information in other operand(s) to help achieve the decoding
1155 from 'S' field. */
1156 info->qualifier = get_expected_qualifier (inst, info->idx);
1157 /* Get the size of the data element that is accessed, which may be
1158 different from that of the source register size, e.g. in strb/ldrb. */
1159 size = aarch64_get_qualifier_esize (info->qualifier);
1160 info->shifter.amount = get_logsz (size);
1161 info->shifter.amount_present = 1;
1164 return true;
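/* Example (illustrative): for STR <Qt>, [<Xn|SP>, <Xm>, LSL #4] the S bit is
   set and the accessed element is 16 bytes wide, so the decoded shift amount
   is get_logsz (16) = 4.  */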
1167 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>], #<simm>. */
1168 bool
1169 aarch64_ext_addr_simm (const aarch64_operand *self, aarch64_opnd_info *info,
1170 aarch64_insn code, const aarch64_inst *inst,
1171 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1173 aarch64_insn imm;
1174 info->qualifier = get_expected_qualifier (inst, info->idx);
1176 /* Rn */
1177 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1178 /* simm (imm9 or imm7) */
1179 imm = extract_field (self->fields[0], code, 0);
1180 info->addr.offset.imm = sign_extend (imm, fields[self->fields[0]].width - 1);
1181 if (self->fields[0] == FLD_imm7
1182 || info->qualifier == AARCH64_OPND_QLF_imm_tag)
1183 /* scaled immediate in ld/st pair instructions. */
1184 info->addr.offset.imm *= aarch64_get_qualifier_esize (info->qualifier);
1185 /* qualifier */
1186 if (inst->opcode->iclass == ldst_unscaled
1187 || inst->opcode->iclass == ldstnapair_offs
1188 || inst->opcode->iclass == ldstpair_off
1189 || inst->opcode->iclass == ldst_unpriv)
1190 info->addr.writeback = 0;
1191 else
1193 /* pre/post- index */
1194 info->addr.writeback = 1;
1195 if (extract_field (self->fields[1], code, 0) == 1)
1196 info->addr.preind = 1;
1197 else
1198 info->addr.postind = 1;
1201 return true;
1204 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<simm>}]. */
1205 bool
1206 aarch64_ext_addr_uimm12 (const aarch64_operand *self, aarch64_opnd_info *info,
1207 aarch64_insn code,
1208 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1209 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1211 int shift;
1212 info->qualifier = get_expected_qualifier (inst, info->idx);
1213 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
1214 /* Rn */
1215 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1216 /* uimm12 */
1217 info->addr.offset.imm = extract_field (self->fields[1], code, 0) << shift;
1218 return true;
1221 /* Decode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
1222 bool
1223 aarch64_ext_addr_simm10 (const aarch64_operand *self, aarch64_opnd_info *info,
1224 aarch64_insn code,
1225 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1226 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1228 aarch64_insn imm;
1230 info->qualifier = get_expected_qualifier (inst, info->idx);
1231 /* Rn */
1232 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1233 /* simm10 */
1234 imm = extract_fields (code, 0, 2, self->fields[1], self->fields[2]);
1235 info->addr.offset.imm = sign_extend (imm, 9) << 3;
1236 if (extract_field (self->fields[3], code, 0) == 1) {
1237 info->addr.writeback = 1;
1238 info->addr.preind = 1;
1240 return true;
1243 /* Decode the address operand for e.g.
1244 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
1245 bool
1246 aarch64_ext_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
1247 aarch64_opnd_info *info,
1248 aarch64_insn code, const aarch64_inst *inst,
1249 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1251 /* The opcode dependent area stores the number of elements in
1252 each structure to be loaded/stored. */
1253 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
1255 /* Rn */
1256 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1257 /* Rm | #<amount> */
1258 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
1259 if (info->addr.offset.regno == 31)
1261 if (inst->opcode->operands[0] == AARCH64_OPND_LVt_AL)
1262 /* Special handling of loading a single structure to all lanes.  */
1263 info->addr.offset.imm = (is_ld1r ? 1
1264 : inst->operands[0].reglist.num_regs)
1265 * aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1266 else
1267 info->addr.offset.imm = inst->operands[0].reglist.num_regs
1268 * aarch64_get_qualifier_esize (inst->operands[0].qualifier)
1269 * aarch64_get_qualifier_nelem (inst->operands[0].qualifier);
1271 else
1272 info->addr.offset.is_reg = 1;
1273 info->addr.writeback = 1;
1275 return true;
1278 /* Decode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
1279 bool
1280 aarch64_ext_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
1281 aarch64_opnd_info *info,
1282 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
1283 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1285 aarch64_insn value;
1286 /* cond */
1287 value = extract_field (FLD_cond, code, 0);
1288 info->cond = get_cond_from_value (value);
1289 return true;
1292 /* Decode the system register operand for e.g. MRS <Xt>, <systemreg>. */
1293 bool
1294 aarch64_ext_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
1295 aarch64_opnd_info *info,
1296 aarch64_insn code,
1297 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1298 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1300 /* op0:op1:CRn:CRm:op2 */
1301 info->sysreg.value = extract_fields (code, 0, 5, FLD_op0, FLD_op1, FLD_CRn,
1302 FLD_CRm, FLD_op2);
1303 info->sysreg.flags = 0;
1305 /* If a system instruction, check which restrictions should be on the register
1306 value during decoding; these will be enforced then.  */
1307 if (inst->opcode->iclass == ic_system)
1309 /* Check to see if it's read-only; else check if it's write-only.
1310 If it's both or unspecified, we don't care.  */
1311 if ((inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE)) == F_SYS_READ)
1312 info->sysreg.flags = F_REG_READ;
1313 else if ((inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE))
1314 == F_SYS_WRITE)
1315 info->sysreg.flags = F_REG_WRITE;
1318 return true;
1321 /* Decode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
1322 bool
1323 aarch64_ext_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
1324 aarch64_opnd_info *info, aarch64_insn code,
1325 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1326 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1328 int i;
1329 aarch64_insn fld_crm = extract_field (FLD_CRm, code, 0);
1330 /* op1:op2 */
1331 info->pstatefield = extract_fields (code, 0, 2, FLD_op1, FLD_op2);
1332 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
1333 if (aarch64_pstatefields[i].value == (aarch64_insn)info->pstatefield)
1335 /* PSTATEFIELD name can be encoded partially in CRm[3:1]. */
1336 uint32_t flags = aarch64_pstatefields[i].flags;
1337 if ((flags & F_REG_IN_CRM)
1338 && ((fld_crm & 0xe) != PSTATE_DECODE_CRM (flags)))
1339 continue;
1340 info->sysreg.flags = flags;
1341 return true;
1343 /* Reserved value in <pstatefield>. */
1344 return false;
1347 /* Decode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
1348 bool
1349 aarch64_ext_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
1350 aarch64_opnd_info *info,
1351 aarch64_insn code,
1352 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1353 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1355 int i;
1356 aarch64_insn value;
1357 const aarch64_sys_ins_reg *sysins_ops;
1358 /* op0:op1:CRn:CRm:op2 */
1359 value = extract_fields (code, 0, 5,
1360 FLD_op0, FLD_op1, FLD_CRn,
1361 FLD_CRm, FLD_op2);
1363 switch (info->type)
1365 case AARCH64_OPND_SYSREG_AT: sysins_ops = aarch64_sys_regs_at; break;
1366 case AARCH64_OPND_SYSREG_DC: sysins_ops = aarch64_sys_regs_dc; break;
1367 case AARCH64_OPND_SYSREG_IC: sysins_ops = aarch64_sys_regs_ic; break;
1368 case AARCH64_OPND_SYSREG_TLBI: sysins_ops = aarch64_sys_regs_tlbi; break;
1369 case AARCH64_OPND_SYSREG_TLBIP: sysins_ops = aarch64_sys_regs_tlbi; break;
1370 case AARCH64_OPND_SYSREG_SR:
1371 sysins_ops = aarch64_sys_regs_sr;
1372 /* Let's remove op2 for rctx. Refer to comments in the definition of
1373 aarch64_sys_regs_sr[]. */
1374 value = value & ~(0x7);
1375 break;
1376 default: return false;
1379 for (i = 0; sysins_ops[i].name != NULL; ++i)
1380 if (sysins_ops[i].value == value)
1382 info->sysins_op = sysins_ops + i;
1383 DEBUG_TRACE ("%s found value: %x, has_xt: %d, i: %d.",
1384 info->sysins_op->name,
1385 (unsigned)info->sysins_op->value,
1386 aarch64_sys_ins_reg_has_xt (info->sysins_op), i);
1387 return true;
1390 return false;
1393 /* Decode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
1395 bool
1396 aarch64_ext_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
1397 aarch64_opnd_info *info,
1398 aarch64_insn code,
1399 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1400 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1402 /* CRm */
1403 info->barrier = aarch64_barrier_options + extract_field (FLD_CRm, code, 0);
1404 return true;
1407 /* Decode the memory barrier option operand for DSB <option>nXS|#<imm>. */
1409 bool
1410 aarch64_ext_barrier_dsb_nxs (const aarch64_operand *self ATTRIBUTE_UNUSED,
1411 aarch64_opnd_info *info,
1412 aarch64_insn code,
1413 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1414 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1416 /* For the DSB nXS barrier variant immediate is encoded in 2-bit field. */
1417 aarch64_insn field = extract_field (FLD_CRm_dsb_nxs, code, 0);
1418 info->barrier = aarch64_barrier_dsb_nxs_options + field;
1419 return true;
1422 /* Decode the prefetch operation option operand for e.g.
1423 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
1425 bool
1426 aarch64_ext_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
1427 aarch64_opnd_info *info,
1428 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
1429 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1431 /* prfop in Rt */
1432 info->prfop = aarch64_prfops + extract_field (FLD_Rt, code, 0);
1433 return true;
1436 /* Decode the hint number for an alias taking an operand. Set info->hint_option
1437 to the matching name/value pair in aarch64_hint_options. */
1439 bool
1440 aarch64_ext_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
1441 aarch64_opnd_info *info,
1442 aarch64_insn code,
1443 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1444 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1446 /* CRm:op2. */
1447 unsigned hint_number;
1448 int i;
1450 hint_number = extract_fields (code, 0, 2, FLD_CRm, FLD_op2);
1452 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
1454 if (hint_number == HINT_VAL (aarch64_hint_options[i].value))
1456 info->hint_option = &(aarch64_hint_options[i]);
1457 return true;
1461 return false;
1464 /* Decode the extended register operand for e.g.
1465 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1466 bool
1467 aarch64_ext_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
1468 aarch64_opnd_info *info,
1469 aarch64_insn code,
1470 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1471 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1473 aarch64_insn value;
1475 /* Rm */
1476 info->reg.regno = extract_field (FLD_Rm, code, 0);
1477 /* option */
1478 value = extract_field (FLD_option, code, 0);
1479 info->shifter.kind =
1480 aarch64_get_operand_modifier_from_value (value, true /* extend_p */);
1481 /* imm3 */
1482 info->shifter.amount = extract_field (FLD_imm3_10, code, 0);
1484 /* This makes the constraint checking happy. */
1485 info->shifter.operator_present = 1;
1487 /* Assume inst->operands[0].qualifier has been resolved. */
1488 assert (inst->operands[0].qualifier != AARCH64_OPND_QLF_NIL);
1489 info->qualifier = AARCH64_OPND_QLF_W;
1490 if (inst->operands[0].qualifier == AARCH64_OPND_QLF_X
1491 && (info->shifter.kind == AARCH64_MOD_UXTX
1492 || info->shifter.kind == AARCH64_MOD_SXTX))
1493 info->qualifier = AARCH64_OPND_QLF_X;
1495 return true;
1498 /* Decode the shifted register operand for e.g.
1499 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
1500 bool
1501 aarch64_ext_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
1502 aarch64_opnd_info *info,
1503 aarch64_insn code,
1504 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1505 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1507 aarch64_insn value;
1509 /* Rm */
1510 info->reg.regno = extract_field (FLD_Rm, code, 0);
1511 /* shift */
1512 value = extract_field (FLD_shift, code, 0);
1513 info->shifter.kind =
1514 aarch64_get_operand_modifier_from_value (value, false /* extend_p */);
1515 if (info->shifter.kind == AARCH64_MOD_ROR
1516 && inst->opcode->iclass != log_shift)
1517 /* ROR is not available for the shifted register operand in arithmetic
1518 instructions. */
1519 return false;
1520 /* imm6 */
1521 info->shifter.amount = extract_field (FLD_imm6_10, code, 0);
1523 /* This makes the constraint checking happy. */
1524 info->shifter.operator_present = 1;
1526 return true;
1529 /* Decode the LSL-shifted register operand for e.g.
1530 ADDPT <Xd|SP>, <Xn|SP>, <Xm>{, LSL #<amount>}. */
1531 bool
1532 aarch64_ext_reg_lsl_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
1533 aarch64_opnd_info *info,
1534 aarch64_insn code,
1535 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1536 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1538 /* Rm */
1539 info->reg.regno = extract_field (FLD_Rm, code, 0);
1540 /* imm3 */
1541 info->shifter.kind = AARCH64_MOD_LSL;
1542 info->shifter.amount = extract_field (FLD_imm3_10, code, 0);
1543 return true;
1546 /* Decode an SVE address [<base>, #<offset>*<factor>, MUL VL],
1547 where <offset> is given by the OFFSET parameter and where <factor> is
1548 1 plus SELF's operand-dependent value. fields[0] specifies the field
1549 that holds <base>. */
1550 static bool
1551 aarch64_ext_sve_addr_reg_mul_vl (const aarch64_operand *self,
1552 aarch64_opnd_info *info, aarch64_insn code,
1553 int64_t offset)
1555 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1556 info->addr.offset.imm = offset * (1 + get_operand_specific_data (self));
1557 info->addr.offset.is_reg = false;
1558 info->addr.writeback = false;
1559 info->addr.preind = true;
1560 if (offset != 0)
1561 info->shifter.kind = AARCH64_MOD_MUL_VL;
1562 info->shifter.amount = 1;
1563 info->shifter.operator_present = (info->addr.offset.imm != 0);
1564 info->shifter.amount_present = false;
1565 return true;
1568 /* Decode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
1569 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
1570 SELF's operand-dependent value. fields[0] specifies the field that
1571 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
1572 bool
1573 aarch64_ext_sve_addr_ri_s4xvl (const aarch64_operand *self,
1574 aarch64_opnd_info *info, aarch64_insn code,
1575 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1576 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1578 int offset;
1580 offset = extract_field (FLD_SVE_imm4, code, 0);
1581 offset = ((offset + 8) & 15) - 8;
1582 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
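/* Example (illustrative): the wrap-around above maps the raw 4-bit field to
   the range [-8, 7]; a field value of 0b1111 therefore decodes to -1
   (before the operand-dependent MUL VL factor is applied).  */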
1585 /* Decode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
1586 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
1587 SELF's operand-dependent value. fields[0] specifies the field that
1588 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
1589 bool
1590 aarch64_ext_sve_addr_ri_s6xvl (const aarch64_operand *self,
1591 aarch64_opnd_info *info, aarch64_insn code,
1592 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1593 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1595 int offset;
1597 offset = extract_field (FLD_SVE_imm6, code, 0);
1598 offset = (((offset + 32) & 63) - 32);
1599 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1602 /* Decode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
1603 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
1604 SELF's operand-dependent value. fields[0] specifies the field that
1605 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
1606 and imm3 fields, with imm3 being the less-significant part. */
1607 bool
1608 aarch64_ext_sve_addr_ri_s9xvl (const aarch64_operand *self,
1609 aarch64_opnd_info *info,
1610 aarch64_insn code,
1611 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1612 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1614 int offset;
1616 offset = extract_fields (code, 0, 2, FLD_SVE_imm6, FLD_imm3_10);
1617 offset = (((offset + 256) & 511) - 256);
1618 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1621 /* Decode an SVE address [<base>, #<offset> << <shift>], where <offset>
1622 is given by the OFFSET parameter and where <shift> is SELF's operand-
1623 dependent value. fields[0] specifies the base register field <base>. */
1624 static bool
1625 aarch64_ext_sve_addr_reg_imm (const aarch64_operand *self,
1626 aarch64_opnd_info *info, aarch64_insn code,
1627 int64_t offset)
1629 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1630 info->addr.offset.imm = offset * (1 << get_operand_specific_data (self));
1631 info->addr.offset.is_reg = false;
1632 info->addr.writeback = false;
1633 info->addr.preind = true;
1634 info->shifter.operator_present = false;
1635 info->shifter.amount_present = false;
1636 return true;
1639 /* Decode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
1640 is a 4-bit signed number and where <shift> is SELF's operand-dependent
1641 value. fields[0] specifies the base register field. */
1642 bool
1643 aarch64_ext_sve_addr_ri_s4 (const aarch64_operand *self,
1644 aarch64_opnd_info *info, aarch64_insn code,
1645 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1646 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1648 int offset = sign_extend (extract_field (FLD_SVE_imm4, code, 0), 3);
1649 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1652 /* Decode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1653 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1654 value. fields[0] specifies the base register field. */
1655 bool
1656 aarch64_ext_sve_addr_ri_u6 (const aarch64_operand *self,
1657 aarch64_opnd_info *info, aarch64_insn code,
1658 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1659 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1661 int offset = extract_field (FLD_SVE_imm6, code, 0);
1662 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1665 /* Decode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1666 is SELF's operand-dependent value. fields[0] specifies the base
1667 register field and fields[1] specifies the offset register field. */
1668 bool
1669 aarch64_ext_sve_addr_rr_lsl (const aarch64_operand *self,
1670 aarch64_opnd_info *info, aarch64_insn code,
1671 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1672 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1674 int index_regno;
1676 index_regno = extract_field (self->fields[1], code, 0);
1677 if (index_regno == 31 && (self->flags & OPD_F_NO_ZR) != 0)
1678 return false;
1680 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1681 info->addr.offset.regno = index_regno;
1682 info->addr.offset.is_reg = true;
1683 info->addr.writeback = false;
1684 info->addr.preind = true;
1685 info->shifter.kind = AARCH64_MOD_LSL;
1686 info->shifter.amount = get_operand_specific_data (self);
1687 info->shifter.operator_present = (info->shifter.amount != 0);
1688 info->shifter.amount_present = (info->shifter.amount != 0);
1689 return true;
1692 /* Decode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1693 <shift> is SELF's operand-dependent value. fields[0] specifies the
1694 base register field, fields[1] specifies the offset register field and
1695 fields[2] is a single-bit field that selects SXTW over UXTW. */
1696 bool
1697 aarch64_ext_sve_addr_rz_xtw (const aarch64_operand *self,
1698 aarch64_opnd_info *info, aarch64_insn code,
1699 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1700 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1702 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1703 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1704 info->addr.offset.is_reg = true;
1705 info->addr.writeback = false;
1706 info->addr.preind = true;
1707 if (extract_field (self->fields[2], code, 0))
1708 info->shifter.kind = AARCH64_MOD_SXTW;
1709 else
1710 info->shifter.kind = AARCH64_MOD_UXTW;
1711 info->shifter.amount = get_operand_specific_data (self);
1712 info->shifter.operator_present = true;
1713 info->shifter.amount_present = (info->shifter.amount != 0);
1714 return true;
1717 /* Decode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1718 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1719 fields[0] specifies the base register field. */
1720 bool
1721 aarch64_ext_sve_addr_zi_u5 (const aarch64_operand *self,
1722 aarch64_opnd_info *info, aarch64_insn code,
1723 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1724 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1726 int offset = extract_field (FLD_imm5, code, 0);
1727 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1730 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
1731 where <modifier> is given by KIND and where <msz> is a 2-bit unsigned
1732 number. fields[0] specifies the base register field and fields[1]
1733 specifies the offset register field. */
1734 static bool
1735 aarch64_ext_sve_addr_zz (const aarch64_operand *self, aarch64_opnd_info *info,
1736 aarch64_insn code, enum aarch64_modifier_kind kind)
1738 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1739 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1740 info->addr.offset.is_reg = true;
1741 info->addr.writeback = false;
1742 info->addr.preind = true;
1743 info->shifter.kind = kind;
1744 info->shifter.amount = extract_field (FLD_SVE_msz, code, 0);
1745 info->shifter.operator_present = (kind != AARCH64_MOD_LSL
1746 || info->shifter.amount != 0);
1747 info->shifter.amount_present = (info->shifter.amount != 0);
1748 return true;
1751 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
1752 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1753 field and fields[1] specifies the offset register field. */
1754 bool
1755 aarch64_ext_sve_addr_zz_lsl (const aarch64_operand *self,
1756 aarch64_opnd_info *info, aarch64_insn code,
1757 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1758 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1760 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_LSL);
1763 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
1764 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1765 field and fields[1] specifies the offset register field. */
1766 bool
1767 aarch64_ext_sve_addr_zz_sxtw (const aarch64_operand *self,
1768 aarch64_opnd_info *info, aarch64_insn code,
1769 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1770 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1772 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_SXTW);
1775 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
1776 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1777 field and fields[1] specifies the offset register field. */
1778 bool
1779 aarch64_ext_sve_addr_zz_uxtw (const aarch64_operand *self,
1780 aarch64_opnd_info *info, aarch64_insn code,
1781 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1782 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1784 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_UXTW);
1787 /* Finish decoding an SVE arithmetic immediate, given that INFO already
1788 has the raw field value and that the low 8 bits decode to VALUE. */
1789 static bool
1790 decode_sve_aimm (aarch64_opnd_info *info, int64_t value)
1792 info->shifter.kind = AARCH64_MOD_LSL;
1793 info->shifter.amount = 0;
1794 if (info->imm.value & 0x100)
1796 if (value == 0)
1797 /* Decode 0x100 as #0, LSL #8. */
1798 info->shifter.amount = 8;
1799 else
1800 value *= 256;
1802 info->shifter.operator_present = (info->shifter.amount != 0);
1803 info->shifter.amount_present = (info->shifter.amount != 0);
1804 info->imm.value = value;
1805 return true;
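/* Worked example: a raw 9-bit immediate field of 0x100 has a zero low byte
   and is shown as "#0, LSL #8"; 0x101 becomes #256 (1 * 256) with no
   explicit shift, and 0x001 is simply #1.  */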
1808 /* Decode an SVE ADD/SUB immediate. */
1809 bool
1810 aarch64_ext_sve_aimm (const aarch64_operand *self,
1811 aarch64_opnd_info *info, const aarch64_insn code,
1812 const aarch64_inst *inst,
1813 aarch64_operand_error *errors)
1815 return (aarch64_ext_imm (self, info, code, inst, errors)
1816 && decode_sve_aimm (info, (uint8_t) info->imm.value));
1819 bool
1820 aarch64_ext_sve_aligned_reglist (const aarch64_operand *self,
1821 aarch64_opnd_info *info, aarch64_insn code,
1822 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1823 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1825 unsigned int num_regs = get_operand_specific_data (self);
1826 unsigned int val = extract_field (self->fields[0], code, 0);
1827 info->reglist.first_regno = val * num_regs;
1828 info->reglist.num_regs = num_regs;
1829 info->reglist.stride = 1;
1830 return true;
1833 /* Decode an SVE CPY/DUP immediate. */
1834 bool
1835 aarch64_ext_sve_asimm (const aarch64_operand *self,
1836 aarch64_opnd_info *info, const aarch64_insn code,
1837 const aarch64_inst *inst,
1838 aarch64_operand_error *errors)
1840 return (aarch64_ext_imm (self, info, code, inst, errors)
1841 && decode_sve_aimm (info, (int8_t) info->imm.value));
1844 /* Decode a single-bit immediate that selects between #0.5 and #1.0.
1845 The fields array specifies which field to use. */
1846 bool
1847 aarch64_ext_sve_float_half_one (const aarch64_operand *self,
1848 aarch64_opnd_info *info, aarch64_insn code,
1849 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1850 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1852 if (extract_field (self->fields[0], code, 0))
1853 info->imm.value = 0x3f800000;
1854 else
1855 info->imm.value = 0x3f000000;
1856 info->imm.is_fp = true;
1857 return true;
1860 /* Decode a single-bit immediate that selects between #0.5 and #2.0.
1861 The fields array specifies which field to use. */
1862 bool
1863 aarch64_ext_sve_float_half_two (const aarch64_operand *self,
1864 aarch64_opnd_info *info, aarch64_insn code,
1865 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1866 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1868 if (extract_field (self->fields[0], code, 0))
1869 info->imm.value = 0x40000000;
1870 else
1871 info->imm.value = 0x3f000000;
1872 info->imm.is_fp = true;
1873 return true;
1876 /* Decode a single-bit immediate that selects between #0.0 and #1.0.
1877 The fields array specifies which field to use. */
1878 bool
1879 aarch64_ext_sve_float_zero_one (const aarch64_operand *self,
1880 aarch64_opnd_info *info, aarch64_insn code,
1881 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1882 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1884 if (extract_field (self->fields[0], code, 0))
1885 info->imm.value = 0x3f800000;
1886 else
1887 info->imm.value = 0x0;
1888 info->imm.is_fp = true;
1889 return true;
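/* Note: the constants used by the three helpers above are the IEEE 754
   single-precision bit patterns 0x3f000000 = 0.5, 0x3f800000 = 1.0 and
   0x40000000 = 2.0, which the operand printer renders as #0.5, #1.0 and
   #2.0.  */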
1892 /* Decode ZA tile vector, vector indicator, vector selector, qualifier and
1893 immediate on numerous SME instruction fields such as MOVA. */
1894 bool
1895 aarch64_ext_sme_za_hv_tiles (const aarch64_operand *self,
1896 aarch64_opnd_info *info, aarch64_insn code,
1897 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1898 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1900 int fld_size = extract_field (self->fields[0], code, 0);
1901 int fld_q = extract_field (self->fields[1], code, 0);
1902 int fld_v = extract_field (self->fields[2], code, 0);
1903 int fld_rv = extract_field (self->fields[3], code, 0);
1904 int fld_zan_imm = extract_field (self->fields[4], code, 0);
1906 /* Deduce qualifier encoded in size and Q fields. */
1907 if (fld_size == 0)
1909 info->indexed_za.regno = 0;
1910 info->indexed_za.index.imm = fld_zan_imm;
1912 else if (fld_size == 1)
1914 info->indexed_za.regno = fld_zan_imm >> 3;
1915 info->indexed_za.index.imm = fld_zan_imm & 0x07;
1917 else if (fld_size == 2)
1919 info->indexed_za.regno = fld_zan_imm >> 2;
1920 info->indexed_za.index.imm = fld_zan_imm & 0x03;
1922 else if (fld_size == 3 && fld_q == 0)
1924 info->indexed_za.regno = fld_zan_imm >> 1;
1925 info->indexed_za.index.imm = fld_zan_imm & 0x01;
1927 else if (fld_size == 3 && fld_q == 1)
1929 info->indexed_za.regno = fld_zan_imm;
1930 info->indexed_za.index.imm = 0;
1932 else
1933 return false;
1935 info->indexed_za.index.regno = fld_rv + 12;
1936 info->indexed_za.v = fld_v;
1938 return true;
1941 bool
1942 aarch64_ext_sme_za_hv_tiles_range (const aarch64_operand *self,
1943 aarch64_opnd_info *info, aarch64_insn code,
1944 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1945 aarch64_operand_error *errors
1946 ATTRIBUTE_UNUSED)
1948 int ebytes = aarch64_get_qualifier_esize (info->qualifier);
1949 int range_size = get_opcode_dependent_value (inst->opcode);
1950 int fld_v = extract_field (self->fields[0], code, 0);
1951 int fld_rv = extract_field (self->fields[1], code, 0);
1952 int fld_zan_imm = extract_field (self->fields[2], code, 0);
1953 int max_value = 16 / range_size / ebytes;
1955 if (max_value == 0)
1956 max_value = 1;
1958 int regno = fld_zan_imm / max_value;
1959 if (regno >= ebytes)
1960 return false;
1962 info->indexed_za.regno = regno;
1963 info->indexed_za.index.imm = (fld_zan_imm % max_value) * range_size;
1964 info->indexed_za.index.countm1 = range_size - 1;
1965 info->indexed_za.index.regno = fld_rv + 12;
1966 info->indexed_za.v = fld_v;
1968 return true;
 1971 /* Decode, for the SME ZERO instruction, a list of up to eight 64-bit element
 1972    tile names separated by commas, encoded in the "imm8" field.
 1974    For programmer convenience an assembler must also accept the names of
 1975    32-bit, 16-bit and 8-bit element tiles, which are converted into the
 1976    corresponding set of 64-bit element tiles.  */
1978 bool
1979 aarch64_ext_sme_za_list (const aarch64_operand *self,
1980 aarch64_opnd_info *info, aarch64_insn code,
1981 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1982 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1984 int mask = extract_field (self->fields[0], code, 0);
1985 info->imm.value = mask;
1986 return true;
1989 /* Decode ZA array vector select register (Rv field), optional vector and
1990 memory offset (imm4_11 field).
1992 bool
1993 aarch64_ext_sme_za_array (const aarch64_operand *self,
1994 aarch64_opnd_info *info, aarch64_insn code,
1995 const aarch64_inst *inst,
1996 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1998 int regno = extract_field (self->fields[0], code, 0);
1999 if (info->type == AARCH64_OPND_SME_ZA_array_off4)
2000 regno += 12;
2001 else
2002 regno += 8;
2003 int imm = extract_field (self->fields[1], code, 0);
2004 int num_offsets = get_operand_specific_data (self);
2005 if (num_offsets == 0)
2006 num_offsets = 1;
2007 info->indexed_za.index.regno = regno;
2008 info->indexed_za.index.imm = imm * num_offsets;
2009 info->indexed_za.index.countm1 = num_offsets - 1;
2010 info->indexed_za.group_size = get_opcode_dependent_value (inst->opcode);
2011 return true;
 2014 /* Decode two ZA tile slices (V, Rv, off3 | ZAn, off2 | ZAn, ol | ZAn) fields.  */
2015 bool
2016 aarch64_ext_sme_za_vrs1 (const aarch64_operand *self,
2017 aarch64_opnd_info *info, aarch64_insn code,
2018 const aarch64_inst *inst,
2019 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2021 int v = extract_field (self->fields[0], code, 0);
2022 int regno = 12 + extract_field (self->fields[1], code, 0);
2023 int imm, za_reg, num_offset = 2;
2025 switch (info->qualifier)
2027 case AARCH64_OPND_QLF_S_B:
2028 imm = extract_field (self->fields[2], code, 0);
2029 info->indexed_za.index.imm = imm * num_offset;
2030 break;
2031 case AARCH64_OPND_QLF_S_H:
2032 case AARCH64_OPND_QLF_S_S:
2033 za_reg = extract_field (self->fields[2], code, 0);
2034 imm = extract_field (self->fields[3], code, 0);
2035 info->indexed_za.index.imm = imm * num_offset;
2036 info->indexed_za.regno = za_reg;
2037 break;
2038 case AARCH64_OPND_QLF_S_D:
2039 za_reg = extract_field (self->fields[2], code, 0);
2040 info->indexed_za.regno = za_reg;
2041 break;
2042 default:
2043 return false;
2046 info->indexed_za.index.regno = regno;
2047 info->indexed_za.index.countm1 = num_offset - 1;
2048 info->indexed_za.v = v;
2049 info->indexed_za.group_size = get_opcode_dependent_value (inst->opcode);
2050 return true;
 2053 /* Decode four ZA tile slices (V, Rv, off3 | ZAn, off2 | ZAn, ol | ZAn) fields.  */
2054 bool
2055 aarch64_ext_sme_za_vrs2 (const aarch64_operand *self,
2056 aarch64_opnd_info *info, aarch64_insn code,
2057 const aarch64_inst *inst,
2058 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2060 int v = extract_field (self->fields[0], code, 0);
2061 int regno = 12 + extract_field (self->fields[1], code, 0);
 2062   int imm, za_reg, num_offset = 4;
2064 switch (info->qualifier)
2066 case AARCH64_OPND_QLF_S_B:
2067 imm = extract_field (self->fields[2], code, 0);
2068 info->indexed_za.index.imm = imm * num_offset;
2069 break;
2070 case AARCH64_OPND_QLF_S_H:
2071 za_reg = extract_field (self->fields[2], code, 0);
2072 imm = extract_field (self->fields[3], code, 0);
2073 info->indexed_za.index.imm = imm * num_offset;
2074 info->indexed_za.regno = za_reg;
2075 break;
2076 case AARCH64_OPND_QLF_S_S:
2077 case AARCH64_OPND_QLF_S_D:
2078 za_reg = extract_field (self->fields[2], code, 0);
2079 info->indexed_za.regno = za_reg;
2080 break;
2081 default:
2082 return false;
2085 info->indexed_za.index.regno = regno;
2086 info->indexed_za.index.countm1 = num_offset - 1;
2087 info->indexed_za.v = v;
2088 info->indexed_za.group_size = get_opcode_dependent_value (inst->opcode);
2089 return true;
2092 bool
2093 aarch64_ext_sme_addr_ri_u4xvl (const aarch64_operand *self,
2094 aarch64_opnd_info *info, aarch64_insn code,
2095 const aarch64_inst *inst ATTRIBUTE_UNUSED,
2096 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2098 int regno = extract_field (self->fields[0], code, 0);
2099 int imm = extract_field (self->fields[1], code, 0);
2100 info->addr.base_regno = regno;
2101 info->addr.offset.imm = imm;
2102 /* MUL VL operator is always present for this operand. */
2103 info->shifter.kind = AARCH64_MOD_MUL_VL;
2104 info->shifter.operator_present = (imm != 0);
2105 return true;
 2108 /* Decode the {SM|ZA} field for SMSTART and SMSTOP instructions. */
2109 bool
2110 aarch64_ext_sme_sm_za (const aarch64_operand *self,
2111 aarch64_opnd_info *info, aarch64_insn code,
2112 const aarch64_inst *inst ATTRIBUTE_UNUSED,
2113 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2115 info->pstatefield = 0x1b;
2116 aarch64_insn fld_crm = extract_field (self->fields[0], code, 0);
2117 fld_crm >>= 1; /* CRm[3:1]. */
2119 if (fld_crm == 0x1)
2120 info->reg.regno = 's';
2121 else if (fld_crm == 0x2)
2122 info->reg.regno = 'z';
2123 else
2124 return false;
2126 return true;
2129 bool
2130 aarch64_ext_sme_pred_reg_with_index (const aarch64_operand *self,
2131 aarch64_opnd_info *info, aarch64_insn code,
2132 const aarch64_inst *inst ATTRIBUTE_UNUSED,
2133 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2135 aarch64_insn fld_rm = extract_field (self->fields[0], code, 0);
2136 aarch64_insn fld_pn = extract_field (self->fields[1], code, 0);
2137 aarch64_insn fld_i1 = extract_field (self->fields[2], code, 0);
2138 aarch64_insn fld_tszh = extract_field (self->fields[3], code, 0);
2139 aarch64_insn fld_tszl = extract_field (self->fields[4], code, 0);
2140 int imm;
2142 info->indexed_za.regno = fld_pn;
2143 info->indexed_za.index.regno = fld_rm + 12;
2145 if (fld_tszl & 0x1)
2146 imm = (fld_i1 << 3) | (fld_tszh << 2) | (fld_tszl >> 1);
2147 else if (fld_tszl & 0x2)
2148 imm = (fld_i1 << 2) | (fld_tszh << 1) | (fld_tszl >> 2);
2149 else if (fld_tszl & 0x4)
2150 imm = (fld_i1 << 1) | fld_tszh;
2151 else if (fld_tszh)
2152 imm = fld_i1;
2153 else
2154 return false;
2156 info->indexed_za.index.imm = imm;
2157 return true;
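/* The element size is implied by the lowest set bit of tszh:tszl, and the
   bits above it, together with i1, form the index.  For example, with
   tszl<0> set (.B elements) the index is i1:tszh:tszl<2:1>, i.e. 4 bits,
   while with only tszh set (.D elements) the index is just i1.  */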
2160 /* Decode Zn[MM], where MM has a 7-bit triangular encoding. The fields
2161 array specifies which field to use for Zn. MM is encoded in the
2162 concatenation of imm5 and SVE_tszh, with imm5 being the less
2163 significant part. */
2164 bool
2165 aarch64_ext_sve_index (const aarch64_operand *self,
2166 aarch64_opnd_info *info, aarch64_insn code,
2167 const aarch64_inst *inst ATTRIBUTE_UNUSED,
2168 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2170 int val;
2172 info->reglane.regno = extract_field (self->fields[0], code, 0);
2173 val = extract_fields (code, 0, 2, FLD_SVE_tszh, FLD_imm5);
2174 if ((val & 31) == 0)
2175 return 0;
2176 while ((val & 1) == 0)
2177 val /= 2;
2178 info->reglane.index = val / 2;
2179 return true;
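/* Worked example: the lowest set bit of tszh:imm5 gives the element size
   and the bits above it give the index, so tszh:imm5 = 0b0010110 has one
   trailing zero (.H elements) and decodes to Z<n>.H[5].  */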
 2182 /* Decode Zn.<T>[<imm>], where <imm> is an immediate in the range 0 to one less
 2183    than the number of elements in 128 bits, encoded in il:tsz.  */
2184 bool
2185 aarch64_ext_sve_index_imm (const aarch64_operand *self,
2186 aarch64_opnd_info *info, aarch64_insn code,
2187 const aarch64_inst *inst ATTRIBUTE_UNUSED,
2188 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2190 int val;
2192 info->reglane.regno = extract_field (self->fields[0], code, 0);
2193 val = extract_fields (code, 0, 2, self->fields[2], self->fields[1]);
2194 if ((val & 15) == 0)
2195 return 0;
2196 while ((val & 1) == 0)
2197 val /= 2;
2198 info->reglane.index = val / 2;
2199 return true;
2202 /* Decode a logical immediate for the MOV alias of SVE DUPM. */
2203 bool
2204 aarch64_ext_sve_limm_mov (const aarch64_operand *self,
2205 aarch64_opnd_info *info, const aarch64_insn code,
2206 const aarch64_inst *inst,
2207 aarch64_operand_error *errors)
2209 int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
2210 return (aarch64_ext_limm (self, info, code, inst, errors)
2211 && aarch64_sve_dupm_mov_immediate_p (info->imm.value, esize));
2214 /* Decode Zn[MM], where Zn occupies the least-significant part of the field
2215 and where MM occupies the most-significant part. The operand-dependent
2216 value specifies the number of bits in Zn. */
2217 bool
2218 aarch64_ext_sve_quad_index (const aarch64_operand *self,
2219 aarch64_opnd_info *info, aarch64_insn code,
2220 const aarch64_inst *inst ATTRIBUTE_UNUSED,
2221 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2223 unsigned int reg_bits = get_operand_specific_data (self);
2224 unsigned int val = extract_all_fields (self, code);
2225 info->reglane.regno = val & ((1 << reg_bits) - 1);
2226 info->reglane.index = val >> reg_bits;
2227 return true;
2230 /* Decode {Zn.<T> - Zm.<T>}. The fields array specifies which field
2231 to use for Zn. The opcode-dependent value specifies the number
2232 of registers in the list. */
2233 bool
2234 aarch64_ext_sve_reglist (const aarch64_operand *self,
2235 aarch64_opnd_info *info, aarch64_insn code,
2236 const aarch64_inst *inst ATTRIBUTE_UNUSED,
2237 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2239 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
2240 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
2241 info->reglist.stride = 1;
2242 return true;
2245 /* Decode {Zn.<T> , Zm.<T>}. The fields array specifies which field
2246 to use for Zn. The opcode-dependent value specifies the number
2247 of registers in the list. */
2248 bool
2249 aarch64_ext_sve_reglist_zt (const aarch64_operand *self,
2250 aarch64_opnd_info *info, aarch64_insn code,
2251 const aarch64_inst *inst ATTRIBUTE_UNUSED,
2252 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2254 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
2255 info->reglist.num_regs = get_operand_specific_data (self);
2256 info->reglist.stride = 1;
2257 return true;
2260 /* Decode a strided register list. The first field holds the top bit
2261 (0 or 16) and the second field holds the lower bits. The stride is
2262 16 divided by the list length. */
2263 bool
2264 aarch64_ext_sve_strided_reglist (const aarch64_operand *self,
2265 aarch64_opnd_info *info, aarch64_insn code,
2266 const aarch64_inst *inst ATTRIBUTE_UNUSED,
2267 aarch64_operand_error *errors
2268 ATTRIBUTE_UNUSED)
2270 unsigned int upper = extract_field (self->fields[0], code, 0);
2271 unsigned int lower = extract_field (self->fields[1], code, 0);
2272 info->reglist.first_regno = upper * 16 + lower;
2273 info->reglist.num_regs = get_operand_specific_data (self);
2274 info->reglist.stride = 16 / info->reglist.num_regs;
2275 return true;
2278 /* Decode <pattern>{, MUL #<amount>}. The fields array specifies which
2279 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
2280 field. */
2281 bool
2282 aarch64_ext_sve_scale (const aarch64_operand *self,
2283 aarch64_opnd_info *info, aarch64_insn code,
2284 const aarch64_inst *inst, aarch64_operand_error *errors)
2286 int val;
2288 if (!aarch64_ext_imm (self, info, code, inst, errors))
2289 return false;
2290 val = extract_field (FLD_SVE_imm4, code, 0);
2291 info->shifter.kind = AARCH64_MOD_MUL;
2292 info->shifter.amount = val + 1;
2293 info->shifter.operator_present = (val != 0);
2294 info->shifter.amount_present = (val != 0);
2295 return true;
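/* For example, an SVE_imm4 value of 3 is printed as ", MUL #4", while a
   value of 0 means MUL #1 and the modifier is omitted from the output.  */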
2298 /* Return the top set bit in VALUE, which is expected to be relatively
2299 small. */
2300 static uint64_t
2301 get_top_bit (uint64_t value)
2303 while ((value & -value) != value)
2304 value -= value & -value;
2305 return value;
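/* For example, get_top_bit (0b1101) repeatedly clears the lowest set bit
   until a single bit remains and returns 0b1000.  */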
2308 /* Decode an SVE shift-left immediate. */
2309 bool
2310 aarch64_ext_sve_shlimm (const aarch64_operand *self,
2311 aarch64_opnd_info *info, const aarch64_insn code,
2312 const aarch64_inst *inst, aarch64_operand_error *errors)
2314 if (!aarch64_ext_imm (self, info, code, inst, errors)
2315 || info->imm.value == 0)
2316 return false;
2318 info->imm.value -= get_top_bit (info->imm.value);
2319 return true;
2322 /* Decode an SVE shift-right immediate. */
2323 bool
2324 aarch64_ext_sve_shrimm (const aarch64_operand *self,
2325 aarch64_opnd_info *info, const aarch64_insn code,
2326 const aarch64_inst *inst, aarch64_operand_error *errors)
2328 if (!aarch64_ext_imm (self, info, code, inst, errors)
2329 || info->imm.value == 0)
2330 return false;
2332 info->imm.value = get_top_bit (info->imm.value) * 2 - info->imm.value;
2333 return true;
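/* Worked example: the top set bit of the extracted immediate marks the
   element size and the remaining bits hold the shift.  A value of 10
   (0b1010) therefore means .B elements, giving LSL #2 for the shift-left
   form (10 - 8) and #6 for the shift-right form (2 * 8 - 10).  */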
2336 /* Decode X0-X30. Register 31 is unallocated. */
2337 bool
2338 aarch64_ext_x0_to_x30 (const aarch64_operand *self, aarch64_opnd_info *info,
2339 const aarch64_insn code,
2340 const aarch64_inst *inst ATTRIBUTE_UNUSED,
2341 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2343 info->reg.regno = extract_field (self->fields[0], code, 0);
2344 return info->reg.regno <= 30;
2347 /* Decode an indexed register, with the first field being the register
2348 number and the remaining fields being the index. */
2349 bool
2350 aarch64_ext_simple_index (const aarch64_operand *self, aarch64_opnd_info *info,
2351 const aarch64_insn code,
2352 const aarch64_inst *inst ATTRIBUTE_UNUSED,
2353 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2355 int bias = get_operand_specific_data (self);
2356 info->reglane.regno = extract_field (self->fields[0], code, 0) + bias;
2357 info->reglane.index = extract_all_fields_after (self, 1, code);
2358 return true;
2361 /* Decode a plain shift-right immediate, when there is only a single
2362 element size. */
2363 bool
2364 aarch64_ext_plain_shrimm (const aarch64_operand *self, aarch64_opnd_info *info,
2365 const aarch64_insn code,
2366 const aarch64_inst *inst ATTRIBUTE_UNUSED,
2367 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2369 unsigned int base = 1 << get_operand_field_width (self, 0);
2370 info->imm.value = base - extract_field (self->fields[0], code, 0);
2371 return true;
2374 /* Bitfields that are commonly used to encode certain operands' information
2375 may be partially used as part of the base opcode in some instructions.
2376 For example, the bit 1 of the field 'size' in
2377 FCVTXN <Vb><d>, <Va><n>
2378 is actually part of the base opcode, while only size<0> is available
2379 for encoding the register type. Another example is the AdvSIMD
2380 instruction ORR (register), in which the field 'size' is also used for
2381 the base opcode, leaving only the field 'Q' available to encode the
2382 vector register arrangement specifier '8B' or '16B'.
2384 This function tries to deduce the qualifier from the value of partially
2385 constrained field(s). Given the VALUE of such a field or fields, the
2386 qualifiers CANDIDATES and the MASK (indicating which bits are valid for
2387 operand encoding), the function returns the matching qualifier or
2388 AARCH64_OPND_QLF_NIL if nothing matches.
2390 N.B. CANDIDATES is a group of possible qualifiers that are valid for
2391 one operand; it has a maximum of AARCH64_MAX_QLF_SEQ_NUM qualifiers and
2392 may end with AARCH64_OPND_QLF_NIL. */
2394 static enum aarch64_opnd_qualifier
2395 get_qualifier_from_partial_encoding (aarch64_insn value,
2396 const enum aarch64_opnd_qualifier* \
2397 candidates,
2398 aarch64_insn mask)
2400 int i;
2401 DEBUG_TRACE ("enter with value: %d, mask: %d", (int)value, (int)mask);
2402 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
2404 aarch64_insn standard_value;
2405 if (candidates[i] == AARCH64_OPND_QLF_NIL)
2406 break;
2407 standard_value = aarch64_get_qualifier_standard_value (candidates[i]);
2408 if ((standard_value & mask) == (value & mask))
2409 return candidates[i];
2411 return AARCH64_OPND_QLF_NIL;
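/* For illustration: in AdvSIMD ORR (register) the 'size' bits belong to the
   base opcode, so MASK covers only Q.  With CANDIDATES { 8B, 16B } only the
   Q bit of each standard value is compared against VALUE, giving 8B for
   Q == 0 and 16B for Q == 1.  */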
2414 /* Given a list of qualifier sequences, return all possible valid qualifiers
2415 for operand IDX in QUALIFIERS.
2416 Assume QUALIFIERS is an array whose length is large enough. */
2418 static void
2419 get_operand_possible_qualifiers (int idx,
2420 const aarch64_opnd_qualifier_seq_t *list,
2421 enum aarch64_opnd_qualifier *qualifiers)
2423 int i;
2424 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
2425 if ((qualifiers[i] = list[i][idx]) == AARCH64_OPND_QLF_NIL)
2426 break;
 2429 /* Decode the size and Q fields for e.g. SHADD.
 2430    We tag one operand with the qualifier according to the code;
 2431    whether the qualifier is valid for this opcode or not is left to
 2432    the semantic checking.  */
2434 static int
2435 decode_sizeq (aarch64_inst *inst)
2437 int idx;
2438 enum aarch64_opnd_qualifier qualifier;
2439 aarch64_insn code;
2440 aarch64_insn value, mask;
2441 enum aarch64_field_kind fld_sz;
2442 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
2444 if (inst->opcode->iclass == asisdlse
2445 || inst->opcode->iclass == asisdlsep
2446 || inst->opcode->iclass == asisdlso
2447 || inst->opcode->iclass == asisdlsop)
2448 fld_sz = FLD_vldst_size;
2449 else
2450 fld_sz = FLD_size;
2452 code = inst->value;
2453 value = extract_fields (code, inst->opcode->mask, 2, fld_sz, FLD_Q);
 2454   /* Work out which bits of the Q and size fields are actually available
 2455      for operand encoding.  Opcodes like FMAXNM and FMLA have size[1]
 2456      unavailable.  */
2457 mask = extract_fields (~inst->opcode->mask, 0, 2, fld_sz, FLD_Q);
 2459   /* The index of the operand that we are going to tag with a qualifier, and
 2460      the qualifier itself, are deduced from the value of the size and Q fields
 2461      and the possible valid qualifier lists.  */
2462 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
2463 DEBUG_TRACE ("key idx: %d", idx);
 2465   /* For most related instructions, size:Q is fully available for operand
 2466      encoding.  */
2467 if (mask == 0x7)
2469 inst->operands[idx].qualifier = get_vreg_qualifier_from_value (value);
2470 return 1;
2473 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
2474 candidates);
2475 #ifdef DEBUG_AARCH64
2476 if (debug_dump)
2478 int i;
2479 for (i = 0; candidates[i] != AARCH64_OPND_QLF_NIL
2480 && i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
2481 DEBUG_TRACE ("qualifier %d: %s", i,
2482 aarch64_get_qualifier_name(candidates[i]));
2483 DEBUG_TRACE ("%d, %d", (int)value, (int)mask);
2485 #endif /* DEBUG_AARCH64 */
2487 qualifier = get_qualifier_from_partial_encoding (value, candidates, mask);
2489 if (qualifier == AARCH64_OPND_QLF_NIL)
2490 return 0;
2492 inst->operands[idx].qualifier = qualifier;
2493 return 1;
2496 /* Decode size[0]:Q, i.e. bit 22 and bit 30, for
2497 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
2499 static int
2500 decode_asimd_fcvt (aarch64_inst *inst)
2502 aarch64_field field = {0, 0};
2503 aarch64_insn value;
2504 enum aarch64_opnd_qualifier qualifier;
2506 gen_sub_field (FLD_size, 0, 1, &field);
2507 value = extract_field_2 (&field, inst->value, 0);
2508 qualifier = value == 0 ? AARCH64_OPND_QLF_V_4S
2509 : AARCH64_OPND_QLF_V_2D;
2510 switch (inst->opcode->op)
2512 case OP_FCVTN:
2513 case OP_FCVTN2:
2514 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
2515 inst->operands[1].qualifier = qualifier;
2516 break;
2517 case OP_FCVTL:
2518 case OP_FCVTL2:
2519 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
2520 inst->operands[0].qualifier = qualifier;
2521 break;
2522 default:
2523 return 0;
2526 return 1;
2529 /* Decode size[0], i.e. bit 22, for
2530 e.g. FCVTXN <Vb><d>, <Va><n>. */
2532 static int
2533 decode_asisd_fcvtxn (aarch64_inst *inst)
2535 aarch64_field field = {0, 0};
2536 gen_sub_field (FLD_size, 0, 1, &field);
2537 if (!extract_field_2 (&field, inst->value, 0))
2538 return 0;
2539 inst->operands[0].qualifier = AARCH64_OPND_QLF_S_S;
2540 return 1;
2543 /* Decode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
2544 static int
2545 decode_fcvt (aarch64_inst *inst)
2547 enum aarch64_opnd_qualifier qualifier;
2548 aarch64_insn value;
2549 const aarch64_field field = {15, 2};
2551 /* opc dstsize */
2552 value = extract_field_2 (&field, inst->value, 0);
2553 switch (value)
2555 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
2556 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
2557 case 3: qualifier = AARCH64_OPND_QLF_S_H; break;
2558 default: return 0;
2560 inst->operands[0].qualifier = qualifier;
2562 return 1;
2565 /* Do miscellaneous decodings that are not common enough to be driven by
2566 flags. */
2568 static int
2569 do_misc_decoding (aarch64_inst *inst)
2571 unsigned int value;
2572 switch (inst->opcode->op)
2574 case OP_FCVT:
2575 return decode_fcvt (inst);
2577 case OP_FCVTN:
2578 case OP_FCVTN2:
2579 case OP_FCVTL:
2580 case OP_FCVTL2:
2581 return decode_asimd_fcvt (inst);
2583 case OP_FCVTXN_S:
2584 return decode_asisd_fcvtxn (inst);
2586 case OP_MOV_P_P:
2587 case OP_MOVS_P_P:
2588 value = extract_field (FLD_SVE_Pn, inst->value, 0);
2589 return (value == extract_field (FLD_SVE_Pm, inst->value, 0)
2590 && value == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
2592 case OP_MOV_Z_P_Z:
2593 return (extract_field (FLD_SVE_Zd, inst->value, 0)
2594 == extract_field (FLD_SVE_Zm_16, inst->value, 0));
2596 case OP_MOV_Z_V:
2597 /* Index must be zero. */
2598 value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2599 return value > 0 && value <= 16 && value == (value & -value);
2601 case OP_MOV_Z_Z:
2602 return (extract_field (FLD_SVE_Zn, inst->value, 0)
2603 == extract_field (FLD_SVE_Zm_16, inst->value, 0));
2605 case OP_MOV_Z_Zi:
2606 /* Index must be nonzero. */
2607 value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2608 return value > 0 && value != (value & -value);
2610 case OP_MOVM_P_P_P:
2611 return (extract_field (FLD_SVE_Pd, inst->value, 0)
2612 == extract_field (FLD_SVE_Pm, inst->value, 0));
2614 case OP_MOVZS_P_P_P:
2615 case OP_MOVZ_P_P_P:
2616 return (extract_field (FLD_SVE_Pn, inst->value, 0)
2617 == extract_field (FLD_SVE_Pm, inst->value, 0));
2619 case OP_NOTS_P_P_P_Z:
2620 case OP_NOT_P_P_P_Z:
2621 return (extract_field (FLD_SVE_Pm, inst->value, 0)
2622 == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
2624 default:
2625 return 0;
2629 /* Opcodes that have fields shared by multiple operands are usually flagged
2630 with flags. In this function, we detect such flags, decode the related
2631 field(s) and store the information in one of the related operands. The
 2632    'one' operand is not an arbitrary operand, but one of the operands that can
 2633    accommodate all the information that has been decoded.  */
2635 static int
2636 do_special_decoding (aarch64_inst *inst)
2638 int idx;
2639 aarch64_insn value;
 2640   /* Condition for truly conditionally-executed instructions, e.g. b.cond.  */
2641 if (inst->opcode->flags & F_COND)
2643 value = extract_field (FLD_cond2, inst->value, 0);
2644 inst->cond = get_cond_from_value (value);
2646 /* 'sf' field. */
2647 if (inst->opcode->flags & F_SF)
2649 idx = select_operand_for_sf_field_coding (inst->opcode);
2650 value = extract_field (FLD_sf, inst->value, 0);
2651 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2652 if ((inst->opcode->flags & F_N)
2653 && extract_field (FLD_N, inst->value, 0) != value)
2654 return 0;
 2656   /* 'lse_sz' field.  */
2657 if (inst->opcode->flags & F_LSE_SZ)
2659 idx = select_operand_for_sf_field_coding (inst->opcode);
2660 value = extract_field (FLD_lse_sz, inst->value, 0);
2661 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2663 /* rcpc3 'size' field. */
2664 if (inst->opcode->flags & F_RCPC3_SIZE)
2666 value = extract_field (FLD_rcpc3_size, inst->value, 0);
2667 for (int i = 0;
2668 aarch64_operands[inst->operands[i].type].op_class != AARCH64_OPND_CLASS_ADDRESS;
2669 i++)
2671 if (aarch64_operands[inst->operands[i].type].op_class
2672 == AARCH64_OPND_CLASS_INT_REG)
2673 inst->operands[i].qualifier = get_greg_qualifier_from_value (value & 1);
2674 else if (aarch64_operands[inst->operands[i].type].op_class
2675 == AARCH64_OPND_CLASS_FP_REG)
2677 value += (extract_field (FLD_opc1, inst->value, 0) << 2);
2678 inst->operands[i].qualifier = get_sreg_qualifier_from_value (value);
2683 /* size:Q fields. */
2684 if (inst->opcode->flags & F_SIZEQ)
2685 return decode_sizeq (inst);
2687 if (inst->opcode->flags & F_FPTYPE)
2689 idx = select_operand_for_fptype_field_coding (inst->opcode);
2690 value = extract_field (FLD_type, inst->value, 0);
2691 switch (value)
2693 case 0: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_S; break;
2694 case 1: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_D; break;
2695 case 3: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_H; break;
2696 default: return 0;
2700 if (inst->opcode->flags & F_SSIZE)
2702 /* N.B. some opcodes like FCMGT <V><d>, <V><n>, #0 have the size[1] as part
2703 of the base opcode. */
2704 aarch64_insn mask;
2705 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
2706 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
2707 value = extract_field (FLD_size, inst->value, inst->opcode->mask);
2708 mask = extract_field (FLD_size, ~inst->opcode->mask, 0);
 2709      /* For most related instructions, the 'size' field is fully available for
 2710         operand encoding.  */
2711 if (mask == 0x3)
2712 inst->operands[idx].qualifier = get_sreg_qualifier_from_value (value);
2713 else
2715 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
2716 candidates);
2717 inst->operands[idx].qualifier
2718 = get_qualifier_from_partial_encoding (value, candidates, mask);
2722 if (inst->opcode->flags & F_T)
2724 /* Num of consecutive '0's on the right side of imm5<3:0>. */
2725 int num = 0;
2726 unsigned val, Q;
2727 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2728 == AARCH64_OPND_CLASS_SIMD_REG);
2729 /* imm5<3:0> q <t>
2730 0000 x reserved
2731 xxx1 0 8b
2732 xxx1 1 16b
2733 xx10 0 4h
2734 xx10 1 8h
2735 x100 0 2s
2736 x100 1 4s
2737 1000 0 reserved
2738 1000 1 2d */
2739 val = extract_field (FLD_imm5, inst->value, 0);
2740 while ((val & 0x1) == 0 && ++num <= 3)
2741 val >>= 1;
2742 if (num > 3)
2743 return 0;
2744 Q = (unsigned) extract_field (FLD_Q, inst->value, inst->opcode->mask);
2745 inst->operands[0].qualifier =
2746 get_vreg_qualifier_from_value ((num << 1) | Q);
2749 if ((inst->opcode->flags & F_OPD_SIZE) && inst->opcode->iclass == sve2_urqvs)
2751 unsigned size;
2752 size = (unsigned) extract_field (FLD_size, inst->value,
2753 inst->opcode->mask);
2754 inst->operands[0].qualifier
2755 = get_vreg_qualifier_from_value (1 + (size << 1));
2756 inst->operands[2].qualifier = get_sreg_qualifier_from_value (size);
2759 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
2761 /* Use Rt to encode in the case of e.g.
2762 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
2763 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
2764 if (idx == -1)
 2766      /* Otherwise use the result operand, which has to be an integer
 2767         register.  */
2768 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2769 == AARCH64_OPND_CLASS_INT_REG);
2770 idx = 0;
2772 assert (idx == 0 || idx == 1);
2773 value = extract_field (FLD_Q, inst->value, 0);
2774 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2777 if (inst->opcode->flags & F_LDS_SIZE)
2779 aarch64_field field = {0, 0};
2780 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2781 == AARCH64_OPND_CLASS_INT_REG);
2782 gen_sub_field (FLD_opc, 0, 1, &field);
2783 value = extract_field_2 (&field, inst->value, 0);
2784 inst->operands[0].qualifier
2785 = value ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2788 /* Miscellaneous decoding; done as the last step. */
2789 if (inst->opcode->flags & F_MISC)
2790 return do_misc_decoding (inst);
2792 return 1;
2795 /* Converters converting a real opcode instruction to its alias form. */
2797 /* ROR <Wd>, <Ws>, #<shift>
2798 is equivalent to:
2799 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
2800 static int
2801 convert_extr_to_ror (aarch64_inst *inst)
2803 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2805 copy_operand_info (inst, 2, 3);
2806 inst->operands[3].type = AARCH64_OPND_NIL;
2807 return 1;
2809 return 0;
2812 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
2813 is equivalent to:
2814 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
2815 static int
2816 convert_shll_to_xtl (aarch64_inst *inst)
2818 if (inst->operands[2].imm.value == 0)
2820 inst->operands[2].type = AARCH64_OPND_NIL;
2821 return 1;
2823 return 0;
2826 /* Convert
 2827      UBFM <Xd>, <Xn>, #<shift>, #63
 2828    to:
 2829      LSR <Xd>, <Xn>, #<shift>.  */
2830 static int
2831 convert_bfm_to_sr (aarch64_inst *inst)
2833 int64_t imms, val;
2835 imms = inst->operands[3].imm.value;
2836 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2837 if (imms == val)
2839 inst->operands[3].type = AARCH64_OPND_NIL;
2840 return 1;
2843 return 0;
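/* For example, UBFM X0, X1, #4, #63 satisfies imms == 63 and is therefore
   shown as LSR X0, X1, #4; the 32-bit forms require imms == 31 instead.  */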
2846 /* Convert MOV to ORR. */
2847 static int
2848 convert_orr_to_mov (aarch64_inst *inst)
2850 /* MOV <Vd>.<T>, <Vn>.<T>
2851 is equivalent to:
2852 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
2853 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2855 inst->operands[2].type = AARCH64_OPND_NIL;
2856 return 1;
2858 return 0;
2861 /* When <imms> >= <immr>, the instruction written:
2862 SBFX <Xd>, <Xn>, #<lsb>, #<width>
2863 is equivalent to:
2864 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
2866 static int
2867 convert_bfm_to_bfx (aarch64_inst *inst)
2869 int64_t immr, imms;
2871 immr = inst->operands[2].imm.value;
2872 imms = inst->operands[3].imm.value;
2873 if (imms >= immr)
2875 int64_t lsb = immr;
2876 inst->operands[2].imm.value = lsb;
2877 inst->operands[3].imm.value = imms + 1 - lsb;
2878 /* The two opcodes have different qualifiers for
2879 the immediate operands; reset to help the checking. */
2880 reset_operand_qualifier (inst, 2);
2881 reset_operand_qualifier (inst, 3);
2882 return 1;
2885 return 0;
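/* For example, SBFM X0, X1, #2, #5 has imms >= immr and is shown as
   SBFX X0, X1, #2, #4, i.e. lsb = 2 and width = 5 + 1 - 2.  */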
2888 /* When <imms> < <immr>, the instruction written:
2889 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
2890 is equivalent to:
2891 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
2893 static int
2894 convert_bfm_to_bfi (aarch64_inst *inst)
2896 int64_t immr, imms, val;
2898 immr = inst->operands[2].imm.value;
2899 imms = inst->operands[3].imm.value;
2900 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2901 if (imms < immr)
2903 inst->operands[2].imm.value = (val - immr) & (val - 1);
2904 inst->operands[3].imm.value = imms + 1;
2905 /* The two opcodes have different qualifiers for
2906 the immediate operands; reset to help the checking. */
2907 reset_operand_qualifier (inst, 2);
2908 reset_operand_qualifier (inst, 3);
2909 return 1;
2912 return 0;
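/* For example, SBFM X0, X1, #60, #3 has imms < immr and is shown as
   SBFIZ X0, X1, #4, #4, since (64 - 60) & 0x3f == 4 and 3 + 1 == 4.  */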
2915 /* The instruction written:
2916 BFC <Xd>, #<lsb>, #<width>
2917 is equivalent to:
2918 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
2920 static int
2921 convert_bfm_to_bfc (aarch64_inst *inst)
2923 int64_t immr, imms, val;
2925 /* Should have been assured by the base opcode value. */
2926 assert (inst->operands[1].reg.regno == 0x1f);
2928 immr = inst->operands[2].imm.value;
2929 imms = inst->operands[3].imm.value;
2930 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2931 if (imms < immr)
2933 /* Drop XZR from the second operand. */
2934 copy_operand_info (inst, 1, 2);
2935 copy_operand_info (inst, 2, 3);
2936 inst->operands[3].type = AARCH64_OPND_NIL;
2938 /* Recalculate the immediates. */
2939 inst->operands[1].imm.value = (val - immr) & (val - 1);
2940 inst->operands[2].imm.value = imms + 1;
2942 /* The two opcodes have different qualifiers for the operands; reset to
2943 help the checking. */
2944 reset_operand_qualifier (inst, 1);
2945 reset_operand_qualifier (inst, 2);
2946 reset_operand_qualifier (inst, 3);
2948 return 1;
2951 return 0;
2954 /* The instruction written:
2955 LSL <Xd>, <Xn>, #<shift>
2956 is equivalent to:
2957 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
2959 static int
2960 convert_ubfm_to_lsl (aarch64_inst *inst)
2962 int64_t immr = inst->operands[2].imm.value;
2963 int64_t imms = inst->operands[3].imm.value;
2964 int64_t val
2965 = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2967 if ((immr == 0 && imms == val) || immr == imms + 1)
2969 inst->operands[3].type = AARCH64_OPND_NIL;
2970 inst->operands[2].imm.value = val - imms;
2971 return 1;
2974 return 0;
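/* For example, UBFM X0, X1, #60, #59 meets immr == imms + 1 and is printed
   as LSL X0, X1, #4 (i.e. 63 - 59); UBFM X0, X1, #0, #63 is the degenerate
   LSL #0 case.  */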
2977 /* CINC <Wd>, <Wn>, <cond>
2978 is equivalent to:
2979 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>)
2980 where <cond> is not AL or NV. */
2982 static int
2983 convert_from_csel (aarch64_inst *inst)
2985 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno
2986 && (inst->operands[3].cond->value & 0xe) != 0xe)
2988 copy_operand_info (inst, 2, 3);
2989 inst->operands[2].cond = get_inverted_cond (inst->operands[3].cond);
2990 inst->operands[3].type = AARCH64_OPND_NIL;
2991 return 1;
2993 return 0;
2996 /* CSET <Wd>, <cond>
2997 is equivalent to:
2998 CSINC <Wd>, WZR, WZR, invert(<cond>)
2999 where <cond> is not AL or NV. */
3001 static int
3002 convert_csinc_to_cset (aarch64_inst *inst)
3004 if (inst->operands[1].reg.regno == 0x1f
3005 && inst->operands[2].reg.regno == 0x1f
3006 && (inst->operands[3].cond->value & 0xe) != 0xe)
3008 copy_operand_info (inst, 1, 3);
3009 inst->operands[1].cond = get_inverted_cond (inst->operands[3].cond);
3010 inst->operands[3].type = AARCH64_OPND_NIL;
3011 inst->operands[2].type = AARCH64_OPND_NIL;
3012 return 1;
3014 return 0;
3017 /* MOV <Wd>, #<imm>
3018 is equivalent to:
3019 MOVZ <Wd>, #<imm16_5>, LSL #<shift>.
3021 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
3022 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
3023 or where a MOVN has an immediate that could be encoded by MOVZ, or where
3024 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
3025 machine-instruction mnemonic must be used. */
3027 static int
3028 convert_movewide_to_mov (aarch64_inst *inst)
3030 uint64_t value = inst->operands[1].imm.value;
3031 /* MOVZ/MOVN #0 have a shift amount other than LSL #0. */
3032 if (value == 0 && inst->operands[1].shifter.amount != 0)
3033 return 0;
3034 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
3035 inst->operands[1].shifter.kind = AARCH64_MOD_NONE;
3036 value <<= inst->operands[1].shifter.amount;
 3037   /* As an alias converter, note that INST->OPCODE is the opcode of the
 3038      real instruction.  */
3039 if (inst->opcode->op == OP_MOVN)
3041 int is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
3042 value = ~value;
3043 /* A MOVN has an immediate that could be encoded by MOVZ. */
3044 if (aarch64_wide_constant_p (value, is32, NULL))
3045 return 0;
3047 inst->operands[1].imm.value = value;
3048 inst->operands[1].shifter.amount = 0;
3049 return 1;
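/* For example, MOVZ X0, #0x1234, LSL #16 is shown as MOV X0, #0x12340000,
   whereas MOVZ X0, #0x0, LSL #16 keeps the MOVZ mnemonic because of the
   nonzero shift of a zero immediate.  */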
3052 /* MOV <Wd>, #<imm>
3053 is equivalent to:
3054 ORR <Wd>, WZR, #<imm>.
3056 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
3057 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
3058 or where a MOVN has an immediate that could be encoded by MOVZ, or where
3059 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
3060 machine-instruction mnemonic must be used. */
3062 static int
3063 convert_movebitmask_to_mov (aarch64_inst *inst)
3065 int is32;
3066 uint64_t value;
3068 /* Should have been assured by the base opcode value. */
3069 assert (inst->operands[1].reg.regno == 0x1f);
3070 copy_operand_info (inst, 1, 2);
3071 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
3072 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
3073 value = inst->operands[1].imm.value;
3074 /* ORR has an immediate that could be generated by a MOVZ or MOVN
3075 instruction. */
3076 if (inst->operands[0].reg.regno != 0x1f
3077 && (aarch64_wide_constant_p (value, is32, NULL)
3078 || aarch64_wide_constant_p (~value, is32, NULL)))
3079 return 0;
3081 inst->operands[2].type = AARCH64_OPND_NIL;
3082 return 1;
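/* For example, ORR X0, XZR, #0xff00ff00ff00ff00 is shown as
   MOV X0, #0xff00ff00ff00ff00, but ORR X0, XZR, #0x10000 keeps the ORR
   mnemonic because 0x10000 could also be produced by a MOVZ.  */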
 3085 /* Some alias opcodes are disassembled by being converted from their real form.
 3086    N.B. INST->OPCODE is the real opcode rather than the alias.  */
3088 static int
3089 convert_to_alias (aarch64_inst *inst, const aarch64_opcode *alias)
3091 switch (alias->op)
3093 case OP_ASR_IMM:
3094 case OP_LSR_IMM:
3095 return convert_bfm_to_sr (inst);
3096 case OP_LSL_IMM:
3097 return convert_ubfm_to_lsl (inst);
3098 case OP_CINC:
3099 case OP_CINV:
3100 case OP_CNEG:
3101 return convert_from_csel (inst);
3102 case OP_CSET:
3103 case OP_CSETM:
3104 return convert_csinc_to_cset (inst);
3105 case OP_UBFX:
3106 case OP_BFXIL:
3107 case OP_SBFX:
3108 return convert_bfm_to_bfx (inst);
3109 case OP_SBFIZ:
3110 case OP_BFI:
3111 case OP_UBFIZ:
3112 return convert_bfm_to_bfi (inst);
3113 case OP_BFC:
3114 return convert_bfm_to_bfc (inst);
3115 case OP_MOV_V:
3116 return convert_orr_to_mov (inst);
3117 case OP_MOV_IMM_WIDE:
3118 case OP_MOV_IMM_WIDEN:
3119 return convert_movewide_to_mov (inst);
3120 case OP_MOV_IMM_LOG:
3121 return convert_movebitmask_to_mov (inst);
3122 case OP_ROR_IMM:
3123 return convert_extr_to_ror (inst);
3124 case OP_SXTL:
3125 case OP_SXTL2:
3126 case OP_UXTL:
3127 case OP_UXTL2:
3128 return convert_shll_to_xtl (inst);
3129 default:
3130 return 0;
3134 static bool
3135 aarch64_opcode_decode (const aarch64_opcode *, const aarch64_insn,
3136 aarch64_inst *, int, aarch64_operand_error *errors);
3138 /* Given the instruction information in *INST, check if the instruction has
3139 any alias form that can be used to represent *INST. If the answer is yes,
3140 update *INST to be in the form of the determined alias. */
3142 /* In the opcode description table, the following flags are used in opcode
3143 entries to help establish the relations between the real and alias opcodes:
3145 F_ALIAS: opcode is an alias
3146 F_HAS_ALIAS: opcode has alias(es)
3147 F_P1
3148 F_P2
 3149    F_P3: Disassembly preference priority 1-3 (the larger the number,
 3150          the higher the priority).  If nothing is specified, the priority
 3151          is 0 by default, i.e. the lowest priority.
 3153    Although the relation between the machine and the alias instructions is not
 3154    explicitly described, it can be easily determined from the base opcode
3155 values, masks and the flags F_ALIAS and F_HAS_ALIAS in their opcode
3156 description entries:
3158 The mask of an alias opcode must be equal to or a super-set (i.e. more
3159 constrained) of that of the aliased opcode; so is the base opcode value.
3161 if (opcode_has_alias (real) && alias_opcode_p (opcode)
3162 && (opcode->mask & real->mask) == real->mask
3163 && (real->mask & opcode->opcode) == (real->mask & real->opcode))
3164 then OPCODE is an alias of, and only of, the REAL instruction
 3166    The alias relationship is forced to be flat-structured to keep the related
 3167    algorithms simple; an opcode entry cannot be flagged with both F_ALIAS and F_HAS_ALIAS.
 3169    During the disassembling, the decoding decision tree (in
 3170    opcodes/aarch64-dis-2.c) always returns a machine instruction opcode entry;
 3171    if the decoding of such a machine instruction succeeds (and -Mno-aliases is
 3172    not specified), the disassembler will check whether any alias instruction
 3173    exists for this real instruction.  If there is, the disassembler will try to
 3174    disassemble the 32-bit binary again using the alias's rule, or try to
 3175    convert the IR to the form of the alias.  In the case of multiple aliases,
 3176    the aliases are tried one by one from the highest priority (currently the
 3177    flag F_P3) to the lowest priority (no priority flag), and the first one
 3178    that succeeds is adopted.
3180 You may ask why there is a need for the conversion of IR from one form to
3181 another in handling certain aliases. This is because on one hand it avoids
 3182    adding more operand code to handle unusual encoding/decoding; on the other
 3183    hand, during the disassembling, the conversion is an effective approach to
3184 check the condition of an alias (as an alias may be adopted only if certain
3185 conditions are met).
3187 In order to speed up the alias opcode lookup, aarch64-gen has preprocessed
3188 aarch64_opcode_table and generated aarch64_find_alias_opcode and
3189 aarch64_find_next_alias_opcode (in opcodes/aarch64-dis-2.c) to help. */
3191 static void
3192 determine_disassembling_preference (struct aarch64_inst *inst,
3193 aarch64_operand_error *errors)
3195 const aarch64_opcode *opcode;
3196 const aarch64_opcode *alias;
3198 opcode = inst->opcode;
3200 /* This opcode does not have an alias, so use itself. */
3201 if (!opcode_has_alias (opcode))
3202 return;
3204 alias = aarch64_find_alias_opcode (opcode);
3205 assert (alias);
3207 #ifdef DEBUG_AARCH64
3208 if (debug_dump)
3210 const aarch64_opcode *tmp = alias;
 3211       printf ("#### LIST ordered: ");
3212 while (tmp)
3214 printf ("%s, ", tmp->name);
3215 tmp = aarch64_find_next_alias_opcode (tmp);
3217 printf ("\n");
3219 #endif /* DEBUG_AARCH64 */
3221 for (; alias; alias = aarch64_find_next_alias_opcode (alias))
3223 DEBUG_TRACE ("try %s", alias->name);
3224 assert (alias_opcode_p (alias) || opcode_has_alias (opcode));
3226 /* An alias can be a pseudo opcode which will never be used in the
3227 disassembly, e.g. BIC logical immediate is such a pseudo opcode
3228 aliasing AND. */
3229 if (pseudo_opcode_p (alias))
3231 DEBUG_TRACE ("skip pseudo %s", alias->name);
3232 continue;
3235 if ((inst->value & alias->mask) != alias->opcode)
3237 DEBUG_TRACE ("skip %s as base opcode not match", alias->name);
3238 continue;
3241 if (!AARCH64_CPU_HAS_ALL_FEATURES (arch_variant, *alias->avariant))
3243 DEBUG_TRACE ("skip %s: we're missing features", alias->name);
3244 continue;
3247 /* No need to do any complicated transformation on operands, if the alias
3248 opcode does not have any operand. */
3249 if (aarch64_num_of_operands (alias) == 0 && alias->opcode == inst->value)
3251 DEBUG_TRACE ("succeed with 0-operand opcode %s", alias->name);
3252 aarch64_replace_opcode (inst, alias);
3253 return;
3255 if (alias->flags & F_CONV)
3257 aarch64_inst copy;
3258 memcpy (&copy, inst, sizeof (aarch64_inst));
3259 /* ALIAS is the preference as long as the instruction can be
3260 successfully converted to the form of ALIAS. */
3261 if (convert_to_alias (&copy, alias) == 1)
3263 aarch64_replace_opcode (&copy, alias);
3264 if (aarch64_match_operands_constraint (&copy, NULL) != 1)
3266 DEBUG_TRACE ("FAILED with alias %s ", alias->name);
3268 else
3270 DEBUG_TRACE ("succeed with %s via conversion", alias->name);
3271 memcpy (inst, &copy, sizeof (aarch64_inst));
3273 return;
3276 else
3278 /* Directly decode the alias opcode. */
3279 aarch64_inst temp;
3280 memset (&temp, '\0', sizeof (aarch64_inst));
3281 if (aarch64_opcode_decode (alias, inst->value, &temp, 1, errors) == 1)
3283 DEBUG_TRACE ("succeed with %s via direct decoding", alias->name);
3284 memcpy (inst, &temp, sizeof (aarch64_inst));
3285 return;
3291 /* Some instructions (including all SVE ones) use the instruction class
3292 to describe how a qualifiers_list index is represented in the instruction
3293 encoding. If INST is such an instruction, decode the appropriate fields
3294 and fill in the operand qualifiers accordingly. Return true if no
3295 problems are found. */
3297 static bool
3298 aarch64_decode_variant_using_iclass (aarch64_inst *inst)
3300 int i, variant;
3302 variant = 0;
3303 switch (inst->opcode->iclass)
3305 case sme_mov:
3306 variant = extract_fields (inst->value, 0, 2, FLD_SME_Q, FLD_SME_size_22);
3307 if (variant >= 4 && variant < 7)
3308 return false;
3309 if (variant == 7)
3310 variant = 4;
3311 break;
3313 case sme_psel:
3314 i = extract_fields (inst->value, 0, 2, FLD_SME_tszh, FLD_SME_tszl);
3315 if (i == 0)
3316 return false;
3317 while ((i & 1) == 0)
3319 i >>= 1;
3320 variant += 1;
3322 break;
3324 case sme_shift:
3325 i = extract_field (FLD_SVE_tszh, inst->value, 0);
3326 goto sve_shift;
3328 case sme_size_12_bhs:
3329 variant = extract_field (FLD_SME_size_12, inst->value, 0);
3330 if (variant >= 3)
3331 return false;
3332 break;
3334 case sme_size_12_hs:
3335 variant = extract_field (FLD_SME_size_12, inst->value, 0);
3336 if (variant != 1 && variant != 2)
3337 return false;
3338 variant -= 1;
3339 break;
3341 case sme_size_22:
3342 variant = extract_field (FLD_SME_size_22, inst->value, 0);
3343 break;
3345 case sme_size_22_hsd:
3346 variant = extract_field (FLD_SME_size_22, inst->value, 0);
3347 if (variant < 1)
3348 return false;
3349 variant -= 1;
3350 break;
3352 case sme_sz_23:
3353 variant = extract_field (FLD_SME_sz_23, inst->value, 0);
3354 break;
3356 case sve_cpy:
3357 variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_14);
3358 break;
3360 case sve_index:
3361 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
3362 if ((i & 31) == 0)
3363 return false;
3364 while ((i & 1) == 0)
3366 i >>= 1;
3367 variant += 1;
3369 break;
3371 case sve_index1:
3372 i = extract_fields (inst->value, 0, 2, FLD_SVE_tsz, FLD_SVE_i2h);
3373 if ((i & 15) == 0)
3374 return false;
3375 while ((i & 1) == 0)
3377 i >>= 1;
3378 variant += 1;
3380 break;
3382 case sve_limm:
3383 /* Pick the smallest applicable element size. */
3384 if ((inst->value & 0x20600) == 0x600)
3385 variant = 0;
3386 else if ((inst->value & 0x20400) == 0x400)
3387 variant = 1;
3388 else if ((inst->value & 0x20000) == 0)
3389 variant = 2;
3390 else
3391 variant = 3;
3392 break;
3394 case sme2_mov:
3395 /* .D is preferred over the other sizes in disassembly. */
3396 variant = 3;
3397 break;
3399 case sme2_movaz:
3400 case sme_misc:
3401 case sve_misc:
3402 /* These instructions have only a single variant. */
3403 break;
3405 case sve_movprfx:
3406 variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_16);
3407 break;
3409 case sve_pred_zm:
3410 variant = extract_field (FLD_SVE_M_4, inst->value, 0);
3411 break;
3413 case sve_shift_pred:
3414 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_8);
3415 sve_shift:
3416 if (i == 0)
3417 return false;
3418 while (i != 1)
3420 i >>= 1;
3421 variant += 1;
3423 break;
3425 case sve_shift_unpred:
3426 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_19);
3427 goto sve_shift;
3429 case sve_size_bhs:
3430 variant = extract_field (FLD_size, inst->value, 0);
3431 if (variant >= 3)
3432 return false;
3433 break;
3435 case sve_size_bhsd:
3436 variant = extract_field (FLD_size, inst->value, 0);
3437 break;
3439 case sve_size_hsd:
3440 i = extract_field (FLD_size, inst->value, 0);
3441 if (i < 1)
3442 return false;
3443 variant = i - 1;
3444 break;
3446 case sme_fp_sd:
3447 case sme_int_sd:
3448 case sve_size_bh:
3449 case sve_size_sd:
3450 variant = extract_field (FLD_SVE_sz, inst->value, 0);
3451 break;
3453 case sve_size_sd2:
3454 variant = extract_field (FLD_SVE_sz2, inst->value, 0);
3455 break;
3457 case sve_size_hsd2:
3458 i = extract_field (FLD_SVE_size, inst->value, 0);
3459 if (i < 1)
3460 return false;
3461 variant = i - 1;
3462 break;
3464 case sve_size_13:
3465 /* Ignore low bit of this field since that is set in the opcode for
3466 instructions of this iclass. */
3467 i = (extract_field (FLD_size, inst->value, 0) & 2);
3468 variant = (i >> 1);
3469 break;
3471 case sve_shift_tsz_bhsd:
3472 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_19);
3473 if (i == 0)
3474 return false;
3475 while (i != 1)
3477 i >>= 1;
3478 variant += 1;
3480 break;
3482 case sve_size_tsz_bhs:
3483 i = extract_fields (inst->value, 0, 2, FLD_SVE_sz, FLD_SVE_tszl_19);
3484 if (i == 0)
3485 return false;
3486 while (i != 1)
3488 if (i & 1)
3489 return false;
3490 i >>= 1;
3491 variant += 1;
3493 break;
3495 case sve_shift_tsz_hsd:
3496 i = extract_fields (inst->value, 0, 2, FLD_SVE_sz, FLD_SVE_tszl_19);
3497 if (i == 0)
3498 return false;
3499 while (i != 1)
3501 i >>= 1;
3502 variant += 1;
3504 break;
3506 default:
3507 /* No mapping between instruction class and qualifiers. */
3508 return true;
3511 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3512 inst->operands[i].qualifier = inst->opcode->qualifiers_list[variant][i];
3513 return true;
 3515 /* Decode the CODE according to OPCODE; fill INST.  Return false if the decoding
 3516    fails, which means that CODE is not an instruction of OPCODE; otherwise
 3517    return true.
3519 If OPCODE has alias(es) and NOALIASES_P is 0, an alias opcode may be
3520 determined and used to disassemble CODE; this is done just before the
3521 return. */
3523 static bool
3524 aarch64_opcode_decode (const aarch64_opcode *opcode, const aarch64_insn code,
3525 aarch64_inst *inst, int noaliases_p,
3526 aarch64_operand_error *errors)
3528 int i;
3530 DEBUG_TRACE ("enter with %s", opcode->name);
3532 assert (opcode && inst);
3534 /* Clear inst. */
3535 memset (inst, '\0', sizeof (aarch64_inst));
3537 /* Check the base opcode. */
3538 if ((code & opcode->mask) != (opcode->opcode & opcode->mask))
3540 DEBUG_TRACE ("base opcode match FAIL");
3541 goto decode_fail;
3544 inst->opcode = opcode;
3545 inst->value = code;
3547 /* Assign operand codes and indexes. */
3548 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3550 if (opcode->operands[i] == AARCH64_OPND_NIL)
3551 break;
3552 inst->operands[i].type = opcode->operands[i];
3553 inst->operands[i].idx = i;
3556 /* Call the opcode decoder indicated by flags. */
3557 if (opcode_has_special_coder (opcode) && do_special_decoding (inst) == 0)
3559 DEBUG_TRACE ("opcode flag-based decoder FAIL");
3560 goto decode_fail;
3563 /* Possibly use the instruction class to determine the correct
3564 qualifier. */
3565 if (!aarch64_decode_variant_using_iclass (inst))
3567 DEBUG_TRACE ("iclass-based decoder FAIL");
3568 goto decode_fail;
3571 /* Call operand decoders. */
3572 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3574 const aarch64_operand *opnd;
3575 enum aarch64_opnd type;
3577 type = opcode->operands[i];
3578 if (type == AARCH64_OPND_NIL)
3579 break;
3580 opnd = &aarch64_operands[type];
3581 if (operand_has_extractor (opnd)
3582 && (! aarch64_extract_operand (opnd, &inst->operands[i], code, inst,
3583 errors)))
3585 DEBUG_TRACE ("operand decoder FAIL at operand %d", i);
3586 goto decode_fail;
3590 /* If the opcode has a verifier, then check it now. */
3591 if (opcode->verifier
3592 && opcode->verifier (inst, code, 0, false, errors, NULL) != ERR_OK)
3594 DEBUG_TRACE ("operand verifier FAIL");
3595 goto decode_fail;
3598 /* Match the qualifiers. */
3599 if (aarch64_match_operands_constraint (inst, NULL) == 1)
3601 /* Arriving here, the CODE has been determined as a valid instruction
3602 of OPCODE and *INST has been filled with information of this OPCODE
3603 instruction. Before the return, check if the instruction has any
3604 alias and should be disassembled in the form of its alias instead.
3605 If the answer is yes, *INST will be updated. */
3606 if (!noaliases_p)
3607 determine_disassembling_preference (inst, errors);
3608 DEBUG_TRACE ("SUCCESS");
3609 return true;
3611 else
3613 DEBUG_TRACE ("constraint matching FAIL");
3616 decode_fail:
3617 return false;
3620 /* This does some user-friendly fix-up to *INST. It currently focuses on
3621 the adjustment of qualifiers to help the printed instruction be
3622 recognized/understood more easily. */
3624 static void
3625 user_friendly_fixup (aarch64_inst *inst)
3627 switch (inst->opcode->iclass)
3629 case testbranch:
3630 /* TBNZ Xn|Wn, #uimm6, label
3631 Test and Branch Not Zero: conditionally jumps to label if bit number
3632 uimm6 in register Xn is not zero. The bit number implies the width of
3633 the register, which may be written and should be disassembled as Wn if
3634 uimm is less than 32. Limited to a branch offset range of +/- 32KiB.
3636 if (inst->operands[1].imm.value < 32)
3637 inst->operands[0].qualifier = AARCH64_OPND_QLF_W;
3638 break;
3639 default: break;
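/* Illustrative example of the fixup above (hypothetical encoding): an
   instruction written as "tbnz x3, #5, target" tests a bit number below
   32, so the first operand's qualifier is downgraded and the instruction
   is disassembled as "tbnz w3, #5, target".  */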
3643 /* Decode INSN and fill *INST with the instruction information. An alias
3644 opcode may be filled in *INST if NOALIASES_P is FALSE. Return ERR_OK on
3645 success. */
3647 enum err_type
3648 aarch64_decode_insn (aarch64_insn insn, aarch64_inst *inst,
3649 bool noaliases_p,
3650 aarch64_operand_error *errors)
3652 const aarch64_opcode *opcode = aarch64_opcode_lookup (insn);
3654 #ifdef DEBUG_AARCH64
3655 if (debug_dump)
3657 const aarch64_opcode *tmp = opcode;
3658 printf ("\n");
3659 DEBUG_TRACE ("opcode lookup:");
3660 while (tmp != NULL)
3662 aarch64_verbose (" %s", tmp->name);
3663 tmp = aarch64_find_next_opcode (tmp);
3666 #endif /* DEBUG_AARCH64 */
3668 /* A list of opcodes may have been found, as aarch64_opcode_lookup cannot
3669 distinguish some opcodes, e.g. SSHR and MOVI, which share almost the same
3670 opcode field and value; the difference is that one of them has an extra
3671 field as part of the opcode, while that same field is used for operand
3672 encoding in the other opcode(s) ('immh' in the case of this example). */
3673 while (opcode != NULL)
3675 /* But only one opcode can be decoded successfully, as the
3676 decoding routine will check the constraints carefully. */
3677 if (aarch64_opcode_decode (opcode, insn, inst, noaliases_p, errors) == 1)
3678 return ERR_OK;
3679 opcode = aarch64_find_next_opcode (opcode);
3682 return ERR_UND;
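/* A minimal sketch of a hypothetical caller of the decoder above
   (illustrative only; error handling elided):

     aarch64_inst inst;
     aarch64_operand_error errors;

     if (aarch64_decode_insn (word, &inst, false, &errors) == ERR_OK)
       printf ("decoded as %s\n", inst.opcode->name);

   Passing false for NOALIASES_P allows determine_disassembling_preference
   to replace the canonical opcode with a preferred alias before the
   instruction is printed.  */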
3685 /* Return a short string to indicate a switch to STYLE. These strings
3686 will be embedded into the disassembled operand text (as produced by
3687 aarch64_print_operand), and then spotted in the print_operands function
3688 so that the disassembler output can be split by style. */
3690 static const char *
3691 get_style_text (enum disassembler_style style)
3693 static bool init = false;
3694 static char formats[16][4];
3695 unsigned num;
3697 /* First time through we build a string for every possible format. This
3698 code relies on there being no more than 16 different styles (there's
3699 an assert below for this). */
3700 if (!init)
3702 int i;
3704 for (i = 0; i <= 0xf; ++i)
3706 int res ATTRIBUTE_UNUSED
3707 = snprintf (&formats[i][0], sizeof (formats[i]), "%c%x%c",
3708 STYLE_MARKER_CHAR, i, STYLE_MARKER_CHAR);
3709 assert (res == 3);
3712 init = true;
3715 /* Return the string that marks switching to STYLE. */
3716 num = (unsigned) style;
3717 assert (style <= 0xf);
3718 return formats[num];
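/* Worked example, following the snprintf format above: for a style whose
   enum value is 10 (0xa), the returned marker string consists of the three
   characters '\002', 'a', '\002', i.e. STYLE_MARKER_CHAR, the style number
   as a single hex digit, and STYLE_MARKER_CHAR again.  */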
3721 /* Callback used by aarch64_print_operand to apply STYLE to the
3722 disassembler output created from FMT and ARGS. The STYLER object holds
3723 any required state. Must return a pointer to a string (created from FMT
3724 and ARGS) that will continue to be valid until the complete disassembled
3725 instruction has been printed.
3727 We return a string that includes two embedded style markers, the first,
3728 placed at the start of the string, indicates a switch to STYLE, and the
3729 second, placed at the end of the string, indicates a switch back to the
3730 default text style.
3732 Later, when we print the operand text we take care to collapse any
3733 adjacent style markers, and to ignore any style markers that appear at
3734 the very end of a complete operand string. */
3736 static const char *aarch64_apply_style (struct aarch64_styler *styler,
3737 enum disassembler_style style,
3738 const char *fmt,
3739 va_list args)
3741 int res;
3742 char *ptr, *tmp;
3743 struct obstack *stack = (struct obstack *) styler->state;
3744 va_list ap;
3746 /* These are the two strings for switching styles. */
3747 const char *style_on = get_style_text (style);
3748 const char *style_off = get_style_text (dis_style_text);
3750 /* Calculate space needed once FMT and ARGS are expanded. */
3751 va_copy (ap, args);
3752 res = vsnprintf (NULL, 0, fmt, ap);
3753 va_end (ap);
3754 assert (res >= 0);
3756 /* Allocate space on the obstack for the expanded FMT and ARGS, as well
3757 as the two strings for switching styles, then write all of these
3758 strings onto the obstack. */
3759 ptr = (char *) obstack_alloc (stack, res + strlen (style_on)
3760 + strlen (style_off) + 1);
3761 tmp = stpcpy (ptr, style_on);
3762 res = vsnprintf (tmp, (res + 1), fmt, args);
3763 assert (res >= 0);
3764 tmp += res;
3765 strcpy (tmp, style_off);
3767 return ptr;
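/* Illustrative sketch of the string built above (hypothetical FMT and
   argument): a call with FMT "x%d" and the argument 3 writes

     <marker><hex digit of STYLE><marker> x3 <marker><hex digit of
     dis_style_text><marker>

   to the obstack, where <marker> is STYLE_MARKER_CHAR.  print_operands
   later splits the operand text on these markers so that each piece can
   be emitted in the correct style.  */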
3770 /* Print operands. */
3772 static void
3773 print_operands (bfd_vma pc, const aarch64_opcode *opcode,
3774 const aarch64_opnd_info *opnds, struct disassemble_info *info,
3775 bool *has_notes)
3777 char *notes = NULL;
3778 int i, pcrel_p, num_printed;
3779 struct aarch64_styler styler;
3780 struct obstack content;
3781 obstack_init (&content);
3783 styler.apply_style = aarch64_apply_style;
3784 styler.state = (void *) &content;
3786 for (i = 0, num_printed = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3788 char str[128];
3789 char cmt[128];
3791 /* We rely primarily on the opcode operand info, but we also look into
3792 the inst->operands to support the disassembling of optional
3793 operands.
3794 The two operand codes should be the same in all cases, apart from
3795 when the operand can be optional. */
3796 if (opcode->operands[i] == AARCH64_OPND_NIL
3797 || opnds[i].type == AARCH64_OPND_NIL)
3798 break;
3800 /* Generate the operand string in STR. */
3801 aarch64_print_operand (str, sizeof (str), pc, opcode, opnds, i, &pcrel_p,
3802 &info->target, &notes, cmt, sizeof (cmt),
3803 arch_variant, &styler);
3805 /* Print the delimiter (taking account of omitted operand(s)). */
3806 if (str[0] != '\0')
3807 (*info->fprintf_styled_func) (info->stream, dis_style_text, "%s",
3808 num_printed++ == 0 ? "\t" : ", ");
3810 /* Print the operand. */
3811 if (pcrel_p)
3812 (*info->print_address_func) (info->target, info);
3813 else
3815 /* This operand came from aarch64_print_operand, and will include
3816 embedded strings indicating which style each character should
3817 have. In the following code we split the text based on
3818 CURR_STYLE, and call the styled print callback to print each
3819 block of text in the appropriate style. */
3820 char *start, *curr;
3821 enum disassembler_style curr_style = dis_style_text;
3823 start = curr = str;
3826 if (*curr == '\0'
3827 || (*curr == STYLE_MARKER_CHAR
3828 && ISXDIGIT (*(curr + 1))
3829 && *(curr + 2) == STYLE_MARKER_CHAR))
3831 /* Output content between our START position and CURR. */
3832 int len = curr - start;
3833 if (len > 0)
3835 if ((*info->fprintf_styled_func) (info->stream,
3836 curr_style,
3837 "%.*s",
3838 len, start) < 0)
3839 break;
3842 if (*curr == '\0')
3843 break;
3845 /* Skip over the initial STYLE_MARKER_CHAR. */
3846 ++curr;
3848 /* Update the CURR_STYLE. As there are fewer than 16
3849 styles, it is possible, if the input is corrupted
3850 in some way, that we might set CURR_STYLE to an
3851 invalid value. Don't worry though, we check for this
3852 situation. */
3853 if (*curr >= '0' && *curr <= '9')
3854 curr_style = (enum disassembler_style) (*curr - '0');
3855 else if (*curr >= 'a' && *curr <= 'f')
3856 curr_style = (enum disassembler_style) (*curr - 'a' + 10);
3857 else
3858 curr_style = dis_style_text;
3860 /* Check for an invalid style having been selected. This
3861 should never happen, but it doesn't hurt to be a
3862 little paranoid. */
3863 if (curr_style > dis_style_comment_start)
3864 curr_style = dis_style_text;
3866 /* Skip the hex character, and the closing STYLE_MARKER_CHAR. */
3867 curr += 2;
3869 /* Reset the START to after the style marker. */
3870 start = curr;
3872 else
3873 ++curr;
3875 while (true);
3878 /* Print the comment. This works because only the last operand ever
3879 adds a comment. If that ever changes then we'll need to be
3880 smarter here. */
3881 if (cmt[0] != '\0')
3882 (*info->fprintf_styled_func) (info->stream, dis_style_comment_start,
3883 "\t// %s", cmt);
3886 if (notes && !no_notes)
3888 *has_notes = true;
3889 (*info->fprintf_styled_func) (info->stream, dis_style_comment_start,
3890 " // note: %s", notes);
3893 obstack_free (&content, NULL);
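/* Worked example of the marker-splitting loop above (hypothetical operand
   string): given the characters

     '\002' '1' '\002' 'x' '0' '\002' '0' '\002'

   the loop switches CURR_STYLE to 1, prints "x0" in that style, and then
   reaches the trailing marker pair immediately followed by the
   terminating NUL, so the final switch back to style 0 produces no
   output.  */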
3896 /* Set NAME to a copy of INST's mnemonic with the "." suffix removed. */
3898 static void
3899 remove_dot_suffix (char *name, const aarch64_inst *inst)
3901 char *ptr;
3902 size_t len;
3904 ptr = strchr (inst->opcode->name, '.');
3905 assert (ptr && inst->cond);
3906 len = ptr - inst->opcode->name;
3907 assert (len < 8);
3908 strncpy (name, inst->opcode->name, len);
3909 name[len] = '\0';
3912 /* Print the instruction mnemonic name. */
3914 static void
3915 print_mnemonic_name (const aarch64_inst *inst, struct disassemble_info *info)
3917 if (inst->opcode->flags & F_COND)
3919 /* For instructions that are truly conditionally executed, e.g. b.cond,
3920 prepare the full mnemonic name with the corresponding condition
3921 suffix. */
3922 char name[8];
3924 remove_dot_suffix (name, inst);
3925 (*info->fprintf_styled_func) (info->stream, dis_style_mnemonic,
3926 "%s.%s", name, inst->cond->names[0]);
3928 else
3929 (*info->fprintf_styled_func) (info->stream, dis_style_mnemonic,
3930 "%s", inst->opcode->name);
3933 /* Decide whether we need to print a comment after the operands of
3934 instruction INST. */
3936 static void
3937 print_comment (const aarch64_inst *inst, struct disassemble_info *info)
3939 if (inst->opcode->flags & F_COND)
3941 char name[8];
3942 unsigned int i, num_conds;
3944 remove_dot_suffix (name, inst);
3945 num_conds = ARRAY_SIZE (inst->cond->names);
3946 for (i = 1; i < num_conds && inst->cond->names[i]; ++i)
3947 (*info->fprintf_styled_func) (info->stream, dis_style_comment_start,
3948 "%s %s.%s",
3949 i == 1 ? " //" : ",",
3950 name, inst->cond->names[i]);
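/* Illustrative example (assuming a condition with two accepted spellings,
   say "cs" and "hs"): an instruction printed as "b.cs" would be followed
   by the comment " // b.hs", showing the alternative spelling of the same
   condition.  */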
3954 /* Build notes from verifiers into a string for printing. */
3956 static void
3957 print_verifier_notes (aarch64_operand_error *detail,
3958 struct disassemble_info *info)
3960 if (no_notes)
3961 return;
3963 /* The output of the verifier cannot be a fatal error, otherwise the assembly
3964 would not have succeeded. We can safely ignore these. */
3965 assert (detail->non_fatal);
3967 (*info->fprintf_styled_func) (info->stream, dis_style_comment_start,
3968 " // note: ");
3969 switch (detail->kind)
3971 case AARCH64_OPDE_A_SHOULD_FOLLOW_B:
3972 (*info->fprintf_styled_func) (info->stream, dis_style_text,
3973 _("this `%s' should have an immediately"
3974 " preceding `%s'"),
3975 detail->data[0].s, detail->data[1].s);
3976 break;
3978 case AARCH64_OPDE_EXPECTED_A_AFTER_B:
3979 (*info->fprintf_styled_func) (info->stream, dis_style_text,
3980 _("expected `%s' after previous `%s'"),
3981 detail->data[0].s, detail->data[1].s);
3982 break;
3984 default:
3985 assert (detail->error);
3986 (*info->fprintf_styled_func) (info->stream, dis_style_text,
3987 "%s", detail->error);
3988 if (detail->index >= 0)
3989 (*info->fprintf_styled_func) (info->stream, dis_style_text,
3990 " at operand %d", detail->index + 1);
3991 break;
3995 /* Print the instruction according to *INST. */
3997 static void
3998 print_aarch64_insn (bfd_vma pc, const aarch64_inst *inst,
3999 const aarch64_insn code,
4000 struct disassemble_info *info,
4001 aarch64_operand_error *mismatch_details)
4003 bool has_notes = false;
4005 print_mnemonic_name (inst, info);
4006 print_operands (pc, inst->opcode, inst->operands, info, &has_notes);
4007 print_comment (inst, info);
4009 /* We've already printed a note; there is not enough space to print more, so
4010 exit. Usually notes shouldn't overlap, so it shouldn't happen that we have
4011 a note from a register and an instruction at the same time. */
4012 if (has_notes)
4013 return;
4015 /* Always run constraint verifiers; this is needed because constraints need to
4016 maintain a global state regardless of whether the instruction has the flag
4017 set or not. */
4018 enum err_type result = verify_constraints (inst, code, pc, false,
4019 mismatch_details, &insn_sequence);
4020 switch (result)
4022 case ERR_VFI:
4023 print_verifier_notes (mismatch_details, info);
4024 break;
4025 case ERR_UND:
4026 case ERR_UNP:
4027 case ERR_NYI:
4028 default:
4029 break;
4033 /* Entry-point of the instruction disassembler and printer. */
4035 static void
4036 print_insn_aarch64_word (bfd_vma pc,
4037 uint32_t word,
4038 struct disassemble_info *info,
4039 aarch64_operand_error *errors)
4041 static const char *err_msg[ERR_NR_ENTRIES+1] =
4043 [ERR_OK] = "_",
4044 [ERR_UND] = "undefined",
4045 [ERR_UNP] = "unpredictable",
4046 [ERR_NYI] = "NYI"
4049 enum err_type ret;
4050 aarch64_inst inst;
4052 info->insn_info_valid = 1;
4053 info->branch_delay_insns = 0;
4054 info->data_size = 0;
4055 info->target = 0;
4056 info->target2 = 0;
4058 if (info->flags & INSN_HAS_RELOC)
4059 /* If the instruction has a reloc associated with it, then
4060 the offset field in the instruction will actually be the
4061 addend for the reloc. (If we are using REL type relocs).
4062 In such cases, we can ignore the pc when computing
4063 addresses, since the addend is not currently pc-relative. */
4064 pc = 0;
4066 ret = aarch64_decode_insn (word, &inst, no_aliases, errors);
4068 if (((word >> 21) & 0x3ff) == 1)
4070 /* RESERVED for ALES. */
4071 assert (ret != ERR_OK);
4072 ret = ERR_NYI;
4075 switch (ret)
4077 case ERR_UND:
4078 case ERR_UNP:
4079 case ERR_NYI:
4080 /* Handle undefined instructions. */
4081 info->insn_type = dis_noninsn;
4082 (*info->fprintf_styled_func) (info->stream,
4083 dis_style_assembler_directive,
4084 ".inst\t");
4085 (*info->fprintf_styled_func) (info->stream, dis_style_immediate,
4086 "0x%08x", word);
4087 (*info->fprintf_styled_func) (info->stream, dis_style_comment_start,
4088 " ; %s", err_msg[ret]);
4089 break;
4090 case ERR_OK:
4091 user_friendly_fixup (&inst);
4092 if (inst.opcode->iclass == condbranch
4093 || inst.opcode->iclass == testbranch
4094 || inst.opcode->iclass == compbranch)
4095 info->insn_type = dis_condbranch;
4096 else if (inst.opcode->iclass == branch_imm)
4097 info->insn_type = dis_jsr;
4098 print_aarch64_insn (pc, &inst, word, info, errors);
4099 break;
4100 default:
4101 abort ();
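/* Illustrative example of the error path above: a word W that fails to
   decode is emitted as data rather than as a mnemonic, along the lines of

     .inst	0x<W as 8 hex digits> ; undefined

   with "unpredictable" or "NYI" substituted according to the error
   class.  */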
4105 /* Prevent mapping symbols ($x, $d, etc.) from
4106 being displayed in symbol relative addresses. */
4108 bool
4109 aarch64_symbol_is_valid (asymbol * sym,
4110 struct disassemble_info * info ATTRIBUTE_UNUSED)
4112 const char * name;
4114 if (sym == NULL)
4115 return false;
4117 name = bfd_asymbol_name (sym);
4119 return name
4120 && (name[0] != '$'
4121 || (name[1] != 'x' && name[1] != 'd')
4122 || (name[2] != '\0' && name[2] != '.'));
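/* Illustrative examples of the filter above: the mapping symbols "$x",
   "$d" and variants such as "$x.42" are rejected, while ordinary symbols
   like "main", and even names such as "$data" whose third character is
   neither NUL nor '.', are still considered valid.  */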
4125 /* Print data bytes on INFO->STREAM. */
4127 static void
4128 print_insn_data (bfd_vma pc ATTRIBUTE_UNUSED,
4129 uint32_t word,
4130 struct disassemble_info *info,
4131 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
4133 switch (info->bytes_per_chunk)
4135 case 1:
4136 info->fprintf_styled_func (info->stream, dis_style_assembler_directive,
4137 ".byte\t");
4138 info->fprintf_styled_func (info->stream, dis_style_immediate,
4139 "0x%02x", word);
4140 break;
4141 case 2:
4142 info->fprintf_styled_func (info->stream, dis_style_assembler_directive,
4143 ".short\t");
4144 info->fprintf_styled_func (info->stream, dis_style_immediate,
4145 "0x%04x", word);
4146 break;
4147 case 4:
4148 info->fprintf_styled_func (info->stream, dis_style_assembler_directive,
4149 ".word\t");
4150 info->fprintf_styled_func (info->stream, dis_style_immediate,
4151 "0x%08x", word);
4152 break;
4153 default:
4154 abort ();
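/* Illustrative example (hypothetical chunk): a two-byte data chunk with
   the value 0x1234 is printed as

     .short	0x1234

   with the directive and the immediate emitted in their respective
   styles.  */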
4158 /* Try to infer the code or data type from a symbol.
4159 Returns nonzero if *MAP_TYPE was set. */
4161 static int
4162 get_sym_code_type (struct disassemble_info *info, int n,
4163 enum map_type *map_type)
4165 asymbol * as;
4166 elf_symbol_type *es;
4167 unsigned int type;
4168 const char *name;
4170 /* If the symbol is in a different section, ignore it. */
4171 if (info->section != NULL && info->section != info->symtab[n]->section)
4172 return false;
4174 if (n >= info->symtab_size)
4175 return false;
4177 as = info->symtab[n];
4178 if (bfd_asymbol_flavour (as) != bfd_target_elf_flavour)
4179 return false;
4180 es = (elf_symbol_type *) as;
4182 type = ELF_ST_TYPE (es->internal_elf_sym.st_info);
4184 /* If the symbol has function type then use that. */
4185 if (type == STT_FUNC)
4187 *map_type = MAP_INSN;
4188 return true;
4191 /* Check for mapping symbols. */
4192 name = bfd_asymbol_name(info->symtab[n]);
4193 if (name[0] == '$'
4194 && (name[1] == 'x' || name[1] == 'd')
4195 && (name[2] == '\0' || name[2] == '.'))
4197 *map_type = (name[1] == 'x' ? MAP_INSN : MAP_DATA);
4198 return true;
4201 return false;
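/* Illustrative examples of the classification above: a symbol of type
   STT_FUNC, or a mapping symbol named "$x" or "$x.foo", selects MAP_INSN;
   "$d" or "$d.data" selects MAP_DATA; any other symbol leaves *MAP_TYPE
   untouched and the function returns false.  */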
4204 /* Set the feature bits in arch_variant in order to get the correct disassembly
4205 for the chosen architecture variant.
4207 Currently we only restrict disassembly for Armv8-R and otherwise enable all
4208 non-R-profile features. */
4209 static void
4210 select_aarch64_variant (unsigned mach)
4212 switch (mach)
4214 case bfd_mach_aarch64_8R:
4215 AARCH64_SET_FEATURE (arch_variant, AARCH64_ARCH_V8R);
4216 break;
4217 default:
4218 arch_variant = (aarch64_feature_set) AARCH64_ALL_FEATURES;
4219 AARCH64_CLEAR_FEATURE (arch_variant, arch_variant, V8R);
4223 /* Entry-point of the AArch64 disassembler. */
4226 print_insn_aarch64 (bfd_vma pc,
4227 struct disassemble_info *info)
4229 bfd_byte buffer[INSNLEN];
4230 int status;
4231 void (*printer) (bfd_vma, uint32_t, struct disassemble_info *,
4232 aarch64_operand_error *);
4233 bool found = false;
4234 unsigned int size = 4;
4235 unsigned long data;
4236 aarch64_operand_error errors;
4237 static bool set_features;
4239 if (info->disassembler_options)
4241 set_default_aarch64_dis_options (info);
4243 parse_aarch64_dis_options (info->disassembler_options);
4245 /* To avoid repeated parsing of these options, we remove them here. */
4246 info->disassembler_options = NULL;
4249 if (!set_features)
4251 select_aarch64_variant (info->mach);
4252 set_features = true;
4255 /* AArch64 instructions are always little-endian. */
4256 info->endian_code = BFD_ENDIAN_LITTLE;
4258 /* Default to DATA. A text section is required by the ABI to contain an
4259 INSN mapping symbol at the start. A data section has no such
4260 requirement, hence if no mapping symbol is found the section must
4261 contain only data. This however isn't very useful if the user has
4262 fully stripped the binaries. If this is the case, use the section
4263 attributes to determine the default. If we have no section, default to
4264 INSN as well, as we may be disassembling some raw bytes from a baremetal
4265 HEX file or similar. */
4266 enum map_type type = MAP_DATA;
4267 if ((info->section && info->section->flags & SEC_CODE) || !info->section)
4268 type = MAP_INSN;
4270 /* First check the full symtab for a mapping symbol, even if there
4271 are no usable non-mapping symbols for this address. */
4272 if (info->symtab_size != 0
4273 && bfd_asymbol_flavour (*info->symtab) == bfd_target_elf_flavour)
4275 int last_sym = -1;
4276 bfd_vma addr, section_vma = 0;
4277 bool can_use_search_opt_p;
4278 int n;
4280 if (pc <= last_mapping_addr)
4281 last_mapping_sym = -1;
4283 /* Start scanning at the start of the function, or wherever
4284 we finished last time. */
4285 n = info->symtab_pos + 1;
4287 /* If the last stop offset is different from the current one it means we
4288 are disassembling a different glob of bytes. As such the optimization
4289 would not be safe and we should start over. */
4290 can_use_search_opt_p = last_mapping_sym >= 0
4291 && info->stop_offset == last_stop_offset;
4293 if (n >= last_mapping_sym && can_use_search_opt_p)
4294 n = last_mapping_sym;
4296 /* Look down while we haven't passed the location being disassembled.
4297 The reason for this is that there's no defined order between a symbol
4298 and a mapping symbol that may be at the same address. We may have to
4299 look at least one position ahead. */
4300 for (; n < info->symtab_size; n++)
4302 addr = bfd_asymbol_value (info->symtab[n]);
4303 if (addr > pc)
4304 break;
4305 if (get_sym_code_type (info, n, &type))
4307 last_sym = n;
4308 found = true;
4312 if (!found)
4314 n = info->symtab_pos;
4315 if (n >= last_mapping_sym && can_use_search_opt_p)
4316 n = last_mapping_sym;
4318 /* No mapping symbol found at this address. Look backwards
4319 for a preceding one, but don't go past the section start,
4320 otherwise a data section with no mapping symbol can pick up
4321 a text mapping symbol of a preceding section. The documentation
4322 says section can be NULL, in which case we will seek up all the
4323 way to the top. */
4324 if (info->section)
4325 section_vma = info->section->vma;
4327 for (; n >= 0; n--)
4329 addr = bfd_asymbol_value (info->symtab[n]);
4330 if (addr < section_vma)
4331 break;
4333 if (get_sym_code_type (info, n, &type))
4335 last_sym = n;
4336 found = true;
4337 break;
4342 last_mapping_sym = last_sym;
4343 last_type = type;
4344 last_stop_offset = info->stop_offset;
4346 /* Look a little bit ahead to see if we should print out
4347 less than four bytes of data. If there's a symbol,
4348 mapping or otherwise, after two bytes then don't
4349 print more. */
4350 if (last_type == MAP_DATA)
4352 size = 4 - (pc & 3);
4353 for (n = last_sym + 1; n < info->symtab_size; n++)
4355 addr = bfd_asymbol_value (info->symtab[n]);
4356 if (addr > pc)
4358 if (addr - pc < size)
4359 size = addr - pc;
4360 break;
4363 /* If the next symbol is after three bytes, we need to
4364 print only part of the data, so that we can use either
4365 .byte or .short. */
4366 if (size == 3)
4367 size = (pc & 1) ? 1 : 2;
4370 else
4371 last_type = type;
4373 /* PR 10263: Disassemble data if requested to do so by the user. */
4374 if (last_type == MAP_DATA && ((info->flags & DISASSEMBLE_DATA) == 0))
4376 /* size was set above. */
4377 info->bytes_per_chunk = size;
4378 info->display_endian = info->endian;
4379 printer = print_insn_data;
4381 else
4383 info->bytes_per_chunk = size = INSNLEN;
4384 info->display_endian = info->endian_code;
4385 printer = print_insn_aarch64_word;
4388 status = (*info->read_memory_func) (pc, buffer, size, info);
4389 if (status != 0)
4391 (*info->memory_error_func) (status, pc, info);
4392 return -1;
4395 data = bfd_get_bits (buffer, size * 8,
4396 info->display_endian == BFD_ENDIAN_BIG);
4398 (*printer) (pc, data, info, &errors);
4400 return size;
4403 void
4404 print_aarch64_disassembler_options (FILE *stream)
4406 fprintf (stream, _("\n\
4407 The following AARCH64 specific disassembler options are supported for use\n\
4408 with the -M switch (multiple options should be separated by commas):\n"));
4410 fprintf (stream, _("\n\
4411 no-aliases Don't print instruction aliases.\n"));
4413 fprintf (stream, _("\n\
4414 aliases Do print instruction aliases.\n"));
4416 fprintf (stream, _("\n\
4417 no-notes Don't print instruction notes.\n"));
4419 fprintf (stream, _("\n\
4420 notes Do print instruction notes.\n"));
4422 #ifdef DEBUG_AARCH64
4423 fprintf (stream, _("\n\
4424 debug_dump Temp switch for debug trace.\n"));
4425 #endif /* DEBUG_AARCH64 */
4427 fprintf (stream, _("\n"));
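/* Usage sketch (hypothetical command line): these options are passed to
   the disassembler through objdump's -M switch, e.g.

     objdump -d -M no-aliases,no-notes foo.o

   with multiple options separated by commas, as the help text above
   states.  */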