binutils-gdb.git / opcodes / aarch64-dis.c
1 /* aarch64-dis.c -- AArch64 disassembler.
2 Copyright (C) 2009-2023 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
21 #include "sysdep.h"
22 #include <stdint.h>
23 #include "disassemble.h"
24 #include "libiberty.h"
25 #include "opintl.h"
26 #include "aarch64-dis.h"
27 #include "elf-bfd.h"
28 #include "safe-ctype.h"
29 #include "obstack.h"
31 #define obstack_chunk_alloc xmalloc
32 #define obstack_chunk_free free
34 #define INSNLEN 4
36 /* This character is used to encode style information within the output
37 buffers. See get_style_text and print_operands for more details. */
38 #define STYLE_MARKER_CHAR '\002'
40 /* Cached mapping symbol state. */
41 enum map_type
43 MAP_INSN,
44 MAP_DATA
47 static aarch64_feature_set arch_variant; /* See select_aarch64_variant. */
48 static enum map_type last_type;
49 static int last_mapping_sym = -1;
50 static bfd_vma last_stop_offset = 0;
51 static bfd_vma last_mapping_addr = 0;
53 /* Other options */
54 static int no_aliases = 0; /* If set disassemble as most general inst. */
55 static int no_notes = 1;	/* If set do not print disassemble notes in the
56 output as comments. */
58 /* Currently active instruction sequence. */
59 static aarch64_instr_sequence insn_sequence;
61 static void
62 set_default_aarch64_dis_options (struct disassemble_info *info ATTRIBUTE_UNUSED)
66 static void
67 parse_aarch64_dis_option (const char *option, unsigned int len ATTRIBUTE_UNUSED)
69 /* Try to match options that are simple flags */
70 if (startswith (option, "no-aliases"))
72 no_aliases = 1;
73 return;
76 if (startswith (option, "aliases"))
78 no_aliases = 0;
79 return;
82 if (startswith (option, "no-notes"))
84 no_notes = 1;
85 return;
88 if (startswith (option, "notes"))
90 no_notes = 0;
91 return;
94 #ifdef DEBUG_AARCH64
95 if (startswith (option, "debug_dump"))
97 debug_dump = 1;
98 return;
100 #endif /* DEBUG_AARCH64 */
102 /* Invalid option. */
103 opcodes_error_handler (_("unrecognised disassembler option: %s"), option);
106 static void
107 parse_aarch64_dis_options (const char *options)
109 const char *option_end;
111 if (options == NULL)
112 return;
114 while (*options != '\0')
116 /* Skip empty options. */
117 if (*options == ',')
119 options++;
120 continue;
124 /* We know that *options is neither NUL nor a comma. */
124 option_end = options + 1;
125 while (*option_end != ',' && *option_end != '\0')
126 option_end++;
128 parse_aarch64_dis_option (options, option_end - options);
130 /* Go on to the next one. If option_end points to a comma, it
131 will be skipped above. */
132 options = option_end;
136 /* Functions doing the instruction disassembling. */
138 /* The unnamed arguments consist of the number of fields and information about
139 these fields where the VALUE will be extracted from CODE and returned.
140 MASK can be zero or the base mask of the opcode.
142 N.B. the fields are required to be in such an order that the most significant
143 field for VALUE comes the first, e.g. the <index> in
144 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
145 is encoded in H:L:M in some cases, the fields H:L:M should be passed in
146 the order of H, L, M. */
148 aarch64_insn
149 extract_fields (aarch64_insn code, aarch64_insn mask, ...)
151 uint32_t num;
152 const aarch64_field *field;
153 enum aarch64_field_kind kind;
154 va_list va;
156 va_start (va, mask);
157 num = va_arg (va, uint32_t);
158 assert (num <= 5);
159 aarch64_insn value = 0x0;
160 while (num--)
162 kind = va_arg (va, enum aarch64_field_kind);
163 field = &fields[kind];
164 value <<= field->width;
165 value |= extract_field (kind, code, mask);
167 va_end (va);
168 return value;
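/* For example, the H:L:M lane index of an Em16 operand is extracted with
   extract_fields (code, 0, 3, FLD_H, FLD_L, FLD_M); the most significant
   field (H) is passed first, as required by the note above.  */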
171 /* Extract the value of all fields in SELF->fields from instruction CODE.
172 The least significant bit comes from the final field. */
174 static aarch64_insn
175 extract_all_fields (const aarch64_operand *self, aarch64_insn code)
177 aarch64_insn value;
178 unsigned int i;
179 enum aarch64_field_kind kind;
181 value = 0;
182 for (i = 0; i < ARRAY_SIZE (self->fields) && self->fields[i] != FLD_NIL; ++i)
184 kind = self->fields[i];
185 value <<= fields[kind].width;
186 value |= extract_field (kind, code, 0);
188 return value;
191 /* Sign-extend bit I of VALUE. */
192 static inline uint64_t
193 sign_extend (aarch64_insn value, unsigned i)
195 uint64_t ret, sign;
197 assert (i < 32);
198 ret = value;
199 sign = (uint64_t) 1 << i;
200 return ((ret & (sign + sign - 1)) ^ sign) - sign;
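/* Example: sign_extend (0x1ff, 8) yields (uint64_t) -1 and
   sign_extend (0x0ff, 8) yields 0xff; bit I acts as the sign bit and any
   bits of VALUE above it are ignored.  */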
203 /* N.B. the following inline helper functions create a dependency on the
204 order of operand qualifier enumerators. */
206 /* Given VALUE, return qualifier for a general purpose register. */
207 static inline enum aarch64_opnd_qualifier
208 get_greg_qualifier_from_value (aarch64_insn value)
210 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_W + value;
211 assert (value <= 0x1
212 && aarch64_get_qualifier_standard_value (qualifier) == value);
213 return qualifier;
216 /* Given VALUE, return qualifier for a vector register. This does not support
217 decoding instructions that accept the 2H vector type. */
219 static inline enum aarch64_opnd_qualifier
220 get_vreg_qualifier_from_value (aarch64_insn value)
222 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_V_8B + value;
224 /* Instructions using vector type 2H should not call this function. Skip over
225 the 2H qualifier. */
226 if (qualifier >= AARCH64_OPND_QLF_V_2H)
227 qualifier += 1;
229 assert (value <= 0x8
230 && aarch64_get_qualifier_standard_value (qualifier) == value);
231 return qualifier;
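/* Consequently value 2 maps to AARCH64_OPND_QLF_V_4H rather than V_2H,
   since 2H is not encodable by the instructions that use this helper.  */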
234 /* Given VALUE, return qualifier for an FP or AdvSIMD scalar register. */
235 static inline enum aarch64_opnd_qualifier
236 get_sreg_qualifier_from_value (aarch64_insn value)
238 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_S_B + value;
240 assert (value <= 0x4
241 && aarch64_get_qualifier_standard_value (qualifier) == value);
242 return qualifier;
245 /* Given the instruction in *INST, which is probably half way through its
246 decoding, return the qualifier that the caller should expect for operand
247 I if we can establish it; otherwise return
248 AARCH64_OPND_QLF_NIL. */
250 static aarch64_opnd_qualifier_t
251 get_expected_qualifier (const aarch64_inst *inst, int i)
253 aarch64_opnd_qualifier_seq_t qualifiers;
254 /* Should not be called if the qualifier is known. */
255 assert (inst->operands[i].qualifier == AARCH64_OPND_QLF_NIL);
256 if (aarch64_find_best_match (inst, inst->opcode->qualifiers_list,
257 i, qualifiers))
258 return qualifiers[i];
259 else
260 return AARCH64_OPND_QLF_NIL;
263 /* Operand extractors. */
265 bool
266 aarch64_ext_none (const aarch64_operand *self ATTRIBUTE_UNUSED,
267 aarch64_opnd_info *info ATTRIBUTE_UNUSED,
268 const aarch64_insn code ATTRIBUTE_UNUSED,
269 const aarch64_inst *inst ATTRIBUTE_UNUSED,
270 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
272 return true;
275 bool
276 aarch64_ext_regno (const aarch64_operand *self, aarch64_opnd_info *info,
277 const aarch64_insn code,
278 const aarch64_inst *inst ATTRIBUTE_UNUSED,
279 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
281 info->reg.regno = extract_field (self->fields[0], code, 0);
282 return true;
285 bool
286 aarch64_ext_regno_pair (const aarch64_operand *self ATTRIBUTE_UNUSED, aarch64_opnd_info *info,
287 const aarch64_insn code ATTRIBUTE_UNUSED,
288 const aarch64_inst *inst ATTRIBUTE_UNUSED,
289 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
291 assert (info->idx == 1
292 || info->idx == 3);
293 info->reg.regno = inst->operands[info->idx - 1].reg.regno + 1;
294 return true;
297 /* e.g. IC <ic_op>{, <Xt>}. */
298 bool
299 aarch64_ext_regrt_sysins (const aarch64_operand *self, aarch64_opnd_info *info,
300 const aarch64_insn code,
301 const aarch64_inst *inst ATTRIBUTE_UNUSED,
302 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
304 info->reg.regno = extract_field (self->fields[0], code, 0);
305 assert (info->idx == 1
306 && (aarch64_get_operand_class (inst->operands[0].type)
307 == AARCH64_OPND_CLASS_SYSTEM));
308 /* This will make the constraint checking happy and more importantly will
309 help the disassembler determine whether this operand is optional or
310 not. */
311 info->present = aarch64_sys_ins_reg_has_xt (inst->operands[0].sysins_op);
313 return true;
316 /* e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
317 bool
318 aarch64_ext_reglane (const aarch64_operand *self, aarch64_opnd_info *info,
319 const aarch64_insn code,
320 const aarch64_inst *inst ATTRIBUTE_UNUSED,
321 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
323 /* regno */
324 info->reglane.regno = extract_field (self->fields[0], code,
325 inst->opcode->mask);
327 /* Index and/or type. */
328 if (inst->opcode->iclass == asisdone
329 || inst->opcode->iclass == asimdins)
331 if (info->type == AARCH64_OPND_En
332 && inst->opcode->operands[0] == AARCH64_OPND_Ed)
334 unsigned shift;
335 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
336 assert (info->idx == 1); /* Vn */
337 aarch64_insn value = extract_field (FLD_imm4, code, 0);
338 /* Depend on AARCH64_OPND_Ed to determine the qualifier. */
339 info->qualifier = get_expected_qualifier (inst, info->idx);
340 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
341 info->reglane.index = value >> shift;
343 else
345 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
346 imm5<3:0> <V>
347 0000 RESERVED
348 xxx1 B
349 xx10 H
350 x100 S
351 1000 D */
352 int pos = -1;
353 aarch64_insn value = extract_field (FLD_imm5, code, 0);
354 while (++pos <= 3 && (value & 0x1) == 0)
355 value >>= 1;
356 if (pos > 3)
357 return false;
358 info->qualifier = get_sreg_qualifier_from_value (pos);
359 info->reglane.index = (unsigned) (value >> 1);
362 else if (inst->opcode->iclass == dotproduct)
364 /* Need information in other operand(s) to help decoding. */
365 info->qualifier = get_expected_qualifier (inst, info->idx);
366 switch (info->qualifier)
368 case AARCH64_OPND_QLF_S_4B:
369 case AARCH64_OPND_QLF_S_2H:
370 /* L:H */
371 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
372 info->reglane.regno &= 0x1f;
373 break;
374 default:
375 return false;
378 else if (inst->opcode->iclass == cryptosm3)
380 /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>S[<imm2>]. */
381 info->reglane.index = extract_field (FLD_SM3_imm2, code, 0);
383 else
385 /* Index only for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
386 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
388 /* Need information in other operand(s) to help decoding. */
389 info->qualifier = get_expected_qualifier (inst, info->idx);
390 switch (info->qualifier)
392 case AARCH64_OPND_QLF_S_H:
393 if (info->type == AARCH64_OPND_Em16)
395 /* h:l:m */
396 info->reglane.index = extract_fields (code, 0, 3, FLD_H, FLD_L,
397 FLD_M);
398 info->reglane.regno &= 0xf;
400 else
402 /* h:l */
403 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
405 break;
406 case AARCH64_OPND_QLF_S_S:
407 /* h:l */
408 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
409 break;
410 case AARCH64_OPND_QLF_S_D:
411 /* H */
412 info->reglane.index = extract_field (FLD_H, code, 0);
413 break;
414 default:
415 return false;
418 if (inst->opcode->op == OP_FCMLA_ELEM
419 && info->qualifier != AARCH64_OPND_QLF_S_H)
421 /* Complex operand takes two elements. */
422 if (info->reglane.index & 1)
423 return false;
424 info->reglane.index /= 2;
428 return true;
431 bool
432 aarch64_ext_reglist (const aarch64_operand *self, aarch64_opnd_info *info,
433 const aarch64_insn code,
434 const aarch64_inst *inst ATTRIBUTE_UNUSED,
435 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
437 /* R */
438 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
439 /* len */
440 info->reglist.num_regs = extract_field (FLD_len, code, 0) + 1;
441 return true;
444 /* Decode Rt and opcode fields of Vt in AdvSIMD load/store instructions. */
445 bool
446 aarch64_ext_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
447 aarch64_opnd_info *info, const aarch64_insn code,
448 const aarch64_inst *inst,
449 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
451 aarch64_insn value;
452 /* Number of elements in each structure to be loaded/stored. */
453 unsigned expected_num = get_opcode_dependent_value (inst->opcode);
455 struct
457 unsigned is_reserved;
458 unsigned num_regs;
459 unsigned num_elements;
460 } data [] =
461 { {0, 4, 4},
462 {1, 4, 4},
463 {0, 4, 1},
464 {0, 4, 2},
465 {0, 3, 3},
466 {1, 3, 3},
467 {0, 3, 1},
468 {0, 1, 1},
469 {0, 2, 2},
470 {1, 2, 2},
471 {0, 2, 1},
474 /* Rt */
475 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
476 /* opcode */
477 value = extract_field (FLD_opcode, code, 0);
478 /* PR 21595: Check for a bogus value. */
479 if (value >= ARRAY_SIZE (data))
480 return false;
481 if (expected_num != data[value].num_elements || data[value].is_reserved)
482 return false;
483 info->reglist.num_regs = data[value].num_regs;
485 return true;
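/* The data[] table above is indexed by the raw "opcode" field; e.g. entry 0,
   {0, 4, 4}, corresponds to LD4/ST4 (multiple structures): not reserved,
   four registers and four elements per structure.  */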
488 /* Decode Rt and S fields of Vt in AdvSIMD load single structure to all
489 lanes instructions. */
490 bool
491 aarch64_ext_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
492 aarch64_opnd_info *info, const aarch64_insn code,
493 const aarch64_inst *inst,
494 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
496 aarch64_insn value;
498 /* Rt */
499 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
500 /* S */
501 value = extract_field (FLD_S, code, 0);
503 /* Number of registers is equal to the number of elements in
504 each structure to be loaded/stored. */
505 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
506 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
508 /* Except when it is LD1R. */
509 if (info->reglist.num_regs == 1 && value == (aarch64_insn) 1)
510 info->reglist.num_regs = 2;
512 return true;
515 /* Decode Q, opcode<2:1>, S, size and Rt fields of Vt in AdvSIMD
516 load/store single element instructions. */
517 bool
518 aarch64_ext_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
519 aarch64_opnd_info *info, const aarch64_insn code,
520 const aarch64_inst *inst ATTRIBUTE_UNUSED,
521 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
523 aarch64_field field = {0, 0};
524 aarch64_insn QSsize; /* fields Q:S:size. */
525 aarch64_insn opcodeh2; /* opcode<2:1> */
527 /* Rt */
528 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
530 /* Decode the index, opcode<2:1> and size. */
531 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
532 opcodeh2 = extract_field_2 (&field, code, 0);
533 QSsize = extract_fields (code, 0, 3, FLD_Q, FLD_S, FLD_vldst_size);
534 switch (opcodeh2)
536 case 0x0:
537 info->qualifier = AARCH64_OPND_QLF_S_B;
538 /* Index encoded in "Q:S:size". */
539 info->reglist.index = QSsize;
540 break;
541 case 0x1:
542 if (QSsize & 0x1)
543 /* UND. */
544 return false;
545 info->qualifier = AARCH64_OPND_QLF_S_H;
546 /* Index encoded in "Q:S:size<1>". */
547 info->reglist.index = QSsize >> 1;
548 break;
549 case 0x2:
550 if ((QSsize >> 1) & 0x1)
551 /* UND. */
552 return false;
553 if ((QSsize & 0x1) == 0)
555 info->qualifier = AARCH64_OPND_QLF_S_S;
556 /* Index encoded in "Q:S". */
557 info->reglist.index = QSsize >> 2;
559 else
561 if (extract_field (FLD_S, code, 0))
562 /* UND */
563 return false;
564 info->qualifier = AARCH64_OPND_QLF_S_D;
565 /* Index encoded in "Q". */
566 info->reglist.index = QSsize >> 3;
568 break;
569 default:
570 return false;
573 info->reglist.has_index = 1;
574 info->reglist.num_regs = 0;
575 /* Number of registers is equal to the number of elements in
576 each structure to be loaded/stored. */
577 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
578 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
580 return true;
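/* For example, with opcode<2:1> == 0x1 (an H element) the low bit of "size"
   must be zero and the lane index is Q:S:size<1>, giving indexes 0-7 for
   LD1/ST1 {<Vt>.H}[<index>].  */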
583 /* Decode fields immh:immb and/or Q for e.g.
584 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
585 or SSHR <V><d>, <V><n>, #<shift>. */
587 bool
588 aarch64_ext_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
589 aarch64_opnd_info *info, const aarch64_insn code,
590 const aarch64_inst *inst,
591 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
593 int pos;
594 aarch64_insn Q, imm, immh;
595 enum aarch64_insn_class iclass = inst->opcode->iclass;
597 immh = extract_field (FLD_immh, code, 0);
598 if (immh == 0)
599 return false;
600 imm = extract_fields (code, 0, 2, FLD_immh, FLD_immb);
601 pos = 4;
602 /* Get highest set bit in immh. */
603 while (--pos >= 0 && (immh & 0x8) == 0)
604 immh <<= 1;
606 assert ((iclass == asimdshf || iclass == asisdshf)
607 && (info->type == AARCH64_OPND_IMM_VLSR
608 || info->type == AARCH64_OPND_IMM_VLSL));
610 if (iclass == asimdshf)
612 Q = extract_field (FLD_Q, code, 0);
613 /* immh Q <T>
614 0000 x SEE AdvSIMD modified immediate
615 0001 0 8B
616 0001 1 16B
617 001x 0 4H
618 001x 1 8H
619 01xx 0 2S
620 01xx 1 4S
621 1xxx 0 RESERVED
622 1xxx 1 2D */
623 info->qualifier =
624 get_vreg_qualifier_from_value ((pos << 1) | (int) Q);
626 else
627 info->qualifier = get_sreg_qualifier_from_value (pos);
629 if (info->type == AARCH64_OPND_IMM_VLSR)
630 /* immh <shift>
631 0000 SEE AdvSIMD modified immediate
632 0001 (16-UInt(immh:immb))
633 001x (32-UInt(immh:immb))
634 01xx (64-UInt(immh:immb))
635 1xxx (128-UInt(immh:immb)) */
636 info->imm.value = (16 << pos) - imm;
637 else
638 /* immh:immb
639 immh <shift>
640 0000 SEE AdvSIMD modified immediate
641 0001 (UInt(immh:immb)-8)
642 001x (UInt(immh:immb)-16)
643 01xx (UInt(immh:immb)-32)
644 1xxx (UInt(immh:immb)-64) */
645 info->imm.value = imm - (8 << pos);
647 return true;
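/* As a worked example, for SSHR <Vd>.4S, <Vn>.4S, #<shift> the immh field is
   01xx, so pos ends up as 2 and the right-shift amount is
   (16 << 2) - UInt(immh:immb), i.e. 64 - immh:immb, covering shifts 1-32.  */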
650 /* Decode the implicit shift immediate (8, 16 or 32) for e.g. SHLL <Vd>.<Ta>, <Vn>.<Tb>, #<shift>. */
651 bool
652 aarch64_ext_shll_imm (const aarch64_operand *self ATTRIBUTE_UNUSED,
653 aarch64_opnd_info *info, const aarch64_insn code,
654 const aarch64_inst *inst ATTRIBUTE_UNUSED,
655 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
657 int64_t imm;
658 aarch64_insn val;
659 val = extract_field (FLD_size, code, 0);
660 switch (val)
662 case 0: imm = 8; break;
663 case 1: imm = 16; break;
664 case 2: imm = 32; break;
665 default: return false;
667 info->imm.value = imm;
668 return true;
671 /* Decode imm for e.g. BFM <Wd>, <Wn>, #<immr>, #<imms>.
672 value in the field(s) will be extracted as unsigned immediate value. */
673 bool
674 aarch64_ext_imm (const aarch64_operand *self, aarch64_opnd_info *info,
675 const aarch64_insn code,
676 const aarch64_inst *inst,
677 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
679 uint64_t imm;
681 imm = extract_all_fields (self, code);
683 if (operand_need_sign_extension (self))
684 imm = sign_extend (imm, get_operand_fields_width (self) - 1);
686 if (operand_need_shift_by_two (self))
687 imm <<= 2;
688 else if (operand_need_shift_by_four (self))
689 imm <<= 4;
691 if (info->type == AARCH64_OPND_ADDR_ADRP)
692 imm <<= 12;
694 if (inst->operands[0].type == AARCH64_OPND_PSTATEFIELD
695 && inst->operands[0].sysreg.flags & F_IMM_IN_CRM)
696 imm &= PSTATE_DECODE_CRM_IMM (inst->operands[0].sysreg.flags);
698 info->imm.value = imm;
699 return true;
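/* Note that for AARCH64_OPND_ADDR_ADRP the extracted immediate is shifted
   left by 12 above, reflecting ADRP's 4KiB page granularity.  */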
702 /* Decode imm and its shifter for e.g. MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
703 bool
704 aarch64_ext_imm_half (const aarch64_operand *self, aarch64_opnd_info *info,
705 const aarch64_insn code,
706 const aarch64_inst *inst ATTRIBUTE_UNUSED,
707 aarch64_operand_error *errors)
709 aarch64_ext_imm (self, info, code, inst, errors);
710 info->shifter.kind = AARCH64_MOD_LSL;
711 info->shifter.amount = extract_field (FLD_hw, code, 0) << 4;
712 return true;
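/* The hw field counts 16-bit units, hence the << 4 above; e.g. hw == 2
   yields LSL #32 in MOVZ <Xd>, #<imm16>, LSL #32.  */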
715 /* Decode cmode and "a:b:c:d:e:f:g:h" for e.g.
716 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
717 bool
718 aarch64_ext_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
719 aarch64_opnd_info *info,
720 const aarch64_insn code,
721 const aarch64_inst *inst ATTRIBUTE_UNUSED,
722 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
724 uint64_t imm;
725 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
726 aarch64_field field = {0, 0};
728 assert (info->idx == 1);
730 if (info->type == AARCH64_OPND_SIMD_FPIMM)
731 info->imm.is_fp = 1;
733 /* a:b:c:d:e:f:g:h */
734 imm = extract_fields (code, 0, 2, FLD_abc, FLD_defgh);
735 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
737 /* Either MOVI <Dd>, #<imm>
738 or MOVI <Vd>.2D, #<imm>.
739 <imm> is a 64-bit immediate
740 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh',
741 encoded in "a:b:c:d:e:f:g:h". */
742 int i;
743 unsigned abcdefgh = imm;
744 for (imm = 0ull, i = 0; i < 8; i++)
745 if (((abcdefgh >> i) & 0x1) != 0)
746 imm |= 0xffull << (8 * i);
748 info->imm.value = imm;
750 /* cmode */
751 info->qualifier = get_expected_qualifier (inst, info->idx);
752 switch (info->qualifier)
754 case AARCH64_OPND_QLF_NIL:
755 /* no shift */
756 info->shifter.kind = AARCH64_MOD_NONE;
757 return true;
758 case AARCH64_OPND_QLF_LSL:
759 /* shift zeros */
760 info->shifter.kind = AARCH64_MOD_LSL;
761 switch (aarch64_get_qualifier_esize (opnd0_qualifier))
763 case 4: gen_sub_field (FLD_cmode, 1, 2, &field); break; /* per word */
764 case 2: gen_sub_field (FLD_cmode, 1, 1, &field); break; /* per half */
765 case 1: gen_sub_field (FLD_cmode, 1, 0, &field); break; /* per byte */
766 default: return false;
768 /* 00: 0; 01: 8; 10:16; 11:24. */
769 info->shifter.amount = extract_field_2 (&field, code, 0) << 3;
770 break;
771 case AARCH64_OPND_QLF_MSL:
772 /* shift ones */
773 info->shifter.kind = AARCH64_MOD_MSL;
774 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
775 info->shifter.amount = extract_field_2 (&field, code, 0) ? 16 : 8;
776 break;
777 default:
778 return false;
781 return true;
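/* For the 64-bit expansion above, e.g. abcdefgh == 0x81 yields
   0xff000000000000ff: each set bit contributes one byte of ones.  */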
784 /* Decode an 8-bit floating-point immediate. */
785 bool
786 aarch64_ext_fpimm (const aarch64_operand *self, aarch64_opnd_info *info,
787 const aarch64_insn code,
788 const aarch64_inst *inst ATTRIBUTE_UNUSED,
789 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
791 info->imm.value = extract_all_fields (self, code);
792 info->imm.is_fp = 1;
793 return true;
796 /* Decode a 1-bit rotate immediate (#90 or #270). */
797 bool
798 aarch64_ext_imm_rotate1 (const aarch64_operand *self, aarch64_opnd_info *info,
799 const aarch64_insn code,
800 const aarch64_inst *inst ATTRIBUTE_UNUSED,
801 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
803 uint64_t rot = extract_field (self->fields[0], code, 0);
804 assert (rot < 2U);
805 info->imm.value = rot * 180 + 90;
806 return true;
809 /* Decode a 2-bit rotate immediate (#0, #90, #180 or #270). */
810 bool
811 aarch64_ext_imm_rotate2 (const aarch64_operand *self, aarch64_opnd_info *info,
812 const aarch64_insn code,
813 const aarch64_inst *inst ATTRIBUTE_UNUSED,
814 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
816 uint64_t rot = extract_field (self->fields[0], code, 0);
817 assert (rot < 4U);
818 info->imm.value = rot * 90;
819 return true;
822 /* Decode scale for e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
823 bool
824 aarch64_ext_fbits (const aarch64_operand *self ATTRIBUTE_UNUSED,
825 aarch64_opnd_info *info, const aarch64_insn code,
826 const aarch64_inst *inst ATTRIBUTE_UNUSED,
827 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
829 info->imm.value = 64 - extract_field (FLD_scale, code, 0);
830 return true;
833 /* Decode arithmetic immediate for e.g.
834 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
835 bool
836 aarch64_ext_aimm (const aarch64_operand *self ATTRIBUTE_UNUSED,
837 aarch64_opnd_info *info, const aarch64_insn code,
838 const aarch64_inst *inst ATTRIBUTE_UNUSED,
839 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
841 aarch64_insn value;
843 info->shifter.kind = AARCH64_MOD_LSL;
844 /* shift */
845 value = extract_field (FLD_shift, code, 0);
846 if (value >= 2)
847 return false;
848 info->shifter.amount = value ? 12 : 0;
849 /* imm12 (unsigned) */
850 info->imm.value = extract_field (FLD_imm12, code, 0);
852 return true;
855 /* Return true if VALUE is a valid logical immediate encoding, storing the
856 decoded value in *RESULT if so. ESIZE is the number of bytes in the
857 decoded immediate. */
858 static bool
859 decode_limm (uint32_t esize, aarch64_insn value, int64_t *result)
861 uint64_t imm, mask;
862 uint32_t N, R, S;
863 unsigned simd_size;
865 /* value is N:immr:imms. */
866 S = value & 0x3f;
867 R = (value >> 6) & 0x3f;
868 N = (value >> 12) & 0x1;
870 /* The immediate value is S+1 bits set to 1, left rotated by SIMDsize - R
871 (in other words, right rotated by R), then replicated. */
872 if (N != 0)
874 simd_size = 64;
875 mask = 0xffffffffffffffffull;
877 else
879 switch (S)
881 case 0x00 ... 0x1f: /* 0xxxxx */ simd_size = 32; break;
882 case 0x20 ... 0x2f: /* 10xxxx */ simd_size = 16; S &= 0xf; break;
883 case 0x30 ... 0x37: /* 110xxx */ simd_size = 8; S &= 0x7; break;
884 case 0x38 ... 0x3b: /* 1110xx */ simd_size = 4; S &= 0x3; break;
885 case 0x3c ... 0x3d: /* 11110x */ simd_size = 2; S &= 0x1; break;
886 default: return false;
888 mask = (1ull << simd_size) - 1;
889 /* Top bits are IGNORED. */
890 R &= simd_size - 1;
893 if (simd_size > esize * 8)
894 return false;
896 /* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected. */
897 if (S == simd_size - 1)
898 return false;
899 /* S+1 consecutive bits to 1. */
900 /* NOTE: S can't be 63 due to detection above. */
901 imm = (1ull << (S + 1)) - 1;
902 /* Rotate to the left by simd_size - R. */
903 if (R != 0)
904 imm = ((imm << (simd_size - R)) & mask) | (imm >> R);
905 /* Replicate the value according to SIMD size. */
906 switch (simd_size)
908 case 2: imm = (imm << 2) | imm;
909 /* Fall through. */
910 case 4: imm = (imm << 4) | imm;
911 /* Fall through. */
912 case 8: imm = (imm << 8) | imm;
913 /* Fall through. */
914 case 16: imm = (imm << 16) | imm;
915 /* Fall through. */
916 case 32: imm = (imm << 32) | imm;
917 /* Fall through. */
918 case 64: break;
919 default: return 0;
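/* The mask below is built with two esize * 4 shifts because a single
   shift by esize * 8 would be undefined when esize is 8.  */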
922 *result = imm & ~((uint64_t) -1 << (esize * 4) << (esize * 4));
924 return true;
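/* A worked example: for a 32-bit operand (esize == 4), N == 0, immr == 0 and
   imms == 0b000011 describe four consecutive ones replicated across the
   register, so *RESULT becomes 0x0000000f.  */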
927 /* Decode a logical immediate for e.g. ORR <Wd|WSP>, <Wn>, #<imm>. */
928 bool
929 aarch64_ext_limm (const aarch64_operand *self,
930 aarch64_opnd_info *info, const aarch64_insn code,
931 const aarch64_inst *inst,
932 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
934 uint32_t esize;
935 aarch64_insn value;
937 value = extract_fields (code, 0, 3, self->fields[0], self->fields[1],
938 self->fields[2]);
939 esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
940 return decode_limm (esize, value, &info->imm.value);
943 /* Decode a logical immediate for the BIC alias of AND (etc.). */
944 bool
945 aarch64_ext_inv_limm (const aarch64_operand *self,
946 aarch64_opnd_info *info, const aarch64_insn code,
947 const aarch64_inst *inst,
948 aarch64_operand_error *errors)
950 if (!aarch64_ext_limm (self, info, code, inst, errors))
951 return false;
952 info->imm.value = ~info->imm.value;
953 return true;
956 /* Decode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
957 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
958 bool
959 aarch64_ext_ft (const aarch64_operand *self ATTRIBUTE_UNUSED,
960 aarch64_opnd_info *info,
961 const aarch64_insn code, const aarch64_inst *inst,
962 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
964 aarch64_insn value;
966 /* Rt */
967 info->reg.regno = extract_field (FLD_Rt, code, 0);
969 /* size */
970 value = extract_field (FLD_ldst_size, code, 0);
971 if (inst->opcode->iclass == ldstpair_indexed
972 || inst->opcode->iclass == ldstnapair_offs
973 || inst->opcode->iclass == ldstpair_off
974 || inst->opcode->iclass == loadlit)
976 enum aarch64_opnd_qualifier qualifier;
977 switch (value)
979 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
980 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
981 case 2: qualifier = AARCH64_OPND_QLF_S_Q; break;
982 default: return false;
984 info->qualifier = qualifier;
986 else
988 /* opc1:size */
989 value = extract_fields (code, 0, 2, FLD_opc1, FLD_ldst_size);
990 if (value > 0x4)
991 return false;
992 info->qualifier = get_sreg_qualifier_from_value (value);
995 return true;
998 /* Decode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
999 bool
1000 aarch64_ext_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
1001 aarch64_opnd_info *info,
1002 aarch64_insn code,
1003 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1004 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1006 /* Rn */
1007 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1008 return true;
1011 /* Decode the address operand for e.g.
1012 stlur <Xt>, [<Xn|SP>{, <amount>}]. */
1013 bool
1014 aarch64_ext_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
1015 aarch64_opnd_info *info,
1016 aarch64_insn code, const aarch64_inst *inst,
1017 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1019 info->qualifier = get_expected_qualifier (inst, info->idx);
1021 /* Rn */
1022 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1024 /* simm9 */
1025 aarch64_insn imm = extract_fields (code, 0, 1, self->fields[1]);
1026 info->addr.offset.imm = sign_extend (imm, 8);
1027 if (extract_field (self->fields[2], code, 0) == 1) {
1028 info->addr.writeback = 1;
1029 info->addr.preind = 1;
1031 return true;
1034 /* Decode the address operand for e.g.
1035 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1036 bool
1037 aarch64_ext_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
1038 aarch64_opnd_info *info,
1039 aarch64_insn code, const aarch64_inst *inst,
1040 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1042 aarch64_insn S, value;
1044 /* Rn */
1045 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1046 /* Rm */
1047 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
1048 /* option */
1049 value = extract_field (FLD_option, code, 0);
1050 info->shifter.kind =
1051 aarch64_get_operand_modifier_from_value (value, true /* extend_p */);
1052 /* Fix-up the shifter kind; although the table-driven approach is
1053 efficient, it is slightly inflexible, thus needing this fix-up. */
1054 if (info->shifter.kind == AARCH64_MOD_UXTX)
1055 info->shifter.kind = AARCH64_MOD_LSL;
1056 /* S */
1057 S = extract_field (FLD_S, code, 0);
1058 if (S == 0)
1060 info->shifter.amount = 0;
1061 info->shifter.amount_present = 0;
1063 else
1065 int size;
1066 /* Need information in other operand(s) to help achieve the decoding
1067 from 'S' field. */
1068 info->qualifier = get_expected_qualifier (inst, info->idx);
1069 /* Get the size of the data element that is accessed, which may be
1070 different from that of the source register size, e.g. in strb/ldrb. */
1071 size = aarch64_get_qualifier_esize (info->qualifier);
1072 info->shifter.amount = get_logsz (size);
1073 info->shifter.amount_present = 1;
1076 return true;
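/* For example, in LDR <Xt>, [<Xn|SP>, <Xm>{, LSL <amount>}] the S bit selects
   between no shift and a shift by log2(8) == 3, the access size in bytes.  */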
1079 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>], #<simm>. */
1080 bool
1081 aarch64_ext_addr_simm (const aarch64_operand *self, aarch64_opnd_info *info,
1082 aarch64_insn code, const aarch64_inst *inst,
1083 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1085 aarch64_insn imm;
1086 info->qualifier = get_expected_qualifier (inst, info->idx);
1088 /* Rn */
1089 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1090 /* simm (imm9 or imm7) */
1091 imm = extract_field (self->fields[0], code, 0);
1092 info->addr.offset.imm = sign_extend (imm, fields[self->fields[0]].width - 1);
1093 if (self->fields[0] == FLD_imm7
1094 || info->qualifier == AARCH64_OPND_QLF_imm_tag)
1095 /* scaled immediate in ld/st pair instructions. */
1096 info->addr.offset.imm *= aarch64_get_qualifier_esize (info->qualifier);
1097 /* qualifier */
1098 if (inst->opcode->iclass == ldst_unscaled
1099 || inst->opcode->iclass == ldstnapair_offs
1100 || inst->opcode->iclass == ldstpair_off
1101 || inst->opcode->iclass == ldst_unpriv)
1102 info->addr.writeback = 0;
1103 else
1105 /* pre/post- index */
1106 info->addr.writeback = 1;
1107 if (extract_field (self->fields[1], code, 0) == 1)
1108 info->addr.preind = 1;
1109 else
1110 info->addr.postind = 1;
1113 return true;
1116 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<simm>}]. */
1117 bool
1118 aarch64_ext_addr_uimm12 (const aarch64_operand *self, aarch64_opnd_info *info,
1119 aarch64_insn code,
1120 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1121 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1123 int shift;
1124 info->qualifier = get_expected_qualifier (inst, info->idx);
1125 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
1126 /* Rn */
1127 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1128 /* uimm12 */
1129 info->addr.offset.imm = extract_field (self->fields[1], code, 0) << shift;
1130 return true;
1133 /* Decode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
1134 bool
1135 aarch64_ext_addr_simm10 (const aarch64_operand *self, aarch64_opnd_info *info,
1136 aarch64_insn code,
1137 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1138 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1140 aarch64_insn imm;
1142 info->qualifier = get_expected_qualifier (inst, info->idx);
1143 /* Rn */
1144 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1145 /* simm10 */
1146 imm = extract_fields (code, 0, 2, self->fields[1], self->fields[2]);
1147 info->addr.offset.imm = sign_extend (imm, 9) << 3;
1148 if (extract_field (self->fields[3], code, 0) == 1) {
1149 info->addr.writeback = 1;
1150 info->addr.preind = 1;
1152 return true;
1155 /* Decode the address operand for e.g.
1156 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
1157 bool
1158 aarch64_ext_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
1159 aarch64_opnd_info *info,
1160 aarch64_insn code, const aarch64_inst *inst,
1161 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1163 /* The opcode dependent area stores the number of elements in
1164 each structure to be loaded/stored. */
1165 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
1167 /* Rn */
1168 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1169 /* Rm | #<amount> */
1170 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
1171 if (info->addr.offset.regno == 31)
1173 if (inst->opcode->operands[0] == AARCH64_OPND_LVt_AL)
1174 /* Special handling of loading single structure to all lanes. */
1175 info->addr.offset.imm = (is_ld1r ? 1
1176 : inst->operands[0].reglist.num_regs)
1177 * aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1178 else
1179 info->addr.offset.imm = inst->operands[0].reglist.num_regs
1180 * aarch64_get_qualifier_esize (inst->operands[0].qualifier)
1181 * aarch64_get_qualifier_nelem (inst->operands[0].qualifier);
1183 else
1184 info->addr.offset.is_reg = 1;
1185 info->addr.writeback = 1;
1187 return true;
1190 /* Decode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
1191 bool
1192 aarch64_ext_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
1193 aarch64_opnd_info *info,
1194 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
1195 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1197 aarch64_insn value;
1198 /* cond */
1199 value = extract_field (FLD_cond, code, 0);
1200 info->cond = get_cond_from_value (value);
1201 return true;
1204 /* Decode the system register operand for e.g. MRS <Xt>, <systemreg>. */
1205 bool
1206 aarch64_ext_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
1207 aarch64_opnd_info *info,
1208 aarch64_insn code,
1209 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1210 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1212 /* op0:op1:CRn:CRm:op2 */
1213 info->sysreg.value = extract_fields (code, 0, 5, FLD_op0, FLD_op1, FLD_CRn,
1214 FLD_CRm, FLD_op2);
1215 info->sysreg.flags = 0;
1217 /* If a system instruction, work out which restrictions should be placed on
1218 the register value during decoding; these will be enforced then. */
1219 if (inst->opcode->iclass == ic_system)
1221 /* Check to see if it's read-only; else check if it's write-only.
1222 If it's both or unspecified, don't care. */
1223 if ((inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE)) == F_SYS_READ)
1224 info->sysreg.flags = F_REG_READ;
1225 else if ((inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE))
1226 == F_SYS_WRITE)
1227 info->sysreg.flags = F_REG_WRITE;
1230 return true;
1233 /* Decode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
1234 bool
1235 aarch64_ext_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
1236 aarch64_opnd_info *info, aarch64_insn code,
1237 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1238 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1240 int i;
1241 aarch64_insn fld_crm = extract_field (FLD_CRm, code, 0);
1242 /* op1:op2 */
1243 info->pstatefield = extract_fields (code, 0, 2, FLD_op1, FLD_op2);
1244 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
1245 if (aarch64_pstatefields[i].value == (aarch64_insn)info->pstatefield)
1247 /* PSTATEFIELD name can be encoded partially in CRm[3:1]. */
1248 uint32_t flags = aarch64_pstatefields[i].flags;
1249 if ((flags & F_REG_IN_CRM)
1250 && ((fld_crm & 0xe) != PSTATE_DECODE_CRM (flags)))
1251 continue;
1252 info->sysreg.flags = flags;
1253 return true;
1255 /* Reserved value in <pstatefield>. */
1256 return false;
1259 /* Decode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
1260 bool
1261 aarch64_ext_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
1262 aarch64_opnd_info *info,
1263 aarch64_insn code,
1264 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1265 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1267 int i;
1268 aarch64_insn value;
1269 const aarch64_sys_ins_reg *sysins_ops;
1270 /* op0:op1:CRn:CRm:op2 */
1271 value = extract_fields (code, 0, 5,
1272 FLD_op0, FLD_op1, FLD_CRn,
1273 FLD_CRm, FLD_op2);
1275 switch (info->type)
1277 case AARCH64_OPND_SYSREG_AT: sysins_ops = aarch64_sys_regs_at; break;
1278 case AARCH64_OPND_SYSREG_DC: sysins_ops = aarch64_sys_regs_dc; break;
1279 case AARCH64_OPND_SYSREG_IC: sysins_ops = aarch64_sys_regs_ic; break;
1280 case AARCH64_OPND_SYSREG_TLBI: sysins_ops = aarch64_sys_regs_tlbi; break;
1281 case AARCH64_OPND_SYSREG_SR:
1282 sysins_ops = aarch64_sys_regs_sr;
1283 /* Let's remove op2 for rctx. Refer to comments in the definition of
1284 aarch64_sys_regs_sr[]. */
1285 value = value & ~(0x7);
1286 break;
1287 default: return false;
1290 for (i = 0; sysins_ops[i].name != NULL; ++i)
1291 if (sysins_ops[i].value == value)
1293 info->sysins_op = sysins_ops + i;
1294 DEBUG_TRACE ("%s found value: %x, has_xt: %d, i: %d.",
1295 info->sysins_op->name,
1296 (unsigned)info->sysins_op->value,
1297 aarch64_sys_ins_reg_has_xt (info->sysins_op), i);
1298 return true;
1301 return false;
1304 /* Decode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
1306 bool
1307 aarch64_ext_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
1308 aarch64_opnd_info *info,
1309 aarch64_insn code,
1310 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1311 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1313 /* CRm */
1314 info->barrier = aarch64_barrier_options + extract_field (FLD_CRm, code, 0);
1315 return true;
1318 /* Decode the memory barrier option operand for DSB <option>nXS|#<imm>. */
1320 bool
1321 aarch64_ext_barrier_dsb_nxs (const aarch64_operand *self ATTRIBUTE_UNUSED,
1322 aarch64_opnd_info *info,
1323 aarch64_insn code,
1324 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1325 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1327 /* For the DSB nXS barrier variant the immediate is encoded in a 2-bit field. */
1328 aarch64_insn field = extract_field (FLD_CRm_dsb_nxs, code, 0);
1329 info->barrier = aarch64_barrier_dsb_nxs_options + field;
1330 return true;
1333 /* Decode the prefetch operation option operand for e.g.
1334 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
1336 bool
1337 aarch64_ext_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
1338 aarch64_opnd_info *info,
1339 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
1340 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1342 /* prfop in Rt */
1343 info->prfop = aarch64_prfops + extract_field (FLD_Rt, code, 0);
1344 return true;
1347 /* Decode the hint number for an alias taking an operand. Set info->hint_option
1348 to the matching name/value pair in aarch64_hint_options. */
1350 bool
1351 aarch64_ext_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
1352 aarch64_opnd_info *info,
1353 aarch64_insn code,
1354 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1355 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1357 /* CRm:op2. */
1358 unsigned hint_number;
1359 int i;
1361 hint_number = extract_fields (code, 0, 2, FLD_CRm, FLD_op2);
1363 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
1365 if (hint_number == HINT_VAL (aarch64_hint_options[i].value))
1367 info->hint_option = &(aarch64_hint_options[i]);
1368 return true;
1372 return false;
1375 /* Decode the extended register operand for e.g.
1376 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1377 bool
1378 aarch64_ext_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
1379 aarch64_opnd_info *info,
1380 aarch64_insn code,
1381 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1382 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1384 aarch64_insn value;
1386 /* Rm */
1387 info->reg.regno = extract_field (FLD_Rm, code, 0);
1388 /* option */
1389 value = extract_field (FLD_option, code, 0);
1390 info->shifter.kind =
1391 aarch64_get_operand_modifier_from_value (value, true /* extend_p */);
1392 /* imm3 */
1393 info->shifter.amount = extract_field (FLD_imm3, code, 0);
1395 /* This makes the constraint checking happy. */
1396 info->shifter.operator_present = 1;
1398 /* Assume inst->operands[0].qualifier has been resolved. */
1399 assert (inst->operands[0].qualifier != AARCH64_OPND_QLF_NIL);
1400 info->qualifier = AARCH64_OPND_QLF_W;
1401 if (inst->operands[0].qualifier == AARCH64_OPND_QLF_X
1402 && (info->shifter.kind == AARCH64_MOD_UXTX
1403 || info->shifter.kind == AARCH64_MOD_SXTX))
1404 info->qualifier = AARCH64_OPND_QLF_X;
1406 return true;
1409 /* Decode the shifted register operand for e.g.
1410 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
1411 bool
1412 aarch64_ext_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
1413 aarch64_opnd_info *info,
1414 aarch64_insn code,
1415 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1416 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1418 aarch64_insn value;
1420 /* Rm */
1421 info->reg.regno = extract_field (FLD_Rm, code, 0);
1422 /* shift */
1423 value = extract_field (FLD_shift, code, 0);
1424 info->shifter.kind =
1425 aarch64_get_operand_modifier_from_value (value, false /* extend_p */);
1426 if (info->shifter.kind == AARCH64_MOD_ROR
1427 && inst->opcode->iclass != log_shift)
1428 /* ROR is not available for the shifted register operand in arithmetic
1429 instructions. */
1430 return false;
1431 /* imm6 */
1432 info->shifter.amount = extract_field (FLD_imm6, code, 0);
1434 /* This makes the constraint checking happy. */
1435 info->shifter.operator_present = 1;
1437 return true;
1440 /* Decode an SVE address [<base>, #<offset>*<factor>, MUL VL],
1441 where <offset> is given by the OFFSET parameter and where <factor> is
1442 1 plus SELF's operand-dependent value. fields[0] specifies the field
1443 that holds <base>. */
1444 static bool
1445 aarch64_ext_sve_addr_reg_mul_vl (const aarch64_operand *self,
1446 aarch64_opnd_info *info, aarch64_insn code,
1447 int64_t offset)
1449 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1450 info->addr.offset.imm = offset * (1 + get_operand_specific_data (self));
1451 info->addr.offset.is_reg = false;
1452 info->addr.writeback = false;
1453 info->addr.preind = true;
1454 if (offset != 0)
1455 info->shifter.kind = AARCH64_MOD_MUL_VL;
1456 info->shifter.amount = 1;
1457 info->shifter.operator_present = (info->addr.offset.imm != 0);
1458 info->shifter.amount_present = false;
1459 return true;
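/* The raw offset is scaled by 1 plus SELF's operand-dependent value; so,
   assuming a two-register LD2/ST2 form whose operand-dependent value is 1,
   a raw offset of 3 is presented as #6, MUL VL.  */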
1462 /* Decode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
1463 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
1464 SELF's operand-dependent value. fields[0] specifies the field that
1465 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
1466 bool
1467 aarch64_ext_sve_addr_ri_s4xvl (const aarch64_operand *self,
1468 aarch64_opnd_info *info, aarch64_insn code,
1469 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1470 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1472 int offset;
1474 offset = extract_field (FLD_SVE_imm4, code, 0);
1475 offset = ((offset + 8) & 15) - 8;
1476 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1479 /* Decode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
1480 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
1481 SELF's operand-dependent value. fields[0] specifies the field that
1482 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
1483 bool
1484 aarch64_ext_sve_addr_ri_s6xvl (const aarch64_operand *self,
1485 aarch64_opnd_info *info, aarch64_insn code,
1486 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1487 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1489 int offset;
1491 offset = extract_field (FLD_SVE_imm6, code, 0);
1492 offset = (((offset + 32) & 63) - 32);
1493 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1496 /* Decode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
1497 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
1498 SELF's operand-dependent value. fields[0] specifies the field that
1499 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
1500 and imm3 fields, with imm3 being the less-significant part. */
1501 bool
1502 aarch64_ext_sve_addr_ri_s9xvl (const aarch64_operand *self,
1503 aarch64_opnd_info *info,
1504 aarch64_insn code,
1505 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1506 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1508 int offset;
1510 offset = extract_fields (code, 0, 2, FLD_SVE_imm6, FLD_imm3);
1511 offset = (((offset + 256) & 511) - 256);
1512 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1515 /* Decode an SVE address [<base>, #<offset> << <shift>], where <offset>
1516 is given by the OFFSET parameter and where <shift> is SELF's operand-
1517 dependent value. fields[0] specifies the base register field <base>. */
1518 static bool
1519 aarch64_ext_sve_addr_reg_imm (const aarch64_operand *self,
1520 aarch64_opnd_info *info, aarch64_insn code,
1521 int64_t offset)
1523 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1524 info->addr.offset.imm = offset * (1 << get_operand_specific_data (self));
1525 info->addr.offset.is_reg = false;
1526 info->addr.writeback = false;
1527 info->addr.preind = true;
1528 info->shifter.operator_present = false;
1529 info->shifter.amount_present = false;
1530 return true;
1533 /* Decode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
1534 is a 4-bit signed number and where <shift> is SELF's operand-dependent
1535 value. fields[0] specifies the base register field. */
1536 bool
1537 aarch64_ext_sve_addr_ri_s4 (const aarch64_operand *self,
1538 aarch64_opnd_info *info, aarch64_insn code,
1539 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1540 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1542 int offset = sign_extend (extract_field (FLD_SVE_imm4, code, 0), 3);
1543 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1546 /* Decode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1547 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1548 value. fields[0] specifies the base register field. */
1549 bool
1550 aarch64_ext_sve_addr_ri_u6 (const aarch64_operand *self,
1551 aarch64_opnd_info *info, aarch64_insn code,
1552 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1553 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1555 int offset = extract_field (FLD_SVE_imm6, code, 0);
1556 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1559 /* Decode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1560 is SELF's operand-dependent value. fields[0] specifies the base
1561 register field and fields[1] specifies the offset register field. */
1562 bool
1563 aarch64_ext_sve_addr_rr_lsl (const aarch64_operand *self,
1564 aarch64_opnd_info *info, aarch64_insn code,
1565 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1566 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1568 int index_regno;
1570 index_regno = extract_field (self->fields[1], code, 0);
1571 if (index_regno == 31 && (self->flags & OPD_F_NO_ZR) != 0)
1572 return false;
1574 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1575 info->addr.offset.regno = index_regno;
1576 info->addr.offset.is_reg = true;
1577 info->addr.writeback = false;
1578 info->addr.preind = true;
1579 info->shifter.kind = AARCH64_MOD_LSL;
1580 info->shifter.amount = get_operand_specific_data (self);
1581 info->shifter.operator_present = (info->shifter.amount != 0);
1582 info->shifter.amount_present = (info->shifter.amount != 0);
1583 return true;
1586 /* Decode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1587 <shift> is SELF's operand-dependent value. fields[0] specifies the
1588 base register field, fields[1] specifies the offset register field and
1589 fields[2] is a single-bit field that selects SXTW over UXTW. */
1590 bool
1591 aarch64_ext_sve_addr_rz_xtw (const aarch64_operand *self,
1592 aarch64_opnd_info *info, aarch64_insn code,
1593 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1594 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1596 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1597 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1598 info->addr.offset.is_reg = true;
1599 info->addr.writeback = false;
1600 info->addr.preind = true;
1601 if (extract_field (self->fields[2], code, 0))
1602 info->shifter.kind = AARCH64_MOD_SXTW;
1603 else
1604 info->shifter.kind = AARCH64_MOD_UXTW;
1605 info->shifter.amount = get_operand_specific_data (self);
1606 info->shifter.operator_present = true;
1607 info->shifter.amount_present = (info->shifter.amount != 0);
1608 return true;
1611 /* Decode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1612 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1613 fields[0] specifies the base register field. */
1614 bool
1615 aarch64_ext_sve_addr_zi_u5 (const aarch64_operand *self,
1616 aarch64_opnd_info *info, aarch64_insn code,
1617 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1618 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1620 int offset = extract_field (FLD_imm5, code, 0);
1621 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1624 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
1625 where <modifier> is given by KIND and where <msz> is a 2-bit unsigned
1626 number. fields[0] specifies the base register field and fields[1]
1627 specifies the offset register field. */
1628 static bool
1629 aarch64_ext_sve_addr_zz (const aarch64_operand *self, aarch64_opnd_info *info,
1630 aarch64_insn code, enum aarch64_modifier_kind kind)
1632 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1633 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1634 info->addr.offset.is_reg = true;
1635 info->addr.writeback = false;
1636 info->addr.preind = true;
1637 info->shifter.kind = kind;
1638 info->shifter.amount = extract_field (FLD_SVE_msz, code, 0);
1639 info->shifter.operator_present = (kind != AARCH64_MOD_LSL
1640 || info->shifter.amount != 0);
1641 info->shifter.amount_present = (info->shifter.amount != 0);
1642 return true;
1645 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
1646 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1647 field and fields[1] specifies the offset register field. */
1648 bool
1649 aarch64_ext_sve_addr_zz_lsl (const aarch64_operand *self,
1650 aarch64_opnd_info *info, aarch64_insn code,
1651 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1652 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1654 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_LSL);
1657 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
1658 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1659 field and fields[1] specifies the offset register field. */
1660 bool
1661 aarch64_ext_sve_addr_zz_sxtw (const aarch64_operand *self,
1662 aarch64_opnd_info *info, aarch64_insn code,
1663 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1664 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1666 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_SXTW);
1669 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
1670 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1671 field and fields[1] specifies the offset register field. */
1672 bool
1673 aarch64_ext_sve_addr_zz_uxtw (const aarch64_operand *self,
1674 aarch64_opnd_info *info, aarch64_insn code,
1675 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1676 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1678 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_UXTW);
1681 /* Finish decoding an SVE arithmetic immediate, given that INFO already
1682 has the raw field value and that the low 8 bits decode to VALUE. */
1683 static bool
1684 decode_sve_aimm (aarch64_opnd_info *info, int64_t value)
1686 info->shifter.kind = AARCH64_MOD_LSL;
1687 info->shifter.amount = 0;
1688 if (info->imm.value & 0x100)
1690 if (value == 0)
1691 /* Decode 0x100 as #0, LSL #8. */
1692 info->shifter.amount = 8;
1693 else
1694 value *= 256;
1696 info->shifter.operator_present = (info->shifter.amount != 0);
1697 info->shifter.amount_present = (info->shifter.amount != 0);
1698 info->imm.value = value;
1699 return true;
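/* For example, a raw nine-bit value of 0x100 is presented as #0, LSL #8,
   while 0x101 becomes #256 with no explicit shift.  */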
1702 /* Decode an SVE ADD/SUB immediate. */
1703 bool
1704 aarch64_ext_sve_aimm (const aarch64_operand *self,
1705 aarch64_opnd_info *info, const aarch64_insn code,
1706 const aarch64_inst *inst,
1707 aarch64_operand_error *errors)
1709 return (aarch64_ext_imm (self, info, code, inst, errors)
1710 && decode_sve_aimm (info, (uint8_t) info->imm.value));
1713 /* Decode an SVE CPY/DUP immediate. */
1714 bool
1715 aarch64_ext_sve_asimm (const aarch64_operand *self,
1716 aarch64_opnd_info *info, const aarch64_insn code,
1717 const aarch64_inst *inst,
1718 aarch64_operand_error *errors)
1720 return (aarch64_ext_imm (self, info, code, inst, errors)
1721 && decode_sve_aimm (info, (int8_t) info->imm.value));
1724 /* Decode a single-bit immediate that selects between #0.5 and #1.0.
1725 The fields array specifies which field to use. */
1726 bool
1727 aarch64_ext_sve_float_half_one (const aarch64_operand *self,
1728 aarch64_opnd_info *info, aarch64_insn code,
1729 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1730 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1732 if (extract_field (self->fields[0], code, 0))
1733 info->imm.value = 0x3f800000;
1734 else
1735 info->imm.value = 0x3f000000;
1736 info->imm.is_fp = true;
1737 return true;
1740 /* Decode a single-bit immediate that selects between #0.5 and #2.0.
1741 The fields array specifies which field to use. */
1742 bool
1743 aarch64_ext_sve_float_half_two (const aarch64_operand *self,
1744 aarch64_opnd_info *info, aarch64_insn code,
1745 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1746 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1748 if (extract_field (self->fields[0], code, 0))
1749 info->imm.value = 0x40000000;
1750 else
1751 info->imm.value = 0x3f000000;
1752 info->imm.is_fp = true;
1753 return true;
1756 /* Decode a single-bit immediate that selects between #0.0 and #1.0.
1757 The fields array specifies which field to use. */
1758 bool
1759 aarch64_ext_sve_float_zero_one (const aarch64_operand *self,
1760 aarch64_opnd_info *info, aarch64_insn code,
1761 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1762 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1764 if (extract_field (self->fields[0], code, 0))
1765 info->imm.value = 0x3f800000;
1766 else
1767 info->imm.value = 0x0;
1768 info->imm.is_fp = true;
1769 return true;
1772 /* Decode the ZA tile vector, vector indicator, vector selector, qualifier and
1773 immediate for numerous SME instructions such as MOVA. */
1774 bool
1775 aarch64_ext_sme_za_hv_tiles (const aarch64_operand *self,
1776 aarch64_opnd_info *info, aarch64_insn code,
1777 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1778 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1780 int fld_size = extract_field (self->fields[0], code, 0);
1781 int fld_q = extract_field (self->fields[1], code, 0);
1782 int fld_v = extract_field (self->fields[2], code, 0);
1783 int fld_rv = extract_field (self->fields[3], code, 0);
1784 int fld_zan_imm = extract_field (self->fields[4], code, 0);
1786 /* Deduce qualifier encoded in size and Q fields. */
1787 if (fld_size == 0)
1788 info->qualifier = AARCH64_OPND_QLF_S_B;
1789 else if (fld_size == 1)
1790 info->qualifier = AARCH64_OPND_QLF_S_H;
1791 else if (fld_size == 2)
1792 info->qualifier = AARCH64_OPND_QLF_S_S;
1793 else if (fld_size == 3 && fld_q == 0)
1794 info->qualifier = AARCH64_OPND_QLF_S_D;
1795 else if (fld_size == 3 && fld_q == 1)
1796 info->qualifier = AARCH64_OPND_QLF_S_Q;
1798 info->za_tile_vector.index.regno = fld_rv + 12;
1799 info->za_tile_vector.v = fld_v;
1801 switch (info->qualifier)
1803 case AARCH64_OPND_QLF_S_B:
1804 info->za_tile_vector.regno = 0;
1805 info->za_tile_vector.index.imm = fld_zan_imm;
1806 break;
1807 case AARCH64_OPND_QLF_S_H:
1808 info->za_tile_vector.regno = fld_zan_imm >> 3;
1809 info->za_tile_vector.index.imm = fld_zan_imm & 0x07;
1810 break;
1811 case AARCH64_OPND_QLF_S_S:
1812 info->za_tile_vector.regno = fld_zan_imm >> 2;
1813 info->za_tile_vector.index.imm = fld_zan_imm & 0x03;
1814 break;
1815 case AARCH64_OPND_QLF_S_D:
1816 info->za_tile_vector.regno = fld_zan_imm >> 1;
1817 info->za_tile_vector.index.imm = fld_zan_imm & 0x01;
1818 break;
1819 case AARCH64_OPND_QLF_S_Q:
1820 info->za_tile_vector.regno = fld_zan_imm;
1821 info->za_tile_vector.index.imm = 0;
1822 break;
1823 default:
1824 return false;
1827 return true;
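/* Illustrative decode for the ZA tile-vector operand above (the printed
   syntax is only sketched here; aarch64_print_operand is authoritative):
   with fld_size = 1, fld_q = 0, fld_v = 0, fld_rv = 1 and
   fld_zan_imm = 0b01101, the qualifier is S_H, the select register is W13
   (Rv + 12), the tile number is 0b01101 >> 3 = 1 and the offset is
   0b01101 & 0x07 = 5, giving roughly ZA1H.H[W13, 5]. */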
1830 /* Decode, in the SME instruction ZERO, a list of up to eight 64-bit element
1831 tile names separated by commas, encoded in the "imm8" field.
1833 For programmer convenience an assembler must also accept the names of
1834 32-bit, 16-bit and 8-bit element tiles, which are converted into the
1835 corresponding set of 64-bit element tiles. */
1837 bool
1838 aarch64_ext_sme_za_list (const aarch64_operand *self,
1839 aarch64_opnd_info *info, aarch64_insn code,
1840 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1841 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1843 int mask = extract_field (self->fields[0], code, 0);
1844 info->imm.value = mask;
1845 return true;
1848 /* Decode the ZA array vector select register (Rv field) and the optional
1849 vector and memory offset (imm4 field). */
1851 bool
1852 aarch64_ext_sme_za_array (const aarch64_operand *self,
1853 aarch64_opnd_info *info, aarch64_insn code,
1854 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1855 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1857 int regno = extract_field (self->fields[0], code, 0) + 12;
1858 int imm = extract_field (self->fields[1], code, 0);
1859 info->za_tile_vector.index.regno = regno;
1860 info->za_tile_vector.index.imm = imm;
1861 return true;
1864 bool
1865 aarch64_ext_sme_addr_ri_u4xvl (const aarch64_operand *self,
1866 aarch64_opnd_info *info, aarch64_insn code,
1867 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1868 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1870 int regno = extract_field (self->fields[0], code, 0);
1871 int imm = extract_field (self->fields[1], code, 0);
1872 info->addr.base_regno = regno;
1873 info->addr.offset.imm = imm;
1874 /* MUL VL operator is always present for this operand. */
1875 info->shifter.kind = AARCH64_MOD_MUL_VL;
1876 info->shifter.operator_present = (imm != 0);
1877 return true;
1880 /* Decode the {SM|ZA} field for the SMSTART and SMSTOP instructions. */
1881 bool
1882 aarch64_ext_sme_sm_za (const aarch64_operand *self,
1883 aarch64_opnd_info *info, aarch64_insn code,
1884 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1885 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1887 info->pstatefield = 0x1b;
1888 aarch64_insn fld_crm = extract_field (self->fields[0], code, 0);
1889 fld_crm >>= 1; /* CRm[3:1]. */
1891 if (fld_crm == 0x1)
1892 info->reg.regno = 's';
1893 else if (fld_crm == 0x2)
1894 info->reg.regno = 'z';
1895 else
1896 return false;
1898 return true;
1901 bool
1902 aarch64_ext_sme_pred_reg_with_index (const aarch64_operand *self,
1903 aarch64_opnd_info *info, aarch64_insn code,
1904 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1905 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1907 aarch64_insn fld_rm = extract_field (self->fields[0], code, 0);
1908 aarch64_insn fld_pn = extract_field (self->fields[1], code, 0);
1909 aarch64_insn fld_i1 = extract_field (self->fields[2], code, 0);
1910 aarch64_insn fld_tszh = extract_field (self->fields[3], code, 0);
1911 aarch64_insn fld_tszl = extract_field (self->fields[4], code, 0);
1912 int imm;
1914 info->za_tile_vector.regno = fld_pn;
1915 info->za_tile_vector.index.regno = fld_rm + 12;
1917 if (fld_tszh == 0x1 && fld_tszl == 0x0)
1919 info->qualifier = AARCH64_OPND_QLF_S_D;
1920 imm = fld_i1;
1922 else if (fld_tszl == 0x4)
1924 info->qualifier = AARCH64_OPND_QLF_S_S;
1925 imm = (fld_i1 << 1) | fld_tszh;
1927 else if ((fld_tszl & 0x3) == 0x2)
1929 info->qualifier = AARCH64_OPND_QLF_S_H;
1930 imm = (fld_i1 << 2) | (fld_tszh << 1) | (fld_tszl >> 2);
1932 else if (fld_tszl & 0x1)
1934 info->qualifier = AARCH64_OPND_QLF_S_B;
1935 imm = (fld_i1 << 3) | (fld_tszh << 2) | (fld_tszl >> 1);
1937 else
1938 return false;
1940 info->za_tile_vector.index.imm = imm;
1941 return true;
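/* Worked example for the tszh/tszl decoding above (the values follow
   directly from the code; they are illustrative only): with fld_tszh = 0,
   fld_tszl = 0b110 and fld_i1 = 1, the (fld_tszl & 0x3) == 0x2 arm matches,
   so the qualifier is S_H and the index is
   (1 << 2) | (0 << 1) | (0b110 >> 2) = 5. */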
1944 /* Decode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1945 array specifies which field to use for Zn. MM is encoded in the
1946 concatenation of imm5 and SVE_tszh, with imm5 being the less
1947 significant part. */
1948 bool
1949 aarch64_ext_sve_index (const aarch64_operand *self,
1950 aarch64_opnd_info *info, aarch64_insn code,
1951 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1952 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1954 int val;
1956 info->reglane.regno = extract_field (self->fields[0], code, 0);
1957 val = extract_fields (code, 0, 2, FLD_SVE_tszh, FLD_imm5);
1958 if ((val & 31) == 0)
1959 return false;
1960 while ((val & 1) == 0)
1961 val /= 2;
1962 info->reglane.index = val / 2;
1963 return true;
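/* A worked example of the triangular encoding above (illustrative only):
   with SVE_tszh:imm5 = 0b0001010 the lowest set bit is bit 1, so the loop
   reduces the value to 0b101 and the index becomes 0b101 >> 1 = 2.  The
   position of that lowest set bit is what
   aarch64_decode_variant_using_iclass later uses, via the sve_index iclass,
   to pick the element-size qualifier (here .H). */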
1966 /* Decode a logical immediate for the MOV alias of SVE DUPM. */
1967 bool
1968 aarch64_ext_sve_limm_mov (const aarch64_operand *self,
1969 aarch64_opnd_info *info, const aarch64_insn code,
1970 const aarch64_inst *inst,
1971 aarch64_operand_error *errors)
1973 int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1974 return (aarch64_ext_limm (self, info, code, inst, errors)
1975 && aarch64_sve_dupm_mov_immediate_p (info->imm.value, esize));
1978 /* Decode Zn[MM], where Zn occupies the least-significant part of the field
1979 and where MM occupies the most-significant part. The operand-dependent
1980 value specifies the number of bits in Zn. */
1981 bool
1982 aarch64_ext_sve_quad_index (const aarch64_operand *self,
1983 aarch64_opnd_info *info, aarch64_insn code,
1984 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1985 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1987 unsigned int reg_bits = get_operand_specific_data (self);
1988 unsigned int val = extract_all_fields (self, code);
1989 info->reglane.regno = val & ((1 << reg_bits) - 1);
1990 info->reglane.index = val >> reg_bits;
1991 return true;
1994 /* Decode {Zn.<T> - Zm.<T>}. The fields array specifies which field
1995 to use for Zn. The opcode-dependent value specifies the number
1996 of registers in the list. */
1997 bool
1998 aarch64_ext_sve_reglist (const aarch64_operand *self,
1999 aarch64_opnd_info *info, aarch64_insn code,
2000 const aarch64_inst *inst ATTRIBUTE_UNUSED,
2001 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2003 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
2004 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
2005 return true;
2008 /* Decode <pattern>{, MUL #<amount>}. The fields array specifies which
2009 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
2010 field. */
2011 bool
2012 aarch64_ext_sve_scale (const aarch64_operand *self,
2013 aarch64_opnd_info *info, aarch64_insn code,
2014 const aarch64_inst *inst, aarch64_operand_error *errors)
2016 int val;
2018 if (!aarch64_ext_imm (self, info, code, inst, errors))
2019 return false;
2020 val = extract_field (FLD_SVE_imm4, code, 0);
2021 info->shifter.kind = AARCH64_MOD_MUL;
2022 info->shifter.amount = val + 1;
2023 info->shifter.operator_present = (val != 0);
2024 info->shifter.amount_present = (val != 0);
2025 return true;
2028 /* Return the top set bit in VALUE, which is expected to be relatively
2029 small. */
2030 static uint64_t
2031 get_top_bit (uint64_t value)
2033 while ((value & -value) != value)
2034 value -= value & -value;
2035 return value;
2038 /* Decode an SVE shift-left immediate. */
2039 bool
2040 aarch64_ext_sve_shlimm (const aarch64_operand *self,
2041 aarch64_opnd_info *info, const aarch64_insn code,
2042 const aarch64_inst *inst, aarch64_operand_error *errors)
2044 if (!aarch64_ext_imm (self, info, code, inst, errors)
2045 || info->imm.value == 0)
2046 return false;
2048 info->imm.value -= get_top_bit (info->imm.value);
2049 return true;
2052 /* Decode an SVE shift-right immediate. */
2053 bool
2054 aarch64_ext_sve_shrimm (const aarch64_operand *self,
2055 aarch64_opnd_info *info, const aarch64_insn code,
2056 const aarch64_inst *inst, aarch64_operand_error *errors)
2058 if (!aarch64_ext_imm (self, info, code, inst, errors)
2059 || info->imm.value == 0)
2060 return false;
2062 info->imm.value = get_top_bit (info->imm.value) * 2 - info->imm.value;
2063 return true;
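/* Worked example for the two SVE shift decoders above (a sketch based
   purely on the arithmetic in this file): for a combined immediate field
   value of 0b01011 (11), get_top_bit returns 8, so the shift-left form
   decodes to a shift of 11 - 8 = 3 while the shift-right form decodes to
   2 * 8 - 11 = 5. */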
2066 /* Decode X0-X30. Register 31 is unallocated. */
2067 bool
2068 aarch64_ext_x0_to_x30 (const aarch64_operand *self, aarch64_opnd_info *info,
2069 const aarch64_insn code,
2070 const aarch64_inst *inst ATTRIBUTE_UNUSED,
2071 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2073 info->reg.regno = extract_field (self->fields[0], code, 0);
2074 return info->reg.regno <= 30;
2077 /* Bitfields that are commonly used to encode certain operands' information
2078 may be partially used as part of the base opcode in some instructions.
2079 For example, the bit 1 of the field 'size' in
2080 FCVTXN <Vb><d>, <Va><n>
2081 is actually part of the base opcode, while only size<0> is available
2082 for encoding the register type. Another example is the AdvSIMD
2083 instruction ORR (register), in which the field 'size' is also used for
2084 the base opcode, leaving only the field 'Q' available to encode the
2085 vector register arrangement specifier '8B' or '16B'.
2087 This function tries to deduce the qualifier from the value of partially
2088 constrained field(s). Given the VALUE of such a field or fields, the
2089 qualifiers CANDIDATES and the MASK (indicating which bits are valid for
2090 operand encoding), the function returns the matching qualifier or
2091 AARCH64_OPND_QLF_NIL if nothing matches.
2093 N.B. CANDIDATES is a group of possible qualifiers that are valid for
2094 one operand; it has a maximum of AARCH64_MAX_QLF_SEQ_NUM qualifiers and
2095 may end with AARCH64_OPND_QLF_NIL. */
2097 static enum aarch64_opnd_qualifier
2098 get_qualifier_from_partial_encoding (aarch64_insn value,
2099 const enum aarch64_opnd_qualifier* \
2100 candidates,
2101 aarch64_insn mask)
2103 int i;
2104 DEBUG_TRACE ("enter with value: %d, mask: %d", (int)value, (int)mask);
2105 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
2107 aarch64_insn standard_value;
2108 if (candidates[i] == AARCH64_OPND_QLF_NIL)
2109 break;
2110 standard_value = aarch64_get_qualifier_standard_value (candidates[i]);
2111 if ((standard_value & mask) == (value & mask))
2112 return candidates[i];
2114 return AARCH64_OPND_QLF_NIL;
2117 /* Given a list of qualifier sequences, return all possible valid qualifiers
2118 for operand IDX in QUALIFIERS.
2119 Assume QUALIFIERS is an array whose length is large enough. */
2121 static void
2122 get_operand_possible_qualifiers (int idx,
2123 const aarch64_opnd_qualifier_seq_t *list,
2124 enum aarch64_opnd_qualifier *qualifiers)
2126 int i;
2127 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
2128 if ((qualifiers[i] = list[i][idx]) == AARCH64_OPND_QLF_NIL)
2129 break;
2132 /* Decode the size and Q fields for e.g. SHADD.
2133 We tag one operand with the qualifier according to the code;
2134 whether that qualifier is valid for this opcode is left to the
2135 semantic checking. */
2137 static int
2138 decode_sizeq (aarch64_inst *inst)
2140 int idx;
2141 enum aarch64_opnd_qualifier qualifier;
2142 aarch64_insn code;
2143 aarch64_insn value, mask;
2144 enum aarch64_field_kind fld_sz;
2145 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
2147 if (inst->opcode->iclass == asisdlse
2148 || inst->opcode->iclass == asisdlsep
2149 || inst->opcode->iclass == asisdlso
2150 || inst->opcode->iclass == asisdlsop)
2151 fld_sz = FLD_vldst_size;
2152 else
2153 fld_sz = FLD_size;
2155 code = inst->value;
2156 value = extract_fields (code, inst->opcode->mask, 2, fld_sz, FLD_Q);
2157 /* Work out which bits of the Q and size fields are actually
2158 available for operand encoding. Opcodes like FMAXNM and FMLA have
2159 size[1] unavailable. */
2160 mask = extract_fields (~inst->opcode->mask, 0, 2, fld_sz, FLD_Q);
2162 /* The index of the operand to tag with a qualifier, and the qualifier
2163 itself, are deduced from the value of the size and Q fields and the
2164 possible valid qualifier lists. */
2165 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
2166 DEBUG_TRACE ("key idx: %d", idx);
2168 /* For most related instructions, size:Q is fully available for operand
2169 encoding. */
2170 if (mask == 0x7)
2172 inst->operands[idx].qualifier = get_vreg_qualifier_from_value (value);
2173 return 1;
2176 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
2177 candidates);
2178 #ifdef DEBUG_AARCH64
2179 if (debug_dump)
2181 int i;
2182 for (i = 0; candidates[i] != AARCH64_OPND_QLF_NIL
2183 && i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
2184 DEBUG_TRACE ("qualifier %d: %s", i,
2185 aarch64_get_qualifier_name(candidates[i]));
2186 DEBUG_TRACE ("%d, %d", (int)value, (int)mask);
2188 #endif /* DEBUG_AARCH64 */
2190 qualifier = get_qualifier_from_partial_encoding (value, candidates, mask);
2192 if (qualifier == AARCH64_OPND_QLF_NIL)
2193 return 0;
2195 inst->operands[idx].qualifier = qualifier;
2196 return 1;
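/* An illustrative run of decode_sizeq (the qualifier numbering assumed here
   is the usual size:Q ordering where 0b000 is 8B and 0b111 is 2D): for an
   SHADD encoding with size = 0b01 and Q = 1, all three bits are free for
   operand encoding (mask == 0x7), so the tagged operand receives the value
   0b011, i.e. the 8H arrangement. */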
2199 /* Decode size[0]:Q, i.e. bit 22 and bit 30, for
2200 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
2202 static int
2203 decode_asimd_fcvt (aarch64_inst *inst)
2205 aarch64_field field = {0, 0};
2206 aarch64_insn value;
2207 enum aarch64_opnd_qualifier qualifier;
2209 gen_sub_field (FLD_size, 0, 1, &field);
2210 value = extract_field_2 (&field, inst->value, 0);
2211 qualifier = value == 0 ? AARCH64_OPND_QLF_V_4S
2212 : AARCH64_OPND_QLF_V_2D;
2213 switch (inst->opcode->op)
2215 case OP_FCVTN:
2216 case OP_FCVTN2:
2217 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
2218 inst->operands[1].qualifier = qualifier;
2219 break;
2220 case OP_FCVTL:
2221 case OP_FCVTL2:
2222 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
2223 inst->operands[0].qualifier = qualifier;
2224 break;
2225 default:
2226 return 0;
2229 return 1;
2232 /* Decode size[0], i.e. bit 22, for
2233 e.g. FCVTXN <Vb><d>, <Va><n>. */
2235 static int
2236 decode_asisd_fcvtxn (aarch64_inst *inst)
2238 aarch64_field field = {0, 0};
2239 gen_sub_field (FLD_size, 0, 1, &field);
2240 if (!extract_field_2 (&field, inst->value, 0))
2241 return 0;
2242 inst->operands[0].qualifier = AARCH64_OPND_QLF_S_S;
2243 return 1;
2246 /* Decode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
2247 static int
2248 decode_fcvt (aarch64_inst *inst)
2250 enum aarch64_opnd_qualifier qualifier;
2251 aarch64_insn value;
2252 const aarch64_field field = {15, 2};
2254 /* opc dstsize */
2255 value = extract_field_2 (&field, inst->value, 0);
2256 switch (value)
2258 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
2259 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
2260 case 3: qualifier = AARCH64_OPND_QLF_S_H; break;
2261 default: return 0;
2263 inst->operands[0].qualifier = qualifier;
2265 return 1;
2268 /* Do miscellaneous decodings that are not common enough to be driven by
2269 flags. */
2271 static int
2272 do_misc_decoding (aarch64_inst *inst)
2274 unsigned int value;
2275 switch (inst->opcode->op)
2277 case OP_FCVT:
2278 return decode_fcvt (inst);
2280 case OP_FCVTN:
2281 case OP_FCVTN2:
2282 case OP_FCVTL:
2283 case OP_FCVTL2:
2284 return decode_asimd_fcvt (inst);
2286 case OP_FCVTXN_S:
2287 return decode_asisd_fcvtxn (inst);
2289 case OP_MOV_P_P:
2290 case OP_MOVS_P_P:
2291 value = extract_field (FLD_SVE_Pn, inst->value, 0);
2292 return (value == extract_field (FLD_SVE_Pm, inst->value, 0)
2293 && value == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
2295 case OP_MOV_Z_P_Z:
2296 return (extract_field (FLD_SVE_Zd, inst->value, 0)
2297 == extract_field (FLD_SVE_Zm_16, inst->value, 0));
2299 case OP_MOV_Z_V:
2300 /* Index must be zero. */
2301 value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2302 return value > 0 && value <= 16 && value == (value & -value);
2304 case OP_MOV_Z_Z:
2305 return (extract_field (FLD_SVE_Zn, inst->value, 0)
2306 == extract_field (FLD_SVE_Zm_16, inst->value, 0));
2308 case OP_MOV_Z_Zi:
2309 /* Index must be nonzero. */
2310 value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2311 return value > 0 && value != (value & -value);
2313 case OP_MOVM_P_P_P:
2314 return (extract_field (FLD_SVE_Pd, inst->value, 0)
2315 == extract_field (FLD_SVE_Pm, inst->value, 0));
2317 case OP_MOVZS_P_P_P:
2318 case OP_MOVZ_P_P_P:
2319 return (extract_field (FLD_SVE_Pn, inst->value, 0)
2320 == extract_field (FLD_SVE_Pm, inst->value, 0));
2322 case OP_NOTS_P_P_P_Z:
2323 case OP_NOT_P_P_P_Z:
2324 return (extract_field (FLD_SVE_Pm, inst->value, 0)
2325 == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
2327 default:
2328 return 0;
2332 /* Opcodes that have fields shared by multiple operands are usually marked
2333 with flags. In this function, we detect such flags, decode the related
2334 field(s) and store the information in one of the related operands. The
2335 'one' operand is not an arbitrary operand, but one of the operands that can
2336 accommodate all the information that has been decoded. */
2338 static int
2339 do_special_decoding (aarch64_inst *inst)
2341 int idx;
2342 aarch64_insn value;
2343 /* Condition for truly conditionally executed instructions, e.g. b.cond. */
2344 if (inst->opcode->flags & F_COND)
2346 value = extract_field (FLD_cond2, inst->value, 0);
2347 inst->cond = get_cond_from_value (value);
2349 /* 'sf' field. */
2350 if (inst->opcode->flags & F_SF)
2352 idx = select_operand_for_sf_field_coding (inst->opcode);
2353 value = extract_field (FLD_sf, inst->value, 0);
2354 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2355 if ((inst->opcode->flags & F_N)
2356 && extract_field (FLD_N, inst->value, 0) != value)
2357 return 0;
2359 /* 'lse_sz' field (plays the same role as 'sf'). */
2360 if (inst->opcode->flags & F_LSE_SZ)
2362 idx = select_operand_for_sf_field_coding (inst->opcode);
2363 value = extract_field (FLD_lse_sz, inst->value, 0);
2364 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2366 /* size:Q fields. */
2367 if (inst->opcode->flags & F_SIZEQ)
2368 return decode_sizeq (inst);
2370 if (inst->opcode->flags & F_FPTYPE)
2372 idx = select_operand_for_fptype_field_coding (inst->opcode);
2373 value = extract_field (FLD_type, inst->value, 0);
2374 switch (value)
2376 case 0: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_S; break;
2377 case 1: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_D; break;
2378 case 3: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_H; break;
2379 default: return 0;
2383 if (inst->opcode->flags & F_SSIZE)
2385 /* N.B. some opcodes like FCMGT <V><d>, <V><n>, #0 have the size[1] as part
2386 of the base opcode. */
2387 aarch64_insn mask;
2388 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
2389 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
2390 value = extract_field (FLD_size, inst->value, inst->opcode->mask);
2391 mask = extract_field (FLD_size, ~inst->opcode->mask, 0);
2392 /* For most related instructions, the 'size' field is fully available for
2393 operand encoding. */
2394 if (mask == 0x3)
2395 inst->operands[idx].qualifier = get_sreg_qualifier_from_value (value);
2396 else
2398 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
2399 candidates);
2400 inst->operands[idx].qualifier
2401 = get_qualifier_from_partial_encoding (value, candidates, mask);
2405 if (inst->opcode->flags & F_T)
2407 /* Num of consecutive '0's on the right side of imm5<3:0>. */
2408 int num = 0;
2409 unsigned val, Q;
2410 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2411 == AARCH64_OPND_CLASS_SIMD_REG);
2412 /* imm5<3:0> q <t>
2413 0000 x reserved
2414 xxx1 0 8b
2415 xxx1 1 16b
2416 xx10 0 4h
2417 xx10 1 8h
2418 x100 0 2s
2419 x100 1 4s
2420 1000 0 reserved
2421 1000 1 2d */
2422 val = extract_field (FLD_imm5, inst->value, 0);
2423 while ((val & 0x1) == 0 && ++num <= 3)
2424 val >>= 1;
2425 if (num > 3)
2426 return 0;
2427 Q = (unsigned) extract_field (FLD_Q, inst->value, inst->opcode->mask);
2428 inst->operands[0].qualifier =
2429 get_vreg_qualifier_from_value ((num << 1) | Q);
2432 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
2434 /* Use Rt to encode in the case of e.g.
2435 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
2436 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
2437 if (idx == -1)
2439 /* Otherwise use the result operand, which has to be an integer
2440 register. */
2441 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2442 == AARCH64_OPND_CLASS_INT_REG);
2443 idx = 0;
2445 assert (idx == 0 || idx == 1);
2446 value = extract_field (FLD_Q, inst->value, 0);
2447 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2450 if (inst->opcode->flags & F_LDS_SIZE)
2452 aarch64_field field = {0, 0};
2453 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2454 == AARCH64_OPND_CLASS_INT_REG);
2455 gen_sub_field (FLD_opc, 0, 1, &field);
2456 value = extract_field_2 (&field, inst->value, 0);
2457 inst->operands[0].qualifier
2458 = value ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2461 /* Miscellaneous decoding; done as the last step. */
2462 if (inst->opcode->flags & F_MISC)
2463 return do_misc_decoding (inst);
2465 return 1;
2468 /* Converters converting a real opcode instruction to its alias form. */
2470 /* ROR <Wd>, <Ws>, #<shift>
2471 is equivalent to:
2472 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
2473 static int
2474 convert_extr_to_ror (aarch64_inst *inst)
2476 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2478 copy_operand_info (inst, 2, 3);
2479 inst->operands[3].type = AARCH64_OPND_NIL;
2480 return 1;
2482 return 0;
2485 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
2486 is equivalent to:
2487 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
2488 static int
2489 convert_shll_to_xtl (aarch64_inst *inst)
2491 if (inst->operands[2].imm.value == 0)
2493 inst->operands[2].type = AARCH64_OPND_NIL;
2494 return 1;
2496 return 0;
2499 /* Convert
2500 UBFM <Xd>, <Xn>, #<shift>, #63 to:
2502 LSR <Xd>, <Xn>, #<shift>. */
2503 static int
2504 convert_bfm_to_sr (aarch64_inst *inst)
2506 int64_t imms, val;
2508 imms = inst->operands[3].imm.value;
2509 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2510 if (imms == val)
2512 inst->operands[3].type = AARCH64_OPND_NIL;
2513 return 1;
2516 return 0;
2519 /* Convert MOV to ORR. */
2520 static int
2521 convert_orr_to_mov (aarch64_inst *inst)
2523 /* MOV <Vd>.<T>, <Vn>.<T>
2524 is equivalent to:
2525 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
2526 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2528 inst->operands[2].type = AARCH64_OPND_NIL;
2529 return 1;
2531 return 0;
2534 /* When <imms> >= <immr>, the instruction written:
2535 SBFX <Xd>, <Xn>, #<lsb>, #<width>
2536 is equivalent to:
2537 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
2539 static int
2540 convert_bfm_to_bfx (aarch64_inst *inst)
2542 int64_t immr, imms;
2544 immr = inst->operands[2].imm.value;
2545 imms = inst->operands[3].imm.value;
2546 if (imms >= immr)
2548 int64_t lsb = immr;
2549 inst->operands[2].imm.value = lsb;
2550 inst->operands[3].imm.value = imms + 1 - lsb;
2551 /* The two opcodes have different qualifiers for
2552 the immediate operands; reset to help the checking. */
2553 reset_operand_qualifier (inst, 2);
2554 reset_operand_qualifier (inst, 3);
2555 return 1;
2558 return 0;
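/* For example (illustrative): UBFM W0, W1, #5, #12 satisfies imms >= immr,
   so it is converted to the alias form UBFX W0, W1, #5, #8, where
   <lsb> = immr = 5 and <width> = imms + 1 - lsb = 8. */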
2561 /* When <imms> < <immr>, the instruction written:
2562 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
2563 is equivalent to:
2564 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
2566 static int
2567 convert_bfm_to_bfi (aarch64_inst *inst)
2569 int64_t immr, imms, val;
2571 immr = inst->operands[2].imm.value;
2572 imms = inst->operands[3].imm.value;
2573 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2574 if (imms < immr)
2576 inst->operands[2].imm.value = (val - immr) & (val - 1);
2577 inst->operands[3].imm.value = imms + 1;
2578 /* The two opcodes have different qualifiers for
2579 the immediate operands; reset to help the checking. */
2580 reset_operand_qualifier (inst, 2);
2581 reset_operand_qualifier (inst, 3);
2582 return 1;
2585 return 0;
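/* For example (illustrative): SBFM W0, W1, #28, #3 has imms < immr, so it
   becomes SBFIZ W0, W1, #4, #4, with <lsb> = (32 - 28) & 31 = 4 and
   <width> = imms + 1 = 4. */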
2588 /* The instruction written:
2589 BFC <Xd>, #<lsb>, #<width>
2590 is equivalent to:
2591 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
2593 static int
2594 convert_bfm_to_bfc (aarch64_inst *inst)
2596 int64_t immr, imms, val;
2598 /* Should have been assured by the base opcode value. */
2599 assert (inst->operands[1].reg.regno == 0x1f);
2601 immr = inst->operands[2].imm.value;
2602 imms = inst->operands[3].imm.value;
2603 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2604 if (imms < immr)
2606 /* Drop XZR from the second operand. */
2607 copy_operand_info (inst, 1, 2);
2608 copy_operand_info (inst, 2, 3);
2609 inst->operands[3].type = AARCH64_OPND_NIL;
2611 /* Recalculate the immediates. */
2612 inst->operands[1].imm.value = (val - immr) & (val - 1);
2613 inst->operands[2].imm.value = imms + 1;
2615 /* The two opcodes have different qualifiers for the operands; reset to
2616 help the checking. */
2617 reset_operand_qualifier (inst, 1);
2618 reset_operand_qualifier (inst, 2);
2619 reset_operand_qualifier (inst, 3);
2621 return 1;
2624 return 0;
2627 /* The instruction written:
2628 LSL <Xd>, <Xn>, #<shift>
2629 is equivalent to:
2630 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
2632 static int
2633 convert_ubfm_to_lsl (aarch64_inst *inst)
2635 int64_t immr = inst->operands[2].imm.value;
2636 int64_t imms = inst->operands[3].imm.value;
2637 int64_t val
2638 = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2640 if ((immr == 0 && imms == val) || immr == imms + 1)
2642 inst->operands[3].type = AARCH64_OPND_NIL;
2643 inst->operands[2].imm.value = val - imms;
2644 return 1;
2647 return 0;
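/* For example (illustrative): UBFM X0, X1, #60, #59 satisfies
   immr == imms + 1, so it is printed as LSL X0, X1, #4, the shift being
   val - imms = 63 - 59 = 4. */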
2650 /* CINC <Wd>, <Wn>, <cond>
2651 is equivalent to:
2652 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>)
2653 where <cond> is not AL or NV. */
2655 static int
2656 convert_from_csel (aarch64_inst *inst)
2658 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno
2659 && (inst->operands[3].cond->value & 0xe) != 0xe)
2661 copy_operand_info (inst, 2, 3);
2662 inst->operands[2].cond = get_inverted_cond (inst->operands[3].cond);
2663 inst->operands[3].type = AARCH64_OPND_NIL;
2664 return 1;
2666 return 0;
2669 /* CSET <Wd>, <cond>
2670 is equivalent to:
2671 CSINC <Wd>, WZR, WZR, invert(<cond>)
2672 where <cond> is not AL or NV. */
2674 static int
2675 convert_csinc_to_cset (aarch64_inst *inst)
2677 if (inst->operands[1].reg.regno == 0x1f
2678 && inst->operands[2].reg.regno == 0x1f
2679 && (inst->operands[3].cond->value & 0xe) != 0xe)
2681 copy_operand_info (inst, 1, 3);
2682 inst->operands[1].cond = get_inverted_cond (inst->operands[3].cond);
2683 inst->operands[3].type = AARCH64_OPND_NIL;
2684 inst->operands[2].type = AARCH64_OPND_NIL;
2685 return 1;
2687 return 0;
2690 /* MOV <Wd>, #<imm>
2691 is equivalent to:
2692 MOVZ <Wd>, #<imm16>, LSL #<shift>.
2694 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2695 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2696 or where a MOVN has an immediate that could be encoded by MOVZ, or where
2697 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2698 machine-instruction mnemonic must be used. */
2700 static int
2701 convert_movewide_to_mov (aarch64_inst *inst)
2703 uint64_t value = inst->operands[1].imm.value;
2704 /* MOVZ/MOVN #0 have a shift amount other than LSL #0. */
2705 if (value == 0 && inst->operands[1].shifter.amount != 0)
2706 return 0;
2707 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2708 inst->operands[1].shifter.kind = AARCH64_MOD_NONE;
2709 value <<= inst->operands[1].shifter.amount;
2710 /* As this is an alias converter, bear in mind that INST->OPCODE
2711 is the opcode of the real instruction. */
2712 if (inst->opcode->op == OP_MOVN)
2714 int is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2715 value = ~value;
2716 /* A MOVN has an immediate that could be encoded by MOVZ. */
2717 if (aarch64_wide_constant_p (value, is32, NULL))
2718 return 0;
2720 inst->operands[1].imm.value = value;
2721 inst->operands[1].shifter.amount = 0;
2722 return 1;
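/* For example (illustrative): MOVZ X0, #0x1234, LSL #16 is shown as
   MOV X0, #0x12340000, whereas MOVN X0, #0x0, LSL #16 keeps its
   machine-instruction mnemonic because of the zero-with-nonzero-shift
   check above. */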
2725 /* MOV <Wd>, #<imm>
2726 is equivalent to:
2727 ORR <Wd>, WZR, #<imm>.
2729 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2730 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2731 or where a MOVN has an immediate that could be encoded by MOVZ, or where
2732 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2733 machine-instruction mnemonic must be used. */
2735 static int
2736 convert_movebitmask_to_mov (aarch64_inst *inst)
2738 int is32;
2739 uint64_t value;
2741 /* Should have been assured by the base opcode value. */
2742 assert (inst->operands[1].reg.regno == 0x1f);
2743 copy_operand_info (inst, 1, 2);
2744 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2745 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2746 value = inst->operands[1].imm.value;
2747 /* ORR has an immediate that could be generated by a MOVZ or MOVN
2748 instruction. */
2749 if (inst->operands[0].reg.regno != 0x1f
2750 && (aarch64_wide_constant_p (value, is32, NULL)
2751 || aarch64_wide_constant_p (~value, is32, NULL)))
2752 return 0;
2754 inst->operands[2].type = AARCH64_OPND_NIL;
2755 return 1;
2758 /* Some alias opcodes are disassembled by being converted from their real form.
2759 N.B. INST->OPCODE is the real opcode rather than the alias. */
2761 static int
2762 convert_to_alias (aarch64_inst *inst, const aarch64_opcode *alias)
2764 switch (alias->op)
2766 case OP_ASR_IMM:
2767 case OP_LSR_IMM:
2768 return convert_bfm_to_sr (inst);
2769 case OP_LSL_IMM:
2770 return convert_ubfm_to_lsl (inst);
2771 case OP_CINC:
2772 case OP_CINV:
2773 case OP_CNEG:
2774 return convert_from_csel (inst);
2775 case OP_CSET:
2776 case OP_CSETM:
2777 return convert_csinc_to_cset (inst);
2778 case OP_UBFX:
2779 case OP_BFXIL:
2780 case OP_SBFX:
2781 return convert_bfm_to_bfx (inst);
2782 case OP_SBFIZ:
2783 case OP_BFI:
2784 case OP_UBFIZ:
2785 return convert_bfm_to_bfi (inst);
2786 case OP_BFC:
2787 return convert_bfm_to_bfc (inst);
2788 case OP_MOV_V:
2789 return convert_orr_to_mov (inst);
2790 case OP_MOV_IMM_WIDE:
2791 case OP_MOV_IMM_WIDEN:
2792 return convert_movewide_to_mov (inst);
2793 case OP_MOV_IMM_LOG:
2794 return convert_movebitmask_to_mov (inst);
2795 case OP_ROR_IMM:
2796 return convert_extr_to_ror (inst);
2797 case OP_SXTL:
2798 case OP_SXTL2:
2799 case OP_UXTL:
2800 case OP_UXTL2:
2801 return convert_shll_to_xtl (inst);
2802 default:
2803 return 0;
2807 static bool
2808 aarch64_opcode_decode (const aarch64_opcode *, const aarch64_insn,
2809 aarch64_inst *, int, aarch64_operand_error *errors);
2811 /* Given the instruction information in *INST, check if the instruction has
2812 any alias form that can be used to represent *INST. If the answer is yes,
2813 update *INST to be in the form of the determined alias. */
2815 /* In the opcode description table, the following flags are used in opcode
2816 entries to help establish the relations between the real and alias opcodes:
2818 F_ALIAS: opcode is an alias
2819 F_HAS_ALIAS: opcode has alias(es)
2820 F_P1
2821 F_P2
2822 F_P3: Disassembly preference priority 1-3 (the larger the
2823 number, the higher the priority). If nothing is specified,
2824 the priority defaults to 0, i.e. the lowest priority.
2826 Although the relation between the machine and the alias instructions is not
2827 explicitly described, it can easily be determined from the base opcode
2828 values, masks and the flags F_ALIAS and F_HAS_ALIAS in their opcode
2829 description entries:
2831 The mask of an alias opcode must be equal to or a super-set (i.e. more
2832 constrained) of that of the aliased opcode; so is the base opcode value.
2834 if (opcode_has_alias (real) && alias_opcode_p (opcode)
2835 && (opcode->mask & real->mask) == real->mask
2836 && (real->mask & opcode->opcode) == (real->mask & real->opcode))
2837 then OPCODE is an alias of, and only of, the REAL instruction
2839 The alias relationship is deliberately kept flat to keep the related algorithm
2840 simple; an opcode entry cannot be flagged with both F_ALIAS and F_HAS_ALIAS.
2842 During disassembly, the decoding decision tree (in
2843 opcodes/aarch64-dis-2.c) always returns a machine instruction opcode entry;
2844 if the decoding of such a machine instruction succeeds (and -Mno-aliases is
2845 not specified), the disassembler will check whether any alias
2846 instruction exists for this real instruction. If there is, the disassembler
2847 will try to disassemble the 32-bit binary again using the alias's rule, or
2848 try to convert the IR to the form of the alias. In the case of multiple
2849 aliases, the aliases are tried one by one from the highest priority
2850 (currently the flag F_P3) to the lowest priority (no priority flag), and the
2851 first one that succeeds is adopted.
2853 You may ask why there is a need to convert the IR from one form to
2854 another when handling certain aliases. On the one hand, it avoids
2855 adding more operand code to handle unusual encoding/decoding; on the other
2856 hand, during disassembly the conversion is an effective way to
2857 check the condition of an alias (as an alias may be adopted only if certain
2858 conditions are met).
2860 In order to speed up the alias opcode lookup, aarch64-gen has preprocessed
2861 aarch64_opcode_table and generated aarch64_find_alias_opcode and
2862 aarch64_find_next_alias_opcode (in opcodes/aarch64-dis-2.c) to help. */
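/* As a concrete illustration of the scheme described above (the flag
   placement on individual entries lives in opcodes/aarch64-tbl.h and is
   only assumed here): UBFM is a real opcode carrying F_HAS_ALIAS, while the
   LSR (immediate) entry is flagged F_ALIAS | F_CONV; when a UBFM encoding
   is decoded, determine_disassembling_preference below walks the alias list
   and convert_bfm_to_sr accepts the conversion only when imms is 31 or 63,
   i.e. exactly when the LSR form can express the instruction. */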
2864 static void
2865 determine_disassembling_preference (struct aarch64_inst *inst,
2866 aarch64_operand_error *errors)
2868 const aarch64_opcode *opcode;
2869 const aarch64_opcode *alias;
2871 opcode = inst->opcode;
2873 /* This opcode does not have an alias, so use itself. */
2874 if (!opcode_has_alias (opcode))
2875 return;
2877 alias = aarch64_find_alias_opcode (opcode);
2878 assert (alias);
2880 #ifdef DEBUG_AARCH64
2881 if (debug_dump)
2883 const aarch64_opcode *tmp = alias;
2884 printf ("#### LIST ordered: ");
2885 while (tmp)
2887 printf ("%s, ", tmp->name);
2888 tmp = aarch64_find_next_alias_opcode (tmp);
2890 printf ("\n");
2892 #endif /* DEBUG_AARCH64 */
2894 for (; alias; alias = aarch64_find_next_alias_opcode (alias))
2896 DEBUG_TRACE ("try %s", alias->name);
2897 assert (alias_opcode_p (alias) || opcode_has_alias (opcode));
2899 /* An alias can be a pseudo opcode which will never be used in the
2900 disassembly, e.g. BIC logical immediate is such a pseudo opcode
2901 aliasing AND. */
2902 if (pseudo_opcode_p (alias))
2904 DEBUG_TRACE ("skip pseudo %s", alias->name);
2905 continue;
2908 if ((inst->value & alias->mask) != alias->opcode)
2910 DEBUG_TRACE ("skip %s as base opcode not match", alias->name);
2911 continue;
2914 if (!AARCH64_CPU_HAS_FEATURE (arch_variant, *alias->avariant))
2916 DEBUG_TRACE ("skip %s: we're missing features", alias->name);
2917 continue;
2920 /* No need to do any complicated transformation on the operands if the alias
2921 opcode does not have any operands. */
2922 if (aarch64_num_of_operands (alias) == 0 && alias->opcode == inst->value)
2924 DEBUG_TRACE ("succeed with 0-operand opcode %s", alias->name);
2925 aarch64_replace_opcode (inst, alias);
2926 return;
2928 if (alias->flags & F_CONV)
2930 aarch64_inst copy;
2931 memcpy (&copy, inst, sizeof (aarch64_inst));
2932 /* ALIAS is the preference as long as the instruction can be
2933 successfully converted to the form of ALIAS. */
2934 if (convert_to_alias (&copy, alias) == 1)
2936 aarch64_replace_opcode (&copy, alias);
2937 if (aarch64_match_operands_constraint (&copy, NULL) != 1)
2939 DEBUG_TRACE ("FAILED with alias %s ", alias->name);
2941 else
2943 DEBUG_TRACE ("succeed with %s via conversion", alias->name);
2944 memcpy (inst, &copy, sizeof (aarch64_inst));
2946 return;
2949 else
2951 /* Directly decode the alias opcode. */
2952 aarch64_inst temp;
2953 memset (&temp, '\0', sizeof (aarch64_inst));
2954 if (aarch64_opcode_decode (alias, inst->value, &temp, 1, errors) == 1)
2956 DEBUG_TRACE ("succeed with %s via direct decoding", alias->name);
2957 memcpy (inst, &temp, sizeof (aarch64_inst));
2958 return;
2964 /* Some instructions (including all SVE ones) use the instruction class
2965 to describe how a qualifiers_list index is represented in the instruction
2966 encoding. If INST is such an instruction, decode the appropriate fields
2967 and fill in the operand qualifiers accordingly. Return true if no
2968 problems are found. */
2970 static bool
2971 aarch64_decode_variant_using_iclass (aarch64_inst *inst)
2973 int i, variant;
2975 variant = 0;
2976 switch (inst->opcode->iclass)
2978 case sve_cpy:
2979 variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_14);
2980 break;
2982 case sve_index:
2983 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2984 if ((i & 31) == 0)
2985 return false;
2986 while ((i & 1) == 0)
2988 i >>= 1;
2989 variant += 1;
2991 break;
2993 case sve_limm:
2994 /* Pick the smallest applicable element size. */
2995 if ((inst->value & 0x20600) == 0x600)
2996 variant = 0;
2997 else if ((inst->value & 0x20400) == 0x400)
2998 variant = 1;
2999 else if ((inst->value & 0x20000) == 0)
3000 variant = 2;
3001 else
3002 variant = 3;
3003 break;
3005 case sve_misc:
3006 /* sve_misc instructions have only a single variant. */
3007 break;
3009 case sve_movprfx:
3010 variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_16);
3011 break;
3013 case sve_pred_zm:
3014 variant = extract_field (FLD_SVE_M_4, inst->value, 0);
3015 break;
3017 case sve_shift_pred:
3018 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_8);
3019 sve_shift:
3020 if (i == 0)
3021 return false;
3022 while (i != 1)
3024 i >>= 1;
3025 variant += 1;
3027 break;
3029 case sve_shift_unpred:
3030 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_19);
3031 goto sve_shift;
3033 case sve_size_bhs:
3034 variant = extract_field (FLD_size, inst->value, 0);
3035 if (variant >= 3)
3036 return false;
3037 break;
3039 case sve_size_bhsd:
3040 variant = extract_field (FLD_size, inst->value, 0);
3041 break;
3043 case sve_size_hsd:
3044 i = extract_field (FLD_size, inst->value, 0);
3045 if (i < 1)
3046 return false;
3047 variant = i - 1;
3048 break;
3050 case sve_size_bh:
3051 case sve_size_sd:
3052 variant = extract_field (FLD_SVE_sz, inst->value, 0);
3053 break;
3055 case sve_size_sd2:
3056 variant = extract_field (FLD_SVE_sz2, inst->value, 0);
3057 break;
3059 case sve_size_hsd2:
3060 i = extract_field (FLD_SVE_size, inst->value, 0);
3061 if (i < 1)
3062 return false;
3063 variant = i - 1;
3064 break;
3066 case sve_size_13:
3067 /* Ignore low bit of this field since that is set in the opcode for
3068 instructions of this iclass. */
3069 i = (extract_field (FLD_size, inst->value, 0) & 2);
3070 variant = (i >> 1);
3071 break;
3073 case sve_shift_tsz_bhsd:
3074 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_19);
3075 if (i == 0)
3076 return false;
3077 while (i != 1)
3079 i >>= 1;
3080 variant += 1;
3082 break;
3084 case sve_size_tsz_bhs:
3085 i = extract_fields (inst->value, 0, 2, FLD_SVE_sz, FLD_SVE_tszl_19);
3086 if (i == 0)
3087 return false;
3088 while (i != 1)
3090 if (i & 1)
3091 return false;
3092 i >>= 1;
3093 variant += 1;
3095 break;
3097 case sve_shift_tsz_hsd:
3098 i = extract_fields (inst->value, 0, 2, FLD_SVE_sz, FLD_SVE_tszl_19);
3099 if (i == 0)
3100 return false;
3101 while (i != 1)
3103 i >>= 1;
3104 variant += 1;
3106 break;
3108 default:
3109 /* No mapping between instruction class and qualifiers. */
3110 return true;
3113 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3114 inst->operands[i].qualifier = inst->opcode->qualifiers_list[variant][i];
3115 return true;
3117 /* Decode CODE according to OPCODE; fill in INST. Return 0 if the decoding
3118 fails, which means that CODE is not an instruction of OPCODE; otherwise
3119 return 1.
3121 If OPCODE has alias(es) and NOALIASES_P is 0, an alias opcode may be
3122 determined and used to disassemble CODE; this is done just before the
3123 return. */
3125 static bool
3126 aarch64_opcode_decode (const aarch64_opcode *opcode, const aarch64_insn code,
3127 aarch64_inst *inst, int noaliases_p,
3128 aarch64_operand_error *errors)
3130 int i;
3132 DEBUG_TRACE ("enter with %s", opcode->name);
3134 assert (opcode && inst);
3136 /* Clear inst. */
3137 memset (inst, '\0', sizeof (aarch64_inst));
3139 /* Check the base opcode. */
3140 if ((code & opcode->mask) != (opcode->opcode & opcode->mask))
3142 DEBUG_TRACE ("base opcode match FAIL");
3143 goto decode_fail;
3146 inst->opcode = opcode;
3147 inst->value = code;
3149 /* Assign operand codes and indexes. */
3150 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3152 if (opcode->operands[i] == AARCH64_OPND_NIL)
3153 break;
3154 inst->operands[i].type = opcode->operands[i];
3155 inst->operands[i].idx = i;
3158 /* Call the opcode decoder indicated by flags. */
3159 if (opcode_has_special_coder (opcode) && do_special_decoding (inst) == 0)
3161 DEBUG_TRACE ("opcode flag-based decoder FAIL");
3162 goto decode_fail;
3165 /* Possibly use the instruction class to determine the correct
3166 qualifier. */
3167 if (!aarch64_decode_variant_using_iclass (inst))
3169 DEBUG_TRACE ("iclass-based decoder FAIL");
3170 goto decode_fail;
3173 /* Call operand decoders. */
3174 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3176 const aarch64_operand *opnd;
3177 enum aarch64_opnd type;
3179 type = opcode->operands[i];
3180 if (type == AARCH64_OPND_NIL)
3181 break;
3182 opnd = &aarch64_operands[type];
3183 if (operand_has_extractor (opnd)
3184 && (! aarch64_extract_operand (opnd, &inst->operands[i], code, inst,
3185 errors)))
3187 DEBUG_TRACE ("operand decoder FAIL at operand %d", i);
3188 goto decode_fail;
3192 /* If the opcode has a verifier, then check it now. */
3193 if (opcode->verifier
3194 && opcode->verifier (inst, code, 0, false, errors, NULL) != ERR_OK)
3196 DEBUG_TRACE ("operand verifier FAIL");
3197 goto decode_fail;
3200 /* Match the qualifiers. */
3201 if (aarch64_match_operands_constraint (inst, NULL) == 1)
3203 /* Arriving here, the CODE has been determined as a valid instruction
3204 of OPCODE and *INST has been filled with information of this OPCODE
3205 instruction. Before the return, check if the instruction has any
3206 alias and should be disassembled in the form of its alias instead.
3207 If the answer is yes, *INST will be updated. */
3208 if (!noaliases_p)
3209 determine_disassembling_preference (inst, errors);
3210 DEBUG_TRACE ("SUCCESS");
3211 return true;
3213 else
3215 DEBUG_TRACE ("constraint matching FAIL");
3218 decode_fail:
3219 return false;
3222 /* This does some user-friendly fix-up to *INST. It currently focuses on
3223 the adjustment of qualifiers to help the printed instruction be
3224 recognized/understood more easily. */
3226 static void
3227 user_friendly_fixup (aarch64_inst *inst)
3229 switch (inst->opcode->iclass)
3231 case testbranch:
3232 /* TBNZ Xn|Wn, #uimm6, label
3233 Test and Branch Not Zero: conditionally jumps to label if bit number
3234 uimm6 in register Xn is not zero. The bit number implies the width of
3235 the register, which may be written, and should be disassembled, as Wn if
3236 uimm6 is less than 32. Limited to a branch offset range of +/- 32KiB. */
3238 if (inst->operands[1].imm.value < 32)
3239 inst->operands[0].qualifier = AARCH64_OPND_QLF_W;
3240 break;
3241 default: break;
3245 /* Decode INSN and fill in *INST with the instruction information. An alias
3246 opcode may be filled in *INST if NOALIASES_P is FALSE. Return zero on
3247 success. */
3249 enum err_type
3250 aarch64_decode_insn (aarch64_insn insn, aarch64_inst *inst,
3251 bool noaliases_p,
3252 aarch64_operand_error *errors)
3254 const aarch64_opcode *opcode = aarch64_opcode_lookup (insn);
3256 #ifdef DEBUG_AARCH64
3257 if (debug_dump)
3259 const aarch64_opcode *tmp = opcode;
3260 printf ("\n");
3261 DEBUG_TRACE ("opcode lookup:");
3262 while (tmp != NULL)
3264 aarch64_verbose (" %s", tmp->name);
3265 tmp = aarch64_find_next_opcode (tmp);
3268 #endif /* DEBUG_AARCH64 */
3270 /* A list of opcodes may have been found, as aarch64_opcode_lookup cannot
3271 distinguish some opcodes, e.g. SSHR and MOVI, which almost share the same
3272 opcode field and value, apart from the difference that one of them has an
3273 extra field as part of the opcode, but such a field is used for operand
3274 encoding in other opcode(s) ('immh' in the case of the example). */
3275 while (opcode != NULL)
3277 /* But only one opcode can be decoded successfully, as the
3278 decoding routine checks the constraints carefully. */
3279 if (aarch64_opcode_decode (opcode, insn, inst, noaliases_p, errors) == 1)
3280 return ERR_OK;
3281 opcode = aarch64_find_next_opcode (opcode);
3284 return ERR_UND;
3287 /* Return a short string to indicate a switch to STYLE. These strings
3288 will be embedded into the disassembled operand text (as produced by
3289 aarch64_print_operand), and then spotted in the print_operands function
3290 so that the disassembler output can be split by style. */
3292 static const char *
3293 get_style_text (enum disassembler_style style)
3295 static bool init = false;
3296 static char formats[16][4];
3297 unsigned num;
3299 /* First time through we build a string for every possible format. This
3300 code relies on there being no more than 16 different styles (there's
3301 an assert below for this). */
3302 if (!init)
3304 int i;
3306 for (i = 0; i <= 0xf; ++i)
3308 int res = snprintf (&formats[i][0], sizeof (formats[i]), "%c%x%c",
3309 STYLE_MARKER_CHAR, i, STYLE_MARKER_CHAR);
3310 assert (res == 3);
3313 init = true;
3316 /* Return the string that marks switching to STYLE. */
3317 num = (unsigned) style;
3318 assert (style <= 0xf);
3319 return formats[num];
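/* For example (illustrative): for a style whose numeric value is 5 the
   returned string is "\0025\002", i.e. STYLE_MARKER_CHAR, one hex digit and
   STYLE_MARKER_CHAR again; print_operands scans for exactly this
   three-character pattern when splitting operand text by style. */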
3322 /* Callback used by aarch64_print_operand to apply STYLE to the
3323 disassembler output created from FMT and ARGS. The STYLER object holds
3324 any required state. Must return a pointer to a string (created from FMT
3325 and ARGS) that will continue to be valid until the complete disassembled
3326 instruction has been printed.
3328 We return a string that includes two embedded style markers: the first,
3329 placed at the start of the string, indicates a switch to STYLE, and the
3330 second, placed at the end of the string, indicates a switch back to the
3331 default text style.
3333 Later, when we print the operand text we take care to collapse any
3334 adjacent style markers, and to ignore any style markers that appear at
3335 the very end of a complete operand string. */
3337 static const char *aarch64_apply_style (struct aarch64_styler *styler,
3338 enum disassembler_style style,
3339 const char *fmt,
3340 va_list args)
3342 int res;
3343 char *ptr, *tmp;
3344 struct obstack *stack = (struct obstack *) styler->state;
3345 va_list ap;
3347 /* These are the two strings for switching styles. */
3348 const char *style_on = get_style_text (style);
3349 const char *style_off = get_style_text (dis_style_text);
3351 /* Calculate space needed once FMT and ARGS are expanded. */
3352 va_copy (ap, args);
3353 res = vsnprintf (NULL, 0, fmt, ap);
3354 va_end (ap);
3355 assert (res >= 0);
3357 /* Allocate space on the obstack for the expanded FMT and ARGS, as well
3358 as the two strings for switching styles, then write all of these
3359 strings onto the obstack. */
3360 ptr = (char *) obstack_alloc (stack, res + strlen (style_on)
3361 + strlen (style_off) + 1);
3362 tmp = stpcpy (ptr, style_on);
3363 res = vsnprintf (tmp, (res + 1), fmt, args);
3364 assert (res >= 0);
3365 tmp += res;
3366 strcpy (tmp, style_off);
3368 return ptr;
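/* As an illustration (the format string and argument are invented; the
   layout follows from the code above): applying the register style to
   "x%d" with the argument 3 yields "<on>x3<off>", where <on> and <off> are
   the three-character markers returned by get_style_text for the requested
   style and for dis_style_text respectively. */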
3371 /* Print operands. */
3373 static void
3374 print_operands (bfd_vma pc, const aarch64_opcode *opcode,
3375 const aarch64_opnd_info *opnds, struct disassemble_info *info,
3376 bool *has_notes)
3378 char *notes = NULL;
3379 int i, pcrel_p, num_printed;
3380 struct aarch64_styler styler;
3381 struct obstack content;
3382 obstack_init (&content);
3384 styler.apply_style = aarch64_apply_style;
3385 styler.state = (void *) &content;
3387 for (i = 0, num_printed = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3389 char str[128];
3390 char cmt[128];
3392 /* We rely primarily on the opcode's operand info, but we also look at
3393 inst->operands to support the disassembling of optional
3394 operands.
3395 The two operand codes should be the same in all cases, apart from
3396 when the operand can be optional. */
3397 if (opcode->operands[i] == AARCH64_OPND_NIL
3398 || opnds[i].type == AARCH64_OPND_NIL)
3399 break;
3401 /* Generate the operand string in STR. */
3402 aarch64_print_operand (str, sizeof (str), pc, opcode, opnds, i, &pcrel_p,
3403 &info->target, &notes, cmt, sizeof (cmt),
3404 arch_variant, &styler);
3406 /* Print the delimiter (taking account of omitted operand(s)). */
3407 if (str[0] != '\0')
3408 (*info->fprintf_styled_func) (info->stream, dis_style_text, "%s",
3409 num_printed++ == 0 ? "\t" : ", ");
3411 /* Print the operand. */
3412 if (pcrel_p)
3413 (*info->print_address_func) (info->target, info);
3414 else
3416 /* This operand came from aarch64_print_operand, and will include
3417 embedded strings indicating which style each character should
3418 have. In the following code we split the text based on
3419 CURR_STYLE, and call the styled print callback to print each
3420 block of text in the appropriate style. */
3421 char *start, *curr;
3422 enum disassembler_style curr_style = dis_style_text;
3424 start = curr = str;
3426 do
3427 if (*curr == '\0'
3428 || (*curr == STYLE_MARKER_CHAR
3429 && ISXDIGIT (*(curr + 1))
3430 && *(curr + 2) == STYLE_MARKER_CHAR))
3432 /* Output content between our START position and CURR. */
3433 int len = curr - start;
3434 if (len > 0)
3436 if ((*info->fprintf_styled_func) (info->stream,
3437 curr_style,
3438 "%.*s",
3439 len, start) < 0)
3440 break;
3443 if (*curr == '\0')
3444 break;
3446 /* Skip over the initial STYLE_MARKER_CHAR. */
3447 ++curr;
3449 /* Update CURR_STYLE. As there are fewer than 16
3450 styles, it is possible that, if the input is corrupted
3451 in some way, we might set CURR_STYLE to an
3452 invalid value. Don't worry though; we check for this
3453 situation below. */
3454 if (*curr >= '0' && *curr <= '9')
3455 curr_style = (enum disassembler_style) (*curr - '0');
3456 else if (*curr >= 'a' && *curr <= 'f')
3457 curr_style = (enum disassembler_style) (*curr - 'a' + 10);
3458 else
3459 curr_style = dis_style_text;
3461 /* Check for an invalid style having been selected. This
3462 should never happen, but it doesn't hurt to be a
3463 little paranoid. */
3464 if (curr_style > dis_style_comment_start)
3465 curr_style = dis_style_text;
3467 /* Skip the hex character, and the closing STYLE_MARKER_CHAR. */
3468 curr += 2;
3470 /* Reset the START to after the style marker. */
3471 start = curr;
3473 else
3474 ++curr;
3476 while (true);
3479 /* Print the comment. This works because only the last operand ever
3480 adds a comment. If that ever changes then we'll need to be
3481 smarter here. */
3482 if (cmt[0] != '\0')
3483 (*info->fprintf_styled_func) (info->stream, dis_style_comment_start,
3484 "\t// %s", cmt);
3487 if (notes && !no_notes)
3489 *has_notes = true;
3490 (*info->fprintf_styled_func) (info->stream, dis_style_comment_start,
3491 " // note: %s", notes);
3494 obstack_free (&content, NULL);
3497 /* Set NAME to a copy of INST's mnemonic with the "." suffix removed. */
3499 static void
3500 remove_dot_suffix (char *name, const aarch64_inst *inst)
3502 char *ptr;
3503 size_t len;
3505 ptr = strchr (inst->opcode->name, '.');
3506 assert (ptr && inst->cond);
3507 len = ptr - inst->opcode->name;
3508 assert (len < 8);
3509 strncpy (name, inst->opcode->name, len);
3510 name[len] = '\0';
3513 /* Print the instruction mnemonic name. */
3515 static void
3516 print_mnemonic_name (const aarch64_inst *inst, struct disassemble_info *info)
3518 if (inst->opcode->flags & F_COND)
3520 /* For instructions that are truly conditionally executed, e.g. b.cond,
3521 prepare the full mnemonic name with the corresponding condition
3522 suffix. */
3523 char name[8];
3525 remove_dot_suffix (name, inst);
3526 (*info->fprintf_styled_func) (info->stream, dis_style_mnemonic,
3527 "%s.%s", name, inst->cond->names[0]);
3529 else
3530 (*info->fprintf_styled_func) (info->stream, dis_style_mnemonic,
3531 "%s", inst->opcode->name);
3534 /* Decide whether we need to print a comment after the operands of
3535 instruction INST. */
3537 static void
3538 print_comment (const aarch64_inst *inst, struct disassemble_info *info)
3540 if (inst->opcode->flags & F_COND)
3542 char name[8];
3543 unsigned int i, num_conds;
3545 remove_dot_suffix (name, inst);
3546 num_conds = ARRAY_SIZE (inst->cond->names);
3547 for (i = 1; i < num_conds && inst->cond->names[i]; ++i)
3548 (*info->fprintf_styled_func) (info->stream, dis_style_comment_start,
3549 "%s %s.%s",
3550 i == 1 ? " //" : ",",
3551 name, inst->cond->names[i]);
3555 /* Build notes from verifiers into a string for printing. */
3557 static void
3558 print_verifier_notes (aarch64_operand_error *detail,
3559 struct disassemble_info *info)
3561 if (no_notes)
3562 return;
3564 /* The output of the verifier cannot be a fatal error, otherwise the assembly
3565 would not have succeeded. We can safely ignore these. */
3566 assert (detail->non_fatal);
3568 (*info->fprintf_styled_func) (info->stream, dis_style_comment_start,
3569 " // note: ");
3570 switch (detail->kind)
3572 case AARCH64_OPDE_A_SHOULD_FOLLOW_B:
3573 (*info->fprintf_styled_func) (info->stream, dis_style_text,
3574 _("this `%s' should have an immediately"
3575 " preceding `%s'"),
3576 detail->data[0].s, detail->data[1].s);
3577 break;
3579 case AARCH64_OPDE_EXPECTED_A_AFTER_B:
3580 (*info->fprintf_styled_func) (info->stream, dis_style_text,
3581 _("expected `%s' after previous `%s'"),
3582 detail->data[0].s, detail->data[1].s);
3583 break;
3585 default:
3586 assert (detail->error);
3587 (*info->fprintf_styled_func) (info->stream, dis_style_text,
3588 "%s", detail->error);
3589 if (detail->index >= 0)
3590 (*info->fprintf_styled_func) (info->stream, dis_style_text,
3591 " at operand %d", detail->index + 1);
3592 break;
3596 /* Print the instruction according to *INST. */
3598 static void
3599 print_aarch64_insn (bfd_vma pc, const aarch64_inst *inst,
3600 const aarch64_insn code,
3601 struct disassemble_info *info,
3602 aarch64_operand_error *mismatch_details)
3604 bool has_notes = false;
3606 print_mnemonic_name (inst, info);
3607 print_operands (pc, inst->opcode, inst->operands, info, &has_notes);
3608 print_comment (inst, info);
3610 /* We've already printed a note; there is not enough space to print more, so exit.
3611 Usually notes shouldn't overlap, so it shouldn't happen that we have a note
3612 from a register and an instruction at the same time. */
3613 if (has_notes)
3614 return;
3616 /* Always run the constraint verifiers; this is needed because constraints need to
3617 maintain global state regardless of whether the instruction has the flag
3618 set or not. */
3619 enum err_type result = verify_constraints (inst, code, pc, false,
3620 mismatch_details, &insn_sequence);
3621 switch (result)
3623 case ERR_VFI:
3624 print_verifier_notes (mismatch_details, info);
3625 break;
3626 case ERR_UND:
3627 case ERR_UNP:
3628 case ERR_NYI:
3629 default:
3630 break;
3634 /* Entry-point of the instruction disassembler and printer. */
3636 static void
3637 print_insn_aarch64_word (bfd_vma pc,
3638 uint32_t word,
3639 struct disassemble_info *info,
3640 aarch64_operand_error *errors)
3642 static const char *err_msg[ERR_NR_ENTRIES+1] =
3644 [ERR_OK] = "_",
3645 [ERR_UND] = "undefined",
3646 [ERR_UNP] = "unpredictable",
3647 [ERR_NYI] = "NYI"
3650 enum err_type ret;
3651 aarch64_inst inst;
3653 info->insn_info_valid = 1;
3654 info->branch_delay_insns = 0;
3655 info->data_size = 0;
3656 info->target = 0;
3657 info->target2 = 0;
3659 if (info->flags & INSN_HAS_RELOC)
3660 /* If the instruction has a reloc associated with it, then
3661 the offset field in the instruction will actually be the
3662 addend for the reloc. (If we are using REL type relocs).
3663 In such cases, we can ignore the pc when computing
3664 addresses, since the addend is not currently pc-relative. */
3665 pc = 0;
3667 ret = aarch64_decode_insn (word, &inst, no_aliases, errors);
3669 if (((word >> 21) & 0x3ff) == 1)
3671 /* RESERVED for ALES. */
3672 assert (ret != ERR_OK);
3673 ret = ERR_NYI;
3676 switch (ret)
3678 case ERR_UND:
3679 case ERR_UNP:
3680 case ERR_NYI:
3681 /* Handle undefined instructions. */
3682 info->insn_type = dis_noninsn;
3683 (*info->fprintf_styled_func) (info->stream,
3684 dis_style_assembler_directive,
3685 ".inst\t");
3686 (*info->fprintf_styled_func) (info->stream, dis_style_immediate,
3687 "0x%08x", word);
3688 (*info->fprintf_styled_func) (info->stream, dis_style_comment_start,
3689 " ; %s", err_msg[ret]);
3690 break;
3691 case ERR_OK:
3692 user_friendly_fixup (&inst);
3693 print_aarch64_insn (pc, &inst, word, info, errors);
3694 break;
3695 default:
3696 abort ();
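/* For a word that fails to decode (ERR_UND/ERR_UNP/ERR_NYI) the fallback
   path above emits the raw encoding as data, e.g. (the hex value below is
   just a placeholder for the undecodable word):

     .inst	0xdeadbeef	 ; undefined  */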
3700 /* Prevent mapping symbols ($x, $d, etc.) from
3701 being displayed in symbol-relative addresses. */
3703 bool
3704 aarch64_symbol_is_valid (asymbol * sym,
3705 struct disassemble_info * info ATTRIBUTE_UNUSED)
3707 const char * name;
3709 if (sym == NULL)
3710 return false;
3712 name = bfd_asymbol_name (sym);
3714 return name
3715 && (name[0] != '$'
3716 || (name[1] != 'x' && name[1] != 'd')
3717 || (name[2] != '\0' && name[2] != '.'));
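/* The test above encodes the AArch64 ELF mapping-symbol convention: names
   of the form "$x" or "$d", optionally followed by "." and a suffix, mark
   code/data ranges rather than real program symbols.  A stand-alone sketch
   of the same predicate (hypothetical helper, for illustration only):

     static bool
     is_aarch64_mapping_symbol_name (const char *name)
     {
       return name != NULL
	      && name[0] == '$'
	      && (name[1] == 'x' || name[1] == 'd')
	      && (name[2] == '\0' || name[2] == '.');
     }

   So "$x" and "$d.1" are filtered out of symbol-relative addresses, while
   a symbol such as "$x_table" is kept.  */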
3720 /* Print data bytes on INFO->STREAM. */
3722 static void
3723 print_insn_data (bfd_vma pc ATTRIBUTE_UNUSED,
3724 uint32_t word,
3725 struct disassemble_info *info,
3726 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
3728 switch (info->bytes_per_chunk)
3730 case 1:
3731 info->fprintf_styled_func (info->stream, dis_style_assembler_directive,
3732 ".byte\t");
3733 info->fprintf_styled_func (info->stream, dis_style_immediate,
3734 "0x%02x", word);
3735 break;
3736 case 2:
3737 info->fprintf_styled_func (info->stream, dis_style_assembler_directive,
3738 ".short\t");
3739 info->fprintf_styled_func (info->stream, dis_style_immediate,
3740 "0x%04x", word);
3741 break;
3742 case 4:
3743 info->fprintf_styled_func (info->stream, dis_style_assembler_directive,
3744 ".word\t");
3745 info->fprintf_styled_func (info->stream, dis_style_immediate,
3746 "0x%08x", word);
3747 break;
3748 default:
3749 abort ();
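/* Sample output of the three cases above, one chunk per line as objdump
   would show it (values are illustrative):

     .byte	0x2a
     .short	0x1234
     .word	0x12345678  */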
3753 /* Try to infer the code or data type from a symbol.
3754 Returns nonzero if *MAP_TYPE was set. */
3756 static int
3757 get_sym_code_type (struct disassemble_info *info, int n,
3758 enum map_type *map_type)
3760 asymbol * as;
3761 elf_symbol_type *es;
3762 unsigned int type;
3763 const char *name;
3765 /* Bounds-check N, then ignore symbols from other sections. */
3766 if (n >= info->symtab_size)
3767 return false;
3769 if (info->section != NULL && info->section != info->symtab[n]->section)
3770 return false;
3772 as = info->symtab[n];
3773 if (bfd_asymbol_flavour (as) != bfd_target_elf_flavour)
3774 return false;
3775 es = (elf_symbol_type *) as;
3777 type = ELF_ST_TYPE (es->internal_elf_sym.st_info);
3779 /* If the symbol has function type then use that. */
3780 if (type == STT_FUNC)
3782 *map_type = MAP_INSN;
3783 return true;
3786 /* Check for mapping symbols. */
3787 name = bfd_asymbol_name(info->symtab[n]);
3788 if (name[0] == '$'
3789 && (name[1] == 'x' || name[1] == 'd')
3790 && (name[2] == '\0' || name[2] == '.'))
3792 *map_type = (name[1] == 'x' ? MAP_INSN : MAP_DATA);
3793 return true;
3796 return false;
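/* Example of the layout this recovers, assuming the assembler emitted the
   usual mapping symbols (addresses and contents are illustrative):

     $x at 0x1000:  0x1000  add  w0, w0, #1      <- MAP_INSN from here on
                    0x1004  ret
     $d at 0x1008:  0x1008  .word 0x12345678     <- MAP_DATA from here on  */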
3799 /* Set the feature bits in arch_variant in order to get the correct disassembly
3800 for the chosen architecture variant.
3802 Currently we only restrict disassembly for Armv8-R and otherwise enable all
3803 non-R-profile features. */
3804 static void
3805 select_aarch64_variant (unsigned mach)
3807 switch (mach)
3809 case bfd_mach_aarch64_8R:
3810 arch_variant = AARCH64_ARCH_V8_R;
3811 break;
3812 default:
3813 arch_variant = AARCH64_ANY & ~(AARCH64_FEATURE_V8_R);
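/* The default case above is a plain feature-mask operation; on ordinary
   integers the same idiom looks like this (stand-in values only):

     unsigned all  = 0xff;	     /* stand-in for AARCH64_ANY           */
     unsigned v8_r = 0x10;	     /* stand-in for AARCH64_FEATURE_V8_R  */
     unsigned mask = all & ~v8_r;    /* everything except the V8-R-only bits */  */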
3817 /* Entry-point of the AArch64 disassembler. */
3820 print_insn_aarch64 (bfd_vma pc,
3821 struct disassemble_info *info)
3823 bfd_byte buffer[INSNLEN];
3824 int status;
3825 void (*printer) (bfd_vma, uint32_t, struct disassemble_info *,
3826 aarch64_operand_error *);
3827 bool found = false;
3828 unsigned int size = 4;
3829 unsigned long data;
3830 aarch64_operand_error errors;
3831 static bool set_features;
3833 if (info->disassembler_options)
3835 set_default_aarch64_dis_options (info);
3837 parse_aarch64_dis_options (info->disassembler_options);
3839 /* To avoid repeated parsing of these options, we remove them here. */
3840 info->disassembler_options = NULL;
3843 if (!set_features)
3845 select_aarch64_variant (info->mach);
3846 set_features = true;
3849 /* AArch64 instructions are always little-endian. */
3850 info->endian_code = BFD_ENDIAN_LITTLE;
3852 /* Default to DATA. A text section is required by the ABI to contain an
3853 INSN mapping symbol at the start. A data section has no such
3854 requirement, hence if no mapping symbol is found the section must
3855 contain only data. This, however, isn't very useful if the user has
3856 fully stripped the binary. In that case use the section
3857 attributes to determine the default. If we have no section, default to
3858 INSN as well, since we may be disassembling raw bytes from a bare-metal
3859 HEX file or similar. */
3860 enum map_type type = MAP_DATA;
3861 if ((info->section && info->section->flags & SEC_CODE) || !info->section)
3862 type = MAP_INSN;
3864 /* First check the full symtab for a mapping symbol, even if there
3865 are no usable non-mapping symbols for this address. */
3866 if (info->symtab_size != 0
3867 && bfd_asymbol_flavour (*info->symtab) == bfd_target_elf_flavour)
3869 int last_sym = -1;
3870 bfd_vma addr, section_vma = 0;
3871 bool can_use_search_opt_p;
3872 int n;
3874 if (pc <= last_mapping_addr)
3875 last_mapping_sym = -1;
3877 /* Start scanning at the start of the function, or wherever
3878 we finished last time. */
3879 n = info->symtab_pos + 1;
3881 /* If the last stop offset is different from the current one it means we
3882 are disassembling a different glob of bytes. As such the optimization
3883 would not be safe and we should start over. */
3884 can_use_search_opt_p = last_mapping_sym >= 0
3885 && info->stop_offset == last_stop_offset;
3887 if (n >= last_mapping_sym && can_use_search_opt_p)
3888 n = last_mapping_sym;
3890 /* Look down while we haven't passed the location being disassembled.
3891 The reason for this is that there's no defined order between a symbol
3892 and a mapping symbol that may be at the same address. We may have to
3893 look at least one position ahead. */
3894 for (; n < info->symtab_size; n++)
3896 addr = bfd_asymbol_value (info->symtab[n]);
3897 if (addr > pc)
3898 break;
3899 if (get_sym_code_type (info, n, &type))
3901 last_sym = n;
3902 found = true;
3906 if (!found)
3908 n = info->symtab_pos;
3909 if (n >= last_mapping_sym && can_use_search_opt_p)
3910 n = last_mapping_sym;
3912 /* No mapping symbol found at this address. Look backwards
3913 for a preceding one, but don't go past the section start,
3914 otherwise a data section with no mapping symbol can pick up
3915 a text mapping symbol of a preceding section. The documentation
3916 says section can be NULL, in which case we will seek up all the
3917 way to the top. */
3918 if (info->section)
3919 section_vma = info->section->vma;
3921 for (; n >= 0; n--)
3923 addr = bfd_asymbol_value (info->symtab[n]);
3924 if (addr < section_vma)
3925 break;
3927 if (get_sym_code_type (info, n, &type))
3929 last_sym = n;
3930 found = true;
3931 break;
3936 last_mapping_sym = last_sym;
3937 last_type = type;
3938 last_stop_offset = info->stop_offset;
3940 /* Look a little bit ahead to see if we should print out
3941 less than four bytes of data. If there's a symbol,
3942 mapping or otherwise, after two bytes then don't
3943 print more. */
3944 if (last_type == MAP_DATA)
3946 size = 4 - (pc & 3);
3947 for (n = last_sym + 1; n < info->symtab_size; n++)
3949 addr = bfd_asymbol_value (info->symtab[n]);
3950 if (addr > pc)
3952 if (addr - pc < size)
3953 size = addr - pc;
3954 break;
3957 /* If the next symbol is after three bytes, we need to
3958 print only part of the data, so that we can use either
3959 .byte or .short. */
3960 if (size == 3)
3961 size = (pc & 1) ? 1 : 2;
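/* Worked example of the trimming above: at pc = 0x1001 the initial chunk
   size is 4 - (0x1001 & 3) = 3; since pc is odd that is reduced to a
   single .byte, after which the following calls naturally realign to a
   .short at 0x1002 and .word-sized chunks from 0x1004 onwards.  */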
3964 else
3965 last_type = type;
3967 /* PR 10263: Disassemble data if requested to do so by the user. */
3968 if (last_type == MAP_DATA && ((info->flags & DISASSEMBLE_DATA) == 0))
3970 /* size was set above. */
3971 info->bytes_per_chunk = size;
3972 info->display_endian = info->endian;
3973 printer = print_insn_data;
3975 else
3977 info->bytes_per_chunk = size = INSNLEN;
3978 info->display_endian = info->endian_code;
3979 printer = print_insn_aarch64_word;
3982 status = (*info->read_memory_func) (pc, buffer, size, info);
3983 if (status != 0)
3985 (*info->memory_error_func) (status, pc, info);
3986 return -1;
3989 data = bfd_get_bits (buffer, size * 8,
3990 info->display_endian == BFD_ENDIAN_BIG);
3992 (*printer) (pc, data, info, &errors);
3994 return size;
3997 void
3998 print_aarch64_disassembler_options (FILE *stream)
4000 fprintf (stream, _("\n\
4001 The following AARCH64 specific disassembler options are supported for use\n\
4002 with the -M switch (multiple options should be separated by commas):\n"));
4004 fprintf (stream, _("\n\
4005 no-aliases Don't print instruction aliases.\n"));
4007 fprintf (stream, _("\n\
4008 aliases Do print instruction aliases.\n"));
4010 fprintf (stream, _("\n\
4011 no-notes Don't print instruction notes.\n"));
4013 fprintf (stream, _("\n\
4014 notes Do print instruction notes.\n"));
4016 #ifdef DEBUG_AARCH64
4017 fprintf (stream, _("\n\
4018 debug_dump Temp switch for debug trace.\n"));
4019 #endif /* DEBUG_AARCH64 */
4021 fprintf (stream, _("\n"));
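/* These options are passed through objdump's -M switch; for example:

     objdump -d -M no-aliases,no-notes prog.o
     objdump -d -M notes prog.o

   Multiple options are comma-separated, as noted in the help text above.  */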