Fix: A potential null pointer dereference bug
[binutils-gdb.git] / opcodes / aarch64-opc.c
blob295638d6d8c21b84ec89564199825e90d9f071e5
1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2023 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include <stdint.h>
26 #include <stdarg.h>
27 #include <inttypes.h>
29 #include "opintl.h"
30 #include "libiberty.h"
32 #include "aarch64-opc.h"
#ifdef DEBUG_AARCH64
/* Nonzero enables the verbose debug tracing in this library; only
   compiled in when DEBUG_AARCH64 is defined.  */
int debug_dump = false;
#endif /* DEBUG_AARCH64 */
/* The enumeration strings associated with each value of a 5-bit SVE
   pattern operand.  A null entry indicates a reserved meaning.  */
const char *const aarch64_sve_pattern_array[32] = {
  /* 0-7.  */
  "pow2",
  "vl1",
  "vl2",
  "vl3",
  "vl4",
  "vl5",
  "vl6",
  "vl7",
  /* 8-15.  */
  "vl8",
  "vl16",
  "vl32",
  "vl64",
  "vl128",
  "vl256",
  0,
  0,
  /* 16-23.  */
  0, 0, 0, 0, 0, 0, 0, 0,
  /* 24-31.  */
  0, 0, 0, 0, 0,
  "mul4",
  "mul3",
  "all"
};
/* The enumeration strings associated with each value of a 4-bit SVE
   prefetch operand.  A null entry indicates a reserved meaning.  */
const char *const aarch64_sve_prfop_array[16] = {
  /* 0-7.  */
  "pldl1keep",
  "pldl1strm",
  "pldl2keep",
  "pldl2strm",
  "pldl3keep",
  "pldl3strm",
  0,
  0,
  /* 8-15.  */
  "pstl1keep",
  "pstl1strm",
  "pstl2keep",
  "pstl2strm",
  "pstl3keep",
  "pstl3strm",
  0,
  0
};
/* The enumeration strings associated with each value of a 6-bit RPRFM
   operation.  Entries not listed are implicitly null (reserved).  */
const char *const aarch64_rprfmop_array[64] = {
  "pldkeep",
  "pstkeep",
  0,
  0,
  "pldstrm",
  "pststrm"
};
/* Vector length multiples for a predicate-as-counter operand.  Used in things
   like AARCH64_OPND_SME_VLxN_10.  */
const char *const aarch64_sme_vlxn_array[2] = {
  "vlx2",
  "vlx4"
};
120 /* Helper functions to determine which operand to be used to encode/decode
121 the size:Q fields for AdvSIMD instructions. */
123 static inline bool
124 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
126 return (qualifier >= AARCH64_OPND_QLF_V_8B
127 && qualifier <= AARCH64_OPND_QLF_V_1Q);
130 static inline bool
131 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
133 return (qualifier >= AARCH64_OPND_QLF_S_B
134 && qualifier <= AARCH64_OPND_QLF_S_Q);
/* Classification of an instruction's qualifier sequence; used to pick
   the operand that carries the size:Q information.  */
enum data_pattern
{
  DP_UNKNOWN,
  DP_VECTOR_3SAME,
  DP_VECTOR_LONG,
  DP_VECTOR_WIDE,
  DP_VECTOR_ACROSS_LANES,
};
/* Indexed by enum data_pattern: which operand carries the significant
   size:Q information for each pattern.  */
static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
155 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
156 the data pattern.
157 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
158 corresponds to one of a sequence of operands. */
160 static enum data_pattern
161 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
163 if (vector_qualifier_p (qualifiers[0]))
165 /* e.g. v.4s, v.4s, v.4s
166 or v.4h, v.4h, v.h[3]. */
167 if (qualifiers[0] == qualifiers[1]
168 && vector_qualifier_p (qualifiers[2])
169 && (aarch64_get_qualifier_esize (qualifiers[0])
170 == aarch64_get_qualifier_esize (qualifiers[1]))
171 && (aarch64_get_qualifier_esize (qualifiers[0])
172 == aarch64_get_qualifier_esize (qualifiers[2])))
173 return DP_VECTOR_3SAME;
174 /* e.g. v.8h, v.8b, v.8b.
175 or v.4s, v.4h, v.h[2].
176 or v.8h, v.16b. */
177 if (vector_qualifier_p (qualifiers[1])
178 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
179 && (aarch64_get_qualifier_esize (qualifiers[0])
180 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
181 return DP_VECTOR_LONG;
182 /* e.g. v.8h, v.8h, v.8b. */
183 if (qualifiers[0] == qualifiers[1]
184 && vector_qualifier_p (qualifiers[2])
185 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
186 && (aarch64_get_qualifier_esize (qualifiers[0])
187 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
188 && (aarch64_get_qualifier_esize (qualifiers[0])
189 == aarch64_get_qualifier_esize (qualifiers[1])))
190 return DP_VECTOR_WIDE;
192 else if (fp_qualifier_p (qualifiers[0]))
194 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
195 if (vector_qualifier_p (qualifiers[1])
196 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
197 return DP_VECTOR_ACROSS_LANES;
200 return DP_UNKNOWN;
203 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
204 the AdvSIMD instructions. */
205 /* N.B. it is possible to do some optimization that doesn't call
206 get_data_pattern each time when we need to select an operand. We can
207 either buffer the caculated the result or statically generate the data,
208 however, it is not obvious that the optimization will bring significant
209 benefit. */
212 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
214 return
215 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
218 /* Instruction bit-fields.
219 + Keep synced with 'enum aarch64_field_kind'. */
220 const aarch64_field fields[] =
222 { 0, 0 }, /* NIL. */
223 { 8, 4 }, /* CRm: in the system instructions. */
224 { 10, 2 }, /* CRm_dsb_nxs: 2-bit imm. encoded in CRm<3:2>. */
225 { 12, 4 }, /* CRn: in the system instructions. */
226 { 10, 8 }, /* CSSC_imm8. */
227 { 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */
228 { 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */
229 { 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */
230 { 22, 1 }, /* N: in logical (immediate) instructions. */
231 { 30, 1 }, /* Q: in most AdvSIMD instructions. */
232 { 10, 5 }, /* Ra: in fp instructions. */
233 { 0, 5 }, /* Rd: in many integer instructions. */
234 { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */
235 { 5, 5 }, /* Rn: in many integer instructions. */
236 { 16, 5 }, /* Rs: in load/store exclusive instructions. */
237 { 0, 5 }, /* Rt: in load/store instructions. */
238 { 10, 5 }, /* Rt2: in load/store pair instructions. */
239 { 12, 1 }, /* S: in load/store reg offset instructions. */
240 { 12, 2 }, /* SM3_imm2: Indexed element SM3 2 bits index immediate. */
241 { 1, 3 }, /* SME_Pdx2: predicate register, multiple of 2, [3:1]. */
242 { 13, 3 }, /* SME_Pm: second source scalable predicate register P0-P7. */
243 { 0, 3 }, /* SME_PNd3: PN0-PN7, bits [2:0]. */
244 { 5, 3 }, /* SME_PNn3: PN0-PN7, bits [7:5]. */
245 { 16, 1 }, /* SME_Q: Q class bit, bit 16. */
246 { 16, 2 }, /* SME_Rm: index base register W12-W15 [17:16]. */
247 { 13, 2 }, /* SME_Rv: vector select register W12-W15, bits [14:13]. */
248 { 15, 1 }, /* SME_V: (horizontal / vertical tiles), bit 15. */
249 { 10, 1 }, /* SME_VL_10: VLx2 or VLx4, bit [10]. */
250 { 13, 1 }, /* SME_VL_13: VLx2 or VLx4, bit [13]. */
251 { 0, 2 }, /* SME_ZAda_2b: tile ZA0-ZA3. */
252 { 0, 3 }, /* SME_ZAda_3b: tile ZA0-ZA7. */
253 { 1, 4 }, /* SME_Zdn2: Z0-Z31, multiple of 2, bits [4:1]. */
254 { 2, 3 }, /* SME_Zdn4: Z0-Z31, multiple of 4, bits [4:2]. */
255 { 16, 4 }, /* SME_Zm: Z0-Z15, bits [19:16]. */
256 { 17, 4 }, /* SME_Zm2: Z0-Z31, multiple of 2, bits [20:17]. */
257 { 18, 3 }, /* SME_Zm4: Z0-Z31, multiple of 4, bits [20:18]. */
258 { 6, 4 }, /* SME_Zn2: Z0-Z31, multiple of 2, bits [9:6]. */
259 { 7, 3 }, /* SME_Zn4: Z0-Z31, multiple of 4, bits [9:7]. */
260 { 4, 1 }, /* SME_ZtT: upper bit of Zt, bit [4]. */
261 { 0, 3 }, /* SME_Zt3: lower 3 bits of Zt, bits [2:0]. */
262 { 0, 2 }, /* SME_Zt2: lower 2 bits of Zt, bits [1:0]. */
263 { 23, 1 }, /* SME_i1: immediate field, bit 23. */
264 { 12, 2 }, /* SME_size_12: bits [13:12]. */
265 { 22, 2 }, /* SME_size_22: size<1>, size<0> class field, [23:22]. */
266 { 23, 1 }, /* SME_sz_23: bit [23]. */
267 { 22, 1 }, /* SME_tszh: immediate and qualifier field, bit 22. */
268 { 18, 3 }, /* SME_tszl: immediate and qualifier field, bits [20:18]. */
269 { 0, 8 }, /* SME_zero_mask: list of up to 8 tile names separated by commas [7:0]. */
270 { 4, 1 }, /* SVE_M_4: Merge/zero select, bit 4. */
271 { 14, 1 }, /* SVE_M_14: Merge/zero select, bit 14. */
272 { 16, 1 }, /* SVE_M_16: Merge/zero select, bit 16. */
273 { 17, 1 }, /* SVE_N: SVE equivalent of N. */
274 { 0, 4 }, /* SVE_Pd: p0-p15, bits [3,0]. */
275 { 10, 3 }, /* SVE_Pg3: p0-p7, bits [12,10]. */
276 { 5, 4 }, /* SVE_Pg4_5: p0-p15, bits [8,5]. */
277 { 10, 4 }, /* SVE_Pg4_10: p0-p15, bits [13,10]. */
278 { 16, 4 }, /* SVE_Pg4_16: p0-p15, bits [19,16]. */
279 { 16, 4 }, /* SVE_Pm: p0-p15, bits [19,16]. */
280 { 5, 4 }, /* SVE_Pn: p0-p15, bits [8,5]. */
281 { 0, 4 }, /* SVE_Pt: p0-p15, bits [3,0]. */
282 { 5, 5 }, /* SVE_Rm: SVE alternative position for Rm. */
283 { 16, 5 }, /* SVE_Rn: SVE alternative position for Rn. */
284 { 0, 5 }, /* SVE_Vd: Scalar SIMD&FP register, bits [4,0]. */
285 { 5, 5 }, /* SVE_Vm: Scalar SIMD&FP register, bits [9,5]. */
286 { 5, 5 }, /* SVE_Vn: Scalar SIMD&FP register, bits [9,5]. */
287 { 5, 5 }, /* SVE_Za_5: SVE vector register, bits [9,5]. */
288 { 16, 5 }, /* SVE_Za_16: SVE vector register, bits [20,16]. */
289 { 0, 5 }, /* SVE_Zd: SVE vector register. bits [4,0]. */
290 { 5, 5 }, /* SVE_Zm_5: SVE vector register, bits [9,5]. */
291 { 16, 5 }, /* SVE_Zm_16: SVE vector register, bits [20,16]. */
292 { 5, 5 }, /* SVE_Zn: SVE vector register, bits [9,5]. */
293 { 0, 5 }, /* SVE_Zt: SVE vector register, bits [4,0]. */
294 { 5, 1 }, /* SVE_i1: single-bit immediate. */
295 { 20, 1 }, /* SVE_i2h: high bit of 2bit immediate, bits. */
296 { 22, 1 }, /* SVE_i3h: high bit of 3-bit immediate. */
297 { 19, 2 }, /* SVE_i3h2: two high bits of 3bit immediate, bits [20,19]. */
298 { 11, 1 }, /* SVE_i3l: low bit of 3-bit immediate. */
299 { 16, 3 }, /* SVE_imm3: 3-bit immediate field. */
300 { 16, 4 }, /* SVE_imm4: 4-bit immediate field. */
301 { 5, 5 }, /* SVE_imm5: 5-bit immediate field. */
302 { 16, 5 }, /* SVE_imm5b: secondary 5-bit immediate field. */
303 { 16, 6 }, /* SVE_imm6: 6-bit immediate field. */
304 { 14, 7 }, /* SVE_imm7: 7-bit immediate field. */
305 { 5, 8 }, /* SVE_imm8: 8-bit immediate field. */
306 { 5, 9 }, /* SVE_imm9: 9-bit immediate field. */
307 { 11, 6 }, /* SVE_immr: SVE equivalent of immr. */
308 { 5, 6 }, /* SVE_imms: SVE equivalent of imms. */
309 { 10, 2 }, /* SVE_msz: 2-bit shift amount for ADR. */
310 { 5, 5 }, /* SVE_pattern: vector pattern enumeration. */
311 { 0, 4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD]. */
312 { 16, 1 }, /* SVE_rot1: 1-bit rotation amount. */
313 { 10, 2 }, /* SVE_rot2: 2-bit rotation amount. */
314 { 10, 1 }, /* SVE_rot3: 1-bit rotation amount at bit 10. */
315 { 17, 2 }, /* SVE_size: 2-bit element size, bits [18,17]. */
316 { 22, 1 }, /* SVE_sz: 1-bit element size select. */
317 { 30, 1 }, /* SVE_sz2: 1-bit element size select. */
318 { 16, 4 }, /* SVE_tsz: triangular size select. */
319 { 22, 2 }, /* SVE_tszh: triangular size select high, bits [23,22]. */
320 { 8, 2 }, /* SVE_tszl_8: triangular size select low, bits [9,8]. */
321 { 19, 2 }, /* SVE_tszl_19: triangular size select low, bits [20,19]. */
322 { 14, 1 }, /* SVE_xs_14: UXTW/SXTW select (bit 14). */
323 { 22, 1 }, /* SVE_xs_22: UXTW/SXTW select (bit 22). */
324 { 22, 1 }, /* S_imm10: in LDRAA and LDRAB instructions. */
325 { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */
326 { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */
327 { 19, 5 }, /* b40: in the test bit and branch instructions. */
328 { 31, 1 }, /* b5: in the test bit and branch instructions. */
329 { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */
330 { 12, 4 }, /* cond: condition flags as a source operand. */
331 { 0, 4 }, /* cond2: condition in truly conditional-executed inst. */
332 { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
333 { 21, 2 }, /* hw: in move wide constant instructions. */
334 { 0, 1 }, /* imm1_0: general immediate in bits [0]. */
335 { 2, 1 }, /* imm1_2: general immediate in bits [2]. */
336 { 8, 1 }, /* imm1_8: general immediate in bits [8]. */
337 { 10, 1 }, /* imm1_10: general immediate in bits [10]. */
338 { 15, 1 }, /* imm1_15: general immediate in bits [15]. */
339 { 16, 1 }, /* imm1_16: general immediate in bits [16]. */
340 { 0, 2 }, /* imm2_0: general immediate in bits [1:0]. */
341 { 1, 2 }, /* imm2_1: general immediate in bits [2:1]. */
342 { 8, 2 }, /* imm2_8: general immediate in bits [9:8]. */
343 { 10, 2 }, /* imm2_10: 2-bit immediate, bits [11:10] */
344 { 12, 2 }, /* imm2_12: 2-bit immediate, bits [13:12] */
345 { 15, 2 }, /* imm2_15: 2-bit immediate, bits [16:15] */
346 { 16, 2 }, /* imm2_16: 2-bit immediate, bits [17:16] */
347 { 19, 2 }, /* imm2_19: 2-bit immediate, bits [20:19] */
348 { 0, 3 }, /* imm3_0: general immediate in bits [2:0]. */
349 { 5, 3 }, /* imm3_5: general immediate in bits [7:5]. */
350 { 10, 3 }, /* imm3_10: in add/sub extended reg instructions. */
351 { 12, 3 }, /* imm3_12: general immediate in bits [14:12]. */
352 { 14, 3 }, /* imm3_14: general immediate in bits [16:14]. */
353 { 15, 3 }, /* imm3_15: general immediate in bits [17:15]. */
354 { 0, 4 }, /* imm4_0: in rmif instructions. */
355 { 5, 4 }, /* imm4_5: in SME instructions. */
356 { 10, 4 }, /* imm4_10: in adddg/subg instructions. */
357 { 11, 4 }, /* imm4_11: in advsimd ext and advsimd ins instructions. */
358 { 14, 4 }, /* imm4_14: general immediate in bits [17:14]. */
359 { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */
360 { 10, 6 }, /* imm6_10: in add/sub reg shifted instructions. */
361 { 15, 6 }, /* imm6_15: in rmif instructions. */
362 { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */
363 { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */
364 { 12, 9 }, /* imm9: in load/store pre/post index instructions. */
365 { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */
366 { 5, 14 }, /* imm14: in test bit and branch instructions. */
367 { 0, 16 }, /* imm16_0: in udf instruction. */
368 { 5, 16 }, /* imm16_5: in exception instructions. */
369 { 5, 19 }, /* imm19: e.g. in CBZ. */
370 { 0, 26 }, /* imm26: in unconditional branch instructions. */
371 { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */
372 { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */
373 { 5, 19 }, /* immhi: e.g. in ADRP. */
374 { 29, 2 }, /* immlo: e.g. in ADRP. */
375 { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */
376 { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */
377 { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */
378 { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */
379 { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */
380 { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */
381 { 30, 1 }, /* lse_sz: in LSE extension atomic instructions. */
382 { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */
383 { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */
384 { 19, 2 }, /* op0: in the system instructions. */
385 { 16, 3 }, /* op1: in the system instructions. */
386 { 5, 3 }, /* op2: in the system instructions. */
387 { 22, 2 }, /* opc: in load/store reg offset instructions. */
388 { 23, 1 }, /* opc1: in load/store reg offset instructions. */
389 { 12, 4 }, /* opcode: in advsimd load/store instructions. */
390 { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */
391 { 11, 2 }, /* rotate1: FCMLA immediate rotate. */
392 { 13, 2 }, /* rotate2: Indexed element FCMLA immediate rotate. */
393 { 12, 1 }, /* rotate3: FCADD immediate rotate. */
394 { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
395 { 31, 1 }, /* sf: in integer data processing instructions. */
396 { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */
397 { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */
398 { 22, 1 }, /* sz: 1-bit element size select. */
399 { 22, 2 }, /* type: floating point type field in fp data inst. */
400 { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */
403 enum aarch64_operand_class
404 aarch64_get_operand_class (enum aarch64_opnd type)
406 return aarch64_operands[type].op_class;
409 const char *
410 aarch64_get_operand_name (enum aarch64_opnd type)
412 return aarch64_operands[type].name;
415 /* Get operand description string.
416 This is usually for the diagnosis purpose. */
417 const char *
418 aarch64_get_operand_desc (enum aarch64_opnd type)
420 return aarch64_operands[type].desc;
423 /* Table of all conditional affixes. */
424 const aarch64_cond aarch64_conds[16] =
426 {{"eq", "none"}, 0x0},
427 {{"ne", "any"}, 0x1},
428 {{"cs", "hs", "nlast"}, 0x2},
429 {{"cc", "lo", "ul", "last"}, 0x3},
430 {{"mi", "first"}, 0x4},
431 {{"pl", "nfrst"}, 0x5},
432 {{"vs"}, 0x6},
433 {{"vc"}, 0x7},
434 {{"hi", "pmore"}, 0x8},
435 {{"ls", "plast"}, 0x9},
436 {{"ge", "tcont"}, 0xa},
437 {{"lt", "tstop"}, 0xb},
438 {{"gt"}, 0xc},
439 {{"le"}, 0xd},
440 {{"al"}, 0xe},
441 {{"nv"}, 0xf},
444 const aarch64_cond *
445 get_cond_from_value (aarch64_insn value)
447 assert (value < 16);
448 return &aarch64_conds[(unsigned int) value];
451 const aarch64_cond *
452 get_inverted_cond (const aarch64_cond *cond)
454 return &aarch64_conds[cond->value ^ 0x1];
457 /* Table describing the operand extension/shifting operators; indexed by
458 enum aarch64_modifier_kind.
460 The value column provides the most common values for encoding modifiers,
461 which enables table-driven encoding/decoding for the modifiers. */
462 const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
464 {"none", 0x0},
465 {"msl", 0x0},
466 {"ror", 0x3},
467 {"asr", 0x2},
468 {"lsr", 0x1},
469 {"lsl", 0x0},
470 {"uxtb", 0x0},
471 {"uxth", 0x1},
472 {"uxtw", 0x2},
473 {"uxtx", 0x3},
474 {"sxtb", 0x4},
475 {"sxth", 0x5},
476 {"sxtw", 0x6},
477 {"sxtx", 0x7},
478 {"mul", 0x0},
479 {"mul vl", 0x0},
480 {NULL, 0},
483 enum aarch64_modifier_kind
484 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
486 return desc - aarch64_operand_modifiers;
489 aarch64_insn
490 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
492 return aarch64_operand_modifiers[kind].value;
495 enum aarch64_modifier_kind
496 aarch64_get_operand_modifier_from_value (aarch64_insn value,
497 bool extend_p)
499 if (extend_p)
500 return AARCH64_MOD_UXTB + value;
501 else
502 return AARCH64_MOD_LSL - value;
505 bool
506 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
508 return kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX;
511 static inline bool
512 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
514 return kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL;
517 const struct aarch64_name_value_pair aarch64_barrier_options[16] =
519 { "#0x00", 0x0 },
520 { "oshld", 0x1 },
521 { "oshst", 0x2 },
522 { "osh", 0x3 },
523 { "#0x04", 0x4 },
524 { "nshld", 0x5 },
525 { "nshst", 0x6 },
526 { "nsh", 0x7 },
527 { "#0x08", 0x8 },
528 { "ishld", 0x9 },
529 { "ishst", 0xa },
530 { "ish", 0xb },
531 { "#0x0c", 0xc },
532 { "ld", 0xd },
533 { "st", 0xe },
534 { "sy", 0xf },
537 const struct aarch64_name_value_pair aarch64_barrier_dsb_nxs_options[4] =
538 { /* CRm<3:2> #imm */
539 { "oshnxs", 16 }, /* 00 16 */
540 { "nshnxs", 20 }, /* 01 20 */
541 { "ishnxs", 24 }, /* 10 24 */
542 { "synxs", 28 }, /* 11 28 */
545 /* Table describing the operands supported by the aliases of the HINT
546 instruction.
548 The name column is the operand that is accepted for the alias. The value
549 column is the hint number of the alias. The list of operands is terminated
550 by NULL in the name column. */
552 const struct aarch64_name_value_pair aarch64_hint_options[] =
554 /* BTI. This is also the F_DEFAULT entry for AARCH64_OPND_BTI_TARGET. */
555 { " ", HINT_ENCODE (HINT_OPD_F_NOPRINT, 0x20) },
556 { "csync", HINT_OPD_CSYNC }, /* PSB CSYNC. */
557 { "c", HINT_OPD_C }, /* BTI C. */
558 { "j", HINT_OPD_J }, /* BTI J. */
559 { "jc", HINT_OPD_JC }, /* BTI JC. */
560 { NULL, HINT_OPD_NULL },
563 /* op -> op: load = 0 instruction = 1 store = 2
564 l -> level: 1-3
565 t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */
566 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
567 const struct aarch64_name_value_pair aarch64_prfops[32] =
569 { "pldl1keep", B(0, 1, 0) },
570 { "pldl1strm", B(0, 1, 1) },
571 { "pldl2keep", B(0, 2, 0) },
572 { "pldl2strm", B(0, 2, 1) },
573 { "pldl3keep", B(0, 3, 0) },
574 { "pldl3strm", B(0, 3, 1) },
575 { NULL, 0x06 },
576 { NULL, 0x07 },
577 { "plil1keep", B(1, 1, 0) },
578 { "plil1strm", B(1, 1, 1) },
579 { "plil2keep", B(1, 2, 0) },
580 { "plil2strm", B(1, 2, 1) },
581 { "plil3keep", B(1, 3, 0) },
582 { "plil3strm", B(1, 3, 1) },
583 { NULL, 0x0e },
584 { NULL, 0x0f },
585 { "pstl1keep", B(2, 1, 0) },
586 { "pstl1strm", B(2, 1, 1) },
587 { "pstl2keep", B(2, 2, 0) },
588 { "pstl2strm", B(2, 2, 1) },
589 { "pstl3keep", B(2, 3, 0) },
590 { "pstl3strm", B(2, 3, 1) },
591 { NULL, 0x16 },
592 { NULL, 0x17 },
593 { NULL, 0x18 },
594 { NULL, 0x19 },
595 { NULL, 0x1a },
596 { NULL, 0x1b },
597 { NULL, 0x1c },
598 { NULL, 0x1d },
599 { NULL, 0x1e },
600 { NULL, 0x1f },
602 #undef B
/* Utilities on value constraint.  */

/* Return 1 if VALUE lies in the inclusive range [LOW, HIGH], else 0.  */
static inline int
value_in_range_p (int64_t value, int low, int high)
{
  return (value >= low && value <= high) ? 1 : 0;
}
/* Return true if VALUE is a multiple of ALIGN.  Negative multiples also
   count as aligned (C remainder of an exact multiple is 0).  */
static inline int
value_aligned_p (int64_t value, int align)
{
  return (value % align) == 0;
}
/* A signed value fits in a field.  Return 1 if VALUE is representable as a
   WIDTH-bit two's-complement number.  Assumes WIDTH >= 1 (WIDTH - 1 would
   wrap for WIDTH == 0).  */
static inline int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width < sizeof (value) * 8)
    {
      int64_t lim = (uint64_t) 1 << (width - 1);
      if (value >= -lim && value < lim)
	return 1;
    }
  return 0;
}
/* An unsigned value fits in a field.  Return 1 if VALUE is representable
   as a WIDTH-bit unsigned number (i.e. 0 <= VALUE < 2**WIDTH).  */
static inline int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width < sizeof (value) * 8)
    {
      int64_t lim = (uint64_t) 1 << width;
      if (value >= 0 && value < lim)
	return 1;
    }
  return 0;
}
647 /* Return 1 if OPERAND is SP or WSP. */
649 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
651 return ((aarch64_get_operand_class (operand->type)
652 == AARCH64_OPND_CLASS_INT_REG)
653 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
654 && operand->reg.regno == 31);
657 /* Return 1 if OPERAND is XZR or WZP. */
659 aarch64_zero_register_p (const aarch64_opnd_info *operand)
661 return ((aarch64_get_operand_class (operand->type)
662 == AARCH64_OPND_CLASS_INT_REG)
663 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
664 && operand->reg.regno == 31);
667 /* Return true if the operand *OPERAND that has the operand code
668 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
669 qualified by the qualifier TARGET. */
671 static inline int
672 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
673 aarch64_opnd_qualifier_t target)
675 switch (operand->qualifier)
677 case AARCH64_OPND_QLF_W:
678 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
679 return 1;
680 break;
681 case AARCH64_OPND_QLF_X:
682 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
683 return 1;
684 break;
685 case AARCH64_OPND_QLF_WSP:
686 if (target == AARCH64_OPND_QLF_W
687 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
688 return 1;
689 break;
690 case AARCH64_OPND_QLF_SP:
691 if (target == AARCH64_OPND_QLF_X
692 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
693 return 1;
694 break;
695 default:
696 break;
699 return 0;
702 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
703 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
705 Return NIL if more than one expected qualifiers are found. */
707 aarch64_opnd_qualifier_t
708 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
709 int idx,
710 const aarch64_opnd_qualifier_t known_qlf,
711 int known_idx)
713 int i, saved_i;
715 /* Special case.
717 When the known qualifier is NIL, we have to assume that there is only
718 one qualifier sequence in the *QSEQ_LIST and return the corresponding
719 qualifier directly. One scenario is that for instruction
720 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
721 which has only one possible valid qualifier sequence
722 NIL, S_D
723 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
724 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
726 Because the qualifier NIL has dual roles in the qualifier sequence:
727 it can mean no qualifier for the operand, or the qualifer sequence is
728 not in use (when all qualifiers in the sequence are NILs), we have to
729 handle this special case here. */
730 if (known_qlf == AARCH64_OPND_NIL)
732 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
733 return qseq_list[0][idx];
736 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
738 if (qseq_list[i][known_idx] == known_qlf)
740 if (saved_i != -1)
741 /* More than one sequences are found to have KNOWN_QLF at
742 KNOWN_IDX. */
743 return AARCH64_OPND_NIL;
744 saved_i = i;
748 return qseq_list[saved_i][idx];
/* Kinds of operand qualifier: selects how the data fields of
   struct operand_qualifier_data are interpreted.  */
enum operand_qualifier_kind
{
  OQK_NIL,
  OQK_OPD_VARIANT,
  OQK_VALUE_IN_RANGE,
  OQK_MISC,
};

/* Operand qualifier description.  */
struct operand_qualifier_data
{
  /* The usage of the three data fields depends on the qualifier kind.  */
  int data0;
  int data1;
  int data2;
  /* Description.  */
  const char *desc;
  /* Kind.  */
  enum operand_qualifier_kind kind;
};
772 /* Indexed by the operand qualifier enumerators. */
773 struct operand_qualifier_data aarch64_opnd_qualifiers[] =
775 {0, 0, 0, "NIL", OQK_NIL},
777 /* Operand variant qualifiers.
778 First 3 fields:
779 element size, number of elements and common value for encoding. */
781 {4, 1, 0x0, "w", OQK_OPD_VARIANT},
782 {8, 1, 0x1, "x", OQK_OPD_VARIANT},
783 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
784 {8, 1, 0x1, "sp", OQK_OPD_VARIANT},
786 {1, 1, 0x0, "b", OQK_OPD_VARIANT},
787 {2, 1, 0x1, "h", OQK_OPD_VARIANT},
788 {4, 1, 0x2, "s", OQK_OPD_VARIANT},
789 {8, 1, 0x3, "d", OQK_OPD_VARIANT},
790 {16, 1, 0x4, "q", OQK_OPD_VARIANT},
791 {4, 1, 0x0, "4b", OQK_OPD_VARIANT},
792 {4, 1, 0x0, "2h", OQK_OPD_VARIANT},
794 {1, 4, 0x0, "4b", OQK_OPD_VARIANT},
795 {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
796 {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
797 {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
798 {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
799 {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
800 {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
801 {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
802 {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
803 {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
804 {16, 1, 0x8, "1q", OQK_OPD_VARIANT},
806 {0, 0, 0, "z", OQK_OPD_VARIANT},
807 {0, 0, 0, "m", OQK_OPD_VARIANT},
809 /* Qualifier for scaled immediate for Tag granule (stg,st2g,etc). */
810 {16, 0, 0, "tag", OQK_OPD_VARIANT},
812 /* Qualifiers constraining the value range.
813 First 3 fields:
814 Lower bound, higher bound, unused. */
816 {0, 15, 0, "CR", OQK_VALUE_IN_RANGE},
817 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
818 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
819 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
820 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
821 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
822 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},
824 /* Qualifiers for miscellaneous purpose.
825 First 3 fields:
826 unused, unused and unused. */
828 {0, 0, 0, "lsl", 0},
829 {0, 0, 0, "msl", 0},
831 {0, 0, 0, "retrieving", 0},
834 static inline bool
835 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
837 return aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT;
840 static inline bool
841 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
843 return aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE;
846 const char*
847 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
849 return aarch64_opnd_qualifiers[qualifier].desc;
852 /* Given an operand qualifier, return the expected data element size
853 of a qualified operand. */
854 unsigned char
855 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
857 assert (operand_variant_qualifier_p (qualifier));
858 return aarch64_opnd_qualifiers[qualifier].data0;
861 unsigned char
862 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
864 assert (operand_variant_qualifier_p (qualifier));
865 return aarch64_opnd_qualifiers[qualifier].data1;
868 aarch64_insn
869 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
871 assert (operand_variant_qualifier_p (qualifier));
872 return aarch64_opnd_qualifiers[qualifier].data2;
875 static int
876 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
878 assert (qualifier_value_in_range_constraint_p (qualifier));
879 return aarch64_opnd_qualifiers[qualifier].data0;
882 static int
883 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
885 assert (qualifier_value_in_range_constraint_p (qualifier));
886 return aarch64_opnd_qualifiers[qualifier].data1;
#ifdef DEBUG_AARCH64
/* Debug printf wrapper: prefixes "#### " and appends a newline.  */
void
aarch64_verbose (const char *str, ...)
{
  va_list ap;
  va_start (ap, str);
  printf ("#### ");
  vprintf (str, ap);
  printf ("\n");
  va_end (ap);
}

/* Print one qualifier sequence on a single debug line.  */
static inline void
dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  printf ("#### \t");
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
    printf ("%s,", aarch64_get_qualifier_name (*qualifier));
  printf ("\n");
}

/* Dump the qualifiers currently set on OPND against the candidate
   sequence QUALIFIER, for debugging the matcher.  */
static void
dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
		       const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];

  aarch64_verbose ("dump_match_qualifiers:");
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    curr[i] = opnd[i].qualifier;
  dump_qualifier_sequence (curr);
  aarch64_verbose ("against");
  dump_qualifier_sequence (qualifier);
}
#endif /* DEBUG_AARCH64 */
927 /* This function checks if the given instruction INSN is a destructive
928 instruction based on the usage of the registers. It does not recognize
929 unary destructive instructions. */
930 bool
931 aarch64_is_destructive_by_operands (const aarch64_opcode *opcode)
933 int i = 0;
934 const enum aarch64_opnd *opnds = opcode->operands;
936 if (opnds[0] == AARCH64_OPND_NIL)
937 return false;
939 while (opnds[++i] != AARCH64_OPND_NIL)
940 if (opnds[i] == opnds[0])
941 return true;
943 return false;
946 /* TODO improve this, we can have an extra field at the runtime to
947 store the number of operands rather than calculating it every time. */
950 aarch64_num_of_operands (const aarch64_opcode *opcode)
952 int i = 0;
953 const enum aarch64_opnd *opnds = opcode->operands;
954 while (opnds[i++] != AARCH64_OPND_NIL)
956 --i;
957 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
958 return i;
/* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
   If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.

   Store the smallest number of non-matching qualifiers in *INVALID_COUNT.
   This is always 0 if the function succeeds.

   N.B. on the entry, it is very likely that only some operands in *INST
   have had their qualifiers been established.

   If STOP_AT is not -1, the function will only try to match
   the qualifier sequence for operands before and including the operand
   of index STOP_AT; and on success *RET will only be filled with the first
   (STOP_AT+1) qualifiers.

   A couple examples of the matching algorithm:

   X,W,NIL should match
   X,W,NIL

   NIL,NIL should match
   X  ,NIL

   Apart from serving the main encoding routine, this can also be called
   during or after the operand decoding.  */

int
aarch64_find_best_match (const aarch64_inst *inst,
			 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
			 int stop_at, aarch64_opnd_qualifier_t *ret,
			 int *invalid_count)
{
  int i, num_opnds, invalid, min_invalid;
  const aarch64_opnd_qualifier_t *qualifiers;

  num_opnds = aarch64_num_of_operands (inst->opcode);
  if (num_opnds == 0)
    {
      /* No operands at all: trivially matched.  */
      DEBUG_TRACE ("SUCCEED: no operand");
      *invalid_count = 0;
      return 1;
    }

  /* Normalize STOP_AT to the last operand index.  */
  if (stop_at < 0 || stop_at >= num_opnds)
    stop_at = num_opnds - 1;

  /* For each pattern.  */
  min_invalid = num_opnds;
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j;
      qualifiers = *qualifiers_list;

      /* Start as positive.  */
      invalid = 0;

      DEBUG_TRACE ("%d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_match_qualifiers (inst->operands, qualifiers);
#endif

      /* The first entry should be taken literally, even if it's an empty
	 qualifier sequence.  (This matters for strict testing.)  In other
	 positions an empty sequence acts as a terminator.  */
      if (i > 0 && empty_qualifier_sequence_p (qualifiers))
	break;

      for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
	{
	  if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL
	      && !(inst->opcode->flags & F_STRICT))
	    {
	      /* Either the operand does not have qualifier, or the qualifier
		 for the operand needs to be deduced from the qualifier
		 sequence.
		 In the latter case, any constraint checking related with
		 the obtained qualifier should be done later in
		 operand_general_constraint_met_p.  */
	      continue;
	    }
	  else if (*qualifiers != inst->operands[j].qualifier)
	    {
	      /* Unless the target qualifier can also qualify the operand
		 (which has already had a non-nil qualifier), non-equal
		 qualifiers are generally un-matched.  */
	      if (operand_also_qualified_p (inst->operands + j, *qualifiers))
		continue;
	      else
		invalid += 1;
	    }
	  else
	    continue;	/* Equal qualifiers are certainly matched.  */
	}

      if (min_invalid > invalid)
	min_invalid = invalid;

      /* Qualifiers established.  */
      if (min_invalid == 0)
	break;
    }

  *invalid_count = min_invalid;
  if (min_invalid == 0)
    {
      /* Fill the result in *RET.  */
      int j;
      qualifiers = *qualifiers_list;

      DEBUG_TRACE ("complete qualifiers using list %d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_qualifier_sequence (qualifiers);
#endif

      /* Copy the matched prefix, then pad with NIL so callers can rely
	 on the full AARCH64_MAX_OPND_NUM entries being initialized.  */
      for (j = 0; j <= stop_at; ++j, ++qualifiers)
	ret[j] = *qualifiers;
      for (; j < AARCH64_MAX_OPND_NUM; ++j)
	ret[j] = AARCH64_OPND_QLF_NIL;

      DEBUG_TRACE ("SUCCESS");
      return 1;
    }

  DEBUG_TRACE ("FAIL");
  return 0;
}
/* Operand qualifier matching and resolving.

   Return 1 if the operand qualifier(s) in *INST match one of the qualifier
   sequences in INST->OPCODE->qualifiers_list; otherwise return 0.

   Store the smallest number of non-matching qualifiers in *INVALID_COUNT.
   This is always 0 if the function succeeds.

   if UPDATE_P, update the qualifier(s) in *INST after the matching
   succeeds.  */

static int
match_operands_qualifier (aarch64_inst *inst, bool update_p,
			  int *invalid_count)
{
  int i;
  aarch64_opnd_qualifier_seq_t qualifiers;

  if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
				qualifiers, invalid_count))
    {
      DEBUG_TRACE ("matching FAIL");
      return 0;
    }

  /* Update the qualifiers.  */
  if (update_p)
    for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
      {
	/* Stop at the first unused operand slot.  */
	if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
	  break;
	DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
			"update %s with %s for operand %d",
			aarch64_get_qualifier_name (inst->operands[i].qualifier),
			aarch64_get_qualifier_name (qualifiers[i]), i);
	inst->operands[i].qualifier = qualifiers[i];
      }

  DEBUG_TRACE ("matching SUCCESS");
  return 1;
}
1131 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1132 register by MOVZ.
1134 IS32 indicates whether value is a 32-bit immediate or not.
1135 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1136 amount will be returned in *SHIFT_AMOUNT. */
1138 bool
1139 aarch64_wide_constant_p (uint64_t value, int is32, unsigned int *shift_amount)
1141 int amount;
1143 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1145 if (is32)
1147 /* Allow all zeros or all ones in top 32-bits, so that
1148 32-bit constant expressions like ~0x80000000 are
1149 permitted. */
1150 if (value >> 32 != 0 && value >> 32 != 0xffffffff)
1151 /* Immediate out of range. */
1152 return false;
1153 value &= 0xffffffff;
1156 /* first, try movz then movn */
1157 amount = -1;
1158 if ((value & ((uint64_t) 0xffff << 0)) == value)
1159 amount = 0;
1160 else if ((value & ((uint64_t) 0xffff << 16)) == value)
1161 amount = 16;
1162 else if (!is32 && (value & ((uint64_t) 0xffff << 32)) == value)
1163 amount = 32;
1164 else if (!is32 && (value & ((uint64_t) 0xffff << 48)) == value)
1165 amount = 48;
1167 if (amount == -1)
1169 DEBUG_TRACE ("exit false with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1170 return false;
1173 if (shift_amount != NULL)
1174 *shift_amount = amount;
1176 DEBUG_TRACE ("exit true with amount %d", amount);
1178 return true;
1181 /* Build the accepted values for immediate logical SIMD instructions.
1183 The standard encodings of the immediate value are:
1184 N imms immr SIMD size R S
1185 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1186 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1187 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1188 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1189 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1190 0 11110s 00000r 2 UInt(r) UInt(s)
1191 where all-ones value of S is reserved.
1193 Let's call E the SIMD size.
1195 The immediate value is: S+1 bits '1' rotated to the right by R.
1197 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1198 (remember S != E - 1). */
1200 #define TOTAL_IMM_NB 5334
1202 typedef struct
1204 uint64_t imm;
1205 aarch64_insn encoding;
1206 } simd_imm_encoding;
1208 static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1210 static int
1211 simd_imm_encoding_cmp(const void *i1, const void *i2)
1213 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1214 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1216 if (imm1->imm < imm2->imm)
1217 return -1;
1218 if (imm1->imm > imm2->imm)
1219 return +1;
1220 return 0;
/* immediate bitfield standard encoding
   imm13<12> imm13<5:0> imm13<11:6> SIMD size R      S
   1         ssssss     rrrrrr      64        rrrrrr ssssss
   0         0sssss     0rrrrr      32        rrrrr  sssss
   0         10ssss     00rrrr      16        rrrr   ssss
   0         110sss     000rrr      8         rrr    sss
   0         1110ss     0000rr      4         rr     ss
   0         11110s     00000r      2         r      s  */
static inline int
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  /* Pack the fields as N:immr:imms (bit 12, bits 11:6, bits 5:0).  */
  int encoding = s;
  encoding |= r << 6;
  encoding |= is64 << 12;
  return encoding;
}
/* Populate simd_immediates[] with every valid logical-immediate value and
   its standard encoding, then sort the table by value so lookups can use
   bsearch (see aarch64_logical_immediate_p).  */
static void
build_immediate_table (void)
{
  uint32_t log_e, e, s, r, s_mask;
  uint64_t mask, imm;
  int nb_imms;
  int is64;

  nb_imms = 0;
  for (log_e = 1; log_e <= 6; log_e++)
    {
      /* Get element size.  */
      e = 1u << log_e;
      if (log_e == 6)
	{
	  is64 = 1;
	  mask = 0xffffffffffffffffull;
	  s_mask = 0;
	}
      else
	{
	  is64 = 0;
	  mask = (1ull << e) - 1;
	  /* log_e  s_mask
	     1     ((1 << 4) - 1) << 2 = 111100
	     2     ((1 << 3) - 1) << 3 = 111000
	     3     ((1 << 2) - 1) << 4 = 110000
	     4     ((1 << 1) - 1) << 5 = 100000
	     5     ((1 << 0) - 1) << 6 = 000000  */
	  s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
	}
      for (s = 0; s < e - 1; s++)
	for (r = 0; r < e; r++)
	  {
	    /* s+1 consecutive bits to 1 (s < 63).  */
	    imm = (1ull << (s + 1)) - 1;
	    /* rotate right by r.  */
	    if (r != 0)
	      imm = (imm >> r) | ((imm << (e - r)) & mask);
	    /* replicate the constant depending on SIMD size.
	       Each case doubles the element width, so falling through
	       from case K replicates a 2^K-bit element up to 64 bits.  */
	    switch (log_e)
	      {
	      case 1: imm = (imm <<  2) | imm;
		/* Fall through.  */
	      case 2: imm = (imm <<  4) | imm;
		/* Fall through.  */
	      case 3: imm = (imm <<  8) | imm;
		/* Fall through.  */
	      case 4: imm = (imm << 16) | imm;
		/* Fall through.  */
	      case 5: imm = (imm << 32) | imm;
		/* Fall through.  */
	      case 6: break;
	      default: abort ();
	      }
	    simd_immediates[nb_imms].imm = imm;
	    simd_immediates[nb_imms].encoding =
	      encode_immediate_bitfield (is64, s | s_mask, r);
	    nb_imms++;
	  }
    }
  assert (nb_imms == TOTAL_IMM_NB);
  qsort (simd_immediates, nb_imms,
	 sizeof (simd_immediates[0]), simd_imm_encoding_cmp);
}
/* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
   be accepted by logical (immediate) instructions
   e.g. ORR <Xd|SP>, <Xn>, #<imm>.

   ESIZE is the number of bytes in the decoded immediate value.
   If ENCODING is not NULL, on the return of TRUE, the standard encoding for
   VALUE will be returned in *ENCODING.  */
bool
aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
{
  simd_imm_encoding imm_enc;
  const simd_imm_encoding *imm_encoding;
  /* Lazily build the lookup table on first use.
     NOTE(review): not thread-safe; presumably callers are
     single-threaded — confirm before using from multiple threads.  */
  static bool initialized = false;
  uint64_t upper;
  int i;

  DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), esize: %d", value,
	       value, esize);

  if (!initialized)
    {
      build_immediate_table ();
      initialized = true;
    }

  /* Allow all zeros or all ones in top bits, so that
     constant expressions like ~1 are permitted.
     The double shift avoids UB when esize == 8 (a single 64-bit
     shift by 64 would be undefined).  */
  upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
  if ((value & ~upper) != value && (value | upper) != value)
    return false;

  /* Replicate to a full 64-bit value.  */
  value &= ~upper;
  for (i = esize * 8; i < 64; i *= 2)
    value |= (value << i);

  imm_enc.imm = value;
  imm_encoding = (const simd_imm_encoding *)
    bsearch (&imm_enc, simd_immediates, TOTAL_IMM_NB,
	     sizeof (simd_immediates[0]), simd_imm_encoding_cmp);
  if (imm_encoding == NULL)
    {
      DEBUG_TRACE ("exit with false");
      return false;
    }
  /* ENCODING may legitimately be NULL when the caller only wants the
     predicate; guard the write to avoid dereferencing a null pointer.  */
  if (encoding != NULL)
    *encoding = imm_encoding->encoding;
  DEBUG_TRACE ("exit with true");
  return true;
}
/* If 64-bit immediate IMM is in the format of
   "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
   where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
   of value "abcdefgh".  Otherwise return -1.  */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int ret = 0;
  int i;

  for (i = 0; i < 8; i++)
    {
      uint32_t byte = (imm >> (8 * i)) & 0xff;

      if (byte == 0xff)
	ret |= 1 << i;
      else if (byte != 0x00)
	/* A byte that is neither all-ones nor all-zeros cannot come
	   from an expanded imm8.  */
	return -1;
    }

  return ret;
}
1377 /* Utility inline functions for operand_general_constraint_met_p. */
1379 static inline void
1380 set_error (aarch64_operand_error *mismatch_detail,
1381 enum aarch64_operand_error_kind kind, int idx,
1382 const char* error)
1384 if (mismatch_detail == NULL)
1385 return;
1386 mismatch_detail->kind = kind;
1387 mismatch_detail->index = idx;
1388 mismatch_detail->error = error;
1391 static inline void
1392 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1393 const char* error)
1395 if (mismatch_detail == NULL)
1396 return;
1397 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1400 static inline void
1401 set_invalid_regno_error (aarch64_operand_error *mismatch_detail, int idx,
1402 const char *prefix, int lower_bound, int upper_bound)
1404 if (mismatch_detail == NULL)
1405 return;
1406 set_error (mismatch_detail, AARCH64_OPDE_INVALID_REGNO, idx, NULL);
1407 mismatch_detail->data[0].s = prefix;
1408 mismatch_detail->data[1].i = lower_bound;
1409 mismatch_detail->data[2].i = upper_bound;
1412 static inline void
1413 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1414 int idx, int lower_bound, int upper_bound,
1415 const char* error)
1417 if (mismatch_detail == NULL)
1418 return;
1419 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1420 mismatch_detail->data[0].i = lower_bound;
1421 mismatch_detail->data[1].i = upper_bound;
1424 static inline void
1425 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1426 int idx, int lower_bound, int upper_bound)
1428 if (mismatch_detail == NULL)
1429 return;
1430 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1431 _("immediate value"));
1434 static inline void
1435 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1436 int idx, int lower_bound, int upper_bound)
1438 if (mismatch_detail == NULL)
1439 return;
1440 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1441 _("immediate offset"));
1444 static inline void
1445 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1446 int idx, int lower_bound, int upper_bound)
1448 if (mismatch_detail == NULL)
1449 return;
1450 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1451 _("register number"));
1454 static inline void
1455 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1456 int idx, int lower_bound, int upper_bound)
1458 if (mismatch_detail == NULL)
1459 return;
1460 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1461 _("register element index"));
1464 static inline void
1465 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1466 int idx, int lower_bound, int upper_bound)
1468 if (mismatch_detail == NULL)
1469 return;
1470 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1471 _("shift amount"));
1474 /* Report that the MUL modifier in operand IDX should be in the range
1475 [LOWER_BOUND, UPPER_BOUND]. */
1476 static inline void
1477 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1478 int idx, int lower_bound, int upper_bound)
1480 if (mismatch_detail == NULL)
1481 return;
1482 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1483 _("multiplier"));
1486 static inline void
1487 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1488 int alignment)
1490 if (mismatch_detail == NULL)
1491 return;
1492 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1493 mismatch_detail->data[0].i = alignment;
1496 static inline void
1497 set_reg_list_length_error (aarch64_operand_error *mismatch_detail, int idx,
1498 int expected_num)
1500 if (mismatch_detail == NULL)
1501 return;
1502 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST_LENGTH, idx, NULL);
1503 mismatch_detail->data[0].i = 1 << expected_num;
1506 static inline void
1507 set_reg_list_stride_error (aarch64_operand_error *mismatch_detail, int idx,
1508 int expected_num)
1510 if (mismatch_detail == NULL)
1511 return;
1512 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST_STRIDE, idx, NULL);
1513 mismatch_detail->data[0].i = 1 << expected_num;
1516 static inline void
1517 set_invalid_vg_size (aarch64_operand_error *mismatch_detail,
1518 int idx, int expected)
1520 if (mismatch_detail == NULL)
1521 return;
1522 set_error (mismatch_detail, AARCH64_OPDE_INVALID_VG_SIZE, idx, NULL);
1523 mismatch_detail->data[0].i = expected;
1526 static inline void
1527 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1528 const char* error)
1530 if (mismatch_detail == NULL)
1531 return;
1532 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1535 /* Check that indexed register operand OPND has a register in the range
1536 [MIN_REGNO, MAX_REGNO] and an index in the range [MIN_INDEX, MAX_INDEX].
1537 PREFIX is the register prefix, such as "z" for SVE vector registers. */
1539 static bool
1540 check_reglane (const aarch64_opnd_info *opnd,
1541 aarch64_operand_error *mismatch_detail, int idx,
1542 const char *prefix, int min_regno, int max_regno,
1543 int min_index, int max_index)
1545 if (!value_in_range_p (opnd->reglane.regno, min_regno, max_regno))
1547 set_invalid_regno_error (mismatch_detail, idx, prefix, min_regno,
1548 max_regno);
1549 return false;
1551 if (!value_in_range_p (opnd->reglane.index, min_index, max_index))
1553 set_elem_idx_out_of_range_error (mismatch_detail, idx, min_index,
1554 max_index);
1555 return false;
1557 return true;
1560 /* Check that register list operand OPND has NUM_REGS registers and a
1561 register stride of STRIDE. */
1563 static bool
1564 check_reglist (const aarch64_opnd_info *opnd,
1565 aarch64_operand_error *mismatch_detail, int idx,
1566 int num_regs, int stride)
1568 if (opnd->reglist.num_regs != num_regs)
1570 set_reg_list_length_error (mismatch_detail, idx, num_regs);
1571 return false;
1573 if (opnd->reglist.stride != stride)
1575 set_reg_list_stride_error (mismatch_detail, idx, stride);
1576 return false;
1578 return true;
/* Check that indexed ZA operand OPND has:

   - a selection register in the range [MIN_WREG, MIN_WREG + 3]

   - RANGE_SIZE consecutive immediate offsets.

   - an initial immediate offset that is a multiple of RANGE_SIZE
     in the range [0, MAX_VALUE * RANGE_SIZE]

   - a vector group size of GROUP_SIZE.  */

static bool
check_za_access (const aarch64_opnd_info *opnd,
		 aarch64_operand_error *mismatch_detail, int idx,
		 int min_wreg, int max_value, unsigned int range_size,
		 int group_size)
{
  if (!value_in_range_p (opnd->indexed_za.index.regno, min_wreg, min_wreg + 3))
    {
      /* Only the w12-w15 and w8-w11 selection windows exist; any other
	 MIN_WREG indicates an internal inconsistency.  */
      if (min_wreg == 12)
	set_other_error (mismatch_detail, idx,
			 _("expected a selection register in the"
			   " range w12-w15"));
      else if (min_wreg == 8)
	set_other_error (mismatch_detail, idx,
			 _("expected a selection register in the"
			   " range w8-w11"));
      else
	abort ();
      return false;
    }

  int max_index = max_value * range_size;
  if (!value_in_range_p (opnd->indexed_za.index.imm, 0, max_index))
    {
      set_offset_out_of_range_error (mismatch_detail, idx, 0, max_index);
      return false;
    }

  /* The starting offset of a multi-offset range must be aligned to the
     range size (only reachable for range sizes 2 and 4, since any value
     is a multiple of 1).  */
  if ((opnd->indexed_za.index.imm % range_size) != 0)
    {
      assert (range_size == 2 || range_size == 4);
      set_other_error (mismatch_detail, idx,
		       range_size == 2
		       ? _("starting offset is not a multiple of 2")
		       : _("starting offset is not a multiple of 4"));
      return false;
    }

  /* countm1 is the number of offsets minus one.  */
  if (opnd->indexed_za.index.countm1 != range_size - 1)
    {
      if (range_size == 1)
	set_other_error (mismatch_detail, idx,
			 _("expected a single offset rather than"
			   " a range"));
      else if (range_size == 2)
	set_other_error (mismatch_detail, idx,
			 _("expected a range of two offsets"));
      else if (range_size == 4)
	set_other_error (mismatch_detail, idx,
			 _("expected a range of four offsets"));
      else
	abort ();
      return false;
    }

  /* The vector group specifier is optional in assembly code.  */
  if (opnd->indexed_za.group_size != 0
      && opnd->indexed_za.group_size != group_size)
    {
      set_invalid_vg_size (mismatch_detail, idx, group_size);
      return false;
    }

  return true;
}
1658 /* General constraint checking based on operand code.
1660 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1661 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1663 This function has to be called after the qualifiers for all operands
1664 have been resolved.
1666 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1667 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1668 of error message during the disassembling where error message is not
1669 wanted. We avoid the dynamic construction of strings of error messages
1670 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1671 use a combination of error code, static string and some integer data to
1672 represent an error. */
1674 static int
1675 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1676 enum aarch64_opnd type,
1677 const aarch64_opcode *opcode,
1678 aarch64_operand_error *mismatch_detail)
1680 unsigned num, modifiers, shift;
1681 unsigned char size;
1682 int64_t imm, min_value, max_value;
1683 uint64_t uvalue, mask;
1684 const aarch64_opnd_info *opnd = opnds + idx;
1685 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1686 int i;
1688 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1690 switch (aarch64_operands[type].op_class)
1692 case AARCH64_OPND_CLASS_INT_REG:
1693 /* Check pair reg constraints for cas* instructions. */
1694 if (type == AARCH64_OPND_PAIRREG)
1696 assert (idx == 1 || idx == 3);
1697 if (opnds[idx - 1].reg.regno % 2 != 0)
1699 set_syntax_error (mismatch_detail, idx - 1,
1700 _("reg pair must start from even reg"));
1701 return 0;
1703 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1705 set_syntax_error (mismatch_detail, idx,
1706 _("reg pair must be contiguous"));
1707 return 0;
1709 break;
1712 /* <Xt> may be optional in some IC and TLBI instructions. */
1713 if (type == AARCH64_OPND_Rt_SYS)
1715 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1716 == AARCH64_OPND_CLASS_SYSTEM));
1717 if (opnds[1].present
1718 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1720 set_other_error (mismatch_detail, idx, _("extraneous register"));
1721 return 0;
1723 if (!opnds[1].present
1724 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1726 set_other_error (mismatch_detail, idx, _("missing register"));
1727 return 0;
1730 switch (qualifier)
1732 case AARCH64_OPND_QLF_WSP:
1733 case AARCH64_OPND_QLF_SP:
1734 if (!aarch64_stack_pointer_p (opnd))
1736 set_other_error (mismatch_detail, idx,
1737 _("stack pointer register expected"));
1738 return 0;
1740 break;
1741 default:
1742 break;
1744 break;
1746 case AARCH64_OPND_CLASS_SVE_REG:
1747 switch (type)
1749 case AARCH64_OPND_SVE_Zm3_INDEX:
1750 case AARCH64_OPND_SVE_Zm3_22_INDEX:
1751 case AARCH64_OPND_SVE_Zm3_19_INDEX:
1752 case AARCH64_OPND_SVE_Zm3_11_INDEX:
1753 case AARCH64_OPND_SVE_Zm4_11_INDEX:
1754 case AARCH64_OPND_SVE_Zm4_INDEX:
1755 size = get_operand_fields_width (get_operand_from_code (type));
1756 shift = get_operand_specific_data (&aarch64_operands[type]);
1757 if (!check_reglane (opnd, mismatch_detail, idx,
1758 "z", 0, (1 << shift) - 1,
1759 0, (1u << (size - shift)) - 1))
1760 return 0;
1761 break;
1763 case AARCH64_OPND_SVE_Zn_INDEX:
1764 size = aarch64_get_qualifier_esize (opnd->qualifier);
1765 if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 31,
1766 0, 64 / size - 1))
1767 return 0;
1768 break;
1770 case AARCH64_OPND_SME_PNn3_INDEX1:
1771 case AARCH64_OPND_SME_PNn3_INDEX2:
1772 size = get_operand_field_width (get_operand_from_code (type), 1);
1773 if (!check_reglane (opnd, mismatch_detail, idx, "pn", 8, 15,
1774 0, (1 << size) - 1))
1775 return 0;
1776 break;
1778 case AARCH64_OPND_SME_Zn_INDEX1_16:
1779 case AARCH64_OPND_SME_Zn_INDEX2_15:
1780 case AARCH64_OPND_SME_Zn_INDEX2_16:
1781 case AARCH64_OPND_SME_Zn_INDEX3_14:
1782 case AARCH64_OPND_SME_Zn_INDEX3_15:
1783 case AARCH64_OPND_SME_Zn_INDEX4_14:
1784 size = get_operand_fields_width (get_operand_from_code (type)) - 5;
1785 if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 31,
1786 0, (1 << size) - 1))
1787 return 0;
1788 break;
1790 case AARCH64_OPND_SME_Zm_INDEX1:
1791 case AARCH64_OPND_SME_Zm_INDEX2:
1792 case AARCH64_OPND_SME_Zm_INDEX3_1:
1793 case AARCH64_OPND_SME_Zm_INDEX3_2:
1794 case AARCH64_OPND_SME_Zm_INDEX3_10:
1795 case AARCH64_OPND_SME_Zm_INDEX4_1:
1796 case AARCH64_OPND_SME_Zm_INDEX4_10:
1797 size = get_operand_fields_width (get_operand_from_code (type)) - 4;
1798 if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 15,
1799 0, (1 << size) - 1))
1800 return 0;
1801 break;
1803 case AARCH64_OPND_SME_Zm:
1804 if (opnd->reg.regno > 15)
1806 set_invalid_regno_error (mismatch_detail, idx, "z", 0, 15);
1807 return 0;
1809 break;
1811 case AARCH64_OPND_SME_PnT_Wm_imm:
1812 size = aarch64_get_qualifier_esize (opnd->qualifier);
1813 max_value = 16 / size - 1;
1814 if (!check_za_access (opnd, mismatch_detail, idx,
1815 12, max_value, 1, 0))
1816 return 0;
1817 break;
1819 default:
1820 break;
1822 break;
1824 case AARCH64_OPND_CLASS_SVE_REGLIST:
1825 switch (type)
1827 case AARCH64_OPND_SME_Pdx2:
1828 case AARCH64_OPND_SME_Zdnx2:
1829 case AARCH64_OPND_SME_Zdnx4:
1830 case AARCH64_OPND_SME_Zmx2:
1831 case AARCH64_OPND_SME_Zmx4:
1832 case AARCH64_OPND_SME_Znx2:
1833 case AARCH64_OPND_SME_Znx4:
1834 num = get_operand_specific_data (&aarch64_operands[type]);
1835 if (!check_reglist (opnd, mismatch_detail, idx, num, 1))
1836 return 0;
1837 if ((opnd->reglist.first_regno % num) != 0)
1839 set_other_error (mismatch_detail, idx,
1840 _("start register out of range"));
1841 return 0;
1843 break;
1845 case AARCH64_OPND_SME_Ztx2_STRIDED:
1846 case AARCH64_OPND_SME_Ztx4_STRIDED:
1847 /* 2-register lists have a stride of 8 and 4-register lists
1848 have a stride of 4. */
1849 num = get_operand_specific_data (&aarch64_operands[type]);
1850 if (!check_reglist (opnd, mismatch_detail, idx, num, 16 / num))
1851 return 0;
1852 num = 16 | (opnd->reglist.stride - 1);
1853 if ((opnd->reglist.first_regno & ~num) != 0)
1855 set_other_error (mismatch_detail, idx,
1856 _("start register out of range"));
1857 return 0;
1859 break;
1861 case AARCH64_OPND_SME_PdxN:
1862 case AARCH64_OPND_SVE_ZnxN:
1863 case AARCH64_OPND_SVE_ZtxN:
1864 num = get_opcode_dependent_value (opcode);
1865 if (!check_reglist (opnd, mismatch_detail, idx, num, 1))
1866 return 0;
1867 break;
1869 default:
1870 abort ();
1872 break;
1874 case AARCH64_OPND_CLASS_ZA_ACCESS:
1875 switch (type)
1877 case AARCH64_OPND_SME_ZA_HV_idx_src:
1878 case AARCH64_OPND_SME_ZA_HV_idx_dest:
1879 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
1880 size = aarch64_get_qualifier_esize (opnd->qualifier);
1881 max_value = 16 / size - 1;
1882 if (!check_za_access (opnd, mismatch_detail, idx, 12, max_value, 1,
1883 get_opcode_dependent_value (opcode)))
1884 return 0;
1885 break;
1887 case AARCH64_OPND_SME_ZA_array_off4:
1888 if (!check_za_access (opnd, mismatch_detail, idx, 12, 15, 1,
1889 get_opcode_dependent_value (opcode)))
1890 return 0;
1891 break;
1893 case AARCH64_OPND_SME_ZA_array_off3_0:
1894 case AARCH64_OPND_SME_ZA_array_off3_5:
1895 if (!check_za_access (opnd, mismatch_detail, idx, 8, 7, 1,
1896 get_opcode_dependent_value (opcode)))
1897 return 0;
1898 break;
1900 case AARCH64_OPND_SME_ZA_array_off1x4:
1901 if (!check_za_access (opnd, mismatch_detail, idx, 8, 1, 4,
1902 get_opcode_dependent_value (opcode)))
1903 return 0;
1904 break;
1906 case AARCH64_OPND_SME_ZA_array_off2x2:
1907 if (!check_za_access (opnd, mismatch_detail, idx, 8, 3, 2,
1908 get_opcode_dependent_value (opcode)))
1909 return 0;
1910 break;
1912 case AARCH64_OPND_SME_ZA_array_off2x4:
1913 if (!check_za_access (opnd, mismatch_detail, idx, 8, 3, 4,
1914 get_opcode_dependent_value (opcode)))
1915 return 0;
1916 break;
1918 case AARCH64_OPND_SME_ZA_array_off3x2:
1919 if (!check_za_access (opnd, mismatch_detail, idx, 8, 7, 2,
1920 get_opcode_dependent_value (opcode)))
1921 return 0;
1922 break;
1924 case AARCH64_OPND_SME_ZA_HV_idx_srcxN:
1925 case AARCH64_OPND_SME_ZA_HV_idx_destxN:
1926 size = aarch64_get_qualifier_esize (opnd->qualifier);
1927 num = get_opcode_dependent_value (opcode);
1928 max_value = 16 / num / size;
1929 if (max_value > 0)
1930 max_value -= 1;
1931 if (!check_za_access (opnd, mismatch_detail, idx,
1932 12, max_value, num, 0))
1933 return 0;
1934 break;
1936 default:
1937 abort ();
1939 break;
1941 case AARCH64_OPND_CLASS_PRED_REG:
1942 switch (type)
1944 case AARCH64_OPND_SME_PNd3:
1945 case AARCH64_OPND_SME_PNg3:
1946 if (opnd->reg.regno < 8)
1948 set_invalid_regno_error (mismatch_detail, idx, "pn", 8, 15);
1949 return 0;
1951 break;
1953 default:
1954 if (opnd->reg.regno >= 8
1955 && get_operand_fields_width (get_operand_from_code (type)) == 3)
1957 set_invalid_regno_error (mismatch_detail, idx, "p", 0, 7);
1958 return 0;
1960 break;
1962 break;
1964 case AARCH64_OPND_CLASS_COND:
1965 if (type == AARCH64_OPND_COND1
1966 && (opnds[idx].cond->value & 0xe) == 0xe)
1968 /* Not allow AL or NV. */
1969 set_syntax_error (mismatch_detail, idx, NULL);
1971 break;
1973 case AARCH64_OPND_CLASS_ADDRESS:
1974 /* Check writeback. */
1975 switch (opcode->iclass)
1977 case ldst_pos:
1978 case ldst_unscaled:
1979 case ldstnapair_offs:
1980 case ldstpair_off:
1981 case ldst_unpriv:
1982 if (opnd->addr.writeback == 1)
1984 set_syntax_error (mismatch_detail, idx,
1985 _("unexpected address writeback"));
1986 return 0;
1988 break;
1989 case ldst_imm10:
1990 if (opnd->addr.writeback == 1 && opnd->addr.preind != 1)
1992 set_syntax_error (mismatch_detail, idx,
1993 _("unexpected address writeback"));
1994 return 0;
1996 break;
1997 case ldst_imm9:
1998 case ldstpair_indexed:
1999 case asisdlsep:
2000 case asisdlsop:
2001 if (opnd->addr.writeback == 0)
2003 set_syntax_error (mismatch_detail, idx,
2004 _("address writeback expected"));
2005 return 0;
2007 break;
2008 default:
2009 assert (opnd->addr.writeback == 0);
2010 break;
2012 switch (type)
2014 case AARCH64_OPND_ADDR_SIMM7:
2015 /* Scaled signed 7 bits immediate offset. */
2016 /* Get the size of the data element that is accessed, which may be
2017 different from that of the source register size,
2018 e.g. in strb/ldrb. */
2019 size = aarch64_get_qualifier_esize (opnd->qualifier);
2020 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
2022 set_offset_out_of_range_error (mismatch_detail, idx,
2023 -64 * size, 63 * size);
2024 return 0;
2026 if (!value_aligned_p (opnd->addr.offset.imm, size))
2028 set_unaligned_error (mismatch_detail, idx, size);
2029 return 0;
2031 break;
2032 case AARCH64_OPND_ADDR_OFFSET:
2033 case AARCH64_OPND_ADDR_SIMM9:
2034 /* Unscaled signed 9 bits immediate offset. */
2035 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
2037 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
2038 return 0;
2040 break;
2042 case AARCH64_OPND_ADDR_SIMM9_2:
2043 /* Unscaled signed 9 bits immediate offset, which has to be negative
2044 or unaligned. */
2045 size = aarch64_get_qualifier_esize (qualifier);
2046 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
2047 && !value_aligned_p (opnd->addr.offset.imm, size))
2048 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
2049 return 1;
2050 set_other_error (mismatch_detail, idx,
2051 _("negative or unaligned offset expected"));
2052 return 0;
2054 case AARCH64_OPND_ADDR_SIMM10:
2055 /* Scaled signed 10 bits immediate offset. */
2056 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4088))
2058 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4088);
2059 return 0;
2061 if (!value_aligned_p (opnd->addr.offset.imm, 8))
2063 set_unaligned_error (mismatch_detail, idx, 8);
2064 return 0;
2066 break;
2068 case AARCH64_OPND_ADDR_SIMM11:
2069 /* Signed 11 bits immediate offset (multiple of 16). */
2070 if (!value_in_range_p (opnd->addr.offset.imm, -1024, 1008))
2072 set_offset_out_of_range_error (mismatch_detail, idx, -1024, 1008);
2073 return 0;
2076 if (!value_aligned_p (opnd->addr.offset.imm, 16))
2078 set_unaligned_error (mismatch_detail, idx, 16);
2079 return 0;
2081 break;
2083 case AARCH64_OPND_ADDR_SIMM13:
2084 /* Signed 13 bits immediate offset (multiple of 16). */
2085 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4080))
2087 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4080);
2088 return 0;
2091 if (!value_aligned_p (opnd->addr.offset.imm, 16))
2093 set_unaligned_error (mismatch_detail, idx, 16);
2094 return 0;
2096 break;
2098 case AARCH64_OPND_SIMD_ADDR_POST:
2099 /* AdvSIMD load/store multiple structures, post-index. */
2100 assert (idx == 1);
2101 if (opnd->addr.offset.is_reg)
2103 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
2104 return 1;
2105 else
2107 set_other_error (mismatch_detail, idx,
2108 _("invalid register offset"));
2109 return 0;
2112 else
2114 const aarch64_opnd_info *prev = &opnds[idx-1];
2115 unsigned num_bytes; /* total number of bytes transferred. */
2116 /* The opcode dependent area stores the number of elements in
2117 each structure to be loaded/stored. */
2118 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
2119 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
2120 /* Special handling of loading single structure to all lane. */
2121 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
2122 * aarch64_get_qualifier_esize (prev->qualifier);
2123 else
2124 num_bytes = prev->reglist.num_regs
2125 * aarch64_get_qualifier_esize (prev->qualifier)
2126 * aarch64_get_qualifier_nelem (prev->qualifier);
2127 if ((int) num_bytes != opnd->addr.offset.imm)
2129 set_other_error (mismatch_detail, idx,
2130 _("invalid post-increment amount"));
2131 return 0;
2134 break;
2136 case AARCH64_OPND_ADDR_REGOFF:
2137 /* Get the size of the data element that is accessed, which may be
2138 different from that of the source register size,
2139 e.g. in strb/ldrb. */
2140 size = aarch64_get_qualifier_esize (opnd->qualifier);
2141 /* It is either no shift or shift by the binary logarithm of SIZE. */
2142 if (opnd->shifter.amount != 0
2143 && opnd->shifter.amount != (int)get_logsz (size))
2145 set_other_error (mismatch_detail, idx,
2146 _("invalid shift amount"));
2147 return 0;
2149 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
2150 operators. */
2151 switch (opnd->shifter.kind)
2153 case AARCH64_MOD_UXTW:
2154 case AARCH64_MOD_LSL:
2155 case AARCH64_MOD_SXTW:
2156 case AARCH64_MOD_SXTX: break;
2157 default:
2158 set_other_error (mismatch_detail, idx,
2159 _("invalid extend/shift operator"));
2160 return 0;
2162 break;
2164 case AARCH64_OPND_ADDR_UIMM12:
2165 imm = opnd->addr.offset.imm;
2166 /* Get the size of the data element that is accessed, which may be
2167 different from that of the source register size,
2168 e.g. in strb/ldrb. */
2169 size = aarch64_get_qualifier_esize (qualifier);
2170 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
2172 set_offset_out_of_range_error (mismatch_detail, idx,
2173 0, 4095 * size);
2174 return 0;
2176 if (!value_aligned_p (opnd->addr.offset.imm, size))
2178 set_unaligned_error (mismatch_detail, idx, size);
2179 return 0;
2181 break;
2183 case AARCH64_OPND_ADDR_PCREL14:
2184 case AARCH64_OPND_ADDR_PCREL19:
2185 case AARCH64_OPND_ADDR_PCREL21:
2186 case AARCH64_OPND_ADDR_PCREL26:
2187 imm = opnd->imm.value;
2188 if (operand_need_shift_by_two (get_operand_from_code (type)))
2190 /* The offset value in a PC-relative branch instruction is alway
2191 4-byte aligned and is encoded without the lowest 2 bits. */
2192 if (!value_aligned_p (imm, 4))
2194 set_unaligned_error (mismatch_detail, idx, 4);
2195 return 0;
2197 /* Right shift by 2 so that we can carry out the following check
2198 canonically. */
2199 imm >>= 2;
2201 size = get_operand_fields_width (get_operand_from_code (type));
2202 if (!value_fit_signed_field_p (imm, size))
2204 set_other_error (mismatch_detail, idx,
2205 _("immediate out of range"));
2206 return 0;
2208 break;
2210 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
2211 if (!value_in_range_p (opnd->addr.offset.imm, 0, 15))
2213 set_offset_out_of_range_error (mismatch_detail, idx, 0, 15);
2214 return 0;
2216 break;
2218 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
2219 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
2220 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
2221 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
2222 min_value = -8;
2223 max_value = 7;
2224 sve_imm_offset_vl:
2225 assert (!opnd->addr.offset.is_reg);
2226 assert (opnd->addr.preind);
2227 num = 1 + get_operand_specific_data (&aarch64_operands[type]);
2228 min_value *= num;
2229 max_value *= num;
2230 if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
2231 || (opnd->shifter.operator_present
2232 && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
2234 set_other_error (mismatch_detail, idx,
2235 _("invalid addressing mode"));
2236 return 0;
2238 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
2240 set_offset_out_of_range_error (mismatch_detail, idx,
2241 min_value, max_value);
2242 return 0;
2244 if (!value_aligned_p (opnd->addr.offset.imm, num))
2246 set_unaligned_error (mismatch_detail, idx, num);
2247 return 0;
2249 break;
2251 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
2252 min_value = -32;
2253 max_value = 31;
2254 goto sve_imm_offset_vl;
2256 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
2257 min_value = -256;
2258 max_value = 255;
2259 goto sve_imm_offset_vl;
2261 case AARCH64_OPND_SVE_ADDR_RI_U6:
2262 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
2263 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
2264 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
2265 min_value = 0;
2266 max_value = 63;
2267 sve_imm_offset:
2268 assert (!opnd->addr.offset.is_reg);
2269 assert (opnd->addr.preind);
2270 num = 1 << get_operand_specific_data (&aarch64_operands[type]);
2271 min_value *= num;
2272 max_value *= num;
2273 if (opnd->shifter.operator_present
2274 || opnd->shifter.amount_present)
2276 set_other_error (mismatch_detail, idx,
2277 _("invalid addressing mode"));
2278 return 0;
2280 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
2282 set_offset_out_of_range_error (mismatch_detail, idx,
2283 min_value, max_value);
2284 return 0;
2286 if (!value_aligned_p (opnd->addr.offset.imm, num))
2288 set_unaligned_error (mismatch_detail, idx, num);
2289 return 0;
2291 break;
2293 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
2294 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
2295 min_value = -8;
2296 max_value = 7;
2297 goto sve_imm_offset;
2299 case AARCH64_OPND_SVE_ADDR_ZX:
2300 /* Everything is already ensured by parse_operands or
2301 aarch64_ext_sve_addr_rr_lsl (because this is a very specific
2302 argument type). */
2303 assert (opnd->addr.offset.is_reg);
2304 assert (opnd->addr.preind);
2305 assert ((aarch64_operands[type].flags & OPD_F_NO_ZR) == 0);
2306 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2307 assert (opnd->shifter.operator_present == 0);
2308 break;
2310 case AARCH64_OPND_SVE_ADDR_R:
2311 case AARCH64_OPND_SVE_ADDR_RR:
2312 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
2313 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
2314 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
2315 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
2316 case AARCH64_OPND_SVE_ADDR_RX:
2317 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
2318 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
2319 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
2320 case AARCH64_OPND_SVE_ADDR_RZ:
2321 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
2322 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
2323 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
2324 modifiers = 1 << AARCH64_MOD_LSL;
2325 sve_rr_operand:
2326 assert (opnd->addr.offset.is_reg);
2327 assert (opnd->addr.preind);
2328 if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
2329 && opnd->addr.offset.regno == 31)
2331 set_other_error (mismatch_detail, idx,
2332 _("index register xzr is not allowed"));
2333 return 0;
2335 if (((1 << opnd->shifter.kind) & modifiers) == 0
2336 || (opnd->shifter.amount
2337 != get_operand_specific_data (&aarch64_operands[type])))
2339 set_other_error (mismatch_detail, idx,
2340 _("invalid addressing mode"));
2341 return 0;
2343 break;
2345 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
2346 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
2347 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
2348 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
2349 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
2350 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
2351 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
2352 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
2353 modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
2354 goto sve_rr_operand;
2356 case AARCH64_OPND_SVE_ADDR_ZI_U5:
2357 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
2358 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
2359 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
2360 min_value = 0;
2361 max_value = 31;
2362 goto sve_imm_offset;
2364 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
2365 modifiers = 1 << AARCH64_MOD_LSL;
2366 sve_zz_operand:
2367 assert (opnd->addr.offset.is_reg);
2368 assert (opnd->addr.preind);
2369 if (((1 << opnd->shifter.kind) & modifiers) == 0
2370 || opnd->shifter.amount < 0
2371 || opnd->shifter.amount > 3)
2373 set_other_error (mismatch_detail, idx,
2374 _("invalid addressing mode"));
2375 return 0;
2377 break;
2379 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
2380 modifiers = (1 << AARCH64_MOD_SXTW);
2381 goto sve_zz_operand;
2383 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
2384 modifiers = 1 << AARCH64_MOD_UXTW;
2385 goto sve_zz_operand;
2387 default:
2388 break;
2390 break;
2392 case AARCH64_OPND_CLASS_SIMD_REGLIST:
2393 if (type == AARCH64_OPND_LEt)
2395 /* Get the upper bound for the element index. */
2396 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
2397 if (!value_in_range_p (opnd->reglist.index, 0, num))
2399 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2400 return 0;
2403 /* The opcode dependent area stores the number of elements in
2404 each structure to be loaded/stored. */
2405 num = get_opcode_dependent_value (opcode);
2406 switch (type)
2408 case AARCH64_OPND_LVt:
2409 assert (num >= 1 && num <= 4);
2410 /* Unless LD1/ST1, the number of registers should be equal to that
2411 of the structure elements. */
2412 if (num != 1 && !check_reglist (opnd, mismatch_detail, idx, num, 1))
2413 return 0;
2414 break;
2415 case AARCH64_OPND_LVt_AL:
2416 case AARCH64_OPND_LEt:
2417 assert (num >= 1 && num <= 4);
2418 /* The number of registers should be equal to that of the structure
2419 elements. */
2420 if (!check_reglist (opnd, mismatch_detail, idx, num, 1))
2421 return 0;
2422 break;
2423 default:
2424 break;
2426 if (opnd->reglist.stride != 1)
2428 set_reg_list_stride_error (mismatch_detail, idx, 1);
2429 return 0;
2431 break;
2433 case AARCH64_OPND_CLASS_IMMEDIATE:
2434 /* Constraint check on immediate operand. */
2435 imm = opnd->imm.value;
2436 /* E.g. imm_0_31 constrains value to be 0..31. */
2437 if (qualifier_value_in_range_constraint_p (qualifier)
2438 && !value_in_range_p (imm, get_lower_bound (qualifier),
2439 get_upper_bound (qualifier)))
2441 set_imm_out_of_range_error (mismatch_detail, idx,
2442 get_lower_bound (qualifier),
2443 get_upper_bound (qualifier));
2444 return 0;
2447 switch (type)
2449 case AARCH64_OPND_AIMM:
2450 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2452 set_other_error (mismatch_detail, idx,
2453 _("invalid shift operator"));
2454 return 0;
2456 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
2458 set_other_error (mismatch_detail, idx,
2459 _("shift amount must be 0 or 12"));
2460 return 0;
2462 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
2464 set_other_error (mismatch_detail, idx,
2465 _("immediate out of range"));
2466 return 0;
2468 break;
2470 case AARCH64_OPND_HALF:
2471 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
2472 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2474 set_other_error (mismatch_detail, idx,
2475 _("invalid shift operator"));
2476 return 0;
2478 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2479 if (!value_aligned_p (opnd->shifter.amount, 16))
2481 set_other_error (mismatch_detail, idx,
2482 _("shift amount must be a multiple of 16"));
2483 return 0;
2485 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
2487 set_sft_amount_out_of_range_error (mismatch_detail, idx,
2488 0, size * 8 - 16);
2489 return 0;
2491 if (opnd->imm.value < 0)
2493 set_other_error (mismatch_detail, idx,
2494 _("negative immediate value not allowed"));
2495 return 0;
2497 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
2499 set_other_error (mismatch_detail, idx,
2500 _("immediate out of range"));
2501 return 0;
2503 break;
2505 case AARCH64_OPND_IMM_MOV:
2507 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2508 imm = opnd->imm.value;
2509 assert (idx == 1);
2510 switch (opcode->op)
2512 case OP_MOV_IMM_WIDEN:
2513 imm = ~imm;
2514 /* Fall through. */
2515 case OP_MOV_IMM_WIDE:
2516 if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
2518 set_other_error (mismatch_detail, idx,
2519 _("immediate out of range"));
2520 return 0;
2522 break;
2523 case OP_MOV_IMM_LOG:
2524 if (!aarch64_logical_immediate_p (imm, esize, NULL))
2526 set_other_error (mismatch_detail, idx,
2527 _("immediate out of range"));
2528 return 0;
2530 break;
2531 default:
2532 assert (0);
2533 return 0;
2536 break;
2538 case AARCH64_OPND_NZCV:
2539 case AARCH64_OPND_CCMP_IMM:
2540 case AARCH64_OPND_EXCEPTION:
2541 case AARCH64_OPND_UNDEFINED:
2542 case AARCH64_OPND_TME_UIMM16:
2543 case AARCH64_OPND_UIMM4:
2544 case AARCH64_OPND_UIMM4_ADDG:
2545 case AARCH64_OPND_UIMM7:
2546 case AARCH64_OPND_UIMM3_OP1:
2547 case AARCH64_OPND_UIMM3_OP2:
2548 case AARCH64_OPND_SVE_UIMM3:
2549 case AARCH64_OPND_SVE_UIMM7:
2550 case AARCH64_OPND_SVE_UIMM8:
2551 case AARCH64_OPND_SVE_UIMM8_53:
2552 case AARCH64_OPND_CSSC_UIMM8:
2553 size = get_operand_fields_width (get_operand_from_code (type));
2554 assert (size < 32);
2555 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
2557 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2558 (1u << size) - 1);
2559 return 0;
2561 break;
2563 case AARCH64_OPND_UIMM10:
2564 /* Scaled unsigned 10 bits immediate offset. */
2565 if (!value_in_range_p (opnd->imm.value, 0, 1008))
2567 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1008);
2568 return 0;
2571 if (!value_aligned_p (opnd->imm.value, 16))
2573 set_unaligned_error (mismatch_detail, idx, 16);
2574 return 0;
2576 break;
2578 case AARCH64_OPND_SIMM5:
2579 case AARCH64_OPND_SVE_SIMM5:
2580 case AARCH64_OPND_SVE_SIMM5B:
2581 case AARCH64_OPND_SVE_SIMM6:
2582 case AARCH64_OPND_SVE_SIMM8:
2583 case AARCH64_OPND_CSSC_SIMM8:
2584 size = get_operand_fields_width (get_operand_from_code (type));
2585 assert (size < 32);
2586 if (!value_fit_signed_field_p (opnd->imm.value, size))
2588 set_imm_out_of_range_error (mismatch_detail, idx,
2589 -(1 << (size - 1)),
2590 (1 << (size - 1)) - 1);
2591 return 0;
2593 break;
2595 case AARCH64_OPND_WIDTH:
2596 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
2597 && opnds[0].type == AARCH64_OPND_Rd);
2598 size = get_upper_bound (qualifier);
2599 if (opnd->imm.value + opnds[idx-1].imm.value > size)
2600 /* lsb+width <= reg.size */
2602 set_imm_out_of_range_error (mismatch_detail, idx, 1,
2603 size - opnds[idx-1].imm.value);
2604 return 0;
2606 break;
2608 case AARCH64_OPND_LIMM:
2609 case AARCH64_OPND_SVE_LIMM:
2611 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2612 uint64_t uimm = opnd->imm.value;
2613 if (opcode->op == OP_BIC)
2614 uimm = ~uimm;
2615 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2617 set_other_error (mismatch_detail, idx,
2618 _("immediate out of range"));
2619 return 0;
2622 break;
2624 case AARCH64_OPND_IMM0:
2625 case AARCH64_OPND_FPIMM0:
2626 if (opnd->imm.value != 0)
2628 set_other_error (mismatch_detail, idx,
2629 _("immediate zero expected"));
2630 return 0;
2632 break;
2634 case AARCH64_OPND_IMM_ROT1:
2635 case AARCH64_OPND_IMM_ROT2:
2636 case AARCH64_OPND_SVE_IMM_ROT2:
2637 if (opnd->imm.value != 0
2638 && opnd->imm.value != 90
2639 && opnd->imm.value != 180
2640 && opnd->imm.value != 270)
2642 set_other_error (mismatch_detail, idx,
2643 _("rotate expected to be 0, 90, 180 or 270"));
2644 return 0;
2646 break;
2648 case AARCH64_OPND_IMM_ROT3:
2649 case AARCH64_OPND_SVE_IMM_ROT1:
2650 case AARCH64_OPND_SVE_IMM_ROT3:
2651 if (opnd->imm.value != 90 && opnd->imm.value != 270)
2653 set_other_error (mismatch_detail, idx,
2654 _("rotate expected to be 90 or 270"));
2655 return 0;
2657 break;
2659 case AARCH64_OPND_SHLL_IMM:
2660 assert (idx == 2);
2661 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2662 if (opnd->imm.value != size)
2664 set_other_error (mismatch_detail, idx,
2665 _("invalid shift amount"));
2666 return 0;
2668 break;
2670 case AARCH64_OPND_IMM_VLSL:
2671 size = aarch64_get_qualifier_esize (qualifier);
2672 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
2674 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2675 size * 8 - 1);
2676 return 0;
2678 break;
2680 case AARCH64_OPND_IMM_VLSR:
2681 size = aarch64_get_qualifier_esize (qualifier);
2682 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2684 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
2685 return 0;
2687 break;
2689 case AARCH64_OPND_SIMD_IMM:
2690 case AARCH64_OPND_SIMD_IMM_SFT:
2691 /* Qualifier check. */
2692 switch (qualifier)
2694 case AARCH64_OPND_QLF_LSL:
2695 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2697 set_other_error (mismatch_detail, idx,
2698 _("invalid shift operator"));
2699 return 0;
2701 break;
2702 case AARCH64_OPND_QLF_MSL:
2703 if (opnd->shifter.kind != AARCH64_MOD_MSL)
2705 set_other_error (mismatch_detail, idx,
2706 _("invalid shift operator"));
2707 return 0;
2709 break;
2710 case AARCH64_OPND_QLF_NIL:
2711 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2713 set_other_error (mismatch_detail, idx,
2714 _("shift is not permitted"));
2715 return 0;
2717 break;
2718 default:
2719 assert (0);
2720 return 0;
2722 /* Is the immediate valid? */
2723 assert (idx == 1);
2724 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2726 /* uimm8 or simm8 */
2727 if (!value_in_range_p (opnd->imm.value, -128, 255))
2729 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2730 return 0;
2733 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2735 /* uimm64 is not
2736 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2737 ffffffffgggggggghhhhhhhh'. */
2738 set_other_error (mismatch_detail, idx,
2739 _("invalid value for immediate"));
2740 return 0;
2742 /* Is the shift amount valid? */
2743 switch (opnd->shifter.kind)
2745 case AARCH64_MOD_LSL:
2746 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2747 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2749 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2750 (size - 1) * 8);
2751 return 0;
2753 if (!value_aligned_p (opnd->shifter.amount, 8))
2755 set_unaligned_error (mismatch_detail, idx, 8);
2756 return 0;
2758 break;
2759 case AARCH64_MOD_MSL:
2760 /* Only 8 and 16 are valid shift amount. */
2761 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
2763 set_other_error (mismatch_detail, idx,
2764 _("shift amount must be 0 or 16"));
2765 return 0;
2767 break;
2768 default:
2769 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2771 set_other_error (mismatch_detail, idx,
2772 _("invalid shift operator"));
2773 return 0;
2775 break;
2777 break;
2779 case AARCH64_OPND_FPIMM:
2780 case AARCH64_OPND_SIMD_FPIMM:
2781 case AARCH64_OPND_SVE_FPIMM8:
2782 if (opnd->imm.is_fp == 0)
2784 set_other_error (mismatch_detail, idx,
2785 _("floating-point immediate expected"));
2786 return 0;
2788 /* The value is expected to be an 8-bit floating-point constant with
2789 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2790 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2791 instruction). */
2792 if (!value_in_range_p (opnd->imm.value, 0, 255))
2794 set_other_error (mismatch_detail, idx,
2795 _("immediate out of range"));
2796 return 0;
2798 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2800 set_other_error (mismatch_detail, idx,
2801 _("invalid shift operator"));
2802 return 0;
2804 break;
2806 case AARCH64_OPND_SVE_AIMM:
2807 min_value = 0;
2808 sve_aimm:
2809 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2810 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2811 mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
2812 uvalue = opnd->imm.value;
2813 shift = opnd->shifter.amount;
2814 if (size == 1)
2816 if (shift != 0)
2818 set_other_error (mismatch_detail, idx,
2819 _("no shift amount allowed for"
2820 " 8-bit constants"));
2821 return 0;
2824 else
2826 if (shift != 0 && shift != 8)
2828 set_other_error (mismatch_detail, idx,
2829 _("shift amount must be 0 or 8"));
2830 return 0;
2832 if (shift == 0 && (uvalue & 0xff) == 0)
2834 shift = 8;
2835 uvalue = (int64_t) uvalue / 256;
2838 mask >>= shift;
2839 if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
2841 set_other_error (mismatch_detail, idx,
2842 _("immediate too big for element size"));
2843 return 0;
2845 uvalue = (uvalue - min_value) & mask;
2846 if (uvalue > 0xff)
2848 set_other_error (mismatch_detail, idx,
2849 _("invalid arithmetic immediate"));
2850 return 0;
2852 break;
2854 case AARCH64_OPND_SVE_ASIMM:
2855 min_value = -128;
2856 goto sve_aimm;
2858 case AARCH64_OPND_SVE_I1_HALF_ONE:
2859 assert (opnd->imm.is_fp);
2860 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
2862 set_other_error (mismatch_detail, idx,
2863 _("floating-point value must be 0.5 or 1.0"));
2864 return 0;
2866 break;
2868 case AARCH64_OPND_SVE_I1_HALF_TWO:
2869 assert (opnd->imm.is_fp);
2870 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
2872 set_other_error (mismatch_detail, idx,
2873 _("floating-point value must be 0.5 or 2.0"));
2874 return 0;
2876 break;
2878 case AARCH64_OPND_SVE_I1_ZERO_ONE:
2879 assert (opnd->imm.is_fp);
2880 if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
2882 set_other_error (mismatch_detail, idx,
2883 _("floating-point value must be 0.0 or 1.0"));
2884 return 0;
2886 break;
2888 case AARCH64_OPND_SVE_INV_LIMM:
2890 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2891 uint64_t uimm = ~opnd->imm.value;
2892 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2894 set_other_error (mismatch_detail, idx,
2895 _("immediate out of range"));
2896 return 0;
2899 break;
2901 case AARCH64_OPND_SVE_LIMM_MOV:
2903 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2904 uint64_t uimm = opnd->imm.value;
2905 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2907 set_other_error (mismatch_detail, idx,
2908 _("immediate out of range"));
2909 return 0;
2911 if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
2913 set_other_error (mismatch_detail, idx,
2914 _("invalid replicated MOV immediate"));
2915 return 0;
2918 break;
2920 case AARCH64_OPND_SVE_PATTERN_SCALED:
2921 assert (opnd->shifter.kind == AARCH64_MOD_MUL);
2922 if (!value_in_range_p (opnd->shifter.amount, 1, 16))
2924 set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
2925 return 0;
2927 break;
2929 case AARCH64_OPND_SVE_SHLIMM_PRED:
2930 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
2931 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
2932 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2933 if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
2935 set_imm_out_of_range_error (mismatch_detail, idx,
2936 0, 8 * size - 1);
2937 return 0;
2939 break;
2941 case AARCH64_OPND_SME_SHRIMM4:
2942 size = 1 << get_operand_fields_width (get_operand_from_code (type));
2943 if (!value_in_range_p (opnd->imm.value, 1, size))
2945 set_imm_out_of_range_error (mismatch_detail, idx, 1, size);
2946 return 0;
2948 break;
2950 case AARCH64_OPND_SME_SHRIMM5:
2951 case AARCH64_OPND_SVE_SHRIMM_PRED:
2952 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
2953 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
2954 num = (type == AARCH64_OPND_SVE_SHRIMM_UNPRED_22) ? 2 : 1;
2955 size = aarch64_get_qualifier_esize (opnds[idx - num].qualifier);
2956 if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
2958 set_imm_out_of_range_error (mismatch_detail, idx, 1, 8*size);
2959 return 0;
2961 break;
2963 case AARCH64_OPND_SME_ZT0_INDEX:
2964 if (!value_in_range_p (opnd->imm.value, 0, 56))
2966 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, 56);
2967 return 0;
2969 if (opnd->imm.value % 8 != 0)
2971 set_other_error (mismatch_detail, idx,
2972 _("byte index must be a multiple of 8"));
2973 return 0;
2975 break;
2977 default:
2978 break;
2980 break;
2982 case AARCH64_OPND_CLASS_SYSTEM:
2983 switch (type)
2985 case AARCH64_OPND_PSTATEFIELD:
2986 for (i = 0; aarch64_pstatefields[i].name; ++i)
2987 if (aarch64_pstatefields[i].value == opnd->pstatefield)
2988 break;
2989 assert (aarch64_pstatefields[i].name);
2990 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
2991 max_value = F_GET_REG_MAX_VALUE (aarch64_pstatefields[i].flags);
2992 if (opnds[1].imm.value < 0 || opnds[1].imm.value > max_value)
2994 set_imm_out_of_range_error (mismatch_detail, 1, 0, max_value);
2995 return 0;
2997 break;
2998 case AARCH64_OPND_PRFOP:
2999 if (opcode->iclass == ldst_regoff && opnd->prfop->value >= 24)
3001 set_other_error (mismatch_detail, idx,
3002 _("the register-index form of PRFM does"
3003 " not accept opcodes in the range 24-31"));
3004 return 0;
3006 break;
3007 default:
3008 break;
3010 break;
3012 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
3013 /* Get the upper bound for the element index. */
3014 if (opcode->op == OP_FCMLA_ELEM)
3015 /* FCMLA index range depends on the vector size of other operands
3016 and is halfed because complex numbers take two elements. */
3017 num = aarch64_get_qualifier_nelem (opnds[0].qualifier)
3018 * aarch64_get_qualifier_esize (opnds[0].qualifier) / 2;
3019 else
3020 num = 16;
3021 num = num / aarch64_get_qualifier_esize (qualifier) - 1;
3022 assert (aarch64_get_qualifier_nelem (qualifier) == 1);
3024 /* Index out-of-range. */
3025 if (!value_in_range_p (opnd->reglane.index, 0, num))
3027 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
3028 return 0;
3030 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
3031 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
3032 number is encoded in "size:M:Rm":
3033 size <Vm>
3034 00 RESERVED
3035 01 0:Rm
3036 10 M:Rm
3037 11 RESERVED */
3038 if (type == AARCH64_OPND_Em16 && qualifier == AARCH64_OPND_QLF_S_H
3039 && !value_in_range_p (opnd->reglane.regno, 0, 15))
3041 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
3042 return 0;
3044 break;
3046 case AARCH64_OPND_CLASS_MODIFIED_REG:
3047 assert (idx == 1 || idx == 2);
3048 switch (type)
3050 case AARCH64_OPND_Rm_EXT:
3051 if (!aarch64_extend_operator_p (opnd->shifter.kind)
3052 && opnd->shifter.kind != AARCH64_MOD_LSL)
3054 set_other_error (mismatch_detail, idx,
3055 _("extend operator expected"));
3056 return 0;
3058 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
3059 (i.e. SP), in which case it defaults to LSL. The LSL alias is
3060 only valid when "Rd" or "Rn" is '11111', and is preferred in that
3061 case. */
3062 if (!aarch64_stack_pointer_p (opnds + 0)
3063 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
3065 if (!opnd->shifter.operator_present)
3067 set_other_error (mismatch_detail, idx,
3068 _("missing extend operator"));
3069 return 0;
3071 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
3073 set_other_error (mismatch_detail, idx,
3074 _("'LSL' operator not allowed"));
3075 return 0;
3078 assert (opnd->shifter.operator_present /* Default to LSL. */
3079 || opnd->shifter.kind == AARCH64_MOD_LSL);
3080 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
3082 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
3083 return 0;
3085 /* In the 64-bit form, the final register operand is written as Wm
3086 for all but the (possibly omitted) UXTX/LSL and SXTX
3087 operators.
3088 N.B. GAS allows X register to be used with any operator as a
3089 programming convenience. */
3090 if (qualifier == AARCH64_OPND_QLF_X
3091 && opnd->shifter.kind != AARCH64_MOD_LSL
3092 && opnd->shifter.kind != AARCH64_MOD_UXTX
3093 && opnd->shifter.kind != AARCH64_MOD_SXTX)
3095 set_other_error (mismatch_detail, idx, _("W register expected"));
3096 return 0;
3098 break;
3100 case AARCH64_OPND_Rm_SFT:
3101 /* ROR is not available to the shifted register operand in
3102 arithmetic instructions. */
3103 if (!aarch64_shift_operator_p (opnd->shifter.kind))
3105 set_other_error (mismatch_detail, idx,
3106 _("shift operator expected"));
3107 return 0;
3109 if (opnd->shifter.kind == AARCH64_MOD_ROR
3110 && opcode->iclass != log_shift)
3112 set_other_error (mismatch_detail, idx,
3113 _("'ROR' operator not allowed"));
3114 return 0;
3116 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
3117 if (!value_in_range_p (opnd->shifter.amount, 0, num))
3119 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
3120 return 0;
3122 break;
3124 default:
3125 break;
3127 break;
3129 default:
3130 break;
3133 return 1;
3136 /* Main entrypoint for the operand constraint checking.
3138 Return 1 if operands of *INST meet the constraint applied by the operand
3139 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
3140 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
3141 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
3142 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
3143 error kind when it is notified that an instruction does not pass the check).
3145 Un-determined operand qualifiers may get established during the process. */
3148 aarch64_match_operands_constraint (aarch64_inst *inst,
3149 aarch64_operand_error *mismatch_detail)
3151 int i;
3153 DEBUG_TRACE ("enter");
3155 i = inst->opcode->tied_operand;
3157 if (i > 0)
3159 /* Check for tied_operands with specific opcode iclass. */
3160 switch (inst->opcode->iclass)
3162 /* For SME LDR and STR instructions #imm must have the same numerical
3163 value for both operands.
3165 case sme_ldr:
3166 case sme_str:
3167 assert (inst->operands[0].type == AARCH64_OPND_SME_ZA_array_off4);
3168 assert (inst->operands[1].type == AARCH64_OPND_SME_ADDR_RI_U4xVL);
3169 if (inst->operands[0].indexed_za.index.imm
3170 != inst->operands[1].addr.offset.imm)
3172 if (mismatch_detail)
3174 mismatch_detail->kind = AARCH64_OPDE_UNTIED_IMMS;
3175 mismatch_detail->index = i;
3177 return 0;
3179 break;
3181 default:
3183 /* Check for cases where a source register needs to be the
3184 same as the destination register. Do this before
3185 matching qualifiers since if an instruction has both
3186 invalid tying and invalid qualifiers, the error about
3187 qualifiers would suggest several alternative instructions
3188 that also have invalid tying. */
3189 enum aarch64_operand_class op_class1
3190 = aarch64_get_operand_class (inst->operands[0].type);
3191 enum aarch64_operand_class op_class2
3192 = aarch64_get_operand_class (inst->operands[i].type);
3193 assert (op_class1 == op_class2);
3194 if (op_class1 == AARCH64_OPND_CLASS_SVE_REGLIST
3195 ? ((inst->operands[0].reglist.first_regno
3196 != inst->operands[i].reglist.first_regno)
3197 || (inst->operands[0].reglist.num_regs
3198 != inst->operands[i].reglist.num_regs)
3199 || (inst->operands[0].reglist.stride
3200 != inst->operands[i].reglist.stride))
3201 : (inst->operands[0].reg.regno
3202 != inst->operands[i].reg.regno))
3204 if (mismatch_detail)
3206 mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
3207 mismatch_detail->index = i;
3208 mismatch_detail->error = NULL;
3210 return 0;
3212 break;
3217 /* Match operands' qualifier.
3218 *INST has already had qualifier establish for some, if not all, of
3219 its operands; we need to find out whether these established
3220 qualifiers match one of the qualifier sequence in
3221 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
3222 with the corresponding qualifier in such a sequence.
3223 Only basic operand constraint checking is done here; the more thorough
3224 constraint checking will carried out by operand_general_constraint_met_p,
3225 which has be to called after this in order to get all of the operands'
3226 qualifiers established. */
3227 int invalid_count;
3228 if (match_operands_qualifier (inst, true /* update_p */,
3229 &invalid_count) == 0)
3231 DEBUG_TRACE ("FAIL on operand qualifier matching");
3232 if (mismatch_detail)
3234 /* Return an error type to indicate that it is the qualifier
3235 matching failure; we don't care about which operand as there
3236 are enough information in the opcode table to reproduce it. */
3237 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
3238 mismatch_detail->index = -1;
3239 mismatch_detail->error = NULL;
3240 mismatch_detail->data[0].i = invalid_count;
3242 return 0;
3245 /* Match operands' constraint. */
3246 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3248 enum aarch64_opnd type = inst->opcode->operands[i];
3249 if (type == AARCH64_OPND_NIL)
3250 break;
3251 if (inst->operands[i].skip)
3253 DEBUG_TRACE ("skip the incomplete operand %d", i);
3254 continue;
3256 if (operand_general_constraint_met_p (inst->operands, i, type,
3257 inst->opcode, mismatch_detail) == 0)
3259 DEBUG_TRACE ("FAIL on operand %d", i);
3260 return 0;
3264 DEBUG_TRACE ("PASS");
3266 return 1;
3269 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
3270 Also updates the TYPE of each INST->OPERANDS with the corresponding
3271 value of OPCODE->OPERANDS.
3273 Note that some operand qualifiers may need to be manually cleared by
3274 the caller before it further calls the aarch64_opcode_encode; by
3275 doing this, it helps the qualifier matching facilities work
3276 properly. */
3278 const aarch64_opcode*
3279 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
3281 int i;
3282 const aarch64_opcode *old = inst->opcode;
3284 inst->opcode = opcode;
3286 /* Update the operand types. */
3287 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3289 inst->operands[i].type = opcode->operands[i];
3290 if (opcode->operands[i] == AARCH64_OPND_NIL)
3291 break;
3294 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
3296 return old;
3300 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
3302 int i;
3303 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3304 if (operands[i] == operand)
3305 return i;
3306 else if (operands[i] == AARCH64_OPND_NIL)
3307 break;
3308 return -1;
/* R0...R30, followed by FOR31.  */
#define BANK(R, FOR31) \
  { R (0), R (1), R (2), R (3), R (4), R (5), R (6), R (7),	\
    R (8), R (9), R (10), R (11), R (12), R (13), R (14), R (15),   \
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
    R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }

/* Integer register names, indexed as [has_zr][is_64][regno]; see
   get_int_reg_name below.
   [0][0]  32-bit integer regs with sp   Wn
   [0][1]  64-bit integer regs with sp   Xn  sf=1
   [1][0]  32-bit integer regs with #0   Wn
   [1][1]  64-bit integer regs with #0   Xn  sf=1 */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
#undef R64
#undef R32
};

/* Names of the SVE vector registers, first with .S suffixes,
   then with .D suffixes.  Indexed as [is_d][regno]; see
   get_addr_sve_reg_name below.  */

static const char *sve_reg[2][32] = {
#define ZS(X) "z" #X ".s"
#define ZD(X) "z" #X ".d"
  BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
#undef ZD
#undef ZS
};
#undef BANK
3342 /* Return the integer register name.
3343 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
3345 static inline const char *
3346 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
3348 const int has_zr = sp_reg_p ? 0 : 1;
3349 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
3350 return int_reg[has_zr][is_64][regno];
3353 /* Like get_int_reg_name, but IS_64 is always 1. */
3355 static inline const char *
3356 get_64bit_int_reg_name (int regno, int sp_reg_p)
3358 const int has_zr = sp_reg_p ? 0 : 1;
3359 return int_reg[has_zr][1][regno];
3362 /* Get the name of the integer offset register in OPND, using the shift type
3363 to decide whether it's a word or doubleword. */
3365 static inline const char *
3366 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
3368 switch (opnd->shifter.kind)
3370 case AARCH64_MOD_UXTW:
3371 case AARCH64_MOD_SXTW:
3372 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
3374 case AARCH64_MOD_LSL:
3375 case AARCH64_MOD_SXTX:
3376 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
3378 default:
3379 abort ();
3383 /* Get the name of the SVE vector offset register in OPND, using the operand
3384 qualifier to decide whether the suffix should be .S or .D. */
3386 static inline const char *
3387 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
3389 assert (qualifier == AARCH64_OPND_QLF_S_S
3390 || qualifier == AARCH64_OPND_QLF_S_D);
3391 return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
/* Types for expanding an encoded 8-bit value to a floating-point value.
   Each union allows viewing the same storage either as an integer bit
   pattern or as an IEEE floating-point value.  */

/* 64-bit pattern <-> double.  */
typedef union
{
  uint64_t i;
  double d;
} double_conv_t;

/* 32-bit pattern <-> single.  */
typedef union
{
  uint32_t i;
  float f;
} single_conv_t;

/* Half-precision values are expanded to single precision before use
   (see expand_fp_imm), hence the 32-bit storage here.  */
typedef union
{
  uint32_t i;
  float f;
} half_conv_t;
/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  const uint32_t sign = (imm8 >> 7) & 0x01;	/* imm8<7>  */
  const uint32_t low7 = imm8 & 0x7f;		/* imm8<6:0>  */
  const uint32_t exp_top = low7 >> 6;		/* imm8<6>  */
  /* Replicate(imm8<6>, 4): either 0b1111 or 0b0000.  */
  const uint32_t exp_rep4 = exp_top ? 0xf : 0;
  uint64_t result = 0;

  if (size == 8)
    {
      /* Build the top 32 bits of the double, then shift into place; the
	 low half of the fraction field is always zero.  */
      uint32_t top = (sign << (63 - 32))		/* imm8<7>  */
	| ((exp_top ^ 1) << (62 - 32))			/* NOT(imm8<6>)  */
	| (exp_rep4 << (58 - 32))			/* Replicate(imm8<6>,4)  */
	| (exp_top << (57 - 32)) | (exp_top << (56 - 32))
	| (exp_top << (55 - 32))			/* Replicate(imm8<6>,7)  */
	| (low7 << (48 - 32));				/* imm8<6>:imm8<5:0>  */
      result = (uint64_t) top << 32;
    }
  else if (size == 4 || size == 2)
    {
      result = (sign << 31)			/* imm8<7>  */
	| ((exp_top ^ 1) << 30)			/* NOT(imm8<6>)  */
	| (exp_rep4 << 26)			/* Replicate(imm8<6>,4)  */
	| (low7 << 19);				/* imm8<6>:imm8<5:0>  */
    }
  else
    {
      /* An unsupported size.  */
      assert (0);
    }

  return result;
}
3458 /* Return a string based on FMT with the register style applied. */
3460 static const char *
3461 style_reg (struct aarch64_styler *styler, const char *fmt, ...)
3463 const char *txt;
3464 va_list ap;
3466 va_start (ap, fmt);
3467 txt = styler->apply_style (styler, dis_style_register, fmt, ap);
3468 va_end (ap);
3470 return txt;
3473 /* Return a string based on FMT with the immediate style applied. */
3475 static const char *
3476 style_imm (struct aarch64_styler *styler, const char *fmt, ...)
3478 const char *txt;
3479 va_list ap;
3481 va_start (ap, fmt);
3482 txt = styler->apply_style (styler, dis_style_immediate, fmt, ap);
3483 va_end (ap);
3485 return txt;
3488 /* Return a string based on FMT with the sub-mnemonic style applied. */
3490 static const char *
3491 style_sub_mnem (struct aarch64_styler *styler, const char *fmt, ...)
3493 const char *txt;
3494 va_list ap;
3496 va_start (ap, fmt);
3497 txt = styler->apply_style (styler, dis_style_sub_mnemonic, fmt, ap);
3498 va_end (ap);
3500 return txt;
3503 /* Return a string based on FMT with the address style applied. */
3505 static const char *
3506 style_addr (struct aarch64_styler *styler, const char *fmt, ...)
3508 const char *txt;
3509 va_list ap;
3511 va_start (ap, fmt);
3512 txt = styler->apply_style (styler, dis_style_address, fmt, ap);
3513 va_end (ap);
3515 return txt;
/* Produce the string representation of the register list operand *OPND
   in the buffer pointed by BUF of size SIZE.  PREFIX is the part of
   the register name that comes before the register number, such as "v".  */
static void
print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
		     const char *prefix, struct aarch64_styler *styler)
{
  /* Predicate registers ("p" prefix) number 0-15; the other register
     files handled here number 0-31.  Register-number arithmetic wraps
     around using this mask.  */
  const int mask = (prefix[0] == 'p' ? 15 : 31);
  const int num_regs = opnd->reglist.num_regs;
  const int stride = opnd->reglist.stride;
  const int first_reg = opnd->reglist.first_regno;
  /* Last register in the list; may wrap past the end of the file.  */
  const int last_reg = (first_reg + (num_regs - 1) * stride) & mask;
  const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
  char tb[16];	/* Temporary buffer.  */

  assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
  assert (num_regs >= 1 && num_regs <= 4);

  /* Prepare the index if any.  */
  if (opnd->reglist.has_index)
    /* PR 21096: The %100 is to silence a warning about possible truncation.  */
    snprintf (tb, sizeof (tb), "[%s]",
	      style_imm (styler, "%" PRIi64, (opnd->reglist.index % 100)));
  else
    tb[0] = '\0';

  /* The hyphenated form is preferred for disassembly if there are
     more than two registers in the list, and the register numbers
     are monotonically increasing in increments of one.  */
  if (stride == 1 && num_regs > 1)
    snprintf (buf, size, "{%s-%s}%s",
	      style_reg (styler, "%s%d.%s", prefix, first_reg, qlf_name),
	      style_reg (styler, "%s%d.%s", prefix, last_reg, qlf_name), tb);
  else
    {
      /* Non-unit stride (or a single register): spell out each register
	 explicitly, wrapping register numbers with MASK.  */
      const int reg0 = first_reg;
      const int reg1 = (first_reg + stride) & mask;
      const int reg2 = (first_reg + stride * 2) & mask;
      const int reg3 = (first_reg + stride * 3) & mask;

      switch (num_regs)
	{
	case 1:
	  snprintf (buf, size, "{%s}%s",
		    style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
		    tb);
	  break;
	case 2:
	  snprintf (buf, size, "{%s, %s}%s",
		    style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name),
		    tb);
	  break;
	case 3:
	  snprintf (buf, size, "{%s, %s, %s}%s",
		    style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg2, qlf_name),
		    tb);
	  break;
	case 4:
	  snprintf (buf, size, "{%s, %s, %s, %s}%s",
		    style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg2, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg3, qlf_name),
		    tb);
	  break;
	}
    }
}
/* Print the register+immediate address in OPND to BUF, which has SIZE
   characters.  BASE is the name of the base register.  */

static void
print_immediate_offset_address (char *buf, size_t size,
				const aarch64_opnd_info *opnd,
				const char *base,
				struct aarch64_styler *styler)
{
  if (opnd->addr.writeback)
    {
      if (opnd->addr.preind)
	{
	  /* Pre-indexed: [base, #imm]! — but omit a zero immediate for
	     the SIMM10 form.  */
	  if (opnd->type == AARCH64_OPND_ADDR_SIMM10 && !opnd->addr.offset.imm)
	    snprintf (buf, size, "[%s]!", style_reg (styler, base));
	  else
	    snprintf (buf, size, "[%s, %s]!",
		      style_reg (styler, base),
		      style_imm (styler, "#%d", opnd->addr.offset.imm));
	}
      else
	/* Post-indexed: [base], #imm.  */
	snprintf (buf, size, "[%s], %s",
		  style_reg (styler, base),
		  style_imm (styler, "#%d", opnd->addr.offset.imm));
    }
  else
    {
      if (opnd->shifter.operator_present)
	{
	  /* SVE vector-length-scaled offset: [base, #imm, mul vl].  */
	  assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
	  snprintf (buf, size, "[%s, %s, %s]",
		    style_reg (styler, base),
		    style_imm (styler, "#%d", opnd->addr.offset.imm),
		    style_sub_mnem (styler, "mul vl"));
	}
      else if (opnd->addr.offset.imm)
	snprintf (buf, size, "[%s, %s]",
		  style_reg (styler, base),
		  style_imm (styler, "#%d", opnd->addr.offset.imm));
      else
	/* Zero offset is omitted entirely.  */
	snprintf (buf, size, "[%s]", style_reg (styler, base));
    }
}
/* Produce the string representation of the register offset address operand
   *OPND in the buffer pointed by BUF of size SIZE.  BASE and OFFSET are
   the names of the base and offset registers.  */
static void
print_register_offset_address (char *buf, size_t size,
			       const aarch64_opnd_info *opnd,
			       const char *base, const char *offset,
			       struct aarch64_styler *styler)
{
  char tb[32];			/* Temporary buffer.  */
  bool print_extend_p = true;
  bool print_amount_p = true;
  const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;

  if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
				|| !opnd->shifter.amount_present))
    {
      /* Not print the shift/extend amount when the amount is zero and
	 when it is not the special case of 8-bit load/store instruction.  */
      print_amount_p = false;
      /* Likewise, no need to print the shift operator LSL in such a
	 situation.  */
      if (opnd->shifter.kind == AARCH64_MOD_LSL)
	print_extend_p = false;
    }

  /* Prepare for the extend/shift.  */
  if (print_extend_p)
    {
      if (print_amount_p)
	snprintf (tb, sizeof (tb), ", %s %s",
		  style_sub_mnem (styler, shift_name),
		  style_imm (styler, "#%" PRIi64,
	/* PR 21096: The %100 is to silence a warning about possible truncation.  */
			     (opnd->shifter.amount % 100)));
      else
	snprintf (tb, sizeof (tb), ", %s",
		  style_sub_mnem (styler, shift_name));
    }
  else
    tb[0] = '\0';

  snprintf (buf, size, "[%s, %s%s]", style_reg (styler, base),
	    style_reg (styler, offset), tb);
}
/* Print ZA tiles from imm8 in ZERO instruction.

   The preferred disassembly of this instruction uses the shortest list of tile
   names that represent the encoded immediate mask.

   For example:
    * An all-ones immediate is disassembled as {ZA}.
    * An all-zeros immediate is disassembled as an empty list { }.  */
static void
print_sme_za_list (char *buf, size_t size, int mask,
		   struct aarch64_styler *styler)
{
  /* Tile names paired with the immediate mask bits each one covers.
     Wider tiles come first so that the greedy scan below yields the
     shortest possible list.  */
  static const struct { const char *name; int bits; } tiles[] = {
    { "za", 0xff },    { "za0.h", 0x55 }, { "za1.h", 0xaa }, { "za0.s", 0x11 },
    { "za1.s", 0x22 }, { "za2.s", 0x44 }, { "za3.s", 0x88 }, { "za0.d", 0x01 },
    { "za1.d", 0x02 }, { "za2.d", 0x04 }, { "za3.d", 0x08 }, { "za4.d", 0x10 },
    { "za5.d", 0x20 }, { "za6.d", 0x40 }, { "za7.d", 0x80 }, { " ", 0x00 },
  };
  size_t idx;
  int len = snprintf (buf, size, "{");

  for (idx = 0; idx < sizeof (tiles) / sizeof (tiles[0]); idx++)
    {
      if ((mask & tiles[idx].bits) == tiles[idx].bits)
	{
	  mask &= ~tiles[idx].bits;
	  /* Separate entries after the first with a comma.  */
	  if (len > 1)
	    len += snprintf (buf + len, size - len, ", ");
	  len += snprintf (buf + len, size - len, "%s",
			   style_reg (styler, tiles[idx].name));
	}
      if (mask == 0)
	break;
    }
  snprintf (buf + len, size - len, "}");
}
3721 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
3722 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
3723 PC, PCREL_P and ADDRESS are used to pass in and return information about
3724 the PC-relative address calculation, where the PC value is passed in
3725 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
3726 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
3727 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
3729 The function serves both the disassembler and the assembler diagnostics
3730 issuer, which is the reason why it lives in this file. */
3732 void
3733 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
3734 const aarch64_opcode *opcode,
3735 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
3736 bfd_vma *address, char** notes,
3737 char *comment, size_t comment_size,
3738 aarch64_feature_set features,
3739 struct aarch64_styler *styler)
3741 unsigned int i, num_conds;
3742 const char *name = NULL;
3743 const aarch64_opnd_info *opnd = opnds + idx;
3744 enum aarch64_modifier_kind kind;
3745 uint64_t addr, enum_value;
3747 if (comment != NULL)
3749 assert (comment_size > 0);
3750 comment[0] = '\0';
3752 else
3753 assert (comment_size == 0);
3755 buf[0] = '\0';
3756 if (pcrel_p)
3757 *pcrel_p = 0;
3759 switch (opnd->type)
3761 case AARCH64_OPND_Rd:
3762 case AARCH64_OPND_Rn:
3763 case AARCH64_OPND_Rm:
3764 case AARCH64_OPND_Rt:
3765 case AARCH64_OPND_Rt2:
3766 case AARCH64_OPND_Rs:
3767 case AARCH64_OPND_Ra:
3768 case AARCH64_OPND_Rt_LS64:
3769 case AARCH64_OPND_Rt_SYS:
3770 case AARCH64_OPND_PAIRREG:
3771 case AARCH64_OPND_SVE_Rm:
3772 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
3773 the <ic_op>, therefore we use opnd->present to override the
3774 generic optional-ness information. */
3775 if (opnd->type == AARCH64_OPND_Rt_SYS)
3777 if (!opnd->present)
3778 break;
3780 /* Omit the operand, e.g. RET. */
3781 else if (optional_operand_p (opcode, idx)
3782 && (opnd->reg.regno
3783 == get_optional_operand_default_value (opcode)))
3784 break;
3785 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3786 || opnd->qualifier == AARCH64_OPND_QLF_X);
3787 snprintf (buf, size, "%s",
3788 style_reg (styler, get_int_reg_name (opnd->reg.regno,
3789 opnd->qualifier, 0)));
3790 break;
3792 case AARCH64_OPND_Rd_SP:
3793 case AARCH64_OPND_Rn_SP:
3794 case AARCH64_OPND_Rt_SP:
3795 case AARCH64_OPND_SVE_Rn_SP:
3796 case AARCH64_OPND_Rm_SP:
3797 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3798 || opnd->qualifier == AARCH64_OPND_QLF_WSP
3799 || opnd->qualifier == AARCH64_OPND_QLF_X
3800 || opnd->qualifier == AARCH64_OPND_QLF_SP);
3801 snprintf (buf, size, "%s",
3802 style_reg (styler, get_int_reg_name (opnd->reg.regno,
3803 opnd->qualifier, 1)));
3804 break;
3806 case AARCH64_OPND_Rm_EXT:
3807 kind = opnd->shifter.kind;
3808 assert (idx == 1 || idx == 2);
3809 if ((aarch64_stack_pointer_p (opnds)
3810 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
3811 && ((opnd->qualifier == AARCH64_OPND_QLF_W
3812 && opnds[0].qualifier == AARCH64_OPND_QLF_W
3813 && kind == AARCH64_MOD_UXTW)
3814 || (opnd->qualifier == AARCH64_OPND_QLF_X
3815 && kind == AARCH64_MOD_UXTX)))
3817 /* 'LSL' is the preferred form in this case. */
3818 kind = AARCH64_MOD_LSL;
3819 if (opnd->shifter.amount == 0)
3821 /* Shifter omitted. */
3822 snprintf (buf, size, "%s",
3823 style_reg (styler,
3824 get_int_reg_name (opnd->reg.regno,
3825 opnd->qualifier, 0)));
3826 break;
3829 if (opnd->shifter.amount)
3830 snprintf (buf, size, "%s, %s %s",
3831 style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
3832 style_sub_mnem (styler, aarch64_operand_modifiers[kind].name),
3833 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
3834 else
3835 snprintf (buf, size, "%s, %s",
3836 style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
3837 style_sub_mnem (styler, aarch64_operand_modifiers[kind].name));
3838 break;
3840 case AARCH64_OPND_Rm_SFT:
3841 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3842 || opnd->qualifier == AARCH64_OPND_QLF_X);
3843 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
3844 snprintf (buf, size, "%s",
3845 style_reg (styler, get_int_reg_name (opnd->reg.regno,
3846 opnd->qualifier, 0)));
3847 else
3848 snprintf (buf, size, "%s, %s %s",
3849 style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
3850 style_sub_mnem (styler, aarch64_operand_modifiers[opnd->shifter.kind].name),
3851 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
3852 break;
3854 case AARCH64_OPND_Fd:
3855 case AARCH64_OPND_Fn:
3856 case AARCH64_OPND_Fm:
3857 case AARCH64_OPND_Fa:
3858 case AARCH64_OPND_Ft:
3859 case AARCH64_OPND_Ft2:
3860 case AARCH64_OPND_Sd:
3861 case AARCH64_OPND_Sn:
3862 case AARCH64_OPND_Sm:
3863 case AARCH64_OPND_SVE_VZn:
3864 case AARCH64_OPND_SVE_Vd:
3865 case AARCH64_OPND_SVE_Vm:
3866 case AARCH64_OPND_SVE_Vn:
3867 snprintf (buf, size, "%s",
3868 style_reg (styler, "%s%d",
3869 aarch64_get_qualifier_name (opnd->qualifier),
3870 opnd->reg.regno));
3871 break;
3873 case AARCH64_OPND_Va:
3874 case AARCH64_OPND_Vd:
3875 case AARCH64_OPND_Vn:
3876 case AARCH64_OPND_Vm:
3877 snprintf (buf, size, "%s",
3878 style_reg (styler, "v%d.%s", opnd->reg.regno,
3879 aarch64_get_qualifier_name (opnd->qualifier)));
3880 break;
3882 case AARCH64_OPND_Ed:
3883 case AARCH64_OPND_En:
3884 case AARCH64_OPND_Em:
3885 case AARCH64_OPND_Em16:
3886 case AARCH64_OPND_SM3_IMM2:
3887 snprintf (buf, size, "%s[%s]",
3888 style_reg (styler, "v%d.%s", opnd->reglane.regno,
3889 aarch64_get_qualifier_name (opnd->qualifier)),
3890 style_imm (styler, "%" PRIi64, opnd->reglane.index));
3891 break;
3893 case AARCH64_OPND_VdD1:
3894 case AARCH64_OPND_VnD1:
3895 snprintf (buf, size, "%s[%s]",
3896 style_reg (styler, "v%d.d", opnd->reg.regno),
3897 style_imm (styler, "1"));
3898 break;
3900 case AARCH64_OPND_LVn:
3901 case AARCH64_OPND_LVt:
3902 case AARCH64_OPND_LVt_AL:
3903 case AARCH64_OPND_LEt:
3904 print_register_list (buf, size, opnd, "v", styler);
3905 break;
3907 case AARCH64_OPND_SVE_Pd:
3908 case AARCH64_OPND_SVE_Pg3:
3909 case AARCH64_OPND_SVE_Pg4_5:
3910 case AARCH64_OPND_SVE_Pg4_10:
3911 case AARCH64_OPND_SVE_Pg4_16:
3912 case AARCH64_OPND_SVE_Pm:
3913 case AARCH64_OPND_SVE_Pn:
3914 case AARCH64_OPND_SVE_Pt:
3915 case AARCH64_OPND_SME_Pm:
3916 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3917 snprintf (buf, size, "%s",
3918 style_reg (styler, "p%d", opnd->reg.regno));
3919 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
3920 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
3921 snprintf (buf, size, "%s",
3922 style_reg (styler, "p%d/%s", opnd->reg.regno,
3923 aarch64_get_qualifier_name (opnd->qualifier)));
3924 else
3925 snprintf (buf, size, "%s",
3926 style_reg (styler, "p%d.%s", opnd->reg.regno,
3927 aarch64_get_qualifier_name (opnd->qualifier)));
3928 break;
3930 case AARCH64_OPND_SVE_PNd:
3931 case AARCH64_OPND_SVE_PNg4_10:
3932 case AARCH64_OPND_SVE_PNn:
3933 case AARCH64_OPND_SVE_PNt:
3934 case AARCH64_OPND_SME_PNd3:
3935 case AARCH64_OPND_SME_PNg3:
3936 case AARCH64_OPND_SME_PNn:
3937 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3938 snprintf (buf, size, "%s",
3939 style_reg (styler, "pn%d", opnd->reg.regno));
3940 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
3941 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
3942 snprintf (buf, size, "%s",
3943 style_reg (styler, "pn%d/%s", opnd->reg.regno,
3944 aarch64_get_qualifier_name (opnd->qualifier)));
3945 else
3946 snprintf (buf, size, "%s",
3947 style_reg (styler, "pn%d.%s", opnd->reg.regno,
3948 aarch64_get_qualifier_name (opnd->qualifier)));
3949 break;
3951 case AARCH64_OPND_SME_Pdx2:
3952 case AARCH64_OPND_SME_PdxN:
3953 print_register_list (buf, size, opnd, "p", styler);
3954 break;
3956 case AARCH64_OPND_SME_PNn3_INDEX1:
3957 case AARCH64_OPND_SME_PNn3_INDEX2:
3958 snprintf (buf, size, "%s[%s]",
3959 style_reg (styler, "pn%d", opnd->reglane.regno),
3960 style_imm (styler, "%" PRIi64, opnd->reglane.index));
3961 break;
3963 case AARCH64_OPND_SVE_Za_5:
3964 case AARCH64_OPND_SVE_Za_16:
3965 case AARCH64_OPND_SVE_Zd:
3966 case AARCH64_OPND_SVE_Zm_5:
3967 case AARCH64_OPND_SVE_Zm_16:
3968 case AARCH64_OPND_SVE_Zn:
3969 case AARCH64_OPND_SVE_Zt:
3970 case AARCH64_OPND_SME_Zm:
3971 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3972 snprintf (buf, size, "%s", style_reg (styler, "z%d", opnd->reg.regno));
3973 else
3974 snprintf (buf, size, "%s",
3975 style_reg (styler, "z%d.%s", opnd->reg.regno,
3976 aarch64_get_qualifier_name (opnd->qualifier)));
3977 break;
3979 case AARCH64_OPND_SVE_ZnxN:
3980 case AARCH64_OPND_SVE_ZtxN:
3981 case AARCH64_OPND_SME_Zdnx2:
3982 case AARCH64_OPND_SME_Zdnx4:
3983 case AARCH64_OPND_SME_Zmx2:
3984 case AARCH64_OPND_SME_Zmx4:
3985 case AARCH64_OPND_SME_Znx2:
3986 case AARCH64_OPND_SME_Znx4:
3987 case AARCH64_OPND_SME_Ztx2_STRIDED:
3988 case AARCH64_OPND_SME_Ztx4_STRIDED:
3989 print_register_list (buf, size, opnd, "z", styler);
3990 break;
3992 case AARCH64_OPND_SVE_Zm3_INDEX:
3993 case AARCH64_OPND_SVE_Zm3_22_INDEX:
3994 case AARCH64_OPND_SVE_Zm3_19_INDEX:
3995 case AARCH64_OPND_SVE_Zm3_11_INDEX:
3996 case AARCH64_OPND_SVE_Zm4_11_INDEX:
3997 case AARCH64_OPND_SVE_Zm4_INDEX:
3998 case AARCH64_OPND_SVE_Zn_INDEX:
3999 case AARCH64_OPND_SME_Zm_INDEX1:
4000 case AARCH64_OPND_SME_Zm_INDEX2:
4001 case AARCH64_OPND_SME_Zm_INDEX3_1:
4002 case AARCH64_OPND_SME_Zm_INDEX3_2:
4003 case AARCH64_OPND_SME_Zm_INDEX3_10:
4004 case AARCH64_OPND_SME_Zm_INDEX4_1:
4005 case AARCH64_OPND_SME_Zm_INDEX4_10:
4006 case AARCH64_OPND_SME_Zn_INDEX1_16:
4007 case AARCH64_OPND_SME_Zn_INDEX2_15:
4008 case AARCH64_OPND_SME_Zn_INDEX2_16:
4009 case AARCH64_OPND_SME_Zn_INDEX3_14:
4010 case AARCH64_OPND_SME_Zn_INDEX3_15:
4011 case AARCH64_OPND_SME_Zn_INDEX4_14:
4012 snprintf (buf, size, "%s[%s]",
4013 (opnd->qualifier == AARCH64_OPND_QLF_NIL
4014 ? style_reg (styler, "z%d", opnd->reglane.regno)
4015 : style_reg (styler, "z%d.%s", opnd->reglane.regno,
4016 aarch64_get_qualifier_name (opnd->qualifier))),
4017 style_imm (styler, "%" PRIi64, opnd->reglane.index));
4018 break;
4020 case AARCH64_OPND_SME_ZAda_2b:
4021 case AARCH64_OPND_SME_ZAda_3b:
4022 snprintf (buf, size, "%s",
4023 style_reg (styler, "za%d.%s", opnd->reg.regno,
4024 aarch64_get_qualifier_name (opnd->qualifier)));
4025 break;
4027 case AARCH64_OPND_SME_ZA_HV_idx_src:
4028 case AARCH64_OPND_SME_ZA_HV_idx_srcxN:
4029 case AARCH64_OPND_SME_ZA_HV_idx_dest:
4030 case AARCH64_OPND_SME_ZA_HV_idx_destxN:
4031 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
4032 snprintf (buf, size, "%s%s[%s, %s%s%s%s%s]%s",
4033 opnd->type == AARCH64_OPND_SME_ZA_HV_idx_ldstr ? "{" : "",
4034 style_reg (styler, "za%d%c.%s",
4035 opnd->indexed_za.regno,
4036 opnd->indexed_za.v == 1 ? 'v' : 'h',
4037 aarch64_get_qualifier_name (opnd->qualifier)),
4038 style_reg (styler, "w%d", opnd->indexed_za.index.regno),
4039 style_imm (styler, "%" PRIi64, opnd->indexed_za.index.imm),
4040 opnd->indexed_za.index.countm1 ? ":" : "",
4041 (opnd->indexed_za.index.countm1
4042 ? style_imm (styler, "%d",
4043 opnd->indexed_za.index.imm
4044 + opnd->indexed_za.index.countm1)
4045 : ""),
4046 opnd->indexed_za.group_size ? ", " : "",
4047 opnd->indexed_za.group_size == 2
4048 ? style_sub_mnem (styler, "vgx2")
4049 : opnd->indexed_za.group_size == 4
4050 ? style_sub_mnem (styler, "vgx4") : "",
4051 opnd->type == AARCH64_OPND_SME_ZA_HV_idx_ldstr ? "}" : "");
4052 break;
4054 case AARCH64_OPND_SME_list_of_64bit_tiles:
4055 print_sme_za_list (buf, size, opnd->reg.regno, styler);
4056 break;
4058 case AARCH64_OPND_SME_ZA_array_off1x4:
4059 case AARCH64_OPND_SME_ZA_array_off2x2:
4060 case AARCH64_OPND_SME_ZA_array_off2x4:
4061 case AARCH64_OPND_SME_ZA_array_off3_0:
4062 case AARCH64_OPND_SME_ZA_array_off3_5:
4063 case AARCH64_OPND_SME_ZA_array_off3x2:
4064 case AARCH64_OPND_SME_ZA_array_off4:
4065 snprintf (buf, size, "%s[%s, %s%s%s%s%s]",
4066 style_reg (styler, "za%s%s",
4067 opnd->qualifier == AARCH64_OPND_QLF_NIL ? "" : ".",
4068 (opnd->qualifier == AARCH64_OPND_QLF_NIL
4069 ? ""
4070 : aarch64_get_qualifier_name (opnd->qualifier))),
4071 style_reg (styler, "w%d", opnd->indexed_za.index.regno),
4072 style_imm (styler, "%" PRIi64, opnd->indexed_za.index.imm),
4073 opnd->indexed_za.index.countm1 ? ":" : "",
4074 (opnd->indexed_za.index.countm1
4075 ? style_imm (styler, "%d",
4076 opnd->indexed_za.index.imm
4077 + opnd->indexed_za.index.countm1)
4078 : ""),
4079 opnd->indexed_za.group_size ? ", " : "",
4080 opnd->indexed_za.group_size == 2
4081 ? style_sub_mnem (styler, "vgx2")
4082 : opnd->indexed_za.group_size == 4
4083 ? style_sub_mnem (styler, "vgx4") : "");
4084 break;
4086 case AARCH64_OPND_SME_SM_ZA:
4087 snprintf (buf, size, "%s",
4088 style_reg (styler, opnd->reg.regno == 's' ? "sm" : "za"));
4089 break;
4091 case AARCH64_OPND_SME_PnT_Wm_imm:
4092 snprintf (buf, size, "%s[%s, %s]",
4093 style_reg (styler, "p%d.%s", opnd->indexed_za.regno,
4094 aarch64_get_qualifier_name (opnd->qualifier)),
4095 style_reg (styler, "w%d", opnd->indexed_za.index.regno),
4096 style_imm (styler, "%" PRIi64, opnd->indexed_za.index.imm));
4097 break;
4099 case AARCH64_OPND_SME_VLxN_10:
4100 case AARCH64_OPND_SME_VLxN_13:
4101 enum_value = opnd->imm.value;
4102 assert (enum_value < ARRAY_SIZE (aarch64_sme_vlxn_array));
4103 snprintf (buf, size, "%s",
4104 style_sub_mnem (styler, aarch64_sme_vlxn_array[enum_value]));
4105 break;
4107 case AARCH64_OPND_CRn:
4108 case AARCH64_OPND_CRm:
4109 snprintf (buf, size, "%s",
4110 style_reg (styler, "C%" PRIi64, opnd->imm.value));
4111 break;
4113 case AARCH64_OPND_IDX:
4114 case AARCH64_OPND_MASK:
4115 case AARCH64_OPND_IMM:
4116 case AARCH64_OPND_IMM_2:
4117 case AARCH64_OPND_WIDTH:
4118 case AARCH64_OPND_UIMM3_OP1:
4119 case AARCH64_OPND_UIMM3_OP2:
4120 case AARCH64_OPND_BIT_NUM:
4121 case AARCH64_OPND_IMM_VLSL:
4122 case AARCH64_OPND_IMM_VLSR:
4123 case AARCH64_OPND_SHLL_IMM:
4124 case AARCH64_OPND_IMM0:
4125 case AARCH64_OPND_IMMR:
4126 case AARCH64_OPND_IMMS:
4127 case AARCH64_OPND_UNDEFINED:
4128 case AARCH64_OPND_FBITS:
4129 case AARCH64_OPND_TME_UIMM16:
4130 case AARCH64_OPND_SIMM5:
4131 case AARCH64_OPND_SME_SHRIMM4:
4132 case AARCH64_OPND_SME_SHRIMM5:
4133 case AARCH64_OPND_SVE_SHLIMM_PRED:
4134 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
4135 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
4136 case AARCH64_OPND_SVE_SHRIMM_PRED:
4137 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
4138 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
4139 case AARCH64_OPND_SVE_SIMM5:
4140 case AARCH64_OPND_SVE_SIMM5B:
4141 case AARCH64_OPND_SVE_SIMM6:
4142 case AARCH64_OPND_SVE_SIMM8:
4143 case AARCH64_OPND_SVE_UIMM3:
4144 case AARCH64_OPND_SVE_UIMM7:
4145 case AARCH64_OPND_SVE_UIMM8:
4146 case AARCH64_OPND_SVE_UIMM8_53:
4147 case AARCH64_OPND_IMM_ROT1:
4148 case AARCH64_OPND_IMM_ROT2:
4149 case AARCH64_OPND_IMM_ROT3:
4150 case AARCH64_OPND_SVE_IMM_ROT1:
4151 case AARCH64_OPND_SVE_IMM_ROT2:
4152 case AARCH64_OPND_SVE_IMM_ROT3:
4153 case AARCH64_OPND_CSSC_SIMM8:
4154 case AARCH64_OPND_CSSC_UIMM8:
4155 snprintf (buf, size, "%s",
4156 style_imm (styler, "#%" PRIi64, opnd->imm.value));
4157 break;
4159 case AARCH64_OPND_SVE_I1_HALF_ONE:
4160 case AARCH64_OPND_SVE_I1_HALF_TWO:
4161 case AARCH64_OPND_SVE_I1_ZERO_ONE:
4163 single_conv_t c;
4164 c.i = opnd->imm.value;
4165 snprintf (buf, size, "%s", style_imm (styler, "#%.1f", c.f));
4166 break;
4169 case AARCH64_OPND_SVE_PATTERN:
4170 if (optional_operand_p (opcode, idx)
4171 && opnd->imm.value == get_optional_operand_default_value (opcode))
4172 break;
4173 enum_value = opnd->imm.value;
4174 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
4175 if (aarch64_sve_pattern_array[enum_value])
4176 snprintf (buf, size, "%s",
4177 style_reg (styler, aarch64_sve_pattern_array[enum_value]));
4178 else
4179 snprintf (buf, size, "%s",
4180 style_imm (styler, "#%" PRIi64, opnd->imm.value));
4181 break;
4183 case AARCH64_OPND_SVE_PATTERN_SCALED:
4184 if (optional_operand_p (opcode, idx)
4185 && !opnd->shifter.operator_present
4186 && opnd->imm.value == get_optional_operand_default_value (opcode))
4187 break;
4188 enum_value = opnd->imm.value;
4189 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
4190 if (aarch64_sve_pattern_array[opnd->imm.value])
4191 snprintf (buf, size, "%s",
4192 style_reg (styler,
4193 aarch64_sve_pattern_array[opnd->imm.value]));
4194 else
4195 snprintf (buf, size, "%s",
4196 style_imm (styler, "#%" PRIi64, opnd->imm.value));
4197 if (opnd->shifter.operator_present)
4199 size_t len = strlen (buf);
4200 const char *shift_name
4201 = aarch64_operand_modifiers[opnd->shifter.kind].name;
4202 snprintf (buf + len, size - len, ", %s %s",
4203 style_sub_mnem (styler, shift_name),
4204 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
4206 break;
4208 case AARCH64_OPND_SVE_PRFOP:
4209 enum_value = opnd->imm.value;
4210 assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
4211 if (aarch64_sve_prfop_array[enum_value])
4212 snprintf (buf, size, "%s",
4213 style_reg (styler, aarch64_sve_prfop_array[enum_value]));
4214 else
4215 snprintf (buf, size, "%s",
4216 style_imm (styler, "#%" PRIi64, opnd->imm.value));
4217 break;
4219 case AARCH64_OPND_IMM_MOV:
4220 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
4222 case 4: /* e.g. MOV Wd, #<imm32>. */
4224 int imm32 = opnd->imm.value;
4225 snprintf (buf, size, "%s",
4226 style_imm (styler, "#0x%-20x", imm32));
4227 snprintf (comment, comment_size, "#%d", imm32);
4229 break;
4230 case 8: /* e.g. MOV Xd, #<imm64>. */
4231 snprintf (buf, size, "%s", style_imm (styler, "#0x%-20" PRIx64,
4232 opnd->imm.value));
4233 snprintf (comment, comment_size, "#%" PRIi64, opnd->imm.value);
4234 break;
4235 default:
4236 snprintf (buf, size, "<invalid>");
4237 break;
4239 break;
4241 case AARCH64_OPND_FPIMM0:
4242 snprintf (buf, size, "%s", style_imm (styler, "#0.0"));
4243 break;
4245 case AARCH64_OPND_LIMM:
4246 case AARCH64_OPND_AIMM:
4247 case AARCH64_OPND_HALF:
4248 case AARCH64_OPND_SVE_INV_LIMM:
4249 case AARCH64_OPND_SVE_LIMM:
4250 case AARCH64_OPND_SVE_LIMM_MOV:
4251 if (opnd->shifter.amount)
4252 snprintf (buf, size, "%s, %s %s",
4253 style_imm (styler, "#0x%" PRIx64, opnd->imm.value),
4254 style_sub_mnem (styler, "lsl"),
4255 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
4256 else
4257 snprintf (buf, size, "%s",
4258 style_imm (styler, "#0x%" PRIx64, opnd->imm.value));
4259 break;
4261 case AARCH64_OPND_SIMD_IMM:
4262 case AARCH64_OPND_SIMD_IMM_SFT:
4263 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
4264 || opnd->shifter.kind == AARCH64_MOD_NONE)
4265 snprintf (buf, size, "%s",
4266 style_imm (styler, "#0x%" PRIx64, opnd->imm.value));
4267 else
4268 snprintf (buf, size, "%s, %s %s",
4269 style_imm (styler, "#0x%" PRIx64, opnd->imm.value),
4270 style_sub_mnem (styler, aarch64_operand_modifiers[opnd->shifter.kind].name),
4271 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
4272 break;
4274 case AARCH64_OPND_SVE_AIMM:
4275 case AARCH64_OPND_SVE_ASIMM:
4276 if (opnd->shifter.amount)
4277 snprintf (buf, size, "%s, %s %s",
4278 style_imm (styler, "#%" PRIi64, opnd->imm.value),
4279 style_sub_mnem (styler, "lsl"),
4280 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
4281 else
4282 snprintf (buf, size, "%s",
4283 style_imm (styler, "#%" PRIi64, opnd->imm.value));
4284 break;
4286 case AARCH64_OPND_FPIMM:
4287 case AARCH64_OPND_SIMD_FPIMM:
4288 case AARCH64_OPND_SVE_FPIMM8:
4289 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
4291 case 2: /* e.g. FMOV <Hd>, #<imm>. */
4293 half_conv_t c;
4294 c.i = expand_fp_imm (2, opnd->imm.value);
4295 snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.f));
4297 break;
4298 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
4300 single_conv_t c;
4301 c.i = expand_fp_imm (4, opnd->imm.value);
4302 snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.f));
4304 break;
4305 case 8: /* e.g. FMOV <Sd>, #<imm>. */
4307 double_conv_t c;
4308 c.i = expand_fp_imm (8, opnd->imm.value);
4309 snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.d));
4311 break;
4312 default:
4313 snprintf (buf, size, "<invalid>");
4314 break;
4316 break;
4318 case AARCH64_OPND_CCMP_IMM:
4319 case AARCH64_OPND_NZCV:
4320 case AARCH64_OPND_EXCEPTION:
4321 case AARCH64_OPND_UIMM4:
4322 case AARCH64_OPND_UIMM4_ADDG:
4323 case AARCH64_OPND_UIMM7:
4324 case AARCH64_OPND_UIMM10:
4325 if (optional_operand_p (opcode, idx)
4326 && (opnd->imm.value ==
4327 (int64_t) get_optional_operand_default_value (opcode)))
4328 /* Omit the operand, e.g. DCPS1. */
4329 break;
4330 snprintf (buf, size, "%s",
4331 style_imm (styler, "#0x%x", (unsigned int) opnd->imm.value));
4332 break;
4334 case AARCH64_OPND_COND:
4335 case AARCH64_OPND_COND1:
4336 snprintf (buf, size, "%s",
4337 style_sub_mnem (styler, opnd->cond->names[0]));
4338 num_conds = ARRAY_SIZE (opnd->cond->names);
4339 for (i = 1; i < num_conds && opnd->cond->names[i]; ++i)
4341 size_t len = comment != NULL ? strlen (comment) : 0;
4342 if (i == 1)
4343 snprintf (comment + len, comment_size - len, "%s = %s",
4344 opnd->cond->names[0], opnd->cond->names[i]);
4345 else
4346 snprintf (comment + len, comment_size - len, ", %s",
4347 opnd->cond->names[i]);
4349 break;
4351 case AARCH64_OPND_ADDR_ADRP:
4352 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
4353 + opnd->imm.value;
4354 if (pcrel_p)
4355 *pcrel_p = 1;
4356 if (address)
4357 *address = addr;
4358 /* This is not necessary during the disassembling, as print_address_func
4359 in the disassemble_info will take care of the printing. But some
4360 other callers may be still interested in getting the string in *STR,
4361 so here we do snprintf regardless. */
4362 snprintf (buf, size, "%s", style_addr (styler, "#0x%" PRIx64 , addr));
4363 break;
4365 case AARCH64_OPND_ADDR_PCREL14:
4366 case AARCH64_OPND_ADDR_PCREL19:
4367 case AARCH64_OPND_ADDR_PCREL21:
4368 case AARCH64_OPND_ADDR_PCREL26:
4369 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
4370 if (pcrel_p)
4371 *pcrel_p = 1;
4372 if (address)
4373 *address = addr;
4374 /* This is not necessary during the disassembling, as print_address_func
4375 in the disassemble_info will take care of the printing. But some
4376 other callers may be still interested in getting the string in *STR,
4377 so here we do snprintf regardless. */
4378 snprintf (buf, size, "%s", style_addr (styler, "#0x%" PRIx64, addr));
4379 break;
4381 case AARCH64_OPND_ADDR_SIMPLE:
4382 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
4383 case AARCH64_OPND_SIMD_ADDR_POST:
4384 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
4385 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
4387 if (opnd->addr.offset.is_reg)
4388 snprintf (buf, size, "[%s], %s",
4389 style_reg (styler, name),
4390 style_reg (styler, "x%d", opnd->addr.offset.regno));
4391 else
4392 snprintf (buf, size, "[%s], %s",
4393 style_reg (styler, name),
4394 style_imm (styler, "#%d", opnd->addr.offset.imm));
4396 else
4397 snprintf (buf, size, "[%s]", style_reg (styler, name));
4398 break;
4400 case AARCH64_OPND_ADDR_REGOFF:
4401 case AARCH64_OPND_SVE_ADDR_R:
4402 case AARCH64_OPND_SVE_ADDR_RR:
4403 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
4404 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
4405 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
4406 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
4407 case AARCH64_OPND_SVE_ADDR_RX:
4408 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
4409 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
4410 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
4411 print_register_offset_address
4412 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
4413 get_offset_int_reg_name (opnd), styler);
4414 break;
4416 case AARCH64_OPND_SVE_ADDR_ZX:
4417 print_register_offset_address
4418 (buf, size, opnd,
4419 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
4420 get_64bit_int_reg_name (opnd->addr.offset.regno, 0), styler);
4421 break;
4423 case AARCH64_OPND_SVE_ADDR_RZ:
4424 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
4425 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
4426 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
4427 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
4428 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
4429 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
4430 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
4431 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
4432 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
4433 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
4434 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
4435 print_register_offset_address
4436 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
4437 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier),
4438 styler);
4439 break;
4441 case AARCH64_OPND_ADDR_SIMM7:
4442 case AARCH64_OPND_ADDR_SIMM9:
4443 case AARCH64_OPND_ADDR_SIMM9_2:
4444 case AARCH64_OPND_ADDR_SIMM10:
4445 case AARCH64_OPND_ADDR_SIMM11:
4446 case AARCH64_OPND_ADDR_SIMM13:
4447 case AARCH64_OPND_ADDR_OFFSET:
4448 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
4449 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
4450 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
4451 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
4452 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
4453 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
4454 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
4455 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
4456 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
4457 case AARCH64_OPND_SVE_ADDR_RI_U6:
4458 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
4459 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
4460 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
4461 print_immediate_offset_address
4462 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
4463 styler);
4464 break;
4466 case AARCH64_OPND_SVE_ADDR_ZI_U5:
4467 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
4468 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
4469 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
4470 print_immediate_offset_address
4471 (buf, size, opnd,
4472 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
4473 styler);
4474 break;
4476 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
4477 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
4478 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
4479 print_register_offset_address
4480 (buf, size, opnd,
4481 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
4482 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier),
4483 styler);
4484 break;
4486 case AARCH64_OPND_ADDR_UIMM12:
4487 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
4488 if (opnd->addr.offset.imm)
4489 snprintf (buf, size, "[%s, %s]",
4490 style_reg (styler, name),
4491 style_imm (styler, "#%d", opnd->addr.offset.imm));
4492 else
4493 snprintf (buf, size, "[%s]", style_reg (styler, name));
4494 break;
4496 case AARCH64_OPND_SYSREG:
4497 for (i = 0; aarch64_sys_regs[i].name; ++i)
4499 const aarch64_sys_reg *sr = aarch64_sys_regs + i;
4501 bool exact_match
4502 = (!(sr->flags & (F_REG_READ | F_REG_WRITE))
4503 || (sr->flags & opnd->sysreg.flags) == opnd->sysreg.flags)
4504 && AARCH64_CPU_HAS_ALL_FEATURES (features, sr->features);
4506 /* Try and find an exact match, But if that fails, return the first
4507 partial match that was found. */
4508 if (aarch64_sys_regs[i].value == opnd->sysreg.value
4509 && ! aarch64_sys_reg_deprecated_p (aarch64_sys_regs[i].flags)
4510 && ! aarch64_sys_reg_alias_p (aarch64_sys_regs[i].flags)
4511 && (name == NULL || exact_match))
4513 name = aarch64_sys_regs[i].name;
4514 if (exact_match)
4516 if (notes)
4517 *notes = NULL;
4518 break;
4521 /* If we didn't match exactly, that means the presense of a flag
4522 indicates what we didn't want for this instruction. e.g. If
4523 F_REG_READ is there, that means we were looking for a write
4524 register. See aarch64_ext_sysreg. */
4525 if (aarch64_sys_regs[i].flags & F_REG_WRITE)
4526 *notes = _("reading from a write-only register");
4527 else if (aarch64_sys_regs[i].flags & F_REG_READ)
4528 *notes = _("writing to a read-only register");
4532 if (name)
4533 snprintf (buf, size, "%s", style_reg (styler, name));
4534 else
4536 /* Implementation defined system register. */
4537 unsigned int value = opnd->sysreg.value;
4538 snprintf (buf, size, "%s",
4539 style_reg (styler, "s%u_%u_c%u_c%u_%u",
4540 (value >> 14) & 0x3, (value >> 11) & 0x7,
4541 (value >> 7) & 0xf, (value >> 3) & 0xf,
4542 value & 0x7));
4544 break;
4546 case AARCH64_OPND_PSTATEFIELD:
4547 for (i = 0; aarch64_pstatefields[i].name; ++i)
4548 if (aarch64_pstatefields[i].value == opnd->pstatefield)
4550 /* PSTATEFIELD name is encoded partially in CRm[3:1] for SVCRSM,
4551 SVCRZA and SVCRSMZA. */
4552 uint32_t flags = aarch64_pstatefields[i].flags;
4553 if (flags & F_REG_IN_CRM
4554 && (PSTATE_DECODE_CRM (opnd->sysreg.flags)
4555 != PSTATE_DECODE_CRM (flags)))
4556 continue;
4557 break;
4559 assert (aarch64_pstatefields[i].name);
4560 snprintf (buf, size, "%s",
4561 style_reg (styler, aarch64_pstatefields[i].name));
4562 break;
4564 case AARCH64_OPND_SYSREG_AT:
4565 case AARCH64_OPND_SYSREG_DC:
4566 case AARCH64_OPND_SYSREG_IC:
4567 case AARCH64_OPND_SYSREG_TLBI:
4568 case AARCH64_OPND_SYSREG_SR:
4569 snprintf (buf, size, "%s", style_reg (styler, opnd->sysins_op->name));
4570 break;
4572 case AARCH64_OPND_BARRIER:
4573 case AARCH64_OPND_BARRIER_DSB_NXS:
4575 if (opnd->barrier->name[0] == '#')
4576 snprintf (buf, size, "%s", style_imm (styler, opnd->barrier->name));
4577 else
4578 snprintf (buf, size, "%s",
4579 style_sub_mnem (styler, opnd->barrier->name));
4581 break;
4583 case AARCH64_OPND_BARRIER_ISB:
4584 /* Operand can be omitted, e.g. in DCPS1. */
4585 if (! optional_operand_p (opcode, idx)
4586 || (opnd->barrier->value
4587 != get_optional_operand_default_value (opcode)))
4588 snprintf (buf, size, "%s",
4589 style_imm (styler, "#0x%x", opnd->barrier->value));
4590 break;
4592 case AARCH64_OPND_PRFOP:
4593 if (opnd->prfop->name != NULL)
4594 snprintf (buf, size, "%s", style_sub_mnem (styler, opnd->prfop->name));
4595 else
4596 snprintf (buf, size, "%s", style_imm (styler, "#0x%02x",
4597 opnd->prfop->value));
4598 break;
4600 case AARCH64_OPND_RPRFMOP:
4601 enum_value = opnd->imm.value;
4602 if (enum_value < ARRAY_SIZE (aarch64_rprfmop_array)
4603 && aarch64_rprfmop_array[enum_value])
4604 snprintf (buf, size, "%s",
4605 style_reg (styler, aarch64_rprfmop_array[enum_value]));
4606 else
4607 snprintf (buf, size, "%s",
4608 style_imm (styler, "#%" PRIi64, opnd->imm.value));
4609 break;
4611 case AARCH64_OPND_BARRIER_PSB:
4612 snprintf (buf, size, "%s", style_sub_mnem (styler, "csync"));
4613 break;
4615 case AARCH64_OPND_SME_ZT0:
4616 snprintf (buf, size, "%s", style_reg (styler, "zt0"));
4617 break;
4619 case AARCH64_OPND_SME_ZT0_INDEX:
4620 snprintf (buf, size, "%s[%s]", style_reg (styler, "zt0"),
4621 style_imm (styler, "%d", (int) opnd->imm.value));
4622 break;
4624 case AARCH64_OPND_SME_ZT0_LIST:
4625 snprintf (buf, size, "{%s}", style_reg (styler, "zt0"));
4626 break;
4628 case AARCH64_OPND_BTI_TARGET:
4629 if ((HINT_FLAG (opnd->hint_option->value) & HINT_OPD_F_NOPRINT) == 0)
4630 snprintf (buf, size, "%s",
4631 style_sub_mnem (styler, opnd->hint_option->name));
4632 break;
4634 case AARCH64_OPND_MOPS_ADDR_Rd:
4635 case AARCH64_OPND_MOPS_ADDR_Rs:
4636 snprintf (buf, size, "[%s]!",
4637 style_reg (styler,
4638 get_int_reg_name (opnd->reg.regno,
4639 AARCH64_OPND_QLF_X, 0)));
4640 break;
4642 case AARCH64_OPND_MOPS_WB_Rn:
4643 snprintf (buf, size, "%s!",
4644 style_reg (styler, get_int_reg_name (opnd->reg.regno,
4645 AARCH64_OPND_QLF_X, 0)));
4646 break;
4648 default:
4649 snprintf (buf, size, "<invalid>");
4650 break;
4654 #define CPENC(op0,op1,crn,crm,op2) \
4655 ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
4656 /* for 3.9.3 Instructions for Accessing Special Purpose Registers */
4657 #define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
4658 /* for 3.9.10 System Instructions */
4659 #define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
4661 #define C0 0
4662 #define C1 1
4663 #define C2 2
4664 #define C3 3
4665 #define C4 4
4666 #define C5 5
4667 #define C6 6
4668 #define C7 7
4669 #define C8 8
4670 #define C9 9
4671 #define C10 10
4672 #define C11 11
4673 #define C12 12
4674 #define C13 13
4675 #define C14 14
4676 #define C15 15
/* TODO: there is one more issue that needs to be resolved:
      1. handle CPU-implementation-defined system registers.
4681 Note that the F_REG_{READ,WRITE} flags mean read-only and write-only
4682 respectively. If neither of these are set then the register is read-write. */
/* Table of all named AArch64 system registers, generated from
   aarch64-sys-regs.def via the SYSREG expansion below.  Terminated by a
   sentinel entry whose name is NULL.  */
const aarch64_sys_reg aarch64_sys_regs [] =
{
#define SYSREG(name, encoding, flags, features) \
  { name, encoding, flags, features },
#include "aarch64-sys-regs.def"
  { 0, CPENC (0,0,0,0,0), 0, AARCH64_NO_FEATURES }
#undef SYSREG
};
4692 bool
4693 aarch64_sys_reg_deprecated_p (const uint32_t reg_flags)
4695 return (reg_flags & F_DEPRECATED) != 0;
4698 bool
4699 aarch64_sys_reg_alias_p (const uint32_t reg_flags)
4701 return (reg_flags & F_REG_ALIAS) != 0;
4704 /* The CPENC below is fairly misleading, the fields
4705 here are not in CPENC form. They are in op2op1 form. The fields are encoded
4706 by ins_pstatefield, which just shifts the value by the width of the fields
4707 in a loop. So if you CPENC them only the first value will be set, the rest
4708 are masked out to 0. As an example. op2 = 3, op1=2. CPENC would produce a
4709 value of 0b110000000001000000 (0x30040) while what you want is
4710 0b011010 (0x1a). */
/* MSR (immediate) PSTATE field operands, terminated by a NULL-named
   sentinel.  Values are in op1:op2 form as described in the comment
   above; F_REG_MAX_VALUE bounds the immediate each field accepts.  */
const aarch64_sys_reg aarch64_pstatefields [] =
{
  { "spsel",	0x05, F_REG_MAX_VALUE (1), AARCH64_NO_FEATURES },
  { "daifset",	0x1e, F_REG_MAX_VALUE (15), AARCH64_NO_FEATURES },
  { "daifclr",	0x1f, F_REG_MAX_VALUE (15), AARCH64_NO_FEATURES },
  { "pan",	0x04, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (PAN) },
  { "uao",	0x03, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (V8_2A) },
  { "ssbs",	0x19, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (SSBS) },
  { "dit",	0x1a, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (V8_4A) },
  { "tco",	0x1c, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
  /* The SVCR* fields additionally encode part of their name in CRm[3:1],
     via PSTATE_ENCODE_CRM_AND_IMM.  */
  { "svcrsm",	0x1b, PSTATE_ENCODE_CRM_AND_IMM (0x2,0x1) | F_REG_MAX_VALUE (1)
    | F_ARCHEXT, AARCH64_FEATURE (SME) },
  { "svcrza",	0x1b, PSTATE_ENCODE_CRM_AND_IMM (0x4,0x1) | F_REG_MAX_VALUE (1)
    | F_ARCHEXT, AARCH64_FEATURE (SME) },
  { "svcrsmza",	0x1b, PSTATE_ENCODE_CRM_AND_IMM (0x6,0x1) | F_REG_MAX_VALUE (1)
    | F_ARCHEXT, AARCH64_FEATURE (SME) },
  { "allint",	0x08, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (V8_8A) },
  { 0,	CPENC (0,0,0,0,0), 0, AARCH64_NO_FEATURES },
};
4731 bool
4732 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
4733 const aarch64_sys_reg *reg)
4735 if (!(reg->flags & F_ARCHEXT))
4736 return true;
4738 return AARCH64_CPU_HAS_ALL_FEATURES (features, reg->features);
/* Instruction-cache maintenance (IC) operands.  F_HASXT marks operations
   that take an Xt register operand.  NULL-named sentinel terminated.  */
const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
{
    { "ialluis", CPENS(0,C7,C1,0), 0 },
    { "iallu",   CPENS(0,C7,C5,0), 0 },
    { "ivau",    CPENS (3, C7, C5, 1), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }
};
/* Data-cache maintenance (DC) operands.  F_HASXT marks operations taking
   an Xt operand; F_ARCHEXT marks ones gated on an architecture extension
   (checked by aarch64_sys_ins_reg_supported_p).  Sentinel terminated.  */
const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
{
    { "zva",	    CPENS (3, C7, C4, 1),  F_HASXT },
    { "gva",	    CPENS (3, C7, C4, 3),  F_HASXT | F_ARCHEXT },
    { "gzva",	    CPENS (3, C7, C4, 4),  F_HASXT | F_ARCHEXT },
    { "ivac",	    CPENS (0, C7, C6, 1),  F_HASXT },
    { "igvac",	    CPENS (0, C7, C6, 3),  F_HASXT | F_ARCHEXT },
    { "igsw",	    CPENS (0, C7, C6, 4),  F_HASXT | F_ARCHEXT },
    { "isw",	    CPENS (0, C7, C6, 2),  F_HASXT },
    { "igdvac",	    CPENS (0, C7, C6, 5),  F_HASXT | F_ARCHEXT },
    { "igdsw",	    CPENS (0, C7, C6, 6),  F_HASXT | F_ARCHEXT },
    { "cvac",	    CPENS (3, C7, C10, 1), F_HASXT },
    { "cgvac",	    CPENS (3, C7, C10, 3), F_HASXT | F_ARCHEXT },
    { "cgdvac",	    CPENS (3, C7, C10, 5), F_HASXT | F_ARCHEXT },
    { "csw",	    CPENS (0, C7, C10, 2), F_HASXT },
    { "cgsw",	    CPENS (0, C7, C10, 4), F_HASXT | F_ARCHEXT },
    { "cgdsw",	    CPENS (0, C7, C10, 6), F_HASXT | F_ARCHEXT },
    { "cvau",	    CPENS (3, C7, C11, 1), F_HASXT },
    { "cvap",	    CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
    { "cgvap",	    CPENS (3, C7, C12, 3), F_HASXT | F_ARCHEXT },
    { "cgdvap",	    CPENS (3, C7, C12, 5), F_HASXT | F_ARCHEXT },
    { "cvadp",	    CPENS (3, C7, C13, 1), F_HASXT | F_ARCHEXT },
    { "cgvadp",	    CPENS (3, C7, C13, 3), F_HASXT | F_ARCHEXT },
    { "cgdvadp",    CPENS (3, C7, C13, 5), F_HASXT | F_ARCHEXT },
    { "civac",	    CPENS (3, C7, C14, 1), F_HASXT },
    { "cigvac",	    CPENS (3, C7, C14, 3), F_HASXT | F_ARCHEXT },
    { "cigdvac",    CPENS (3, C7, C14, 5), F_HASXT | F_ARCHEXT },
    { "cisw",	    CPENS (0, C7, C14, 2), F_HASXT },
    { "cigsw",	    CPENS (0, C7, C14, 4), F_HASXT | F_ARCHEXT },
    { "cigdsw",	    CPENS (0, C7, C14, 6), F_HASXT | F_ARCHEXT },
    { "cipapa",	    CPENS (6, C7, C14, 1), F_HASXT },
    { "cigdpapa",   CPENS (6, C7, C14, 5), F_HASXT },
    { 0,       CPENS(0,0,0,0), 0 }
};
/* Address-translation (AT) operands.  All take an Xt operand (F_HASXT);
   the S1E1RP/S1E1WP pair additionally requires an architecture extension
   (Armv8.2 ARMv8.2-ATS1E1).  Sentinel terminated.  */
const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
{
    { "s1e1r",      CPENS (0, C7, C8, 0), F_HASXT },
    { "s1e1w",      CPENS (0, C7, C8, 1), F_HASXT },
    { "s1e0r",      CPENS (0, C7, C8, 2), F_HASXT },
    { "s1e0w",      CPENS (0, C7, C8, 3), F_HASXT },
    { "s12e1r",     CPENS (4, C7, C8, 4), F_HASXT },
    { "s12e1w",     CPENS (4, C7, C8, 5), F_HASXT },
    { "s12e0r",     CPENS (4, C7, C8, 6), F_HASXT },
    { "s12e0w",     CPENS (4, C7, C8, 7), F_HASXT },
    { "s1e2r",      CPENS (4, C7, C8, 0), F_HASXT },
    { "s1e2w",      CPENS (4, C7, C8, 1), F_HASXT },
    { "s1e3r",      CPENS (6, C7, C8, 0), F_HASXT },
    { "s1e3w",      CPENS (6, C7, C8, 1), F_HASXT },
    { "s1e1rp",     CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
    { "s1e1wp",     CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
    { 0,       CPENS(0,0,0,0), 0 }
};
/* TLB-invalidation (TLBI) operands, including the outer-shareable (*os),
   range (r*) and RME (rpaos/paall*) variants.  F_HASXT marks operations
   taking an Xt operand, F_ARCHEXT ones gated on an architecture extension.
   Sentinel terminated.  */
const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
{
    { "vmalle1",   CPENS(0,C8,C7,0), 0 },
    { "vae1",      CPENS (0, C8, C7, 1), F_HASXT },
    { "aside1",    CPENS (0, C8, C7, 2), F_HASXT },
    { "vaae1",     CPENS (0, C8, C7, 3), F_HASXT },
    { "vmalle1is", CPENS(0,C8,C3,0), 0 },
    { "vae1is",    CPENS (0, C8, C3, 1), F_HASXT },
    { "aside1is",  CPENS (0, C8, C3, 2), F_HASXT },
    { "vaae1is",   CPENS (0, C8, C3, 3), F_HASXT },
    { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
    { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
    { "ipas2e1",   CPENS (4, C8, C4, 1), F_HASXT },
    { "ipas2le1",  CPENS (4, C8, C4, 5), F_HASXT },
    { "vae2",      CPENS (4, C8, C7, 1), F_HASXT },
    { "vae2is",    CPENS (4, C8, C3, 1), F_HASXT },
    { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
    { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
    { "vae3",      CPENS (6, C8, C7, 1), F_HASXT },
    { "vae3is",    CPENS (6, C8, C3, 1), F_HASXT },
    { "alle2",     CPENS(4,C8,C7,0), 0 },
    { "alle2is",   CPENS(4,C8,C3,0), 0 },
    { "alle1",     CPENS(4,C8,C7,4), 0 },
    { "alle1is",   CPENS(4,C8,C3,4), 0 },
    { "alle3",     CPENS(6,C8,C7,0), 0 },
    { "alle3is",   CPENS(6,C8,C3,0), 0 },
    { "vale1is",   CPENS (0, C8, C3, 5), F_HASXT },
    { "vale2is",   CPENS (4, C8, C3, 5), F_HASXT },
    { "vale3is",   CPENS (6, C8, C3, 5), F_HASXT },
    { "vaale1is",  CPENS (0, C8, C3, 7), F_HASXT },
    { "vale1",     CPENS (0, C8, C7, 5), F_HASXT },
    { "vale2",     CPENS (4, C8, C7, 5), F_HASXT },
    { "vale3",     CPENS (6, C8, C7, 5), F_HASXT },
    { "vaale1",    CPENS (0, C8, C7, 7), F_HASXT },

    { "vmalle1os",    CPENS (0, C8, C1, 0), F_ARCHEXT },
    { "vae1os",       CPENS (0, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "aside1os",     CPENS (0, C8, C1, 2), F_HASXT | F_ARCHEXT },
    { "vaae1os",      CPENS (0, C8, C1, 3), F_HASXT | F_ARCHEXT },
    { "vale1os",      CPENS (0, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "vaale1os",     CPENS (0, C8, C1, 7), F_HASXT | F_ARCHEXT },
    { "ipas2e1os",    CPENS (4, C8, C4, 0), F_HASXT | F_ARCHEXT },
    { "ipas2le1os",   CPENS (4, C8, C4, 4), F_HASXT | F_ARCHEXT },
    { "vae2os",       CPENS (4, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "vale2os",      CPENS (4, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "vmalls12e1os", CPENS (4, C8, C1, 6), F_ARCHEXT },
    { "vae3os",       CPENS (6, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "vale3os",      CPENS (6, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "alle2os",      CPENS (4, C8, C1, 0), F_ARCHEXT },
    { "alle1os",      CPENS (4, C8, C1, 4), F_ARCHEXT },
    { "alle3os",      CPENS (6, C8, C1, 0), F_ARCHEXT },

    { "rvae1",      CPENS (0, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1",     CPENS (0, C8, C6, 3), F_HASXT | F_ARCHEXT },
    { "rvale1",     CPENS (0, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1",    CPENS (0, C8, C6, 7), F_HASXT | F_ARCHEXT },
    { "rvae1is",    CPENS (0, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1is",   CPENS (0, C8, C2, 3), F_HASXT | F_ARCHEXT },
    { "rvale1is",   CPENS (0, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1is",  CPENS (0, C8, C2, 7), F_HASXT | F_ARCHEXT },
    { "rvae1os",    CPENS (0, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1os",   CPENS (0, C8, C5, 3), F_HASXT | F_ARCHEXT },
    { "rvale1os",   CPENS (0, C8, C5, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1os",  CPENS (0, C8, C5, 7), F_HASXT | F_ARCHEXT },
    { "ripas2e1is", CPENS (4, C8, C0, 2), F_HASXT | F_ARCHEXT },
    { "ripas2le1is",CPENS (4, C8, C0, 6), F_HASXT | F_ARCHEXT },
    { "ripas2e1",   CPENS (4, C8, C4, 2), F_HASXT | F_ARCHEXT },
    { "ripas2le1",  CPENS (4, C8, C4, 6), F_HASXT | F_ARCHEXT },
    { "ripas2e1os", CPENS (4, C8, C4, 3), F_HASXT | F_ARCHEXT },
    { "ripas2le1os",CPENS (4, C8, C4, 7), F_HASXT | F_ARCHEXT },
    { "rvae2",      CPENS (4, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvale2",     CPENS (4, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvae2is",    CPENS (4, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvale2is",   CPENS (4, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvae2os",    CPENS (4, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvale2os",   CPENS (4, C8, C5, 5), F_HASXT | F_ARCHEXT },
    { "rvae3",      CPENS (6, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvale3",     CPENS (6, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvae3is",    CPENS (6, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvale3is",   CPENS (6, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvae3os",    CPENS (6, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvale3os",   CPENS (6, C8, C5, 5), F_HASXT | F_ARCHEXT },

    { "rpaos",      CPENS (6, C8, C4, 3), F_HASXT },
    { "rpalos",     CPENS (6, C8, C4, 7), F_HASXT },
    { "paallos",    CPENS (6, C8, C1, 4), 0},
    { "paall",      CPENS (6, C8, C7, 4), 0},

    { 0,       CPENS(0,0,0,0), 0 }
};
/* Speculation-restriction (CFP/DVP/CPP) operands.  Sentinel terminated.  */
const aarch64_sys_ins_reg aarch64_sys_regs_sr[] =
{
    /* RCTX is somewhat unique in a way that it has different values
       (op2) based on the instruction in which it is used (cfp/dvp/cpp).
       Thus op2 is masked out and instead encoded directly in the
       aarch64_opcode_table entries for the respective instructions.  */
    { "rctx",   CPENS(3,C7,C3,0), F_HASXT | F_ARCHEXT | F_REG_WRITE}, /* WO */

    { 0,       CPENS(0,0,0,0), 0 }
};
4905 bool
4906 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
4908 return (sys_ins_reg->flags & F_HASXT) != 0;
/* Return true if the system instruction operand named REG_NAME, with
   encoding REG_VALUE and flags REG_FLAGS, is available on a CPU with
   feature set FEATURES.  REG_FEATURES, when non-NULL, gives the feature
   set the operand requires; operands predating per-entry feature sets
   are matched against the hard-coded encoding lists below.  */
extern bool
aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
				 const char *reg_name,
				 aarch64_insn reg_value,
				 uint32_t reg_flags,
				 const aarch64_feature_set *reg_features)
{
  /* Armv8-R has no EL3.  */
  if (AARCH64_CPU_HAS_FEATURE (features, V8R))
    {
      const char *suffix = strrchr (reg_name, '_');
      if (suffix && !strcmp (suffix, "_el3"))
	return false;
    }

  /* Operands without an architecture-extension flag are always valid.  */
  if (!(reg_flags & F_ARCHEXT))
    return true;

  if (reg_features
      && AARCH64_CPU_HAS_ALL_FEATURES (features, *reg_features))
    return true;

  /* ARMv8.4 TLB instructions.  */
  if ((reg_value == CPENS (0, C8, C1, 0)
       || reg_value == CPENS (0, C8, C1, 1)
       || reg_value == CPENS (0, C8, C1, 2)
       || reg_value == CPENS (0, C8, C1, 3)
       || reg_value == CPENS (0, C8, C1, 5)
       || reg_value == CPENS (0, C8, C1, 7)
       || reg_value == CPENS (4, C8, C4, 0)
       || reg_value == CPENS (4, C8, C4, 4)
       || reg_value == CPENS (4, C8, C1, 1)
       || reg_value == CPENS (4, C8, C1, 5)
       || reg_value == CPENS (4, C8, C1, 6)
       || reg_value == CPENS (6, C8, C1, 1)
       || reg_value == CPENS (6, C8, C1, 5)
       || reg_value == CPENS (4, C8, C1, 0)
       || reg_value == CPENS (4, C8, C1, 4)
       || reg_value == CPENS (6, C8, C1, 0)
       || reg_value == CPENS (0, C8, C6, 1)
       || reg_value == CPENS (0, C8, C6, 3)
       || reg_value == CPENS (0, C8, C6, 5)
       || reg_value == CPENS (0, C8, C6, 7)
       || reg_value == CPENS (0, C8, C2, 1)
       || reg_value == CPENS (0, C8, C2, 3)
       || reg_value == CPENS (0, C8, C2, 5)
       || reg_value == CPENS (0, C8, C2, 7)
       || reg_value == CPENS (0, C8, C5, 1)
       || reg_value == CPENS (0, C8, C5, 3)
       || reg_value == CPENS (0, C8, C5, 5)
       || reg_value == CPENS (0, C8, C5, 7)
       || reg_value == CPENS (4, C8, C0, 2)
       || reg_value == CPENS (4, C8, C0, 6)
       || reg_value == CPENS (4, C8, C4, 2)
       || reg_value == CPENS (4, C8, C4, 6)
       || reg_value == CPENS (4, C8, C4, 3)
       || reg_value == CPENS (4, C8, C4, 7)
       || reg_value == CPENS (4, C8, C6, 1)
       || reg_value == CPENS (4, C8, C6, 5)
       || reg_value == CPENS (4, C8, C2, 1)
       || reg_value == CPENS (4, C8, C2, 5)
       || reg_value == CPENS (4, C8, C5, 1)
       || reg_value == CPENS (4, C8, C5, 5)
       || reg_value == CPENS (6, C8, C6, 1)
       || reg_value == CPENS (6, C8, C6, 5)
       || reg_value == CPENS (6, C8, C2, 1)
       || reg_value == CPENS (6, C8, C2, 5)
       || reg_value == CPENS (6, C8, C5, 1)
       || reg_value == CPENS (6, C8, C5, 5))
      && AARCH64_CPU_HAS_FEATURE (features, V8_4A))
    return true;

  /* DC CVAP.  Values are from aarch64_sys_regs_dc.  */
  if (reg_value == CPENS (3, C7, C12, 1)
      && AARCH64_CPU_HAS_FEATURE (features, V8_2A))
    return true;

  /* DC CVADP.  Values are from aarch64_sys_regs_dc.  */
  if (reg_value == CPENS (3, C7, C13, 1)
      && AARCH64_CPU_HAS_FEATURE (features, CVADP))
    return true;

  /* DC <dc_op> for ARMv8.5-A Memory Tagging Extension.  */
  if ((reg_value == CPENS (0, C7, C6, 3)
       || reg_value == CPENS (0, C7, C6, 4)
       || reg_value == CPENS (0, C7, C10, 4)
       || reg_value == CPENS (0, C7, C14, 4)
       || reg_value == CPENS (3, C7, C10, 3)
       || reg_value == CPENS (3, C7, C12, 3)
       || reg_value == CPENS (3, C7, C13, 3)
       || reg_value == CPENS (3, C7, C14, 3)
       || reg_value == CPENS (3, C7, C4, 3)
       || reg_value == CPENS (0, C7, C6, 5)
       || reg_value == CPENS (0, C7, C6, 6)
       || reg_value == CPENS (0, C7, C10, 6)
       || reg_value == CPENS (0, C7, C14, 6)
       || reg_value == CPENS (3, C7, C10, 5)
       || reg_value == CPENS (3, C7, C12, 5)
       || reg_value == CPENS (3, C7, C13, 5)
       || reg_value == CPENS (3, C7, C14, 5)
       || reg_value == CPENS (3, C7, C4, 4))
      && AARCH64_CPU_HAS_FEATURE (features, MEMTAG))
    return true;

  /* AT S1E1RP, AT S1E1WP.  Values are from aarch64_sys_regs_at.  */
  if ((reg_value == CPENS (0, C7, C9, 0)
       || reg_value == CPENS (0, C7, C9, 1))
      && AARCH64_CPU_HAS_FEATURE (features, V8_2A))
    return true;

  /* CFP/DVP/CPP RCTX : Value are from aarch64_sys_regs_sr. */
  if (reg_value == CPENS (3, C7, C3, 0)
      && AARCH64_CPU_HAS_FEATURE (features, PREDRES))
    return true;

  return false;
}
5029 #undef C0
5030 #undef C1
5031 #undef C2
5032 #undef C3
5033 #undef C4
5034 #undef C5
5035 #undef C6
5036 #undef C7
5037 #undef C8
5038 #undef C9
5039 #undef C10
5040 #undef C11
5041 #undef C12
5042 #undef C13
5043 #undef C14
5044 #undef C15
5046 #define BIT(INSN,BT) (((INSN) >> (BT)) & 1)
5047 #define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
5049 static enum err_type
5050 verify_ldpsw (const struct aarch64_inst *inst ATTRIBUTE_UNUSED,
5051 const aarch64_insn insn, bfd_vma pc ATTRIBUTE_UNUSED,
5052 bool encoding ATTRIBUTE_UNUSED,
5053 aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
5054 aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
5056 int t = BITS (insn, 4, 0);
5057 int n = BITS (insn, 9, 5);
5058 int t2 = BITS (insn, 14, 10);
5060 if (BIT (insn, 23))
5062 /* Write back enabled. */
5063 if ((t == n || t2 == n) && n != 31)
5064 return ERR_UND;
5067 if (BIT (insn, 22))
5069 /* Load */
5070 if (t == t2)
5071 return ERR_UND;
5074 return ERR_OK;
5077 /* Verifier for vector by element 3 operands functions where the
5078 conditions `if sz:L == 11 then UNDEFINED` holds. */
/* Check the `if sz:L == 11 then UNDEFINED' constraint for vector-by-element
   instructions whose third operand is Em.  Returns ERR_UND on the reserved
   encoding, ERR_OK otherwise.  */
static enum err_type
verify_elem_sd (const struct aarch64_inst *inst, const aarch64_insn insn,
		bfd_vma pc ATTRIBUTE_UNUSED, bool encoding,
		aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
		aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
{
  /* sz:L == 0b11 is the reserved combination.  */
  const aarch64_insn undef_pattern = 0x3;
  aarch64_insn value;

  assert (inst->opcode);
  assert (inst->opcode->operands[2] == AARCH64_OPND_Em);
  /* When assembling (ENCODING), the fully encoded value lives in
     inst->value; when disassembling, use the raw instruction word.  */
  value = encoding ? inst->value : insn;
  assert (value);

  if (undef_pattern == extract_fields (value, 0, 2, FLD_sz, FLD_L))
    return ERR_UND;

  return ERR_OK;
}
5100 /* Check an instruction that takes three register operands and that
5101 requires the register numbers to be distinct from one another. */
/* Verify that the three register operands of INST are pairwise distinct,
   as required by e.g. the MOPS instructions.  Returns ERR_UND and fills
   in MISMATCH_DETAIL on violation, ERR_OK otherwise.  */
static enum err_type
verify_three_different_regs (const struct aarch64_inst *inst,
			     const aarch64_insn insn ATTRIBUTE_UNUSED,
			     bfd_vma pc ATTRIBUTE_UNUSED,
			     bool encoding ATTRIBUTE_UNUSED,
			     aarch64_operand_error *mismatch_detail
			       ATTRIBUTE_UNUSED,
			     aarch64_instr_sequence *insn_sequence
			       ATTRIBUTE_UNUSED)
{
  int rd, rs, rn;

  rd = inst->operands[0].reg.regno;
  rs = inst->operands[1].reg.regno;
  rn = inst->operands[2].reg.regno;
  if (rd == rs || rd == rn || rs == rn)
    {
      /* NOTE(review): mismatch_detail is dereferenced unconditionally
	 despite the (harmless) ATTRIBUTE_UNUSED marking — callers appear
	 to always pass a valid pointer; confirm before reusing elsewhere.  */
      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
      mismatch_detail->error
	= _("the three register operands must be distinct from one another");
      mismatch_detail->index = -1;
      return ERR_UND;
    }

  return ERR_OK;
}
5130 /* Add INST to the end of INSN_SEQUENCE. */
5132 static void
5133 add_insn_to_sequence (const struct aarch64_inst *inst,
5134 aarch64_instr_sequence *insn_sequence)
5136 insn_sequence->instr[insn_sequence->num_added_insns++] = *inst;
5139 /* Initialize an instruction sequence insn_sequence with the instruction INST.
5140 If INST is NULL the given insn_sequence is cleared and the sequence is left
5141 uninitialized. */
5143 void
5144 init_insn_sequence (const struct aarch64_inst *inst,
5145 aarch64_instr_sequence *insn_sequence)
5147 int num_req_entries = 0;
5149 if (insn_sequence->instr)
5151 XDELETE (insn_sequence->instr);
5152 insn_sequence->instr = NULL;
5155 /* Handle all the cases here. May need to think of something smarter than
5156 a giant if/else chain if this grows. At that time, a lookup table may be
5157 best. */
5158 if (inst && inst->opcode->constraints & C_SCAN_MOVPRFX)
5159 num_req_entries = 1;
5160 if (inst && (inst->opcode->constraints & C_SCAN_MOPS_PME) == C_SCAN_MOPS_P)
5161 num_req_entries = 2;
5163 insn_sequence->num_added_insns = 0;
5164 insn_sequence->num_allocated_insns = num_req_entries;
5166 if (num_req_entries != 0)
5168 insn_sequence->instr = XCNEWVEC (aarch64_inst, num_req_entries);
5169 add_insn_to_sequence (inst, insn_sequence);
/* Subroutine of verify_constraints.  Check whether the instruction
   is part of a MOPS P/M/E sequence and, if so, whether sequencing
   expectations are met.  Return true if the check passes, otherwise
   describe the problem in MISMATCH_DETAIL.

   IS_NEW_SECTION is true if INST is assumed to start a new section.
   The other arguments are as for verify_constraints.  */

static bool
verify_mops_pme_sequence (const struct aarch64_inst *inst,
			  bool is_new_section,
			  aarch64_operand_error *mismatch_detail,
			  aarch64_instr_sequence *insn_sequence)
{
  const struct aarch64_opcode *opcode;
  const struct aarch64_inst *prev_insn;
  int i;

  opcode = inst->opcode;
  /* PREV_INSN is the last instruction recorded in the open sequence,
     or NULL if no sequence is being tracked.  */
  if (insn_sequence->instr)
    prev_insn = insn_sequence->instr + (insn_sequence->num_added_insns - 1);
  else
    prev_insn = NULL;

  /* NOTE(review): the checks below rely on the P, M and E opcode entries
     being consecutive in the opcode table, so that "the expected next
     instruction" is literally `opcode - 1` / `opcode[1]` — this invariant
     is established by the generated table in aarch64-tbl.h.  */
  if (prev_insn
      && (prev_insn->opcode->constraints & C_SCAN_MOPS_PME)
      && prev_insn->opcode != opcode - 1)
    {
      /* The previous instruction opened/continued a P/M/E sequence but
	 INST is not the table successor it requires.  */
      mismatch_detail->kind = AARCH64_OPDE_EXPECTED_A_AFTER_B;
      mismatch_detail->error = NULL;
      mismatch_detail->index = -1;
      mismatch_detail->data[0].s = prev_insn->opcode[1].name;
      mismatch_detail->data[1].s = prev_insn->opcode->name;
      mismatch_detail->non_fatal = true;
      return false;
    }

  if (opcode->constraints & C_SCAN_MOPS_PME)
    {
      /* INST itself is part of a P/M/E triple; it must directly follow
	 its table predecessor within the same section.  */
      if (is_new_section || !prev_insn || prev_insn->opcode != opcode - 1)
	{
	  mismatch_detail->kind = AARCH64_OPDE_A_SHOULD_FOLLOW_B;
	  mismatch_detail->error = NULL;
	  mismatch_detail->index = -1;
	  mismatch_detail->data[0].s = opcode->name;
	  mismatch_detail->data[1].s = opcode[-1].name;
	  mismatch_detail->non_fatal = true;
	  return false;
	}

      /* The destination/source/size registers must match between
	 consecutive instructions of the sequence.  PREV_INSN is known
	 to be non-NULL here thanks to the check above.  */
      for (i = 0; i < 3; ++i)
	/* There's no specific requirement for the data register to be
	   the same between consecutive SET* instructions.  */
	if ((opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rd
	     || opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rs
	     || opcode->operands[i] == AARCH64_OPND_MOPS_WB_Rn)
	    && prev_insn->operands[i].reg.regno != inst->operands[i].reg.regno)
	  {
	    mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	    if (opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rd)
	      mismatch_detail->error = _("destination register differs from "
					 "preceding instruction");
	    else if (opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rs)
	      mismatch_detail->error = _("source register differs from "
					 "preceding instruction");
	    else
	      mismatch_detail->error = _("size register differs from "
					 "preceding instruction");
	    mismatch_detail->index = i;
	    mismatch_detail->non_fatal = true;
	    return false;
	  }
    }

  return true;
}
5250 /* This function verifies that the instruction INST adheres to its specified
5251 constraints. If it does then ERR_OK is returned, if not then ERR_VFI is
5252 returned and MISMATCH_DETAIL contains the reason why verification failed.
5254 The function is called both during assembly and disassembly. If assembling
5255 then ENCODING will be TRUE, else FALSE. If dissassembling PC will be set
5256 and will contain the PC of the current instruction w.r.t to the section.
5258 If ENCODING and PC=0 then you are at a start of a section. The constraints
5259 are verified against the given state insn_sequence which is updated as it
5260 transitions through the verification. */
5262 enum err_type
5263 verify_constraints (const struct aarch64_inst *inst,
5264 const aarch64_insn insn ATTRIBUTE_UNUSED,
5265 bfd_vma pc,
5266 bool encoding,
5267 aarch64_operand_error *mismatch_detail,
5268 aarch64_instr_sequence *insn_sequence)
5270 assert (inst);
5271 assert (inst->opcode);
5273 const struct aarch64_opcode *opcode = inst->opcode;
5274 if (!opcode->constraints && !insn_sequence->instr)
5275 return ERR_OK;
5277 assert (insn_sequence);
5279 enum err_type res = ERR_OK;
5281 /* This instruction puts a constraint on the insn_sequence. */
5282 if (opcode->flags & F_SCAN)
5284 if (insn_sequence->instr)
5286 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5287 mismatch_detail->error = _("instruction opens new dependency "
5288 "sequence without ending previous one");
5289 mismatch_detail->index = -1;
5290 mismatch_detail->non_fatal = true;
5291 res = ERR_VFI;
5294 init_insn_sequence (inst, insn_sequence);
5295 return res;
5298 bool is_new_section = (!encoding && pc == 0);
5299 if (!verify_mops_pme_sequence (inst, is_new_section, mismatch_detail,
5300 insn_sequence))
5302 res = ERR_VFI;
5303 if ((opcode->constraints & C_SCAN_MOPS_PME) != C_SCAN_MOPS_M)
5304 init_insn_sequence (NULL, insn_sequence);
5307 /* Verify constraints on an existing sequence. */
5308 if (insn_sequence->instr)
5310 const struct aarch64_opcode* inst_opcode = insn_sequence->instr->opcode;
5311 /* If we're decoding and we hit PC=0 with an open sequence then we haven't
5312 closed a previous one that we should have. */
5313 if (is_new_section && res == ERR_OK)
5315 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5316 mismatch_detail->error = _("previous `movprfx' sequence not closed");
5317 mismatch_detail->index = -1;
5318 mismatch_detail->non_fatal = true;
5319 res = ERR_VFI;
5320 /* Reset the sequence. */
5321 init_insn_sequence (NULL, insn_sequence);
5322 return res;
5325 /* Validate C_SCAN_MOVPRFX constraints. Move this to a lookup table. */
5326 if (inst_opcode->constraints & C_SCAN_MOVPRFX)
5328 /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
5329 instruction for better error messages. */
5330 if (!opcode->avariant
5331 || (!AARCH64_CPU_HAS_FEATURE (*opcode->avariant, SVE)
5332 && !AARCH64_CPU_HAS_FEATURE (*opcode->avariant, SVE2)))
5334 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5335 mismatch_detail->error = _("SVE instruction expected after "
5336 "`movprfx'");
5337 mismatch_detail->index = -1;
5338 mismatch_detail->non_fatal = true;
5339 res = ERR_VFI;
5340 goto done;
5343 /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
5344 instruction that is allowed to be used with a MOVPRFX. */
5345 if (!(opcode->constraints & C_SCAN_MOVPRFX))
5347 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5348 mismatch_detail->error = _("SVE `movprfx' compatible instruction "
5349 "expected");
5350 mismatch_detail->index = -1;
5351 mismatch_detail->non_fatal = true;
5352 res = ERR_VFI;
5353 goto done;
5356 /* Next check for usage of the predicate register. */
5357 aarch64_opnd_info blk_dest = insn_sequence->instr->operands[0];
5358 aarch64_opnd_info blk_pred, inst_pred;
5359 memset (&blk_pred, 0, sizeof (aarch64_opnd_info));
5360 memset (&inst_pred, 0, sizeof (aarch64_opnd_info));
5361 bool predicated = false;
5362 assert (blk_dest.type == AARCH64_OPND_SVE_Zd);
5364 /* Determine if the movprfx instruction used is predicated or not. */
5365 if (insn_sequence->instr->operands[1].type == AARCH64_OPND_SVE_Pg3)
5367 predicated = true;
5368 blk_pred = insn_sequence->instr->operands[1];
5371 unsigned char max_elem_size = 0;
5372 unsigned char current_elem_size;
5373 int num_op_used = 0, last_op_usage = 0;
5374 int i, inst_pred_idx = -1;
5375 int num_ops = aarch64_num_of_operands (opcode);
5376 for (i = 0; i < num_ops; i++)
5378 aarch64_opnd_info inst_op = inst->operands[i];
5379 switch (inst_op.type)
5381 case AARCH64_OPND_SVE_Zd:
5382 case AARCH64_OPND_SVE_Zm_5:
5383 case AARCH64_OPND_SVE_Zm_16:
5384 case AARCH64_OPND_SVE_Zn:
5385 case AARCH64_OPND_SVE_Zt:
5386 case AARCH64_OPND_SVE_Vm:
5387 case AARCH64_OPND_SVE_Vn:
5388 case AARCH64_OPND_Va:
5389 case AARCH64_OPND_Vn:
5390 case AARCH64_OPND_Vm:
5391 case AARCH64_OPND_Sn:
5392 case AARCH64_OPND_Sm:
5393 if (inst_op.reg.regno == blk_dest.reg.regno)
5395 num_op_used++;
5396 last_op_usage = i;
5398 current_elem_size
5399 = aarch64_get_qualifier_esize (inst_op.qualifier);
5400 if (current_elem_size > max_elem_size)
5401 max_elem_size = current_elem_size;
5402 break;
5403 case AARCH64_OPND_SVE_Pd:
5404 case AARCH64_OPND_SVE_Pg3:
5405 case AARCH64_OPND_SVE_Pg4_5:
5406 case AARCH64_OPND_SVE_Pg4_10:
5407 case AARCH64_OPND_SVE_Pg4_16:
5408 case AARCH64_OPND_SVE_Pm:
5409 case AARCH64_OPND_SVE_Pn:
5410 case AARCH64_OPND_SVE_Pt:
5411 case AARCH64_OPND_SME_Pm:
5412 inst_pred = inst_op;
5413 inst_pred_idx = i;
5414 break;
5415 default:
5416 break;
5420 assert (max_elem_size != 0);
5421 aarch64_opnd_info inst_dest = inst->operands[0];
5422 /* Determine the size that should be used to compare against the
5423 movprfx size. */
5424 current_elem_size
5425 = opcode->constraints & C_MAX_ELEM
5426 ? max_elem_size
5427 : aarch64_get_qualifier_esize (inst_dest.qualifier);
5429 /* If movprfx is predicated do some extra checks. */
5430 if (predicated)
5432 /* The instruction must be predicated. */
5433 if (inst_pred_idx < 0)
5435 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5436 mismatch_detail->error = _("predicated instruction expected "
5437 "after `movprfx'");
5438 mismatch_detail->index = -1;
5439 mismatch_detail->non_fatal = true;
5440 res = ERR_VFI;
5441 goto done;
5444 /* The instruction must have a merging predicate. */
5445 if (inst_pred.qualifier != AARCH64_OPND_QLF_P_M)
5447 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5448 mismatch_detail->error = _("merging predicate expected due "
5449 "to preceding `movprfx'");
5450 mismatch_detail->index = inst_pred_idx;
5451 mismatch_detail->non_fatal = true;
5452 res = ERR_VFI;
5453 goto done;
5456 /* The same register must be used in instruction. */
5457 if (blk_pred.reg.regno != inst_pred.reg.regno)
5459 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5460 mismatch_detail->error = _("predicate register differs "
5461 "from that in preceding "
5462 "`movprfx'");
5463 mismatch_detail->index = inst_pred_idx;
5464 mismatch_detail->non_fatal = true;
5465 res = ERR_VFI;
5466 goto done;
5470 /* Destructive operations by definition must allow one usage of the
5471 same register. */
5472 int allowed_usage
5473 = aarch64_is_destructive_by_operands (opcode) ? 2 : 1;
5475 /* Operand is not used at all. */
5476 if (num_op_used == 0)
5478 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5479 mismatch_detail->error = _("output register of preceding "
5480 "`movprfx' not used in current "
5481 "instruction");
5482 mismatch_detail->index = 0;
5483 mismatch_detail->non_fatal = true;
5484 res = ERR_VFI;
5485 goto done;
5488 /* We now know it's used, now determine exactly where it's used. */
5489 if (blk_dest.reg.regno != inst_dest.reg.regno)
5491 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5492 mismatch_detail->error = _("output register of preceding "
5493 "`movprfx' expected as output");
5494 mismatch_detail->index = 0;
5495 mismatch_detail->non_fatal = true;
5496 res = ERR_VFI;
5497 goto done;
5500 /* Operand used more than allowed for the specific opcode type. */
5501 if (num_op_used > allowed_usage)
5503 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5504 mismatch_detail->error = _("output register of preceding "
5505 "`movprfx' used as input");
5506 mismatch_detail->index = last_op_usage;
5507 mismatch_detail->non_fatal = true;
5508 res = ERR_VFI;
5509 goto done;
5512 /* Now the only thing left is the qualifiers checks. The register
5513 must have the same maximum element size. */
5514 if (inst_dest.qualifier
5515 && blk_dest.qualifier
5516 && current_elem_size
5517 != aarch64_get_qualifier_esize (blk_dest.qualifier))
5519 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5520 mismatch_detail->error = _("register size not compatible with "
5521 "previous `movprfx'");
5522 mismatch_detail->index = 0;
5523 mismatch_detail->non_fatal = true;
5524 res = ERR_VFI;
5525 goto done;
5529 done:
5530 if (insn_sequence->num_added_insns == insn_sequence->num_allocated_insns)
5531 /* We've checked the last instruction in the sequence and so
5532 don't need the sequence any more. */
5533 init_insn_sequence (NULL, insn_sequence);
5534 else
5535 add_insn_to_sequence (inst, insn_sequence);
5538 return res;
/* Return true if VALUE cannot be moved into an SVE register using DUP
   (with any element size, not just ESIZE) and if using DUPM would
   therefore be OK.  ESIZE is the number of bytes in the immediate.  */

bool
aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
{
  int64_t sval = uvalue;
  /* Mask of the bits above the ESIZE-byte element (done as two shifts so
     that esize == 8 does not shift by the full 64-bit width).  */
  uint64_t top_mask = (uint64_t) -1 << (esize * 4) << (esize * 4);

  /* The bits above the element must be all-zero or all-one.  */
  if ((uvalue & ~top_mask) != uvalue && (uvalue | top_mask) != uvalue)
    return false;

  /* Narrow the value to the smallest element size whose replication
     reproduces it, tracking the signed interpretation as we go.  */
  if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
    {
      sval = (int32_t) uvalue;
      if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
	{
	  sval = (int16_t) uvalue;
	  /* A byte-replicable value is always reachable via DUP.  */
	  if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
	    return false;
	}
    }

  /* DUP's immediate is an 8-bit signed value, optionally shifted left
     by 8; undo the shift before range-checking.  */
  if ((sval & 0xff) == 0)
    sval /= 256;
  return sval < -128 || sval >= 128;
}
5569 /* Return true if a CPU with the AARCH64_FEATURE_* bits in CPU_VARIANT
5570 supports the instruction described by INST. */
5572 bool
5573 aarch64_cpu_supports_inst_p (aarch64_feature_set cpu_variant,
5574 aarch64_inst *inst)
5576 if (!inst->opcode->avariant
5577 || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *inst->opcode->avariant))
5578 return false;
5580 if (inst->opcode->iclass == sme_fp_sd
5581 && inst->operands[0].qualifier == AARCH64_OPND_QLF_S_D
5582 && !AARCH64_CPU_HAS_FEATURE (cpu_variant, SME_F64F64))
5583 return false;
5585 if (inst->opcode->iclass == sme_int_sd
5586 && inst->operands[0].qualifier == AARCH64_OPND_QLF_S_D
5587 && !AARCH64_CPU_HAS_FEATURE (cpu_variant, SME_I16I64))
5588 return false;
5590 return true;
5593 /* Include the opcode description table as well as the operand description
5594 table. */
5595 #define VERIFIER(x) verify_##x
5596 #include "aarch64-tbl.h"