/* From binutils-gdb, opcodes/aarch64-opc.c
   (commit "aarch64: Add SVE2.1 Contiguous load/store instructions").  */
/* aarch64-opc.c -- AArch64 opcode support.
   Copyright (C) 2009-2024 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of the GNU opcodes library.

   This library is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   It is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING3. If not,
   see <http://www.gnu.org/licenses/>.  */
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include <stdint.h>
26 #include <stdarg.h>
27 #include <inttypes.h>
29 #include "opintl.h"
30 #include "libiberty.h"
32 #include "aarch64-opc.h"
34 #ifdef DEBUG_AARCH64
35 int debug_dump = false;
36 #endif /* DEBUG_AARCH64 */
/* The enumeration strings associated with each value of a 5-bit SVE
   pattern operand.  A null entry indicates a reserved meaning.  */
const char *const aarch64_sve_pattern_array[32] = {
  /* 0-7.  */
  "pow2",
  "vl1",
  "vl2",
  "vl3",
  "vl4",
  "vl5",
  "vl6",
  "vl7",
  /* 8-15.  */
  "vl8",
  "vl16",
  "vl32",
  "vl64",
  "vl128",
  "vl256",
  0,
  0,
  /* 16-23.  */
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  /* 24-31.  */
  0,
  0,
  0,
  0,
  0,
  "mul4",
  "mul3",
  "all"
};
/* The enumeration strings associated with each value of a 4-bit SVE
   prefetch operand.  A null entry indicates a reserved meaning.  */
const char *const aarch64_sve_prfop_array[16] = {
  /* 0-7.  */
  "pldl1keep",
  "pldl1strm",
  "pldl2keep",
  "pldl2strm",
  "pldl3keep",
  "pldl3strm",
  0,
  0,
  /* 8-15.  */
  "pstl1keep",
  "pstl1strm",
  "pstl2keep",
  "pstl2strm",
  "pstl3keep",
  "pstl3strm",
  0,
  0
};
/* The enumeration strings associated with each value of a 6-bit RPRFM
   operation.  Trailing (reserved) entries are implicitly null.  */
const char *const aarch64_rprfmop_array[64] = {
  "pldkeep",
  "pstkeep",
  0,
  0,
  "pldstrm",
  "pststrm"
};
/* Vector length multiples for a predicate-as-counter operand.  Used in things
   like AARCH64_OPND_SME_VLxN_10.  */
const char *const aarch64_sme_vlxn_array[2] = {
  "vlx2",
  "vlx4"
};
120 /* Helper functions to determine which operand to be used to encode/decode
121 the size:Q fields for AdvSIMD instructions. */
123 static inline bool
124 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
126 return (qualifier >= AARCH64_OPND_QLF_V_8B
127 && qualifier <= AARCH64_OPND_QLF_V_1Q);
130 static inline bool
131 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
133 return (qualifier >= AARCH64_OPND_QLF_S_B
134 && qualifier <= AARCH64_OPND_QLF_S_Q);
/* Classification of an AdvSIMD operand-qualifier sequence; used to pick
   which operand carries the size:Q encoding.  */
enum data_pattern
{
  DP_UNKNOWN,
  DP_VECTOR_3SAME,
  DP_VECTOR_LONG,
  DP_VECTOR_WIDE,
  DP_VECTOR_ACROSS_LANES,
};
/* Index of the operand that carries the size:Q encoding, indexed by
   enum data_pattern.  */
static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
155 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
156 the data pattern.
157 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
158 corresponds to one of a sequence of operands. */
160 static enum data_pattern
161 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
163 if (vector_qualifier_p (qualifiers[0]))
165 /* e.g. v.4s, v.4s, v.4s
166 or v.4h, v.4h, v.h[3]. */
167 if (qualifiers[0] == qualifiers[1]
168 && vector_qualifier_p (qualifiers[2])
169 && (aarch64_get_qualifier_esize (qualifiers[0])
170 == aarch64_get_qualifier_esize (qualifiers[1]))
171 && (aarch64_get_qualifier_esize (qualifiers[0])
172 == aarch64_get_qualifier_esize (qualifiers[2])))
173 return DP_VECTOR_3SAME;
174 /* e.g. v.8h, v.8b, v.8b.
175 or v.4s, v.4h, v.h[2].
176 or v.8h, v.16b. */
177 if (vector_qualifier_p (qualifiers[1])
178 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
179 && (aarch64_get_qualifier_esize (qualifiers[0])
180 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
181 return DP_VECTOR_LONG;
182 /* e.g. v.8h, v.8h, v.8b. */
183 if (qualifiers[0] == qualifiers[1]
184 && vector_qualifier_p (qualifiers[2])
185 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
186 && (aarch64_get_qualifier_esize (qualifiers[0])
187 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
188 && (aarch64_get_qualifier_esize (qualifiers[0])
189 == aarch64_get_qualifier_esize (qualifiers[1])))
190 return DP_VECTOR_WIDE;
192 else if (fp_qualifier_p (qualifiers[0]))
194 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
195 if (vector_qualifier_p (qualifiers[1])
196 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
197 return DP_VECTOR_ACROSS_LANES;
200 return DP_UNKNOWN;
203 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
204 the AdvSIMD instructions. */
205 /* N.B. it is possible to do some optimization that doesn't call
206 get_data_pattern each time when we need to select an operand. We can
207 either buffer the caculated the result or statically generate the data,
208 however, it is not obvious that the optimization will bring significant
209 benefit. */
212 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
214 return
215 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
218 /* Instruction bit-fields.
219 + Keep synced with 'enum aarch64_field_kind'. */
220 const aarch64_field fields[] =
222 { 0, 0 }, /* NIL. */
223 { 8, 4 }, /* CRm: in the system instructions. */
224 { 10, 2 }, /* CRm_dsb_nxs: 2-bit imm. encoded in CRm<3:2>. */
225 { 12, 4 }, /* CRn: in the system instructions. */
226 { 10, 8 }, /* CSSC_imm8. */
227 { 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */
228 { 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */
229 { 0, 5 }, /* LSE128_Rt: Shared input+output operand register. */
230 { 16, 5 }, /* LSE128_Rt2: Shared input+output operand register 2. */
231 { 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */
232 { 22, 1 }, /* N: in logical (immediate) instructions. */
233 { 30, 1 }, /* Q: in most AdvSIMD instructions. */
234 { 10, 5 }, /* Ra: in fp instructions. */
235 { 0, 5 }, /* Rd: in many integer instructions. */
236 { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */
237 { 5, 5 }, /* Rn: in many integer instructions. */
238 { 16, 5 }, /* Rs: in load/store exclusive instructions. */
239 { 0, 5 }, /* Rt: in load/store instructions. */
240 { 10, 5 }, /* Rt2: in load/store pair instructions. */
241 { 12, 1 }, /* S: in load/store reg offset instructions. */
242 { 12, 2 }, /* SM3_imm2: Indexed element SM3 2 bits index immediate. */
243 { 1, 3 }, /* SME_Pdx2: predicate register, multiple of 2, [3:1]. */
244 { 13, 3 }, /* SME_Pm: second source scalable predicate register P0-P7. */
245 { 0, 3 }, /* SME_PNd3: PN0-PN7, bits [2:0]. */
246 { 5, 3 }, /* SME_PNn3: PN0-PN7, bits [7:5]. */
247 { 16, 1 }, /* SME_Q: Q class bit, bit 16. */
248 { 16, 2 }, /* SME_Rm: index base register W12-W15 [17:16]. */
249 { 13, 2 }, /* SME_Rv: vector select register W12-W15, bits [14:13]. */
250 { 15, 1 }, /* SME_V: (horizontal / vertical tiles), bit 15. */
251 { 10, 1 }, /* SME_VL_10: VLx2 or VLx4, bit [10]. */
252 { 13, 1 }, /* SME_VL_13: VLx2 or VLx4, bit [13]. */
253 { 0, 2 }, /* SME_ZAda_2b: tile ZA0-ZA3. */
254 { 0, 3 }, /* SME_ZAda_3b: tile ZA0-ZA7. */
255 { 1, 4 }, /* SME_Zdn2: Z0-Z31, multiple of 2, bits [4:1]. */
256 { 2, 3 }, /* SME_Zdn4: Z0-Z31, multiple of 4, bits [4:2]. */
257 { 16, 4 }, /* SME_Zm: Z0-Z15, bits [19:16]. */
258 { 17, 4 }, /* SME_Zm2: Z0-Z31, multiple of 2, bits [20:17]. */
259 { 18, 3 }, /* SME_Zm4: Z0-Z31, multiple of 4, bits [20:18]. */
260 { 6, 4 }, /* SME_Zn2: Z0-Z31, multiple of 2, bits [9:6]. */
261 { 7, 3 }, /* SME_Zn4: Z0-Z31, multiple of 4, bits [9:7]. */
262 { 4, 1 }, /* SME_ZtT: upper bit of Zt, bit [4]. */
263 { 0, 3 }, /* SME_Zt3: lower 3 bits of Zt, bits [2:0]. */
264 { 0, 2 }, /* SME_Zt2: lower 2 bits of Zt, bits [1:0]. */
265 { 23, 1 }, /* SME_i1: immediate field, bit 23. */
266 { 12, 2 }, /* SME_size_12: bits [13:12]. */
267 { 22, 2 }, /* SME_size_22: size<1>, size<0> class field, [23:22]. */
268 { 23, 1 }, /* SME_sz_23: bit [23]. */
269 { 22, 1 }, /* SME_tszh: immediate and qualifier field, bit 22. */
270 { 18, 3 }, /* SME_tszl: immediate and qualifier field, bits [20:18]. */
271 { 0, 8 }, /* SME_zero_mask: list of up to 8 tile names separated by commas [7:0]. */
272 { 4, 1 }, /* SVE_M_4: Merge/zero select, bit 4. */
273 { 14, 1 }, /* SVE_M_14: Merge/zero select, bit 14. */
274 { 16, 1 }, /* SVE_M_16: Merge/zero select, bit 16. */
275 { 17, 1 }, /* SVE_N: SVE equivalent of N. */
276 { 0, 4 }, /* SVE_Pd: p0-p15, bits [3,0]. */
277 { 10, 3 }, /* SVE_Pg3: p0-p7, bits [12,10]. */
278 { 5, 4 }, /* SVE_Pg4_5: p0-p15, bits [8,5]. */
279 { 10, 4 }, /* SVE_Pg4_10: p0-p15, bits [13,10]. */
280 { 16, 4 }, /* SVE_Pg4_16: p0-p15, bits [19,16]. */
281 { 16, 4 }, /* SVE_Pm: p0-p15, bits [19,16]. */
282 { 5, 4 }, /* SVE_Pn: p0-p15, bits [8,5]. */
283 { 0, 4 }, /* SVE_Pt: p0-p15, bits [3,0]. */
284 { 5, 5 }, /* SVE_Rm: SVE alternative position for Rm. */
285 { 16, 5 }, /* SVE_Rn: SVE alternative position for Rn. */
286 { 0, 5 }, /* SVE_Vd: Scalar SIMD&FP register, bits [4,0]. */
287 { 5, 5 }, /* SVE_Vm: Scalar SIMD&FP register, bits [9,5]. */
288 { 5, 5 }, /* SVE_Vn: Scalar SIMD&FP register, bits [9,5]. */
289 { 5, 5 }, /* SVE_Za_5: SVE vector register, bits [9,5]. */
290 { 16, 5 }, /* SVE_Za_16: SVE vector register, bits [20,16]. */
291 { 0, 5 }, /* SVE_Zd: SVE vector register. bits [4,0]. */
292 { 5, 5 }, /* SVE_Zm_5: SVE vector register, bits [9,5]. */
293 { 16, 5 }, /* SVE_Zm_16: SVE vector register, bits [20,16]. */
294 { 5, 5 }, /* SVE_Zn: SVE vector register, bits [9,5]. */
295 { 0, 5 }, /* SVE_Zt: SVE vector register, bits [4,0]. */
296 { 5, 1 }, /* SVE_i1: single-bit immediate. */
297 { 20, 1 }, /* SVE_i2h: high bit of 2bit immediate, bits. */
298 { 22, 1 }, /* SVE_i3h: high bit of 3-bit immediate. */
299 { 19, 2 }, /* SVE_i3h2: two high bits of 3bit immediate, bits [20,19]. */
300 { 11, 1 }, /* SVE_i3l: low bit of 3-bit immediate. */
301 { 16, 3 }, /* SVE_imm3: 3-bit immediate field. */
302 { 16, 4 }, /* SVE_imm4: 4-bit immediate field. */
303 { 5, 5 }, /* SVE_imm5: 5-bit immediate field. */
304 { 16, 5 }, /* SVE_imm5b: secondary 5-bit immediate field. */
305 { 16, 6 }, /* SVE_imm6: 6-bit immediate field. */
306 { 14, 7 }, /* SVE_imm7: 7-bit immediate field. */
307 { 5, 8 }, /* SVE_imm8: 8-bit immediate field. */
308 { 5, 9 }, /* SVE_imm9: 9-bit immediate field. */
309 { 11, 6 }, /* SVE_immr: SVE equivalent of immr. */
310 { 5, 6 }, /* SVE_imms: SVE equivalent of imms. */
311 { 10, 2 }, /* SVE_msz: 2-bit shift amount for ADR. */
312 { 5, 5 }, /* SVE_pattern: vector pattern enumeration. */
313 { 0, 4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD]. */
314 { 16, 1 }, /* SVE_rot1: 1-bit rotation amount. */
315 { 10, 2 }, /* SVE_rot2: 2-bit rotation amount. */
316 { 10, 1 }, /* SVE_rot3: 1-bit rotation amount at bit 10. */
317 { 17, 2 }, /* SVE_size: 2-bit element size, bits [18,17]. */
318 { 22, 1 }, /* SVE_sz: 1-bit element size select. */
319 { 30, 1 }, /* SVE_sz2: 1-bit element size select. */
320 { 16, 4 }, /* SVE_tsz: triangular size select. */
321 { 22, 2 }, /* SVE_tszh: triangular size select high, bits [23,22]. */
322 { 8, 2 }, /* SVE_tszl_8: triangular size select low, bits [9,8]. */
323 { 19, 2 }, /* SVE_tszl_19: triangular size select low, bits [20,19]. */
324 { 14, 1 }, /* SVE_xs_14: UXTW/SXTW select (bit 14). */
325 { 22, 1 }, /* SVE_xs_22: UXTW/SXTW select (bit 22). */
326 { 22, 1 }, /* S_imm10: in LDRAA and LDRAB instructions. */
327 { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */
328 { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */
329 { 19, 5 }, /* b40: in the test bit and branch instructions. */
330 { 31, 1 }, /* b5: in the test bit and branch instructions. */
331 { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */
332 { 12, 4 }, /* cond: condition flags as a source operand. */
333 { 0, 4 }, /* cond2: condition in truly conditional-executed inst. */
334 { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
335 { 21, 2 }, /* hw: in move wide constant instructions. */
336 { 0, 1 }, /* imm1_0: general immediate in bits [0]. */
337 { 2, 1 }, /* imm1_2: general immediate in bits [2]. */
338 { 8, 1 }, /* imm1_8: general immediate in bits [8]. */
339 { 10, 1 }, /* imm1_10: general immediate in bits [10]. */
340 { 15, 1 }, /* imm1_15: general immediate in bits [15]. */
341 { 16, 1 }, /* imm1_16: general immediate in bits [16]. */
342 { 0, 2 }, /* imm2_0: general immediate in bits [1:0]. */
343 { 1, 2 }, /* imm2_1: general immediate in bits [2:1]. */
344 { 8, 2 }, /* imm2_8: general immediate in bits [9:8]. */
345 { 10, 2 }, /* imm2_10: 2-bit immediate, bits [11:10] */
346 { 12, 2 }, /* imm2_12: 2-bit immediate, bits [13:12] */
347 { 15, 2 }, /* imm2_15: 2-bit immediate, bits [16:15] */
348 { 16, 2 }, /* imm2_16: 2-bit immediate, bits [17:16] */
349 { 19, 2 }, /* imm2_19: 2-bit immediate, bits [20:19] */
350 { 0, 3 }, /* imm3_0: general immediate in bits [2:0]. */
351 { 5, 3 }, /* imm3_5: general immediate in bits [7:5]. */
352 { 10, 3 }, /* imm3_10: in add/sub extended reg instructions. */
353 { 12, 3 }, /* imm3_12: general immediate in bits [14:12]. */
354 { 14, 3 }, /* imm3_14: general immediate in bits [16:14]. */
355 { 15, 3 }, /* imm3_15: general immediate in bits [17:15]. */
356 { 0, 4 }, /* imm4_0: in rmif instructions. */
357 { 5, 4 }, /* imm4_5: in SME instructions. */
358 { 10, 4 }, /* imm4_10: in adddg/subg instructions. */
359 { 11, 4 }, /* imm4_11: in advsimd ext and advsimd ins instructions. */
360 { 14, 4 }, /* imm4_14: general immediate in bits [17:14]. */
361 { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */
362 { 10, 6 }, /* imm6_10: in add/sub reg shifted instructions. */
363 { 15, 6 }, /* imm6_15: in rmif instructions. */
364 { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */
365 { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */
366 { 12, 9 }, /* imm9: in load/store pre/post index instructions. */
367 { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */
368 { 5, 14 }, /* imm14: in test bit and branch instructions. */
369 { 0, 16 }, /* imm16_0: in udf instruction. */
370 { 5, 16 }, /* imm16_5: in exception instructions. */
371 { 5, 19 }, /* imm19: e.g. in CBZ. */
372 { 0, 26 }, /* imm26: in unconditional branch instructions. */
373 { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */
374 { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */
375 { 5, 19 }, /* immhi: e.g. in ADRP. */
376 { 29, 2 }, /* immlo: e.g. in ADRP. */
377 { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */
378 { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */
379 { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */
380 { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */
381 { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */
382 { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */
383 { 30, 1 }, /* lse_sz: in LSE extension atomic instructions. */
384 { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */
385 { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */
386 { 19, 2 }, /* op0: in the system instructions. */
387 { 16, 3 }, /* op1: in the system instructions. */
388 { 5, 3 }, /* op2: in the system instructions. */
389 { 22, 2 }, /* opc: in load/store reg offset instructions. */
390 { 23, 1 }, /* opc1: in load/store reg offset instructions. */
391 { 12, 4 }, /* opcode: in advsimd load/store instructions. */
392 { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */
393 { 11, 2 }, /* rotate1: FCMLA immediate rotate. */
394 { 13, 2 }, /* rotate2: Indexed element FCMLA immediate rotate. */
395 { 12, 1 }, /* rotate3: FCADD immediate rotate. */
396 { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
397 { 31, 1 }, /* sf: in integer data processing instructions. */
398 { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */
399 { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */
400 { 22, 1 }, /* sz: 1-bit element size select. */
401 { 22, 2 }, /* type: floating point type field in fp data inst. */
402 { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */
403 { 5, 3 }, /* off3: immediate offset used to calculate slice number in a
404 ZA tile. */
405 { 5, 2 }, /* off2: immediate offset used to calculate slice number in
406 a ZA tile. */
407 { 7, 1 }, /* ZAn_1: name of the 1bit encoded ZA tile. */
408 { 5, 1 }, /* ol: immediate offset used to calculate slice number in a ZA
409 tile. */
410 { 6, 2 }, /* ZAn_2: name of the 2bit encoded ZA tile. */
411 { 5, 3 }, /* ZAn_3: name of the 3bit encoded ZA tile. */
412 { 6, 1 }, /* ZAn: name of the bit encoded ZA tile. */
415 enum aarch64_operand_class
416 aarch64_get_operand_class (enum aarch64_opnd type)
418 return aarch64_operands[type].op_class;
421 const char *
422 aarch64_get_operand_name (enum aarch64_opnd type)
424 return aarch64_operands[type].name;
427 /* Get operand description string.
428 This is usually for the diagnosis purpose. */
429 const char *
430 aarch64_get_operand_desc (enum aarch64_opnd type)
432 return aarch64_operands[type].desc;
435 /* Table of all conditional affixes. */
436 const aarch64_cond aarch64_conds[16] =
438 {{"eq", "none"}, 0x0},
439 {{"ne", "any"}, 0x1},
440 {{"cs", "hs", "nlast"}, 0x2},
441 {{"cc", "lo", "ul", "last"}, 0x3},
442 {{"mi", "first"}, 0x4},
443 {{"pl", "nfrst"}, 0x5},
444 {{"vs"}, 0x6},
445 {{"vc"}, 0x7},
446 {{"hi", "pmore"}, 0x8},
447 {{"ls", "plast"}, 0x9},
448 {{"ge", "tcont"}, 0xa},
449 {{"lt", "tstop"}, 0xb},
450 {{"gt"}, 0xc},
451 {{"le"}, 0xd},
452 {{"al"}, 0xe},
453 {{"nv"}, 0xf},
456 const aarch64_cond *
457 get_cond_from_value (aarch64_insn value)
459 assert (value < 16);
460 return &aarch64_conds[(unsigned int) value];
463 const aarch64_cond *
464 get_inverted_cond (const aarch64_cond *cond)
466 return &aarch64_conds[cond->value ^ 0x1];
469 /* Table describing the operand extension/shifting operators; indexed by
470 enum aarch64_modifier_kind.
472 The value column provides the most common values for encoding modifiers,
473 which enables table-driven encoding/decoding for the modifiers. */
474 const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
476 {"none", 0x0},
477 {"msl", 0x0},
478 {"ror", 0x3},
479 {"asr", 0x2},
480 {"lsr", 0x1},
481 {"lsl", 0x0},
482 {"uxtb", 0x0},
483 {"uxth", 0x1},
484 {"uxtw", 0x2},
485 {"uxtx", 0x3},
486 {"sxtb", 0x4},
487 {"sxth", 0x5},
488 {"sxtw", 0x6},
489 {"sxtx", 0x7},
490 {"mul", 0x0},
491 {"mul vl", 0x0},
492 {NULL, 0},
495 enum aarch64_modifier_kind
496 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
498 return desc - aarch64_operand_modifiers;
501 aarch64_insn
502 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
504 return aarch64_operand_modifiers[kind].value;
507 enum aarch64_modifier_kind
508 aarch64_get_operand_modifier_from_value (aarch64_insn value,
509 bool extend_p)
511 if (extend_p)
512 return AARCH64_MOD_UXTB + value;
513 else
514 return AARCH64_MOD_LSL - value;
517 bool
518 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
520 return kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX;
523 static inline bool
524 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
526 return kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL;
529 const struct aarch64_name_value_pair aarch64_barrier_options[16] =
531 { "#0x00", 0x0 },
532 { "oshld", 0x1 },
533 { "oshst", 0x2 },
534 { "osh", 0x3 },
535 { "#0x04", 0x4 },
536 { "nshld", 0x5 },
537 { "nshst", 0x6 },
538 { "nsh", 0x7 },
539 { "#0x08", 0x8 },
540 { "ishld", 0x9 },
541 { "ishst", 0xa },
542 { "ish", 0xb },
543 { "#0x0c", 0xc },
544 { "ld", 0xd },
545 { "st", 0xe },
546 { "sy", 0xf },
549 const struct aarch64_name_value_pair aarch64_barrier_dsb_nxs_options[4] =
550 { /* CRm<3:2> #imm */
551 { "oshnxs", 16 }, /* 00 16 */
552 { "nshnxs", 20 }, /* 01 20 */
553 { "ishnxs", 24 }, /* 10 24 */
554 { "synxs", 28 }, /* 11 28 */
557 /* Table describing the operands supported by the aliases of the HINT
558 instruction.
560 The name column is the operand that is accepted for the alias. The value
561 column is the hint number of the alias. The list of operands is terminated
562 by NULL in the name column. */
564 const struct aarch64_name_value_pair aarch64_hint_options[] =
566 /* BTI. This is also the F_DEFAULT entry for AARCH64_OPND_BTI_TARGET. */
567 { " ", HINT_ENCODE (HINT_OPD_F_NOPRINT, 0x20) },
568 { "csync", HINT_OPD_CSYNC }, /* PSB CSYNC. */
569 { "dsync", HINT_OPD_DSYNC }, /* GCSB DSYNC. */
570 { "c", HINT_OPD_C }, /* BTI C. */
571 { "j", HINT_OPD_J }, /* BTI J. */
572 { "jc", HINT_OPD_JC }, /* BTI JC. */
573 { NULL, HINT_OPD_NULL },
576 /* op -> op: load = 0 instruction = 1 store = 2
577 l -> level: 1-3
578 t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */
579 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
580 const struct aarch64_name_value_pair aarch64_prfops[32] =
582 { "pldl1keep", B(0, 1, 0) },
583 { "pldl1strm", B(0, 1, 1) },
584 { "pldl2keep", B(0, 2, 0) },
585 { "pldl2strm", B(0, 2, 1) },
586 { "pldl3keep", B(0, 3, 0) },
587 { "pldl3strm", B(0, 3, 1) },
588 { "pldslckeep", B(0, 4, 0) },
589 { "pldslcstrm", B(0, 4, 1) },
590 { "plil1keep", B(1, 1, 0) },
591 { "plil1strm", B(1, 1, 1) },
592 { "plil2keep", B(1, 2, 0) },
593 { "plil2strm", B(1, 2, 1) },
594 { "plil3keep", B(1, 3, 0) },
595 { "plil3strm", B(1, 3, 1) },
596 { "plislckeep", B(1, 4, 0) },
597 { "plislcstrm", B(1, 4, 1) },
598 { "pstl1keep", B(2, 1, 0) },
599 { "pstl1strm", B(2, 1, 1) },
600 { "pstl2keep", B(2, 2, 0) },
601 { "pstl2strm", B(2, 2, 1) },
602 { "pstl3keep", B(2, 3, 0) },
603 { "pstl3strm", B(2, 3, 1) },
604 { "pstslckeep", B(2, 4, 0) },
605 { "pstslcstrm", B(2, 4, 1) },
606 { NULL, 0x18 },
607 { NULL, 0x19 },
608 { NULL, 0x1a },
609 { NULL, 0x1b },
610 { NULL, 0x1c },
611 { NULL, 0x1d },
612 { NULL, 0x1e },
613 { NULL, 0x1f },
615 #undef B
/* Utilities on value constraint.  */

/* Return 1 if VALUE lies in [LOW, HIGH] (inclusive), 0 otherwise.  */
static inline int
value_in_range_p (int64_t value, int low, int high)
{
  return (value >= low && value <= high) ? 1 : 0;
}
/* Return true if VALUE is a multiple of ALIGN.  */
static inline int
value_aligned_p (int64_t value, int align)
{
  return (value % align) == 0;
}
/* A signed value fits in a field: return 1 if VALUE is representable as a
   WIDTH-bit two's-complement number, i.e. in [-2^(WIDTH-1), 2^(WIDTH-1)).  */
static inline int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width < sizeof (value) * 8)
    {
      int64_t lim = (uint64_t) 1 << (width - 1);
      if (value >= -lim && value < lim)
	return 1;
    }
  return 0;
}
/* An unsigned value fits in a field: return 1 if VALUE is in
   [0, 2^WIDTH).  */
static inline int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width < sizeof (value) * 8)
    {
      int64_t lim = (uint64_t) 1 << width;
      if (value >= 0 && value < lim)
	return 1;
    }
  return 0;
}
660 /* Return 1 if OPERAND is SP or WSP. */
662 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
664 return ((aarch64_get_operand_class (operand->type)
665 == AARCH64_OPND_CLASS_INT_REG)
666 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
667 && operand->reg.regno == 31);
670 /* Return 1 if OPERAND is XZR or WZP. */
672 aarch64_zero_register_p (const aarch64_opnd_info *operand)
674 return ((aarch64_get_operand_class (operand->type)
675 == AARCH64_OPND_CLASS_INT_REG)
676 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
677 && operand->reg.regno == 31);
680 /* Return true if the operand *OPERAND that has the operand code
681 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
682 qualified by the qualifier TARGET. */
684 static inline int
685 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
686 aarch64_opnd_qualifier_t target)
688 switch (operand->qualifier)
690 case AARCH64_OPND_QLF_W:
691 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
692 return 1;
693 break;
694 case AARCH64_OPND_QLF_X:
695 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
696 return 1;
697 break;
698 case AARCH64_OPND_QLF_WSP:
699 if (target == AARCH64_OPND_QLF_W
700 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
701 return 1;
702 break;
703 case AARCH64_OPND_QLF_SP:
704 if (target == AARCH64_OPND_QLF_X
705 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
706 return 1;
707 break;
708 default:
709 break;
712 return 0;
715 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
716 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
718 Return NIL if more than one expected qualifiers are found. */
720 aarch64_opnd_qualifier_t
721 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
722 int idx,
723 const aarch64_opnd_qualifier_t known_qlf,
724 int known_idx)
726 int i, saved_i;
728 /* Special case.
730 When the known qualifier is NIL, we have to assume that there is only
731 one qualifier sequence in the *QSEQ_LIST and return the corresponding
732 qualifier directly. One scenario is that for instruction
733 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
734 which has only one possible valid qualifier sequence
735 NIL, S_D
736 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
737 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
739 Because the qualifier NIL has dual roles in the qualifier sequence:
740 it can mean no qualifier for the operand, or the qualifer sequence is
741 not in use (when all qualifiers in the sequence are NILs), we have to
742 handle this special case here. */
743 if (known_qlf == AARCH64_OPND_NIL)
745 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
746 return qseq_list[0][idx];
749 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
751 if (qseq_list[i][known_idx] == known_qlf)
753 if (saved_i != -1)
754 /* More than one sequences are found to have KNOWN_QLF at
755 KNOWN_IDX. */
756 return AARCH64_OPND_NIL;
757 saved_i = i;
761 return qseq_list[saved_i][idx];
/* Kinds of operand qualifier, determining how the three data fields of
   struct operand_qualifier_data are interpreted.  */
enum operand_qualifier_kind
{
  OQK_NIL,
  OQK_OPD_VARIANT,
  OQK_VALUE_IN_RANGE,
  OQK_MISC,
};
772 /* Operand qualifier description. */
773 struct operand_qualifier_data
775 /* The usage of the three data fields depends on the qualifier kind. */
776 int data0;
777 int data1;
778 int data2;
779 /* Description. */
780 const char *desc;
781 /* Kind. */
782 enum operand_qualifier_kind kind;
785 /* Indexed by the operand qualifier enumerators. */
786 struct operand_qualifier_data aarch64_opnd_qualifiers[] =
788 {0, 0, 0, "NIL", OQK_NIL},
790 /* Operand variant qualifiers.
791 First 3 fields:
792 element size, number of elements and common value for encoding. */
794 {4, 1, 0x0, "w", OQK_OPD_VARIANT},
795 {8, 1, 0x1, "x", OQK_OPD_VARIANT},
796 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
797 {8, 1, 0x1, "sp", OQK_OPD_VARIANT},
799 {1, 1, 0x0, "b", OQK_OPD_VARIANT},
800 {2, 1, 0x1, "h", OQK_OPD_VARIANT},
801 {4, 1, 0x2, "s", OQK_OPD_VARIANT},
802 {8, 1, 0x3, "d", OQK_OPD_VARIANT},
803 {16, 1, 0x4, "q", OQK_OPD_VARIANT},
804 {4, 1, 0x0, "4b", OQK_OPD_VARIANT},
805 {4, 1, 0x0, "2h", OQK_OPD_VARIANT},
807 {1, 4, 0x0, "4b", OQK_OPD_VARIANT},
808 {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
809 {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
810 {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
811 {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
812 {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
813 {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
814 {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
815 {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
816 {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
817 {16, 1, 0x8, "1q", OQK_OPD_VARIANT},
819 {0, 0, 0, "z", OQK_OPD_VARIANT},
820 {0, 0, 0, "m", OQK_OPD_VARIANT},
822 /* Qualifier for scaled immediate for Tag granule (stg,st2g,etc). */
823 {16, 0, 0, "tag", OQK_OPD_VARIANT},
825 /* Qualifiers constraining the value range.
826 First 3 fields:
827 Lower bound, higher bound, unused. */
829 {0, 15, 0, "CR", OQK_VALUE_IN_RANGE},
830 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
831 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
832 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
833 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
834 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
835 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},
837 /* Qualifiers for miscellaneous purpose.
838 First 3 fields:
839 unused, unused and unused. */
841 {0, 0, 0, "lsl", 0},
842 {0, 0, 0, "msl", 0},
844 {0, 0, 0, "retrieving", 0},
847 static inline bool
848 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
850 return aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT;
853 static inline bool
854 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
856 return aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE;
859 const char*
860 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
862 return aarch64_opnd_qualifiers[qualifier].desc;
865 /* Given an operand qualifier, return the expected data element size
866 of a qualified operand. */
867 unsigned char
868 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
870 assert (operand_variant_qualifier_p (qualifier));
871 return aarch64_opnd_qualifiers[qualifier].data0;
874 unsigned char
875 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
877 assert (operand_variant_qualifier_p (qualifier));
878 return aarch64_opnd_qualifiers[qualifier].data1;
881 aarch64_insn
882 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
884 assert (operand_variant_qualifier_p (qualifier));
885 return aarch64_opnd_qualifiers[qualifier].data2;
888 static int
889 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
891 assert (qualifier_value_in_range_constraint_p (qualifier));
892 return aarch64_opnd_qualifiers[qualifier].data0;
895 static int
896 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
898 assert (qualifier_value_in_range_constraint_p (qualifier));
899 return aarch64_opnd_qualifiers[qualifier].data1;
#ifdef DEBUG_AARCH64
/* printf-style debug trace helper; prefixes output with "#### ".  */
void
aarch64_verbose (const char *str, ...)
{
  va_list ap;
  va_start (ap, str);
  printf ("#### ");
  vprintf (str, ap);
  printf ("\n");
  va_end (ap);
}

/* Print the names of a full qualifier sequence on one debug line.  */
static inline void
dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  printf ("#### \t");
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
    printf ("%s,", aarch64_get_qualifier_name (*qualifier));
  printf ("\n");
}

/* Dump the qualifiers currently on the operands OPND next to the candidate
   sequence QUALIFIER they are being matched against.  */
static void
dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
		       const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];

  aarch64_verbose ("dump_match_qualifiers:");
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    curr[i] = opnd[i].qualifier;
  dump_qualifier_sequence (curr);
  aarch64_verbose ("against");
  dump_qualifier_sequence (qualifier);
}
#endif /* DEBUG_AARCH64 */
940 /* This function checks if the given instruction INSN is a destructive
941 instruction based on the usage of the registers. It does not recognize
942 unary destructive instructions. */
943 bool
944 aarch64_is_destructive_by_operands (const aarch64_opcode *opcode)
946 int i = 0;
947 const enum aarch64_opnd *opnds = opcode->operands;
949 if (opnds[0] == AARCH64_OPND_NIL)
950 return false;
952 while (opnds[++i] != AARCH64_OPND_NIL)
953 if (opnds[i] == opnds[0])
954 return true;
956 return false;
959 /* TODO improve this, we can have an extra field at the runtime to
960 store the number of operands rather than calculating it every time. */
963 aarch64_num_of_operands (const aarch64_opcode *opcode)
965 int i = 0;
966 const enum aarch64_opnd *opnds = opcode->operands;
967 while (opnds[i++] != AARCH64_OPND_NIL)
969 --i;
970 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
971 return i;
/* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
   If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.

   Store the smallest number of non-matching qualifiers in *INVALID_COUNT.
   This is always 0 if the function succeeds.

   N.B. on the entry, it is very likely that only some operands in *INST
   have had their qualifiers been established.

   If STOP_AT is not -1, the function will only try to match
   the qualifier sequence for operands before and including the operand
   of index STOP_AT; and on success *RET will only be filled with the first
   (STOP_AT+1) qualifiers.

   A couple examples of the matching algorithm:

   X,W,NIL should match
   X,W,NIL

   NIL,NIL should match
   X  ,NIL

   Apart from serving the main encoding routine, this can also be called
   during or after the operand decoding.  */

int
aarch64_find_best_match (const aarch64_inst *inst,
			 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
			 int stop_at, aarch64_opnd_qualifier_t *ret,
			 int *invalid_count)
{
  int i, num_opnds, invalid, min_invalid;
  const aarch64_opnd_qualifier_t *qualifiers;

  num_opnds = aarch64_num_of_operands (inst->opcode);
  if (num_opnds == 0)
    {
      /* Nothing to match: vacuous success.  */
      DEBUG_TRACE ("SUCCEED: no operand");
      *invalid_count = 0;
      return 1;
    }

  /* Normalise STOP_AT so the inner loop bound is always valid.  */
  if (stop_at < 0 || stop_at >= num_opnds)
    stop_at = num_opnds - 1;

  /* For each pattern.  */
  min_invalid = num_opnds;
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j;
      qualifiers = *qualifiers_list;

      /* Start as positive.  */
      invalid = 0;

      DEBUG_TRACE ("%d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_match_qualifiers (inst->operands, qualifiers);
#endif

      /* The first entry should be taken literally, even if it's an empty
	 qualifier sequence.  (This matters for strict testing.)  In other
	 positions an empty sequence acts as a terminator.  */
      if (i > 0 && empty_qualifier_sequence_p (qualifiers))
	break;

      /* Compare candidate sequence QUALIFIERS against the operands'
	 established qualifiers, counting mismatches in INVALID.  */
      for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
	{
	  if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL
	      && !(inst->opcode->flags & F_STRICT))
	    {
	      /* Either the operand does not have qualifier, or the qualifier
		 for the operand needs to be deduced from the qualifier
		 sequence.
		 In the latter case, any constraint checking related with
		 the obtained qualifier should be done later in
		 operand_general_constraint_met_p.  */
	      continue;
	    }
	  else if (*qualifiers != inst->operands[j].qualifier)
	    {
	      /* Unless the target qualifier can also qualify the operand
		 (which has already had a non-nil qualifier), non-equal
		 qualifiers are generally un-matched.  */
	      if (operand_also_qualified_p (inst->operands + j, *qualifiers))
		continue;
	      else
		invalid += 1;
	    }
	  else
	    continue;	/* Equal qualifiers are certainly matched.  */
	}

      if (min_invalid > invalid)
	min_invalid = invalid;

      /* Qualifiers established.  */
      if (min_invalid == 0)
	break;
    }

  *invalid_count = min_invalid;
  if (min_invalid == 0)
    {
      /* Fill the result in *RET.  QUALIFIERS_LIST still points at the
	 sequence that produced the zero-mismatch count.  */
      int j;
      qualifiers = *qualifiers_list;

      DEBUG_TRACE ("complete qualifiers using list %d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_qualifier_sequence (qualifiers);
#endif

      for (j = 0; j <= stop_at; ++j, ++qualifiers)
	ret[j] = *qualifiers;
      /* Pad the remainder of *RET with NIL qualifiers.  */
      for (; j < AARCH64_MAX_OPND_NUM; ++j)
	ret[j] = AARCH64_OPND_QLF_NIL;

      DEBUG_TRACE ("SUCCESS");
      return 1;
    }

  DEBUG_TRACE ("FAIL");
  return 0;
}
/* Operand qualifier matching and resolving.

   Return 1 if the operand qualifier(s) in *INST match one of the qualifier
   sequences in INST->OPCODE->qualifiers_list; otherwise return 0.

   Store the smallest number of non-matching qualifiers in *INVALID_COUNT.
   This is always 0 if the function succeeds.

   if UPDATE_P, update the qualifier(s) in *INST after the matching
   succeeds.  */

static int
match_operands_qualifier (aarch64_inst *inst, bool update_p,
			  int *invalid_count)
{
  int i;
  aarch64_opnd_qualifier_seq_t qualifiers;

  if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
				qualifiers, invalid_count))
    {
      DEBUG_TRACE ("matching FAIL");
      return 0;
    }

  /* Update the qualifiers.  Copy the matched sequence back into the
     instruction's operands, stopping at the NIL terminator.  */
  if (update_p)
    for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
      {
	if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
	  break;
	DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
			"update %s with %s for operand %d",
			aarch64_get_qualifier_name (inst->operands[i].qualifier),
			aarch64_get_qualifier_name (qualifiers[i]), i);
	inst->operands[i].qualifier = qualifiers[i];
      }

  DEBUG_TRACE ("matching SUCCESS");
  return 1;
}
1144 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1145 register by MOVZ.
1147 IS32 indicates whether value is a 32-bit immediate or not.
1148 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1149 amount will be returned in *SHIFT_AMOUNT. */
1151 bool
1152 aarch64_wide_constant_p (uint64_t value, int is32, unsigned int *shift_amount)
1154 int amount;
1156 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1158 if (is32)
1160 /* Allow all zeros or all ones in top 32-bits, so that
1161 32-bit constant expressions like ~0x80000000 are
1162 permitted. */
1163 if (value >> 32 != 0 && value >> 32 != 0xffffffff)
1164 /* Immediate out of range. */
1165 return false;
1166 value &= 0xffffffff;
1169 /* first, try movz then movn */
1170 amount = -1;
1171 if ((value & ((uint64_t) 0xffff << 0)) == value)
1172 amount = 0;
1173 else if ((value & ((uint64_t) 0xffff << 16)) == value)
1174 amount = 16;
1175 else if (!is32 && (value & ((uint64_t) 0xffff << 32)) == value)
1176 amount = 32;
1177 else if (!is32 && (value & ((uint64_t) 0xffff << 48)) == value)
1178 amount = 48;
1180 if (amount == -1)
1182 DEBUG_TRACE ("exit false with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1183 return false;
1186 if (shift_amount != NULL)
1187 *shift_amount = amount;
1189 DEBUG_TRACE ("exit true with amount %d", amount);
1191 return true;
/* Build the accepted values for immediate logical SIMD instructions.

   The standard encodings of the immediate value are:
     N      imms     immr         SIMD size  R             S
     1      ssssss   rrrrrr       64      UInt(rrrrrr)  UInt(ssssss)
     0      0sssss   0rrrrr       32      UInt(rrrrr)   UInt(sssss)
     0      10ssss   00rrrr       16      UInt(rrrr)    UInt(ssss)
     0      110sss   000rrr       8       UInt(rrr)     UInt(sss)
     0      1110ss   0000rr       4       UInt(rr)      UInt(ss)
     0      11110s   00000r       2       UInt(r)       UInt(s)
   where all-ones value of S is reserved.

   Let's call E the SIMD size.

   The immediate value is: S+1 bits '1' rotated to the right by R.

   The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
   (remember S != E - 1).  */

#define TOTAL_IMM_NB  5334

/* One valid bitmask immediate together with its standard encoding.  */
typedef struct
{
  uint64_t imm;
  aarch64_insn encoding;
} simd_imm_encoding;

/* Table of all TOTAL_IMM_NB valid bitmask immediates; filled by
   build_immediate_table and kept sorted by IMM for bsearch lookup.  */
static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1223 static int
1224 simd_imm_encoding_cmp(const void *i1, const void *i2)
1226 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1227 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1229 if (imm1->imm < imm2->imm)
1230 return -1;
1231 if (imm1->imm > imm2->imm)
1232 return +1;
1233 return 0;
/* immediate bitfield standard encoding
   imm13<12> imm13<5:0> imm13<11:6> SIMD size R      S
   1         ssssss     rrrrrr      64        rrrrrr ssssss
   0         0sssss     0rrrrr      32        rrrrr  sssss
   0         10ssss     00rrrr      16        rrrr   ssss
   0         110sss     000rrr      8         rrr    sss
   0         1110ss     0000rr      4         rr     ss
   0         11110s     00000r      2         r      s

   Pack IS64 (the N bit), rotation R and size/length field S into the
   13-bit immediate layout shown above.  */
static inline int
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  int encoding = s;		/* imm13<5:0> = S.  */
  encoding |= r << 6;		/* imm13<11:6> = R.  */
  encoding |= is64 << 12;	/* imm13<12> = N.  */
  return encoding;
}
/* Populate simd_immediates[] with every valid bitmask immediate and its
   encoding, then sort the table by immediate value so that
   aarch64_logical_immediate_p can look values up with bsearch.  */
static void
build_immediate_table (void)
{
  uint32_t log_e, e, s, r, s_mask;
  uint64_t mask, imm;
  int nb_imms;
  int is64;

  nb_imms = 0;
  for (log_e = 1; log_e <= 6; log_e++)
    {
      /* Get element size.  */
      e = 1u << log_e;
      if (log_e == 6)
	{
	  is64 = 1;
	  mask = 0xffffffffffffffffull;
	  s_mask = 0;
	}
      else
	{
	  is64 = 0;
	  mask = (1ull << e) - 1;
	  /* log_e  s_mask
	     1     ((1 << 4) - 1) << 2 = 111100
	     2     ((1 << 3) - 1) << 3 = 111000
	     3     ((1 << 2) - 1) << 4 = 110000
	     4     ((1 << 1) - 1) << 5 = 100000
	     5     ((1 << 0) - 1) << 6 = 000000 */
	  s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
	}
      for (s = 0; s < e - 1; s++)
	for (r = 0; r < e; r++)
	  {
	    /* s+1 consecutive bits to 1 (s < 63) */
	    imm = (1ull << (s + 1)) - 1;
	    /* rotate right by r */
	    if (r != 0)
	      imm = (imm >> r) | ((imm << (e - r)) & mask);
	    /* replicate the constant depending on SIMD size */
	    /* Deliberate fallthrough chain: each case doubles the
	       pattern until the full 64 bits are filled.  */
	    switch (log_e)
	      {
	      case 1: imm = (imm <<  2) | imm;
		/* Fall through.  */
	      case 2: imm = (imm <<  4) | imm;
		/* Fall through.  */
	      case 3: imm = (imm <<  8) | imm;
		/* Fall through.  */
	      case 4: imm = (imm << 16) | imm;
		/* Fall through.  */
	      case 5: imm = (imm << 32) | imm;
		/* Fall through.  */
	      case 6: break;
	      default: abort ();
	      }
	    simd_immediates[nb_imms].imm = imm;
	    simd_immediates[nb_imms].encoding =
	      encode_immediate_bitfield (is64, s | s_mask, r);
	    nb_imms++;
	  }
    }
  assert (nb_imms == TOTAL_IMM_NB);
  qsort (simd_immediates, nb_imms,
	 sizeof (simd_immediates[0]), simd_imm_encoding_cmp);
}
/* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
   be accepted by logical (immediate) instructions
   e.g. ORR <Xd|SP>, <Xn>, #<imm>.

   ESIZE is the number of bytes in the decoded immediate value.
   If ENCODING is not NULL, on the return of TRUE, the standard encoding for
   VALUE will be returned in *ENCODING.  */

bool
aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
{
  simd_imm_encoding imm_enc;
  const simd_imm_encoding *imm_encoding;
  static bool initialized = false;
  uint64_t upper;
  int i;

  DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), esize: %d", value,
	       value, esize);

  /* Lazily build the lookup table on first use.  */
  if (!initialized)
    {
      build_immediate_table ();
      initialized = true;
    }

  /* Allow all zeros or all ones in top bits, so that
     constant expressions like ~1 are permitted.  */
  /* The shift is split in two so that esize == 8 shifts by 32 twice
     rather than by 64 once, which would be undefined behaviour.  */
  upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
  if ((value & ~upper) != value && (value | upper) != value)
    return false;

  /* Replicate to a full 64-bit value.  */
  value &= ~upper;
  for (i = esize * 8; i < 64; i *= 2)
    value |= (value << i);

  /* Look the replicated value up in the sorted table.  */
  imm_enc.imm = value;
  imm_encoding = (const simd_imm_encoding *)
    bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
            sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
  if (imm_encoding == NULL)
    {
      DEBUG_TRACE ("exit with false");
      return false;
    }
  if (encoding != NULL)
    *encoding = imm_encoding->encoding;
  DEBUG_TRACE ("exit with true");
  return true;
}
/* If 64-bit immediate IMM is in the format of
   "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
   where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
   of value "abcdefgh".  Otherwise return -1.  */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int i, result;
  uint32_t byte;

  result = 0;
  /* Each byte of IMM must be all-zeros or all-ones; byte i contributes
     bit i of the result.  */
  for (i = 7; i >= 0; i--)
    {
      byte = (imm >> (8 * i)) & 0xff;
      if (byte == 0xff)
	result |= 1 << i;
      else if (byte != 0x00)
	/* A mixed byte means IMM is not an expanded imm8.  */
	return -1;
    }
  return result;
}
1390 /* Utility inline functions for operand_general_constraint_met_p. */
1392 static inline void
1393 set_error (aarch64_operand_error *mismatch_detail,
1394 enum aarch64_operand_error_kind kind, int idx,
1395 const char* error)
1397 if (mismatch_detail == NULL)
1398 return;
1399 mismatch_detail->kind = kind;
1400 mismatch_detail->index = idx;
1401 mismatch_detail->error = error;
1404 static inline void
1405 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1406 const char* error)
1408 if (mismatch_detail == NULL)
1409 return;
1410 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1413 static inline void
1414 set_invalid_regno_error (aarch64_operand_error *mismatch_detail, int idx,
1415 const char *prefix, int lower_bound, int upper_bound)
1417 if (mismatch_detail == NULL)
1418 return;
1419 set_error (mismatch_detail, AARCH64_OPDE_INVALID_REGNO, idx, NULL);
1420 mismatch_detail->data[0].s = prefix;
1421 mismatch_detail->data[1].i = lower_bound;
1422 mismatch_detail->data[2].i = upper_bound;
1425 static inline void
1426 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1427 int idx, int lower_bound, int upper_bound,
1428 const char* error)
1430 if (mismatch_detail == NULL)
1431 return;
1432 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1433 mismatch_detail->data[0].i = lower_bound;
1434 mismatch_detail->data[1].i = upper_bound;
1437 static inline void
1438 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1439 int idx, int lower_bound, int upper_bound)
1441 if (mismatch_detail == NULL)
1442 return;
1443 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1444 _("immediate value"));
1447 static inline void
1448 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1449 int idx, int lower_bound, int upper_bound)
1451 if (mismatch_detail == NULL)
1452 return;
1453 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1454 _("immediate offset"));
1457 static inline void
1458 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1459 int idx, int lower_bound, int upper_bound)
1461 if (mismatch_detail == NULL)
1462 return;
1463 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1464 _("register number"));
1467 static inline void
1468 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1469 int idx, int lower_bound, int upper_bound)
1471 if (mismatch_detail == NULL)
1472 return;
1473 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1474 _("register element index"));
1477 static inline void
1478 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1479 int idx, int lower_bound, int upper_bound)
1481 if (mismatch_detail == NULL)
1482 return;
1483 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1484 _("shift amount"));
1487 /* Report that the MUL modifier in operand IDX should be in the range
1488 [LOWER_BOUND, UPPER_BOUND]. */
1489 static inline void
1490 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1491 int idx, int lower_bound, int upper_bound)
1493 if (mismatch_detail == NULL)
1494 return;
1495 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1496 _("multiplier"));
1499 static inline void
1500 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1501 int alignment)
1503 if (mismatch_detail == NULL)
1504 return;
1505 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1506 mismatch_detail->data[0].i = alignment;
1509 static inline void
1510 set_reg_list_length_error (aarch64_operand_error *mismatch_detail, int idx,
1511 int expected_num)
1513 if (mismatch_detail == NULL)
1514 return;
1515 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST_LENGTH, idx, NULL);
1516 mismatch_detail->data[0].i = 1 << expected_num;
1519 static inline void
1520 set_reg_list_stride_error (aarch64_operand_error *mismatch_detail, int idx,
1521 int expected_num)
1523 if (mismatch_detail == NULL)
1524 return;
1525 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST_STRIDE, idx, NULL);
1526 mismatch_detail->data[0].i = 1 << expected_num;
1529 static inline void
1530 set_invalid_vg_size (aarch64_operand_error *mismatch_detail,
1531 int idx, int expected)
1533 if (mismatch_detail == NULL)
1534 return;
1535 set_error (mismatch_detail, AARCH64_OPDE_INVALID_VG_SIZE, idx, NULL);
1536 mismatch_detail->data[0].i = expected;
1539 static inline void
1540 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1541 const char* error)
1543 if (mismatch_detail == NULL)
1544 return;
1545 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1548 /* Check that indexed register operand OPND has a register in the range
1549 [MIN_REGNO, MAX_REGNO] and an index in the range [MIN_INDEX, MAX_INDEX].
1550 PREFIX is the register prefix, such as "z" for SVE vector registers. */
1552 static bool
1553 check_reglane (const aarch64_opnd_info *opnd,
1554 aarch64_operand_error *mismatch_detail, int idx,
1555 const char *prefix, int min_regno, int max_regno,
1556 int min_index, int max_index)
1558 if (!value_in_range_p (opnd->reglane.regno, min_regno, max_regno))
1560 set_invalid_regno_error (mismatch_detail, idx, prefix, min_regno,
1561 max_regno);
1562 return false;
1564 if (!value_in_range_p (opnd->reglane.index, min_index, max_index))
1566 set_elem_idx_out_of_range_error (mismatch_detail, idx, min_index,
1567 max_index);
1568 return false;
1570 return true;
1573 /* Check that register list operand OPND has NUM_REGS registers and a
1574 register stride of STRIDE. */
1576 static bool
1577 check_reglist (const aarch64_opnd_info *opnd,
1578 aarch64_operand_error *mismatch_detail, int idx,
1579 int num_regs, int stride)
1581 if (opnd->reglist.num_regs != num_regs)
1583 set_reg_list_length_error (mismatch_detail, idx, num_regs);
1584 return false;
1586 if (opnd->reglist.stride != stride)
1588 set_reg_list_stride_error (mismatch_detail, idx, stride);
1589 return false;
1591 return true;
/* Check that indexed ZA operand OPND has:

   - a selection register in the range [MIN_WREG, MIN_WREG + 3]

   - RANGE_SIZE consecutive immediate offsets.

   - an initial immediate offset that is a multiple of RANGE_SIZE
     in the range [0, MAX_VALUE * RANGE_SIZE]

   - a vector group size of GROUP_SIZE.  */

static bool
check_za_access (const aarch64_opnd_info *opnd,
		 aarch64_operand_error *mismatch_detail, int idx,
		 int min_wreg, int max_value, unsigned int range_size,
		 int group_size)
{
  /* Selection register: only w12-w15 (MIN_WREG == 12) and w8-w11
     (MIN_WREG == 8) windows are supported here.  */
  if (!value_in_range_p (opnd->indexed_za.index.regno, min_wreg, min_wreg + 3))
    {
      if (min_wreg == 12)
	set_other_error (mismatch_detail, idx,
			 _("expected a selection register in the"
			   " range w12-w15"));
      else if (min_wreg == 8)
	set_other_error (mismatch_detail, idx,
			 _("expected a selection register in the"
			   " range w8-w11"));
      else
	abort ();
      return false;
    }

  int max_index = max_value * range_size;
  if (!value_in_range_p (opnd->indexed_za.index.imm, 0, max_index))
    {
      set_offset_out_of_range_error (mismatch_detail, idx, 0, max_index);
      return false;
    }

  /* The starting offset of a multi-offset range must be aligned to the
     range size.  */
  if ((opnd->indexed_za.index.imm % range_size) != 0)
    {
      assert (range_size == 2 || range_size == 4);
      set_other_error (mismatch_detail, idx,
		       range_size == 2
		       ? _("starting offset is not a multiple of 2")
		       : _("starting offset is not a multiple of 4"));
      return false;
    }

  /* COUNTM1 is the number of offsets minus one, so it must equal
     RANGE_SIZE - 1.  */
  if (opnd->indexed_za.index.countm1 != range_size - 1)
    {
      if (range_size == 1)
	set_other_error (mismatch_detail, idx,
			 _("expected a single offset rather than"
			   " a range"));
      else if (range_size == 2)
	set_other_error (mismatch_detail, idx,
			 _("expected a range of two offsets"));
      else if (range_size == 4)
	set_other_error (mismatch_detail, idx,
			 _("expected a range of four offsets"));
      else
	abort ();
      return false;
    }

  /* The vector group specifier is optional in assembly code.  */
  if (opnd->indexed_za.group_size != 0
      && opnd->indexed_za.group_size != group_size)
    {
      set_invalid_vg_size (mismatch_detail, idx, group_size);
      return false;
    }

  return true;
}
1671 /* General constraint checking based on operand code.
1673 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1674 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1676 This function has to be called after the qualifiers for all operands
1677 have been resolved.
1679 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1680 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1681 of error message during the disassembling where error message is not
1682 wanted. We avoid the dynamic construction of strings of error messages
1683 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1684 use a combination of error code, static string and some integer data to
1685 represent an error. */
1687 static int
1688 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1689 enum aarch64_opnd type,
1690 const aarch64_opcode *opcode,
1691 aarch64_operand_error *mismatch_detail)
1693 unsigned num, modifiers, shift;
1694 unsigned char size;
1695 int64_t imm, min_value, max_value;
1696 uint64_t uvalue, mask;
1697 const aarch64_opnd_info *opnd = opnds + idx;
1698 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1699 int i;
1701 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1703 switch (aarch64_operands[type].op_class)
1705 case AARCH64_OPND_CLASS_INT_REG:
1706 /* Check for pair of xzr registers. */
1707 if (type == AARCH64_OPND_PAIRREG_OR_XZR
1708 && opnds[idx - 1].reg.regno == 0x1f)
1710 if (opnds[idx].reg.regno != 0x1f)
1712 set_syntax_error (mismatch_detail, idx - 1,
1713 _("second reg in pair should be xzr if first is"
1714 " xzr"));
1715 return 0;
1718 /* Check pair reg constraints for instructions taking a pair of
1719 consecutively-numbered general-purpose registers. */
1720 else if (type == AARCH64_OPND_PAIRREG
1721 || type == AARCH64_OPND_PAIRREG_OR_XZR)
1723 assert (idx == 1 || idx == 2 || idx == 3 || idx == 5);
1724 if (opnds[idx - 1].reg.regno % 2 != 0)
1726 set_syntax_error (mismatch_detail, idx - 1,
1727 _("reg pair must start from even reg"));
1728 return 0;
1730 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1732 set_syntax_error (mismatch_detail, idx,
1733 _("reg pair must be contiguous"));
1734 return 0;
1736 break;
1739 /* <Xt> may be optional in some IC and TLBI instructions. */
1740 if (type == AARCH64_OPND_Rt_SYS)
1742 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1743 == AARCH64_OPND_CLASS_SYSTEM));
1744 if (opnds[1].present
1745 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1747 set_other_error (mismatch_detail, idx, _("extraneous register"));
1748 return 0;
1750 if (!opnds[1].present
1751 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1753 set_other_error (mismatch_detail, idx, _("missing register"));
1754 return 0;
1757 switch (qualifier)
1759 case AARCH64_OPND_QLF_WSP:
1760 case AARCH64_OPND_QLF_SP:
1761 if (!aarch64_stack_pointer_p (opnd))
1763 set_other_error (mismatch_detail, idx,
1764 _("stack pointer register expected"));
1765 return 0;
1767 break;
1768 default:
1769 break;
1771 break;
1773 case AARCH64_OPND_CLASS_SVE_REG:
1774 switch (type)
1776 case AARCH64_OPND_SVE_Zm3_INDEX:
1777 case AARCH64_OPND_SVE_Zm3_22_INDEX:
1778 case AARCH64_OPND_SVE_Zm3_19_INDEX:
1779 case AARCH64_OPND_SVE_Zm3_11_INDEX:
1780 case AARCH64_OPND_SVE_Zm4_11_INDEX:
1781 case AARCH64_OPND_SVE_Zm4_INDEX:
1782 size = get_operand_fields_width (get_operand_from_code (type));
1783 shift = get_operand_specific_data (&aarch64_operands[type]);
1784 if (!check_reglane (opnd, mismatch_detail, idx,
1785 "z", 0, (1 << shift) - 1,
1786 0, (1u << (size - shift)) - 1))
1787 return 0;
1788 break;
1790 case AARCH64_OPND_SVE_Zn_INDEX:
1791 size = aarch64_get_qualifier_esize (opnd->qualifier);
1792 if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 31,
1793 0, 64 / size - 1))
1794 return 0;
1795 break;
1797 case AARCH64_OPND_SVE_Zm_imm4:
1798 if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 31, 0, 15))
1799 return 0;
1800 break;
1802 case AARCH64_OPND_SVE_Zn_5_INDEX:
1803 size = aarch64_get_qualifier_esize (opnd->qualifier);
1804 if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 31,
1805 0, 16 / size - 1))
1806 return 0;
1807 break;
1809 case AARCH64_OPND_SME_PNn3_INDEX1:
1810 case AARCH64_OPND_SME_PNn3_INDEX2:
1811 size = get_operand_field_width (get_operand_from_code (type), 1);
1812 if (!check_reglane (opnd, mismatch_detail, idx, "pn", 8, 15,
1813 0, (1 << size) - 1))
1814 return 0;
1815 break;
1817 case AARCH64_OPND_SME_Zn_INDEX1_16:
1818 case AARCH64_OPND_SME_Zn_INDEX2_15:
1819 case AARCH64_OPND_SME_Zn_INDEX2_16:
1820 case AARCH64_OPND_SME_Zn_INDEX3_14:
1821 case AARCH64_OPND_SME_Zn_INDEX3_15:
1822 case AARCH64_OPND_SME_Zn_INDEX4_14:
1823 size = get_operand_fields_width (get_operand_from_code (type)) - 5;
1824 if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 31,
1825 0, (1 << size) - 1))
1826 return 0;
1827 break;
1829 case AARCH64_OPND_SME_Zm_INDEX1:
1830 case AARCH64_OPND_SME_Zm_INDEX2:
1831 case AARCH64_OPND_SME_Zm_INDEX3_1:
1832 case AARCH64_OPND_SME_Zm_INDEX3_2:
1833 case AARCH64_OPND_SME_Zm_INDEX3_10:
1834 case AARCH64_OPND_SME_Zm_INDEX4_1:
1835 case AARCH64_OPND_SME_Zm_INDEX4_10:
1836 size = get_operand_fields_width (get_operand_from_code (type)) - 4;
1837 if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 15,
1838 0, (1 << size) - 1))
1839 return 0;
1840 break;
1842 case AARCH64_OPND_SME_Zm:
1843 if (opnd->reg.regno > 15)
1845 set_invalid_regno_error (mismatch_detail, idx, "z", 0, 15);
1846 return 0;
1848 break;
1850 case AARCH64_OPND_SME_PnT_Wm_imm:
1851 size = aarch64_get_qualifier_esize (opnd->qualifier);
1852 max_value = 16 / size - 1;
1853 if (!check_za_access (opnd, mismatch_detail, idx,
1854 12, max_value, 1, 0))
1855 return 0;
1856 break;
1858 default:
1859 break;
1861 break;
1863 case AARCH64_OPND_CLASS_SVE_REGLIST:
1864 switch (type)
1866 case AARCH64_OPND_SME_Pdx2:
1867 case AARCH64_OPND_SME_Zdnx2:
1868 case AARCH64_OPND_SME_Zdnx4:
1869 case AARCH64_OPND_SME_Zmx2:
1870 case AARCH64_OPND_SME_Zmx4:
1871 case AARCH64_OPND_SME_Znx2:
1872 case AARCH64_OPND_SME_Znx4:
1873 case AARCH64_OPND_SME_Zt2:
1874 case AARCH64_OPND_SME_Zt3:
1875 case AARCH64_OPND_SME_Zt4:
1876 num = get_operand_specific_data (&aarch64_operands[type]);
1877 if (!check_reglist (opnd, mismatch_detail, idx, num, 1))
1878 return 0;
1879 if ((opnd->reglist.first_regno % num) != 0)
1881 set_other_error (mismatch_detail, idx,
1882 _("start register out of range"));
1883 return 0;
1885 break;
1887 case AARCH64_OPND_SME_Ztx2_STRIDED:
1888 case AARCH64_OPND_SME_Ztx4_STRIDED:
1889 /* 2-register lists have a stride of 8 and 4-register lists
1890 have a stride of 4. */
1891 num = get_operand_specific_data (&aarch64_operands[type]);
1892 if (!check_reglist (opnd, mismatch_detail, idx, num, 16 / num))
1893 return 0;
1894 num = 16 | (opnd->reglist.stride - 1);
1895 if ((opnd->reglist.first_regno & ~num) != 0)
1897 set_other_error (mismatch_detail, idx,
1898 _("start register out of range"));
1899 return 0;
1901 break;
1903 case AARCH64_OPND_SME_PdxN:
1904 case AARCH64_OPND_SVE_ZnxN:
1905 case AARCH64_OPND_SVE_ZtxN:
1906 num = get_opcode_dependent_value (opcode);
1907 if (!check_reglist (opnd, mismatch_detail, idx, num, 1))
1908 return 0;
1909 break;
1911 default:
1912 abort ();
1914 break;
1916 case AARCH64_OPND_CLASS_ZA_ACCESS:
1917 switch (type)
1919 case AARCH64_OPND_SME_ZA_HV_idx_src:
1920 case AARCH64_OPND_SME_ZA_HV_idx_dest:
1921 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
1922 size = aarch64_get_qualifier_esize (opnd->qualifier);
1923 max_value = 16 / size - 1;
1924 if (!check_za_access (opnd, mismatch_detail, idx, 12, max_value, 1,
1925 get_opcode_dependent_value (opcode)))
1926 return 0;
1927 break;
1929 case AARCH64_OPND_SME_ZA_array_off4:
1930 if (!check_za_access (opnd, mismatch_detail, idx, 12, 15, 1,
1931 get_opcode_dependent_value (opcode)))
1932 return 0;
1933 break;
1935 case AARCH64_OPND_SME_ZA_array_off3_0:
1936 case AARCH64_OPND_SME_ZA_array_off3_5:
1937 if (!check_za_access (opnd, mismatch_detail, idx, 8, 7, 1,
1938 get_opcode_dependent_value (opcode)))
1939 return 0;
1940 break;
1942 case AARCH64_OPND_SME_ZA_array_off1x4:
1943 if (!check_za_access (opnd, mismatch_detail, idx, 8, 1, 4,
1944 get_opcode_dependent_value (opcode)))
1945 return 0;
1946 break;
1948 case AARCH64_OPND_SME_ZA_array_off2x2:
1949 if (!check_za_access (opnd, mismatch_detail, idx, 8, 3, 2,
1950 get_opcode_dependent_value (opcode)))
1951 return 0;
1952 break;
1954 case AARCH64_OPND_SME_ZA_array_off2x4:
1955 if (!check_za_access (opnd, mismatch_detail, idx, 8, 3, 4,
1956 get_opcode_dependent_value (opcode)))
1957 return 0;
1958 break;
1960 case AARCH64_OPND_SME_ZA_array_off3x2:
1961 if (!check_za_access (opnd, mismatch_detail, idx, 8, 7, 2,
1962 get_opcode_dependent_value (opcode)))
1963 return 0;
1964 break;
1966 case AARCH64_OPND_SME_ZA_array_vrsb_1:
1967 if (!check_za_access (opnd, mismatch_detail, idx, 12, 7, 2,
1968 get_opcode_dependent_value (opcode)))
1969 return 0;
1970 break;
1972 case AARCH64_OPND_SME_ZA_array_vrsh_1:
1973 if (!check_za_access (opnd, mismatch_detail, idx, 12, 3, 2,
1974 get_opcode_dependent_value (opcode)))
1975 return 0;
1976 break;
1978 case AARCH64_OPND_SME_ZA_array_vrss_1:
1979 if (!check_za_access (opnd, mismatch_detail, idx, 12, 1, 2,
1980 get_opcode_dependent_value (opcode)))
1981 return 0;
1982 break;
1984 case AARCH64_OPND_SME_ZA_array_vrsd_1:
1985 if (!check_za_access (opnd, mismatch_detail, idx, 12, 0, 2,
1986 get_opcode_dependent_value (opcode)))
1987 return 0;
1988 break;
1990 case AARCH64_OPND_SME_ZA_array_vrsb_2:
1991 if (!check_za_access (opnd, mismatch_detail, idx, 12, 3, 4,
1992 get_opcode_dependent_value (opcode)))
1993 return 0;
1994 break;
1996 case AARCH64_OPND_SME_ZA_array_vrsh_2:
1997 if (!check_za_access (opnd, mismatch_detail, idx, 12, 1, 4,
1998 get_opcode_dependent_value (opcode)))
1999 return 0;
2000 break;
2002 case AARCH64_OPND_SME_ZA_array_vrss_2:
2003 case AARCH64_OPND_SME_ZA_array_vrsd_2:
2004 if (!check_za_access (opnd, mismatch_detail, idx, 12, 0, 4,
2005 get_opcode_dependent_value (opcode)))
2006 return 0;
2007 break;
2009 case AARCH64_OPND_SME_ZA_HV_idx_srcxN:
2010 case AARCH64_OPND_SME_ZA_HV_idx_destxN:
2011 size = aarch64_get_qualifier_esize (opnd->qualifier);
2012 num = get_opcode_dependent_value (opcode);
2013 max_value = 16 / num / size;
2014 if (max_value > 0)
2015 max_value -= 1;
2016 if (!check_za_access (opnd, mismatch_detail, idx,
2017 12, max_value, num, 0))
2018 return 0;
2019 break;
2021 default:
2022 abort ();
2024 break;
2026 case AARCH64_OPND_CLASS_PRED_REG:
2027 switch (type)
2029 case AARCH64_OPND_SME_PNd3:
2030 case AARCH64_OPND_SME_PNg3:
2031 if (opnd->reg.regno < 8)
2033 set_invalid_regno_error (mismatch_detail, idx, "pn", 8, 15);
2034 return 0;
2036 break;
2038 default:
2039 if (opnd->reg.regno >= 8
2040 && get_operand_fields_width (get_operand_from_code (type)) == 3)
2042 set_invalid_regno_error (mismatch_detail, idx, "p", 0, 7);
2043 return 0;
2045 break;
2047 break;
2049 case AARCH64_OPND_CLASS_COND:
2050 if (type == AARCH64_OPND_COND1
2051 && (opnds[idx].cond->value & 0xe) == 0xe)
2053 /* Not allow AL or NV. */
2054 set_syntax_error (mismatch_detail, idx, NULL);
2056 break;
2058 case AARCH64_OPND_CLASS_ADDRESS:
2059 /* Check writeback. */
2060 switch (opcode->iclass)
2062 case ldst_pos:
2063 case ldst_unscaled:
2064 case ldstnapair_offs:
2065 case ldstpair_off:
2066 case ldst_unpriv:
2067 if (opnd->addr.writeback == 1)
2069 set_syntax_error (mismatch_detail, idx,
2070 _("unexpected address writeback"));
2071 return 0;
2073 break;
2074 case ldst_imm10:
2075 if (opnd->addr.writeback == 1 && opnd->addr.preind != 1)
2077 set_syntax_error (mismatch_detail, idx,
2078 _("unexpected address writeback"));
2079 return 0;
2081 break;
2082 case ldst_imm9:
2083 case ldstpair_indexed:
2084 case asisdlsep:
2085 case asisdlsop:
2086 if (opnd->addr.writeback == 0)
2088 set_syntax_error (mismatch_detail, idx,
2089 _("address writeback expected"));
2090 return 0;
2092 break;
2093 default:
2094 assert (opnd->addr.writeback == 0);
2095 break;
2097 switch (type)
2099 case AARCH64_OPND_ADDR_SIMM7:
2100 /* Scaled signed 7 bits immediate offset. */
2101 /* Get the size of the data element that is accessed, which may be
2102 different from that of the source register size,
2103 e.g. in strb/ldrb. */
2104 size = aarch64_get_qualifier_esize (opnd->qualifier);
2105 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
2107 set_offset_out_of_range_error (mismatch_detail, idx,
2108 -64 * size, 63 * size);
2109 return 0;
2111 if (!value_aligned_p (opnd->addr.offset.imm, size))
2113 set_unaligned_error (mismatch_detail, idx, size);
2114 return 0;
2116 break;
2117 case AARCH64_OPND_ADDR_OFFSET:
2118 case AARCH64_OPND_ADDR_SIMM9:
2119 /* Unscaled signed 9 bits immediate offset. */
2120 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
2122 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
2123 return 0;
2125 break;
2127 case AARCH64_OPND_ADDR_SIMM9_2:
2128 /* Unscaled signed 9 bits immediate offset, which has to be negative
2129 or unaligned. */
2130 size = aarch64_get_qualifier_esize (qualifier);
2131 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
2132 && !value_aligned_p (opnd->addr.offset.imm, size))
2133 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
2134 return 1;
2135 set_other_error (mismatch_detail, idx,
2136 _("negative or unaligned offset expected"));
2137 return 0;
2139 case AARCH64_OPND_ADDR_SIMM10:
2140 /* Scaled signed 10 bits immediate offset. */
2141 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4088))
2143 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4088);
2144 return 0;
2146 if (!value_aligned_p (opnd->addr.offset.imm, 8))
2148 set_unaligned_error (mismatch_detail, idx, 8);
2149 return 0;
2151 break;
2153 case AARCH64_OPND_ADDR_SIMM11:
2154 /* Signed 11 bits immediate offset (multiple of 16). */
2155 if (!value_in_range_p (opnd->addr.offset.imm, -1024, 1008))
2157 set_offset_out_of_range_error (mismatch_detail, idx, -1024, 1008);
2158 return 0;
2161 if (!value_aligned_p (opnd->addr.offset.imm, 16))
2163 set_unaligned_error (mismatch_detail, idx, 16);
2164 return 0;
2166 break;
2168 case AARCH64_OPND_ADDR_SIMM13:
2169 /* Signed 13 bits immediate offset (multiple of 16). */
2170 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4080))
2172 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4080);
2173 return 0;
2176 if (!value_aligned_p (opnd->addr.offset.imm, 16))
2178 set_unaligned_error (mismatch_detail, idx, 16);
2179 return 0;
2181 break;
2183 case AARCH64_OPND_SIMD_ADDR_POST:
2184 /* AdvSIMD load/store multiple structures, post-index. */
2185 assert (idx == 1);
2186 if (opnd->addr.offset.is_reg)
2188 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
2189 return 1;
2190 else
2192 set_other_error (mismatch_detail, idx,
2193 _("invalid register offset"));
2194 return 0;
2197 else
2199 const aarch64_opnd_info *prev = &opnds[idx-1];
2200 unsigned num_bytes; /* total number of bytes transferred. */
2201 /* The opcode dependent area stores the number of elements in
2202 each structure to be loaded/stored. */
2203 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
2204 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
2205 /* Special handling of loading single structure to all lane. */
2206 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
2207 * aarch64_get_qualifier_esize (prev->qualifier);
2208 else
2209 num_bytes = prev->reglist.num_regs
2210 * aarch64_get_qualifier_esize (prev->qualifier)
2211 * aarch64_get_qualifier_nelem (prev->qualifier);
2212 if ((int) num_bytes != opnd->addr.offset.imm)
2214 set_other_error (mismatch_detail, idx,
2215 _("invalid post-increment amount"));
2216 return 0;
2219 break;
2221 case AARCH64_OPND_ADDR_REGOFF:
2222 /* Get the size of the data element that is accessed, which may be
2223 different from that of the source register size,
2224 e.g. in strb/ldrb. */
2225 size = aarch64_get_qualifier_esize (opnd->qualifier);
2226 /* It is either no shift or shift by the binary logarithm of SIZE. */
2227 if (opnd->shifter.amount != 0
2228 && opnd->shifter.amount != (int)get_logsz (size))
2230 set_other_error (mismatch_detail, idx,
2231 _("invalid shift amount"));
2232 return 0;
2234 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
2235 operators. */
2236 switch (opnd->shifter.kind)
2238 case AARCH64_MOD_UXTW:
2239 case AARCH64_MOD_LSL:
2240 case AARCH64_MOD_SXTW:
2241 case AARCH64_MOD_SXTX: break;
2242 default:
2243 set_other_error (mismatch_detail, idx,
2244 _("invalid extend/shift operator"));
2245 return 0;
2247 break;
2249 case AARCH64_OPND_ADDR_UIMM12:
2250 imm = opnd->addr.offset.imm;
2251 /* Get the size of the data element that is accessed, which may be
2252 different from that of the source register size,
2253 e.g. in strb/ldrb. */
2254 size = aarch64_get_qualifier_esize (qualifier);
2255 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
2257 set_offset_out_of_range_error (mismatch_detail, idx,
2258 0, 4095 * size);
2259 return 0;
2261 if (!value_aligned_p (opnd->addr.offset.imm, size))
2263 set_unaligned_error (mismatch_detail, idx, size);
2264 return 0;
2266 break;
2268 case AARCH64_OPND_ADDR_PCREL14:
2269 case AARCH64_OPND_ADDR_PCREL19:
2270 case AARCH64_OPND_ADDR_PCREL21:
2271 case AARCH64_OPND_ADDR_PCREL26:
2272 imm = opnd->imm.value;
2273 if (operand_need_shift_by_two (get_operand_from_code (type)))
2275 /* The offset value in a PC-relative branch instruction is alway
2276 4-byte aligned and is encoded without the lowest 2 bits. */
2277 if (!value_aligned_p (imm, 4))
2279 set_unaligned_error (mismatch_detail, idx, 4);
2280 return 0;
2282 /* Right shift by 2 so that we can carry out the following check
2283 canonically. */
2284 imm >>= 2;
2286 size = get_operand_fields_width (get_operand_from_code (type));
2287 if (!value_fit_signed_field_p (imm, size))
2289 set_other_error (mismatch_detail, idx,
2290 _("immediate out of range"));
2291 return 0;
2293 break;
2295 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
2296 if (!value_in_range_p (opnd->addr.offset.imm, 0, 15))
2298 set_offset_out_of_range_error (mismatch_detail, idx, 0, 15);
2299 return 0;
2301 break;
2303 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
2304 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
2305 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
2306 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
2307 min_value = -8;
2308 max_value = 7;
2309 sve_imm_offset_vl:
2310 assert (!opnd->addr.offset.is_reg);
2311 assert (opnd->addr.preind);
2312 num = 1 + get_operand_specific_data (&aarch64_operands[type]);
2313 min_value *= num;
2314 max_value *= num;
2315 if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
2316 || (opnd->shifter.operator_present
2317 && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
2319 set_other_error (mismatch_detail, idx,
2320 _("invalid addressing mode"));
2321 return 0;
2323 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
2325 set_offset_out_of_range_error (mismatch_detail, idx,
2326 min_value, max_value);
2327 return 0;
2329 if (!value_aligned_p (opnd->addr.offset.imm, num))
2331 set_unaligned_error (mismatch_detail, idx, num);
2332 return 0;
2334 break;
2336 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
2337 min_value = -32;
2338 max_value = 31;
2339 goto sve_imm_offset_vl;
2341 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
2342 min_value = -256;
2343 max_value = 255;
2344 goto sve_imm_offset_vl;
2346 case AARCH64_OPND_SVE_ADDR_RI_U6:
2347 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
2348 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
2349 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
2350 min_value = 0;
2351 max_value = 63;
2352 sve_imm_offset:
2353 assert (!opnd->addr.offset.is_reg);
2354 assert (opnd->addr.preind);
2355 num = 1 << get_operand_specific_data (&aarch64_operands[type]);
2356 min_value *= num;
2357 max_value *= num;
2358 if (opnd->shifter.operator_present
2359 || opnd->shifter.amount_present)
2361 set_other_error (mismatch_detail, idx,
2362 _("invalid addressing mode"));
2363 return 0;
2365 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
2367 set_offset_out_of_range_error (mismatch_detail, idx,
2368 min_value, max_value);
2369 return 0;
2371 if (!value_aligned_p (opnd->addr.offset.imm, num))
2373 set_unaligned_error (mismatch_detail, idx, num);
2374 return 0;
2376 break;
2378 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
2379 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
2380 min_value = -8;
2381 max_value = 7;
2382 goto sve_imm_offset;
2384 case AARCH64_OPND_SVE_ADDR_ZX:
2385 /* Everything is already ensured by parse_operands or
2386 aarch64_ext_sve_addr_rr_lsl (because this is a very specific
2387 argument type). */
2388 assert (opnd->addr.offset.is_reg);
2389 assert (opnd->addr.preind);
2390 assert ((aarch64_operands[type].flags & OPD_F_NO_ZR) == 0);
2391 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2392 assert (opnd->shifter.operator_present == 0);
2393 break;
2395 case AARCH64_OPND_SVE_ADDR_R:
2396 case AARCH64_OPND_SVE_ADDR_RR:
2397 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
2398 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
2399 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
2400 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
2401 case AARCH64_OPND_SVE_ADDR_RX:
2402 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
2403 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
2404 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
2405 case AARCH64_OPND_SVE_ADDR_RZ:
2406 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
2407 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
2408 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
2409 modifiers = 1 << AARCH64_MOD_LSL;
2410 sve_rr_operand:
2411 assert (opnd->addr.offset.is_reg);
2412 assert (opnd->addr.preind);
2413 if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
2414 && opnd->addr.offset.regno == 31)
2416 set_other_error (mismatch_detail, idx,
2417 _("index register xzr is not allowed"));
2418 return 0;
2420 if (((1 << opnd->shifter.kind) & modifiers) == 0
2421 || (opnd->shifter.amount
2422 != get_operand_specific_data (&aarch64_operands[type])))
2424 set_other_error (mismatch_detail, idx,
2425 _("invalid addressing mode"));
2426 return 0;
2428 break;
2430 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
2431 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
2432 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
2433 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
2434 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
2435 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
2436 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
2437 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
2438 modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
2439 goto sve_rr_operand;
2441 case AARCH64_OPND_SVE_ADDR_ZI_U5:
2442 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
2443 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
2444 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
2445 min_value = 0;
2446 max_value = 31;
2447 goto sve_imm_offset;
2449 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
2450 modifiers = 1 << AARCH64_MOD_LSL;
2451 sve_zz_operand:
2452 assert (opnd->addr.offset.is_reg);
2453 assert (opnd->addr.preind);
2454 if (((1 << opnd->shifter.kind) & modifiers) == 0
2455 || opnd->shifter.amount < 0
2456 || opnd->shifter.amount > 3)
2458 set_other_error (mismatch_detail, idx,
2459 _("invalid addressing mode"));
2460 return 0;
2462 break;
2464 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
2465 modifiers = (1 << AARCH64_MOD_SXTW);
2466 goto sve_zz_operand;
2468 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
2469 modifiers = 1 << AARCH64_MOD_UXTW;
2470 goto sve_zz_operand;
2472 default:
2473 break;
2475 break;
2477 case AARCH64_OPND_CLASS_SIMD_REGLIST:
2478 if (type == AARCH64_OPND_LEt)
2480 /* Get the upper bound for the element index. */
2481 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
2482 if (!value_in_range_p (opnd->reglist.index, 0, num))
2484 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2485 return 0;
2488 /* The opcode dependent area stores the number of elements in
2489 each structure to be loaded/stored. */
2490 num = get_opcode_dependent_value (opcode);
2491 switch (type)
2493 case AARCH64_OPND_LVt:
2494 assert (num >= 1 && num <= 4);
2495 /* Unless LD1/ST1, the number of registers should be equal to that
2496 of the structure elements. */
2497 if (num != 1 && !check_reglist (opnd, mismatch_detail, idx, num, 1))
2498 return 0;
2499 break;
2500 case AARCH64_OPND_LVt_AL:
2501 case AARCH64_OPND_LEt:
2502 assert (num >= 1 && num <= 4);
2503 /* The number of registers should be equal to that of the structure
2504 elements. */
2505 if (!check_reglist (opnd, mismatch_detail, idx, num, 1))
2506 return 0;
2507 break;
2508 default:
2509 break;
2511 if (opnd->reglist.stride != 1)
2513 set_reg_list_stride_error (mismatch_detail, idx, 1);
2514 return 0;
2516 break;
2518 case AARCH64_OPND_CLASS_IMMEDIATE:
2519 /* Constraint check on immediate operand. */
2520 imm = opnd->imm.value;
2521 /* E.g. imm_0_31 constrains value to be 0..31. */
2522 if (qualifier_value_in_range_constraint_p (qualifier)
2523 && !value_in_range_p (imm, get_lower_bound (qualifier),
2524 get_upper_bound (qualifier)))
2526 set_imm_out_of_range_error (mismatch_detail, idx,
2527 get_lower_bound (qualifier),
2528 get_upper_bound (qualifier));
2529 return 0;
2532 switch (type)
2534 case AARCH64_OPND_AIMM:
2535 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2537 set_other_error (mismatch_detail, idx,
2538 _("invalid shift operator"));
2539 return 0;
2541 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
2543 set_other_error (mismatch_detail, idx,
2544 _("shift amount must be 0 or 12"));
2545 return 0;
2547 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
2549 set_other_error (mismatch_detail, idx,
2550 _("immediate out of range"));
2551 return 0;
2553 break;
2555 case AARCH64_OPND_HALF:
2556 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
2557 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2559 set_other_error (mismatch_detail, idx,
2560 _("invalid shift operator"));
2561 return 0;
2563 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2564 if (!value_aligned_p (opnd->shifter.amount, 16))
2566 set_other_error (mismatch_detail, idx,
2567 _("shift amount must be a multiple of 16"));
2568 return 0;
2570 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
2572 set_sft_amount_out_of_range_error (mismatch_detail, idx,
2573 0, size * 8 - 16);
2574 return 0;
2576 if (opnd->imm.value < 0)
2578 set_other_error (mismatch_detail, idx,
2579 _("negative immediate value not allowed"));
2580 return 0;
2582 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
2584 set_other_error (mismatch_detail, idx,
2585 _("immediate out of range"));
2586 return 0;
2588 break;
2590 case AARCH64_OPND_IMM_MOV:
2592 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2593 imm = opnd->imm.value;
2594 assert (idx == 1);
2595 switch (opcode->op)
2597 case OP_MOV_IMM_WIDEN:
2598 imm = ~imm;
2599 /* Fall through. */
2600 case OP_MOV_IMM_WIDE:
2601 if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
2603 set_other_error (mismatch_detail, idx,
2604 _("immediate out of range"));
2605 return 0;
2607 break;
2608 case OP_MOV_IMM_LOG:
2609 if (!aarch64_logical_immediate_p (imm, esize, NULL))
2611 set_other_error (mismatch_detail, idx,
2612 _("immediate out of range"));
2613 return 0;
2615 break;
2616 default:
2617 assert (0);
2618 return 0;
2621 break;
2623 case AARCH64_OPND_NZCV:
2624 case AARCH64_OPND_CCMP_IMM:
2625 case AARCH64_OPND_EXCEPTION:
2626 case AARCH64_OPND_UNDEFINED:
2627 case AARCH64_OPND_TME_UIMM16:
2628 case AARCH64_OPND_UIMM4:
2629 case AARCH64_OPND_UIMM4_ADDG:
2630 case AARCH64_OPND_UIMM7:
2631 case AARCH64_OPND_UIMM3_OP1:
2632 case AARCH64_OPND_UIMM3_OP2:
2633 case AARCH64_OPND_SVE_UIMM3:
2634 case AARCH64_OPND_SVE_UIMM7:
2635 case AARCH64_OPND_SVE_UIMM8:
2636 case AARCH64_OPND_SVE_UIMM8_53:
2637 case AARCH64_OPND_CSSC_UIMM8:
2638 size = get_operand_fields_width (get_operand_from_code (type));
2639 assert (size < 32);
2640 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
2642 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2643 (1u << size) - 1);
2644 return 0;
2646 break;
2648 case AARCH64_OPND_UIMM10:
2649 /* Scaled unsigned 10 bits immediate offset. */
2650 if (!value_in_range_p (opnd->imm.value, 0, 1008))
2652 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1008);
2653 return 0;
2656 if (!value_aligned_p (opnd->imm.value, 16))
2658 set_unaligned_error (mismatch_detail, idx, 16);
2659 return 0;
2661 break;
2663 case AARCH64_OPND_SIMM5:
2664 case AARCH64_OPND_SVE_SIMM5:
2665 case AARCH64_OPND_SVE_SIMM5B:
2666 case AARCH64_OPND_SVE_SIMM6:
2667 case AARCH64_OPND_SVE_SIMM8:
2668 case AARCH64_OPND_CSSC_SIMM8:
2669 size = get_operand_fields_width (get_operand_from_code (type));
2670 assert (size < 32);
2671 if (!value_fit_signed_field_p (opnd->imm.value, size))
2673 set_imm_out_of_range_error (mismatch_detail, idx,
2674 -(1 << (size - 1)),
2675 (1 << (size - 1)) - 1);
2676 return 0;
2678 break;
2680 case AARCH64_OPND_WIDTH:
2681 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
2682 && opnds[0].type == AARCH64_OPND_Rd);
2683 size = get_upper_bound (qualifier);
2684 if (opnd->imm.value + opnds[idx-1].imm.value > size)
2685 /* lsb+width <= reg.size */
2687 set_imm_out_of_range_error (mismatch_detail, idx, 1,
2688 size - opnds[idx-1].imm.value);
2689 return 0;
2691 break;
2693 case AARCH64_OPND_LIMM:
2694 case AARCH64_OPND_SVE_LIMM:
2696 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2697 uint64_t uimm = opnd->imm.value;
2698 if (opcode->op == OP_BIC)
2699 uimm = ~uimm;
2700 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2702 set_other_error (mismatch_detail, idx,
2703 _("immediate out of range"));
2704 return 0;
2707 break;
2709 case AARCH64_OPND_IMM0:
2710 case AARCH64_OPND_FPIMM0:
2711 if (opnd->imm.value != 0)
2713 set_other_error (mismatch_detail, idx,
2714 _("immediate zero expected"));
2715 return 0;
2717 break;
2719 case AARCH64_OPND_IMM_ROT1:
2720 case AARCH64_OPND_IMM_ROT2:
2721 case AARCH64_OPND_SVE_IMM_ROT2:
2722 if (opnd->imm.value != 0
2723 && opnd->imm.value != 90
2724 && opnd->imm.value != 180
2725 && opnd->imm.value != 270)
2727 set_other_error (mismatch_detail, idx,
2728 _("rotate expected to be 0, 90, 180 or 270"));
2729 return 0;
2731 break;
2733 case AARCH64_OPND_IMM_ROT3:
2734 case AARCH64_OPND_SVE_IMM_ROT1:
2735 case AARCH64_OPND_SVE_IMM_ROT3:
2736 if (opnd->imm.value != 90 && opnd->imm.value != 270)
2738 set_other_error (mismatch_detail, idx,
2739 _("rotate expected to be 90 or 270"));
2740 return 0;
2742 break;
2744 case AARCH64_OPND_SHLL_IMM:
2745 assert (idx == 2);
2746 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2747 if (opnd->imm.value != size)
2749 set_other_error (mismatch_detail, idx,
2750 _("invalid shift amount"));
2751 return 0;
2753 break;
2755 case AARCH64_OPND_IMM_VLSL:
2756 size = aarch64_get_qualifier_esize (qualifier);
2757 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
2759 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2760 size * 8 - 1);
2761 return 0;
2763 break;
2765 case AARCH64_OPND_IMM_VLSR:
2766 size = aarch64_get_qualifier_esize (qualifier);
2767 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2769 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
2770 return 0;
2772 break;
2774 case AARCH64_OPND_SIMD_IMM:
2775 case AARCH64_OPND_SIMD_IMM_SFT:
2776 /* Qualifier check. */
2777 switch (qualifier)
2779 case AARCH64_OPND_QLF_LSL:
2780 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2782 set_other_error (mismatch_detail, idx,
2783 _("invalid shift operator"));
2784 return 0;
2786 break;
2787 case AARCH64_OPND_QLF_MSL:
2788 if (opnd->shifter.kind != AARCH64_MOD_MSL)
2790 set_other_error (mismatch_detail, idx,
2791 _("invalid shift operator"));
2792 return 0;
2794 break;
2795 case AARCH64_OPND_QLF_NIL:
2796 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2798 set_other_error (mismatch_detail, idx,
2799 _("shift is not permitted"));
2800 return 0;
2802 break;
2803 default:
2804 assert (0);
2805 return 0;
2807 /* Is the immediate valid? */
2808 assert (idx == 1);
2809 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2811 /* uimm8 or simm8 */
2812 if (!value_in_range_p (opnd->imm.value, -128, 255))
2814 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2815 return 0;
2818 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2820 /* uimm64 is not
2821 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2822 ffffffffgggggggghhhhhhhh'. */
2823 set_other_error (mismatch_detail, idx,
2824 _("invalid value for immediate"));
2825 return 0;
2827 /* Is the shift amount valid? */
2828 switch (opnd->shifter.kind)
2830 case AARCH64_MOD_LSL:
2831 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2832 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2834 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2835 (size - 1) * 8);
2836 return 0;
2838 if (!value_aligned_p (opnd->shifter.amount, 8))
2840 set_unaligned_error (mismatch_detail, idx, 8);
2841 return 0;
2843 break;
2844 case AARCH64_MOD_MSL:
2845 /* Only 8 and 16 are valid shift amount. */
2846 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
2848 set_other_error (mismatch_detail, idx,
2849 _("shift amount must be 0 or 16"));
2850 return 0;
2852 break;
2853 default:
2854 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2856 set_other_error (mismatch_detail, idx,
2857 _("invalid shift operator"));
2858 return 0;
2860 break;
2862 break;
2864 case AARCH64_OPND_FPIMM:
2865 case AARCH64_OPND_SIMD_FPIMM:
2866 case AARCH64_OPND_SVE_FPIMM8:
2867 if (opnd->imm.is_fp == 0)
2869 set_other_error (mismatch_detail, idx,
2870 _("floating-point immediate expected"));
2871 return 0;
2873 /* The value is expected to be an 8-bit floating-point constant with
2874 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2875 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2876 instruction). */
2877 if (!value_in_range_p (opnd->imm.value, 0, 255))
2879 set_other_error (mismatch_detail, idx,
2880 _("immediate out of range"));
2881 return 0;
2883 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2885 set_other_error (mismatch_detail, idx,
2886 _("invalid shift operator"));
2887 return 0;
2889 break;
2891 case AARCH64_OPND_SVE_AIMM:
2892 min_value = 0;
2893 sve_aimm:
2894 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2895 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2896 mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
2897 uvalue = opnd->imm.value;
2898 shift = opnd->shifter.amount;
2899 if (size == 1)
2901 if (shift != 0)
2903 set_other_error (mismatch_detail, idx,
2904 _("no shift amount allowed for"
2905 " 8-bit constants"));
2906 return 0;
2909 else
2911 if (shift != 0 && shift != 8)
2913 set_other_error (mismatch_detail, idx,
2914 _("shift amount must be 0 or 8"));
2915 return 0;
2917 if (shift == 0 && (uvalue & 0xff) == 0)
2919 shift = 8;
2920 uvalue = (int64_t) uvalue / 256;
2923 mask >>= shift;
2924 if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
2926 set_other_error (mismatch_detail, idx,
2927 _("immediate too big for element size"));
2928 return 0;
2930 uvalue = (uvalue - min_value) & mask;
2931 if (uvalue > 0xff)
2933 set_other_error (mismatch_detail, idx,
2934 _("invalid arithmetic immediate"));
2935 return 0;
2937 break;
2939 case AARCH64_OPND_SVE_ASIMM:
2940 min_value = -128;
2941 goto sve_aimm;
2943 case AARCH64_OPND_SVE_I1_HALF_ONE:
2944 assert (opnd->imm.is_fp);
2945 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
2947 set_other_error (mismatch_detail, idx,
2948 _("floating-point value must be 0.5 or 1.0"));
2949 return 0;
2951 break;
2953 case AARCH64_OPND_SVE_I1_HALF_TWO:
2954 assert (opnd->imm.is_fp);
2955 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
2957 set_other_error (mismatch_detail, idx,
2958 _("floating-point value must be 0.5 or 2.0"));
2959 return 0;
2961 break;
2963 case AARCH64_OPND_SVE_I1_ZERO_ONE:
2964 assert (opnd->imm.is_fp);
2965 if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
2967 set_other_error (mismatch_detail, idx,
2968 _("floating-point value must be 0.0 or 1.0"));
2969 return 0;
2971 break;
2973 case AARCH64_OPND_SVE_INV_LIMM:
2975 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2976 uint64_t uimm = ~opnd->imm.value;
2977 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2979 set_other_error (mismatch_detail, idx,
2980 _("immediate out of range"));
2981 return 0;
2984 break;
2986 case AARCH64_OPND_SVE_LIMM_MOV:
2988 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2989 uint64_t uimm = opnd->imm.value;
2990 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2992 set_other_error (mismatch_detail, idx,
2993 _("immediate out of range"));
2994 return 0;
2996 if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
2998 set_other_error (mismatch_detail, idx,
2999 _("invalid replicated MOV immediate"));
3000 return 0;
3003 break;
3005 case AARCH64_OPND_SVE_PATTERN_SCALED:
3006 assert (opnd->shifter.kind == AARCH64_MOD_MUL);
3007 if (!value_in_range_p (opnd->shifter.amount, 1, 16))
3009 set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
3010 return 0;
3012 break;
3014 case AARCH64_OPND_SVE_SHLIMM_PRED:
3015 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
3016 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
3017 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
3018 if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
3020 set_imm_out_of_range_error (mismatch_detail, idx,
3021 0, 8 * size - 1);
3022 return 0;
3024 break;
3026 case AARCH64_OPND_SME_SHRIMM4:
3027 size = 1 << get_operand_fields_width (get_operand_from_code (type));
3028 if (!value_in_range_p (opnd->imm.value, 1, size))
3030 set_imm_out_of_range_error (mismatch_detail, idx, 1, size);
3031 return 0;
3033 break;
3035 case AARCH64_OPND_SME_SHRIMM5:
3036 case AARCH64_OPND_SVE_SHRIMM_PRED:
3037 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
3038 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
3039 num = (type == AARCH64_OPND_SVE_SHRIMM_UNPRED_22) ? 2 : 1;
3040 size = aarch64_get_qualifier_esize (opnds[idx - num].qualifier);
3041 if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
3043 set_imm_out_of_range_error (mismatch_detail, idx, 1, 8*size);
3044 return 0;
3046 break;
3048 case AARCH64_OPND_SME_ZT0_INDEX:
3049 if (!value_in_range_p (opnd->imm.value, 0, 56))
3051 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, 56);
3052 return 0;
3054 if (opnd->imm.value % 8 != 0)
3056 set_other_error (mismatch_detail, idx,
3057 _("byte index must be a multiple of 8"));
3058 return 0;
3060 break;
3062 default:
3063 break;
3065 break;
3067 case AARCH64_OPND_CLASS_SYSTEM:
3068 switch (type)
3070 case AARCH64_OPND_PSTATEFIELD:
3071 for (i = 0; aarch64_pstatefields[i].name; ++i)
3072 if (aarch64_pstatefields[i].value == opnd->pstatefield)
3073 break;
3074 assert (aarch64_pstatefields[i].name);
3075 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
3076 max_value = F_GET_REG_MAX_VALUE (aarch64_pstatefields[i].flags);
3077 if (opnds[1].imm.value < 0 || opnds[1].imm.value > max_value)
3079 set_imm_out_of_range_error (mismatch_detail, 1, 0, max_value);
3080 return 0;
3082 break;
3083 case AARCH64_OPND_PRFOP:
3084 if (opcode->iclass == ldst_regoff && opnd->prfop->value >= 24)
3086 set_other_error (mismatch_detail, idx,
3087 _("the register-index form of PRFM does"
3088 " not accept opcodes in the range 24-31"));
3089 return 0;
3091 break;
3092 default:
3093 break;
3095 break;
3097 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
3098 /* Get the upper bound for the element index. */
3099 if (opcode->op == OP_FCMLA_ELEM)
3100 /* FCMLA index range depends on the vector size of other operands
3101 and is halfed because complex numbers take two elements. */
3102 num = aarch64_get_qualifier_nelem (opnds[0].qualifier)
3103 * aarch64_get_qualifier_esize (opnds[0].qualifier) / 2;
3104 else
3105 num = 16;
3106 num = num / aarch64_get_qualifier_esize (qualifier) - 1;
3107 assert (aarch64_get_qualifier_nelem (qualifier) == 1);
3109 /* Index out-of-range. */
3110 if (!value_in_range_p (opnd->reglane.index, 0, num))
3112 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
3113 return 0;
3115 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
3116 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
3117 number is encoded in "size:M:Rm":
3118 size <Vm>
3119 00 RESERVED
3120 01 0:Rm
3121 10 M:Rm
3122 11 RESERVED */
3123 if (type == AARCH64_OPND_Em16 && qualifier == AARCH64_OPND_QLF_S_H
3124 && !value_in_range_p (opnd->reglane.regno, 0, 15))
3126 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
3127 return 0;
3129 break;
3131 case AARCH64_OPND_CLASS_MODIFIED_REG:
3132 assert (idx == 1 || idx == 2);
3133 switch (type)
3135 case AARCH64_OPND_Rm_EXT:
3136 if (!aarch64_extend_operator_p (opnd->shifter.kind)
3137 && opnd->shifter.kind != AARCH64_MOD_LSL)
3139 set_other_error (mismatch_detail, idx,
3140 _("extend operator expected"));
3141 return 0;
3143 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
3144 (i.e. SP), in which case it defaults to LSL. The LSL alias is
3145 only valid when "Rd" or "Rn" is '11111', and is preferred in that
3146 case. */
3147 if (!aarch64_stack_pointer_p (opnds + 0)
3148 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
3150 if (!opnd->shifter.operator_present)
3152 set_other_error (mismatch_detail, idx,
3153 _("missing extend operator"));
3154 return 0;
3156 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
3158 set_other_error (mismatch_detail, idx,
3159 _("'LSL' operator not allowed"));
3160 return 0;
3163 assert (opnd->shifter.operator_present /* Default to LSL. */
3164 || opnd->shifter.kind == AARCH64_MOD_LSL);
3165 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
3167 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
3168 return 0;
3170 /* In the 64-bit form, the final register operand is written as Wm
3171 for all but the (possibly omitted) UXTX/LSL and SXTX
3172 operators.
3173 N.B. GAS allows X register to be used with any operator as a
3174 programming convenience. */
3175 if (qualifier == AARCH64_OPND_QLF_X
3176 && opnd->shifter.kind != AARCH64_MOD_LSL
3177 && opnd->shifter.kind != AARCH64_MOD_UXTX
3178 && opnd->shifter.kind != AARCH64_MOD_SXTX)
3180 set_other_error (mismatch_detail, idx, _("W register expected"));
3181 return 0;
3183 break;
3185 case AARCH64_OPND_Rm_SFT:
3186 /* ROR is not available to the shifted register operand in
3187 arithmetic instructions. */
3188 if (!aarch64_shift_operator_p (opnd->shifter.kind))
3190 set_other_error (mismatch_detail, idx,
3191 _("shift operator expected"));
3192 return 0;
3194 if (opnd->shifter.kind == AARCH64_MOD_ROR
3195 && opcode->iclass != log_shift)
3197 set_other_error (mismatch_detail, idx,
3198 _("'ROR' operator not allowed"));
3199 return 0;
3201 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
3202 if (!value_in_range_p (opnd->shifter.amount, 0, num))
3204 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
3205 return 0;
3207 break;
3209 default:
3210 break;
3212 break;
3214 default:
3215 break;
3218 return 1;
/* Main entrypoint for the operand constraint checking.

   Return 1 if operands of *INST meet the constraint applied by the operand
   codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
   not NULL, return the detail of the error in *MISMATCH_DETAIL.  N.B. when
   adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
   with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
   error kind when it is notified that an instruction does not pass the check).

   Un-determined operand qualifiers may get established during the process.  */

int
aarch64_match_operands_constraint (aarch64_inst *inst,
				   aarch64_operand_error *mismatch_detail)
{
  int i;

  DEBUG_TRACE ("enter");

  /* I is the index of the operand that must be tied to operand 0;
     a value of 0 means the opcode has no tied operands.  */
  i = inst->opcode->tied_operand;

  if (i > 0)
    {
      /* Check for tied_operands with specific opcode iclass.  */
      switch (inst->opcode->iclass)
	{
	/* For SME LDR and STR instructions #imm must have the same numerical
	   value for both operands.  */
	case sme_ldr:
	case sme_str:
	  assert (inst->operands[0].type == AARCH64_OPND_SME_ZA_array_off4);
	  assert (inst->operands[1].type == AARCH64_OPND_SME_ADDR_RI_U4xVL);
	  if (inst->operands[0].indexed_za.index.imm
	      != inst->operands[1].addr.offset.imm)
	    {
	      if (mismatch_detail)
		{
		  mismatch_detail->kind = AARCH64_OPDE_UNTIED_IMMS;
		  mismatch_detail->index = i;
		}
	      return 0;
	    }
	  break;

	default:
	  {
	    /* Check for cases where a source register needs to be the
	       same as the destination register.  Do this before
	       matching qualifiers since if an instruction has both
	       invalid tying and invalid qualifiers, the error about
	       qualifiers would suggest several alternative instructions
	       that also have invalid tying.  */
	    enum aarch64_operand_class op_class1
	      = aarch64_get_operand_class (inst->operands[0].type);
	    enum aarch64_operand_class op_class2
	      = aarch64_get_operand_class (inst->operands[i].type);
	    assert (op_class1 == op_class2);
	    /* Register lists must match in start register, length and
	       stride; plain registers must simply have the same number.  */
	    if (op_class1 == AARCH64_OPND_CLASS_SVE_REGLIST
		? ((inst->operands[0].reglist.first_regno
		    != inst->operands[i].reglist.first_regno)
		   || (inst->operands[0].reglist.num_regs
		       != inst->operands[i].reglist.num_regs)
		   || (inst->operands[0].reglist.stride
		       != inst->operands[i].reglist.stride))
		: (inst->operands[0].reg.regno
		   != inst->operands[i].reg.regno))
	      {
		if (mismatch_detail)
		  {
		    mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
		    mismatch_detail->index = i;
		    mismatch_detail->error = NULL;
		  }
		return 0;
	      }
	    break;
	  }
	}
    }

  /* Match operands' qualifier.
     *INST has already had qualifier establish for some, if not all, of
     its operands; we need to find out whether these established
     qualifiers match one of the qualifier sequence in
     INST->OPCODE->QUALIFIERS_LIST.  If yes, we will assign each operand
     with the corresponding qualifier in such a sequence.
     Only basic operand constraint checking is done here; the more thorough
     constraint checking will carried out by operand_general_constraint_met_p,
     which has be to called after this in order to get all of the operands'
     qualifiers established.  */
  int invalid_count;
  if (match_operands_qualifier (inst, true /* update_p */,
				&invalid_count) == 0)
    {
      DEBUG_TRACE ("FAIL on operand qualifier matching");
      if (mismatch_detail)
	{
	  /* Return an error type to indicate that it is the qualifier
	     matching failure; we don't care about which operand as there
	     are enough information in the opcode table to reproduce it.  */
	  mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
	  mismatch_detail->index = -1;
	  mismatch_detail->error = NULL;
	  mismatch_detail->data[0].i = invalid_count;
	}
      return 0;
    }

  /* Match operands' constraint.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      enum aarch64_opnd type = inst->opcode->operands[i];
      if (type == AARCH64_OPND_NIL)
	break;
      if (inst->operands[i].skip)
	{
	  DEBUG_TRACE ("skip the incomplete operand %d", i);
	  continue;
	}
      if (operand_general_constraint_met_p (inst->operands, i, type,
					    inst->opcode, mismatch_detail) == 0)
	{
	  DEBUG_TRACE ("FAIL on operand %d", i);
	  return 0;
	}
    }

  DEBUG_TRACE ("PASS");
  return 1;
}
3354 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
3355 Also updates the TYPE of each INST->OPERANDS with the corresponding
3356 value of OPCODE->OPERANDS.
3358 Note that some operand qualifiers may need to be manually cleared by
3359 the caller before it further calls the aarch64_opcode_encode; by
3360 doing this, it helps the qualifier matching facilities work
3361 properly. */
3363 const aarch64_opcode*
3364 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
3366 int i;
3367 const aarch64_opcode *old = inst->opcode;
3369 inst->opcode = opcode;
3371 /* Update the operand types. */
3372 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3374 inst->operands[i].type = opcode->operands[i];
3375 if (opcode->operands[i] == AARCH64_OPND_NIL)
3376 break;
3379 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
3381 return old;
3385 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
3387 int i;
3388 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3389 if (operands[i] == operand)
3390 return i;
3391 else if (operands[i] == AARCH64_OPND_NIL)
3392 break;
3393 return -1;
/* R0...R30, followed by FOR31.  */
#define BANK(R, FOR31) \
  { R (0), R (1), R (2), R (3), R (4), R (5), R (6), R (7), \
    R (8), R (9), R (10), R (11), R (12), R (13), R (14), R (15), \
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
    R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }

/* Integer register name tables, indexed [has_zr][is_64][regno]:
   [0][0] 32-bit integer regs with sp   Wn
   [0][1] 64-bit integer regs with sp   Xn  sf=1
   [1][0] 32-bit integer regs with #0   Wn
   [1][1] 64-bit integer regs with #0   Xn  sf=1 */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
#undef R64
#undef R32
};

/* Names of the SVE vector registers, first with .S suffixes,
   then with .D suffixes.  */

static const char *sve_reg[2][32] = {
#define ZS(X) "z" #X ".s"
#define ZD(X) "z" #X ".d"
  BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
#undef ZD
#undef ZS
};
#undef BANK
3427 /* Return the integer register name.
3428 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
3430 static inline const char *
3431 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
3433 const int has_zr = sp_reg_p ? 0 : 1;
3434 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
3435 return int_reg[has_zr][is_64][regno];
3438 /* Like get_int_reg_name, but IS_64 is always 1. */
3440 static inline const char *
3441 get_64bit_int_reg_name (int regno, int sp_reg_p)
3443 const int has_zr = sp_reg_p ? 0 : 1;
3444 return int_reg[has_zr][1][regno];
3447 /* Get the name of the integer offset register in OPND, using the shift type
3448 to decide whether it's a word or doubleword. */
3450 static inline const char *
3451 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
3453 switch (opnd->shifter.kind)
3455 case AARCH64_MOD_UXTW:
3456 case AARCH64_MOD_SXTW:
3457 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
3459 case AARCH64_MOD_LSL:
3460 case AARCH64_MOD_SXTX:
3461 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
3463 default:
3464 abort ();
3468 /* Get the name of the SVE vector offset register in OPND, using the operand
3469 qualifier to decide whether the suffix should be .S or .D. */
3471 static inline const char *
3472 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
3474 assert (qualifier == AARCH64_OPND_QLF_S_S
3475 || qualifier == AARCH64_OPND_QLF_S_D);
3476 return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
/* Types for expanding an encoded 8-bit value to a floating-point value.  */

typedef union
{
  uint64_t i;
  double d;
} double_conv_t;

typedef union
{
  uint32_t i;
  float f;
} single_conv_t;

/* Half-precision immediates are expanded to single precision (see
   expand_fp_imm below), so this converter also uses a 32-bit float.  */
typedef union
{
  uint32_t i;
  float f;
} half_conv_t;
/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  uint64_t result = 0;
  const uint64_t sign = (imm8 >> 7) & 0x01;	/* imm8<7>  */
  const uint64_t frac = imm8 & 0x7f;		/* imm8<6:0>  */
  const uint64_t exp_top = frac >> 6;		/* imm8<6>  */
  /* Replicate(imm8<6>, 4).  */
  const uint64_t exp_rep4 = exp_top ? 0xf : 0x0;

  if (size == 8)
    /* Double precision: sign:NOT(imm8<6>):Replicate(imm8<6>,7):
       imm8<6>:imm8<5:0>:Zeros(48).  */
    result = (sign << 63)
      | ((exp_top ^ 1) << 62)
      | (exp_rep4 << 58)
      | (exp_top << 57) | (exp_top << 56) | (exp_top << 55)
      | (frac << 48);
  else if (size == 4 || size == 2)
    /* Single precision: sign:NOT(imm8<6>):Replicate(imm8<6>,4):
       imm8<6>:imm8<5:0>:Zeros(19).  Half-precision inputs are widened
       to this format as well.  */
    result = (sign << 31)
      | ((exp_top ^ 1) << 30)
      | (exp_rep4 << 26)
      | (frac << 19);
  else
    /* An unsupported size.  */
    assert (0);

  return result;
}
3543 /* Return a string based on FMT with the register style applied. */
3545 static const char *
3546 style_reg (struct aarch64_styler *styler, const char *fmt, ...)
3548 const char *txt;
3549 va_list ap;
3551 va_start (ap, fmt);
3552 txt = styler->apply_style (styler, dis_style_register, fmt, ap);
3553 va_end (ap);
3555 return txt;
3558 /* Return a string based on FMT with the immediate style applied. */
3560 static const char *
3561 style_imm (struct aarch64_styler *styler, const char *fmt, ...)
3563 const char *txt;
3564 va_list ap;
3566 va_start (ap, fmt);
3567 txt = styler->apply_style (styler, dis_style_immediate, fmt, ap);
3568 va_end (ap);
3570 return txt;
3573 /* Return a string based on FMT with the sub-mnemonic style applied. */
3575 static const char *
3576 style_sub_mnem (struct aarch64_styler *styler, const char *fmt, ...)
3578 const char *txt;
3579 va_list ap;
3581 va_start (ap, fmt);
3582 txt = styler->apply_style (styler, dis_style_sub_mnemonic, fmt, ap);
3583 va_end (ap);
3585 return txt;
3588 /* Return a string based on FMT with the address style applied. */
3590 static const char *
3591 style_addr (struct aarch64_styler *styler, const char *fmt, ...)
3593 const char *txt;
3594 va_list ap;
3596 va_start (ap, fmt);
3597 txt = styler->apply_style (styler, dis_style_address, fmt, ap);
3598 va_end (ap);
3600 return txt;
/* Produce the string representation of the register list operand *OPND
   in the buffer pointed by BUF of size SIZE.  PREFIX is the part of
   the register name that comes before the register number, such as "v".  */
static void
print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
		     const char *prefix, struct aarch64_styler *styler)
{
  /* Predicate registers wrap at 16, vector registers at 32.  */
  const int mask = (prefix[0] == 'p' ? 15 : 31);
  const int num_regs = opnd->reglist.num_regs;
  const int stride = opnd->reglist.stride;
  const int first_reg = opnd->reglist.first_regno;
  const int last_reg = (first_reg + (num_regs - 1) * stride) & mask;
  const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
  char tb[16];	/* Temporary buffer.  */

  assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
  assert (num_regs >= 1 && num_regs <= 4);

  /* Prepare the index if any.  */
  if (opnd->reglist.has_index)
    /* PR 21096: The %100 is to silence a warning about possible truncation.  */
    snprintf (tb, sizeof (tb), "[%s]",
	      style_imm (styler, "%" PRIi64, (opnd->reglist.index % 100)));
  else
    tb[0] = '\0';

  /* The hyphenated form is preferred for disassembly if there are
     more than two registers in the list, and the register numbers
     are monotonically increasing in increments of one.  */
  if (stride == 1 && num_regs > 1
      && ((opnd->type != AARCH64_OPND_SME_Zt2)
	  && (opnd->type != AARCH64_OPND_SME_Zt3)
	  && (opnd->type != AARCH64_OPND_SME_Zt4)))
    snprintf (buf, size, "{%s-%s}%s",
	      style_reg (styler, "%s%d.%s", prefix, first_reg, qlf_name),
	      style_reg (styler, "%s%d.%s", prefix, last_reg, qlf_name), tb);
  else
    {
      /* Explicit form: compute each member, wrapping at the register-file
	 boundary via MASK.  */
      const int reg0 = first_reg;
      const int reg1 = (first_reg + stride) & mask;
      const int reg2 = (first_reg + stride * 2) & mask;
      const int reg3 = (first_reg + stride * 3) & mask;

      switch (num_regs)
	{
	case 1:
	  snprintf (buf, size, "{%s}%s",
		    style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
		    tb);
	  break;
	case 2:
	  snprintf (buf, size, "{%s, %s}%s",
		    style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name),
		    tb);
	  break;
	case 3:
	  snprintf (buf, size, "{%s, %s, %s}%s",
		    style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg2, qlf_name),
		    tb);
	  break;
	case 4:
	  snprintf (buf, size, "{%s, %s, %s, %s}%s",
		    style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg2, qlf_name),
		    style_reg (styler, "%s%d.%s", prefix, reg3, qlf_name),
		    tb);
	  break;
	}
    }
}
3678 /* Print the register+immediate address in OPND to BUF, which has SIZE
3679 characters. BASE is the name of the base register. */
3681 static void
3682 print_immediate_offset_address (char *buf, size_t size,
3683 const aarch64_opnd_info *opnd,
3684 const char *base,
3685 struct aarch64_styler *styler)
3687 if (opnd->addr.writeback)
3689 if (opnd->addr.preind)
3691 if (opnd->type == AARCH64_OPND_ADDR_SIMM10 && !opnd->addr.offset.imm)
3692 snprintf (buf, size, "[%s]!", style_reg (styler, base));
3693 else
3694 snprintf (buf, size, "[%s, %s]!",
3695 style_reg (styler, base),
3696 style_imm (styler, "#%d", opnd->addr.offset.imm));
3698 else
3699 snprintf (buf, size, "[%s], %s",
3700 style_reg (styler, base),
3701 style_imm (styler, "#%d", opnd->addr.offset.imm));
3703 else
3705 if (opnd->shifter.operator_present)
3707 assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
3708 snprintf (buf, size, "[%s, %s, %s]",
3709 style_reg (styler, base),
3710 style_imm (styler, "#%d", opnd->addr.offset.imm),
3711 style_sub_mnem (styler, "mul vl"));
3713 else if (opnd->addr.offset.imm)
3714 snprintf (buf, size, "[%s, %s]",
3715 style_reg (styler, base),
3716 style_imm (styler, "#%d", opnd->addr.offset.imm));
3717 else
3718 snprintf (buf, size, "[%s]", style_reg (styler, base));
3722 /* Produce the string representation of the register offset address operand
3723 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
3724 the names of the base and offset registers. */
3725 static void
3726 print_register_offset_address (char *buf, size_t size,
3727 const aarch64_opnd_info *opnd,
3728 const char *base, const char *offset,
3729 struct aarch64_styler *styler)
3731 char tb[32]; /* Temporary buffer. */
3732 bool print_extend_p = true;
3733 bool print_amount_p = true;
3734 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
3736 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
3737 || !opnd->shifter.amount_present))
3739 /* Not print the shift/extend amount when the amount is zero and
3740 when it is not the special case of 8-bit load/store instruction. */
3741 print_amount_p = false;
3742 /* Likewise, no need to print the shift operator LSL in such a
3743 situation. */
3744 if (opnd->shifter.kind == AARCH64_MOD_LSL)
3745 print_extend_p = false;
3748 /* Prepare for the extend/shift. */
3749 if (print_extend_p)
3751 if (print_amount_p)
3752 snprintf (tb, sizeof (tb), ", %s %s",
3753 style_sub_mnem (styler, shift_name),
3754 style_imm (styler, "#%" PRIi64,
3755 /* PR 21096: The %100 is to silence a warning about possible truncation. */
3756 (opnd->shifter.amount % 100)));
3757 else
3758 snprintf (tb, sizeof (tb), ", %s",
3759 style_sub_mnem (styler, shift_name));
3761 else
3762 tb[0] = '\0';
3764 snprintf (buf, size, "[%s, %s%s]", style_reg (styler, base),
3765 style_reg (styler, offset), tb);
/* Print ZA tiles from imm8 in ZERO instruction.

   The preferred disassembly of this instruction uses the shortest list of tile
   names that represent the encoded immediate mask.

   For example:
    * An all-ones immediate is disassembled as {ZA}.
    * An all-zeros immediate is disassembled as an empty list { }.  */
static void
print_sme_za_list (char *buf, size_t size, int mask,
		   struct aarch64_styler *styler)
{
  /* Tile names ordered from widest coverage to narrowest, with a
     trailing " " entry used only for the all-zeros mask.  */
  static const char *const tile_names[] =
    { "za", "za0.h", "za1.h", "za0.s",
      "za1.s", "za2.s", "za3.s", "za0.d",
      "za1.d", "za2.d", "za3.d", "za4.d",
      "za5.d", "za6.d", "za7.d", " " };
  /* Bit patterns each tile name covers in the imm8 mask.  */
  static const int tile_bits[] =
    { 0xff, 0x55, 0xaa, 0x11,
      0x22, 0x44, 0x88, 0x01,
      0x02, 0x04, 0x08, 0x10,
      0x20, 0x40, 0x80, 0x00 };
  const int count = sizeof (tile_names) / sizeof (tile_names[0]);
  int idx, pos;

  pos = snprintf (buf, size, "{");
  for (idx = 0; idx < count; idx++)
    {
      if ((mask & tile_bits[idx]) == tile_bits[idx])
	{
	  /* Consume the covered bits, then emit the tile name.  */
	  mask &= ~tile_bits[idx];
	  if (pos > 1)
	    pos += snprintf (buf + pos, size - pos, ", ");
	  pos += snprintf (buf + pos, size - pos, "%s",
			   style_reg (styler, tile_names[idx]));
	}
      if (mask == 0)
	break;
    }
  snprintf (buf + pos, size - pos, "}");
}
3809 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
3810 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
3811 PC, PCREL_P and ADDRESS are used to pass in and return information about
3812 the PC-relative address calculation, where the PC value is passed in
3813 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
3814 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
3815 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
3817 The function serves both the disassembler and the assembler diagnostics
3818 issuer, which is the reason why it lives in this file. */
3820 void
3821 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
3822 const aarch64_opcode *opcode,
3823 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
3824 bfd_vma *address, char** notes,
3825 char *comment, size_t comment_size,
3826 aarch64_feature_set features,
3827 struct aarch64_styler *styler)
3829 unsigned int i, num_conds;
3830 const char *name = NULL;
3831 const aarch64_opnd_info *opnd = opnds + idx;
3832 enum aarch64_modifier_kind kind;
3833 uint64_t addr, enum_value;
3835 if (comment != NULL)
3837 assert (comment_size > 0);
3838 comment[0] = '\0';
3840 else
3841 assert (comment_size == 0);
3843 buf[0] = '\0';
3844 if (pcrel_p)
3845 *pcrel_p = 0;
3847 switch (opnd->type)
3849 case AARCH64_OPND_Rd:
3850 case AARCH64_OPND_Rn:
3851 case AARCH64_OPND_Rm:
3852 case AARCH64_OPND_Rt:
3853 case AARCH64_OPND_Rt2:
3854 case AARCH64_OPND_Rs:
3855 case AARCH64_OPND_Ra:
3856 case AARCH64_OPND_Rt_LS64:
3857 case AARCH64_OPND_Rt_SYS:
3858 case AARCH64_OPND_PAIRREG:
3859 case AARCH64_OPND_PAIRREG_OR_XZR:
3860 case AARCH64_OPND_SVE_Rm:
3861 case AARCH64_OPND_LSE128_Rt:
3862 case AARCH64_OPND_LSE128_Rt2:
3863 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
3864 the <ic_op>, therefore we use opnd->present to override the
3865 generic optional-ness information. */
3866 if (opnd->type == AARCH64_OPND_Rt_SYS)
3868 if (!opnd->present)
3869 break;
3871 /* Omit the operand, e.g. RET. */
3872 else if (optional_operand_p (opcode, idx)
3873 && (opnd->reg.regno
3874 == get_optional_operand_default_value (opcode)))
3875 break;
3876 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3877 || opnd->qualifier == AARCH64_OPND_QLF_X);
3878 snprintf (buf, size, "%s",
3879 style_reg (styler, get_int_reg_name (opnd->reg.regno,
3880 opnd->qualifier, 0)));
3881 break;
3883 case AARCH64_OPND_Rd_SP:
3884 case AARCH64_OPND_Rn_SP:
3885 case AARCH64_OPND_Rt_SP:
3886 case AARCH64_OPND_SVE_Rn_SP:
3887 case AARCH64_OPND_Rm_SP:
3888 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3889 || opnd->qualifier == AARCH64_OPND_QLF_WSP
3890 || opnd->qualifier == AARCH64_OPND_QLF_X
3891 || opnd->qualifier == AARCH64_OPND_QLF_SP);
3892 snprintf (buf, size, "%s",
3893 style_reg (styler, get_int_reg_name (opnd->reg.regno,
3894 opnd->qualifier, 1)));
3895 break;
3897 case AARCH64_OPND_Rm_EXT:
3898 kind = opnd->shifter.kind;
3899 assert (idx == 1 || idx == 2);
3900 if ((aarch64_stack_pointer_p (opnds)
3901 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
3902 && ((opnd->qualifier == AARCH64_OPND_QLF_W
3903 && opnds[0].qualifier == AARCH64_OPND_QLF_W
3904 && kind == AARCH64_MOD_UXTW)
3905 || (opnd->qualifier == AARCH64_OPND_QLF_X
3906 && kind == AARCH64_MOD_UXTX)))
3908 /* 'LSL' is the preferred form in this case. */
3909 kind = AARCH64_MOD_LSL;
3910 if (opnd->shifter.amount == 0)
3912 /* Shifter omitted. */
3913 snprintf (buf, size, "%s",
3914 style_reg (styler,
3915 get_int_reg_name (opnd->reg.regno,
3916 opnd->qualifier, 0)));
3917 break;
3920 if (opnd->shifter.amount)
3921 snprintf (buf, size, "%s, %s %s",
3922 style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
3923 style_sub_mnem (styler, aarch64_operand_modifiers[kind].name),
3924 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
3925 else
3926 snprintf (buf, size, "%s, %s",
3927 style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
3928 style_sub_mnem (styler, aarch64_operand_modifiers[kind].name));
3929 break;
3931 case AARCH64_OPND_Rm_SFT:
3932 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3933 || opnd->qualifier == AARCH64_OPND_QLF_X);
3934 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
3935 snprintf (buf, size, "%s",
3936 style_reg (styler, get_int_reg_name (opnd->reg.regno,
3937 opnd->qualifier, 0)));
3938 else
3939 snprintf (buf, size, "%s, %s %s",
3940 style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
3941 style_sub_mnem (styler, aarch64_operand_modifiers[opnd->shifter.kind].name),
3942 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
3943 break;
3945 case AARCH64_OPND_Fd:
3946 case AARCH64_OPND_Fn:
3947 case AARCH64_OPND_Fm:
3948 case AARCH64_OPND_Fa:
3949 case AARCH64_OPND_Ft:
3950 case AARCH64_OPND_Ft2:
3951 case AARCH64_OPND_Sd:
3952 case AARCH64_OPND_Sn:
3953 case AARCH64_OPND_Sm:
3954 case AARCH64_OPND_SVE_VZn:
3955 case AARCH64_OPND_SVE_Vd:
3956 case AARCH64_OPND_SVE_Vm:
3957 case AARCH64_OPND_SVE_Vn:
3958 snprintf (buf, size, "%s",
3959 style_reg (styler, "%s%d",
3960 aarch64_get_qualifier_name (opnd->qualifier),
3961 opnd->reg.regno));
3962 break;
3964 case AARCH64_OPND_Va:
3965 case AARCH64_OPND_Vd:
3966 case AARCH64_OPND_Vn:
3967 case AARCH64_OPND_Vm:
3968 snprintf (buf, size, "%s",
3969 style_reg (styler, "v%d.%s", opnd->reg.regno,
3970 aarch64_get_qualifier_name (opnd->qualifier)));
3971 break;
3973 case AARCH64_OPND_Ed:
3974 case AARCH64_OPND_En:
3975 case AARCH64_OPND_Em:
3976 case AARCH64_OPND_Em16:
3977 case AARCH64_OPND_SM3_IMM2:
3978 snprintf (buf, size, "%s[%s]",
3979 style_reg (styler, "v%d.%s", opnd->reglane.regno,
3980 aarch64_get_qualifier_name (opnd->qualifier)),
3981 style_imm (styler, "%" PRIi64, opnd->reglane.index));
3982 break;
3984 case AARCH64_OPND_VdD1:
3985 case AARCH64_OPND_VnD1:
3986 snprintf (buf, size, "%s[%s]",
3987 style_reg (styler, "v%d.d", opnd->reg.regno),
3988 style_imm (styler, "1"));
3989 break;
3991 case AARCH64_OPND_LVn:
3992 case AARCH64_OPND_LVt:
3993 case AARCH64_OPND_LVt_AL:
3994 case AARCH64_OPND_LEt:
3995 print_register_list (buf, size, opnd, "v", styler);
3996 break;
3998 case AARCH64_OPND_SVE_Pd:
3999 case AARCH64_OPND_SVE_Pg3:
4000 case AARCH64_OPND_SVE_Pg4_5:
4001 case AARCH64_OPND_SVE_Pg4_10:
4002 case AARCH64_OPND_SVE_Pg4_16:
4003 case AARCH64_OPND_SVE_Pm:
4004 case AARCH64_OPND_SVE_Pn:
4005 case AARCH64_OPND_SVE_Pt:
4006 case AARCH64_OPND_SME_Pm:
4007 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
4008 snprintf (buf, size, "%s",
4009 style_reg (styler, "p%d", opnd->reg.regno));
4010 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
4011 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
4012 snprintf (buf, size, "%s",
4013 style_reg (styler, "p%d/%s", opnd->reg.regno,
4014 aarch64_get_qualifier_name (opnd->qualifier)));
4015 else
4016 snprintf (buf, size, "%s",
4017 style_reg (styler, "p%d.%s", opnd->reg.regno,
4018 aarch64_get_qualifier_name (opnd->qualifier)));
4019 break;
4021 case AARCH64_OPND_SVE_PNd:
4022 case AARCH64_OPND_SVE_PNg4_10:
4023 case AARCH64_OPND_SVE_PNn:
4024 case AARCH64_OPND_SVE_PNt:
4025 case AARCH64_OPND_SME_PNd3:
4026 case AARCH64_OPND_SME_PNg3:
4027 case AARCH64_OPND_SME_PNn:
4028 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
4029 snprintf (buf, size, "%s",
4030 style_reg (styler, "pn%d", opnd->reg.regno));
4031 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
4032 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
4033 snprintf (buf, size, "%s",
4034 style_reg (styler, "pn%d/%s", opnd->reg.regno,
4035 aarch64_get_qualifier_name (opnd->qualifier)));
4036 else
4037 snprintf (buf, size, "%s",
4038 style_reg (styler, "pn%d.%s", opnd->reg.regno,
4039 aarch64_get_qualifier_name (opnd->qualifier)));
4040 break;
4042 case AARCH64_OPND_SME_Pdx2:
4043 case AARCH64_OPND_SME_PdxN:
4044 print_register_list (buf, size, opnd, "p", styler);
4045 break;
4047 case AARCH64_OPND_SME_PNn3_INDEX1:
4048 case AARCH64_OPND_SME_PNn3_INDEX2:
4049 snprintf (buf, size, "%s[%s]",
4050 style_reg (styler, "pn%d", opnd->reglane.regno),
4051 style_imm (styler, "%" PRIi64, opnd->reglane.index));
4052 break;
4054 case AARCH64_OPND_SVE_Za_5:
4055 case AARCH64_OPND_SVE_Za_16:
4056 case AARCH64_OPND_SVE_Zd:
4057 case AARCH64_OPND_SVE_Zm_5:
4058 case AARCH64_OPND_SVE_Zm_16:
4059 case AARCH64_OPND_SVE_Zn:
4060 case AARCH64_OPND_SVE_Zt:
4061 case AARCH64_OPND_SME_Zm:
4062 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
4063 snprintf (buf, size, "%s", style_reg (styler, "z%d", opnd->reg.regno));
4064 else
4065 snprintf (buf, size, "%s",
4066 style_reg (styler, "z%d.%s", opnd->reg.regno,
4067 aarch64_get_qualifier_name (opnd->qualifier)));
4068 break;
4070 case AARCH64_OPND_SVE_ZnxN:
4071 case AARCH64_OPND_SVE_ZtxN:
4072 case AARCH64_OPND_SME_Zdnx2:
4073 case AARCH64_OPND_SME_Zdnx4:
4074 case AARCH64_OPND_SME_Zmx2:
4075 case AARCH64_OPND_SME_Zmx4:
4076 case AARCH64_OPND_SME_Znx2:
4077 case AARCH64_OPND_SME_Znx4:
4078 case AARCH64_OPND_SME_Ztx2_STRIDED:
4079 case AARCH64_OPND_SME_Ztx4_STRIDED:
4080 case AARCH64_OPND_SME_Zt2:
4081 case AARCH64_OPND_SME_Zt3:
4082 case AARCH64_OPND_SME_Zt4:
4083 print_register_list (buf, size, opnd, "z", styler);
4084 break;
4086 case AARCH64_OPND_SVE_Zm3_INDEX:
4087 case AARCH64_OPND_SVE_Zm3_22_INDEX:
4088 case AARCH64_OPND_SVE_Zm3_19_INDEX:
4089 case AARCH64_OPND_SVE_Zm3_11_INDEX:
4090 case AARCH64_OPND_SVE_Zm4_11_INDEX:
4091 case AARCH64_OPND_SVE_Zm4_INDEX:
4092 case AARCH64_OPND_SVE_Zn_INDEX:
4093 case AARCH64_OPND_SME_Zm_INDEX1:
4094 case AARCH64_OPND_SME_Zm_INDEX2:
4095 case AARCH64_OPND_SME_Zm_INDEX3_1:
4096 case AARCH64_OPND_SME_Zm_INDEX3_2:
4097 case AARCH64_OPND_SME_Zm_INDEX3_10:
4098 case AARCH64_OPND_SVE_Zn_5_INDEX:
4099 case AARCH64_OPND_SME_Zm_INDEX4_1:
4100 case AARCH64_OPND_SME_Zm_INDEX4_10:
4101 case AARCH64_OPND_SME_Zn_INDEX1_16:
4102 case AARCH64_OPND_SME_Zn_INDEX2_15:
4103 case AARCH64_OPND_SME_Zn_INDEX2_16:
4104 case AARCH64_OPND_SME_Zn_INDEX3_14:
4105 case AARCH64_OPND_SME_Zn_INDEX3_15:
4106 case AARCH64_OPND_SME_Zn_INDEX4_14:
4107 case AARCH64_OPND_SVE_Zm_imm4:
4108 snprintf (buf, size, "%s[%s]",
4109 (opnd->qualifier == AARCH64_OPND_QLF_NIL
4110 ? style_reg (styler, "z%d", opnd->reglane.regno)
4111 : style_reg (styler, "z%d.%s", opnd->reglane.regno,
4112 aarch64_get_qualifier_name (opnd->qualifier))),
4113 style_imm (styler, "%" PRIi64, opnd->reglane.index));
4114 break;
4116 case AARCH64_OPND_SME_ZAda_2b:
4117 case AARCH64_OPND_SME_ZAda_3b:
4118 snprintf (buf, size, "%s",
4119 style_reg (styler, "za%d.%s", opnd->reg.regno,
4120 aarch64_get_qualifier_name (opnd->qualifier)));
4121 break;
4123 case AARCH64_OPND_SME_ZA_HV_idx_src:
4124 case AARCH64_OPND_SME_ZA_HV_idx_srcxN:
4125 case AARCH64_OPND_SME_ZA_HV_idx_dest:
4126 case AARCH64_OPND_SME_ZA_HV_idx_destxN:
4127 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
4128 snprintf (buf, size, "%s%s[%s, %s%s%s%s%s]%s",
4129 opnd->type == AARCH64_OPND_SME_ZA_HV_idx_ldstr ? "{" : "",
4130 style_reg (styler, "za%d%c.%s",
4131 opnd->indexed_za.regno,
4132 opnd->indexed_za.v == 1 ? 'v' : 'h',
4133 aarch64_get_qualifier_name (opnd->qualifier)),
4134 style_reg (styler, "w%d", opnd->indexed_za.index.regno),
4135 style_imm (styler, "%" PRIi64, opnd->indexed_za.index.imm),
4136 opnd->indexed_za.index.countm1 ? ":" : "",
4137 (opnd->indexed_za.index.countm1
4138 ? style_imm (styler, "%d",
4139 opnd->indexed_za.index.imm
4140 + opnd->indexed_za.index.countm1)
4141 : ""),
4142 opnd->indexed_za.group_size ? ", " : "",
4143 opnd->indexed_za.group_size == 2
4144 ? style_sub_mnem (styler, "vgx2")
4145 : opnd->indexed_za.group_size == 4
4146 ? style_sub_mnem (styler, "vgx4") : "",
4147 opnd->type == AARCH64_OPND_SME_ZA_HV_idx_ldstr ? "}" : "");
4148 break;
4150 case AARCH64_OPND_SME_list_of_64bit_tiles:
4151 print_sme_za_list (buf, size, opnd->reg.regno, styler);
4152 break;
4154 case AARCH64_OPND_SME_ZA_array_off1x4:
4155 case AARCH64_OPND_SME_ZA_array_off2x2:
4156 case AARCH64_OPND_SME_ZA_array_off2x4:
4157 case AARCH64_OPND_SME_ZA_array_off3_0:
4158 case AARCH64_OPND_SME_ZA_array_off3_5:
4159 case AARCH64_OPND_SME_ZA_array_off3x2:
4160 case AARCH64_OPND_SME_ZA_array_off4:
4161 snprintf (buf, size, "%s[%s, %s%s%s%s%s]",
4162 style_reg (styler, "za%s%s",
4163 opnd->qualifier == AARCH64_OPND_QLF_NIL ? "" : ".",
4164 (opnd->qualifier == AARCH64_OPND_QLF_NIL
4165 ? ""
4166 : aarch64_get_qualifier_name (opnd->qualifier))),
4167 style_reg (styler, "w%d", opnd->indexed_za.index.regno),
4168 style_imm (styler, "%" PRIi64, opnd->indexed_za.index.imm),
4169 opnd->indexed_za.index.countm1 ? ":" : "",
4170 (opnd->indexed_za.index.countm1
4171 ? style_imm (styler, "%d",
4172 opnd->indexed_za.index.imm
4173 + opnd->indexed_za.index.countm1)
4174 : ""),
4175 opnd->indexed_za.group_size ? ", " : "",
4176 opnd->indexed_za.group_size == 2
4177 ? style_sub_mnem (styler, "vgx2")
4178 : opnd->indexed_za.group_size == 4
4179 ? style_sub_mnem (styler, "vgx4") : "");
4180 break;
4182 case AARCH64_OPND_SME_ZA_array_vrsb_1:
4183 case AARCH64_OPND_SME_ZA_array_vrsh_1:
4184 case AARCH64_OPND_SME_ZA_array_vrss_1:
4185 case AARCH64_OPND_SME_ZA_array_vrsd_1:
4186 case AARCH64_OPND_SME_ZA_array_vrsb_2:
4187 case AARCH64_OPND_SME_ZA_array_vrsh_2:
4188 case AARCH64_OPND_SME_ZA_array_vrss_2:
4189 case AARCH64_OPND_SME_ZA_array_vrsd_2:
4190 snprintf (buf, size, "%s [%s, %s%s%s]",
4191 style_reg (styler, "za%d%c%s%s",
4192 opnd->indexed_za.regno,
4193 opnd->indexed_za.v ? 'v': 'h',
4194 opnd->qualifier == AARCH64_OPND_QLF_NIL ? "" : ".",
4195 (opnd->qualifier == AARCH64_OPND_QLF_NIL
4196 ? ""
4197 : aarch64_get_qualifier_name (opnd->qualifier))),
4198 style_reg (styler, "w%d", opnd->indexed_za.index.regno),
4199 style_imm (styler, "%" PRIi64, opnd->indexed_za.index.imm),
4200 opnd->indexed_za.index.countm1 ? ":" : "",
4201 opnd->indexed_za.index.countm1 ? style_imm (styler, "%d",
4202 opnd->indexed_za.index.imm
4203 + opnd->indexed_za.index.countm1):"");
4204 break;
4206 case AARCH64_OPND_SME_SM_ZA:
4207 snprintf (buf, size, "%s",
4208 style_reg (styler, opnd->reg.regno == 's' ? "sm" : "za"));
4209 break;
4211 case AARCH64_OPND_SME_PnT_Wm_imm:
4212 snprintf (buf, size, "%s[%s, %s]",
4213 style_reg (styler, "p%d.%s", opnd->indexed_za.regno,
4214 aarch64_get_qualifier_name (opnd->qualifier)),
4215 style_reg (styler, "w%d", opnd->indexed_za.index.regno),
4216 style_imm (styler, "%" PRIi64, opnd->indexed_za.index.imm));
4217 break;
4219 case AARCH64_OPND_SME_VLxN_10:
4220 case AARCH64_OPND_SME_VLxN_13:
4221 enum_value = opnd->imm.value;
4222 assert (enum_value < ARRAY_SIZE (aarch64_sme_vlxn_array));
4223 snprintf (buf, size, "%s",
4224 style_sub_mnem (styler, aarch64_sme_vlxn_array[enum_value]));
4225 break;
4227 case AARCH64_OPND_CRn:
4228 case AARCH64_OPND_CRm:
4229 snprintf (buf, size, "%s",
4230 style_reg (styler, "C%" PRIi64, opnd->imm.value));
4231 break;
4233 case AARCH64_OPND_IDX:
4234 case AARCH64_OPND_MASK:
4235 case AARCH64_OPND_IMM:
4236 case AARCH64_OPND_IMM_2:
4237 case AARCH64_OPND_WIDTH:
4238 case AARCH64_OPND_UIMM3_OP1:
4239 case AARCH64_OPND_UIMM3_OP2:
4240 case AARCH64_OPND_BIT_NUM:
4241 case AARCH64_OPND_IMM_VLSL:
4242 case AARCH64_OPND_IMM_VLSR:
4243 case AARCH64_OPND_SHLL_IMM:
4244 case AARCH64_OPND_IMM0:
4245 case AARCH64_OPND_IMMR:
4246 case AARCH64_OPND_IMMS:
4247 case AARCH64_OPND_UNDEFINED:
4248 case AARCH64_OPND_FBITS:
4249 case AARCH64_OPND_TME_UIMM16:
4250 case AARCH64_OPND_SIMM5:
4251 case AARCH64_OPND_SME_SHRIMM4:
4252 case AARCH64_OPND_SME_SHRIMM5:
4253 case AARCH64_OPND_SVE_SHLIMM_PRED:
4254 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
4255 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
4256 case AARCH64_OPND_SVE_SHRIMM_PRED:
4257 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
4258 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
4259 case AARCH64_OPND_SVE_SIMM5:
4260 case AARCH64_OPND_SVE_SIMM5B:
4261 case AARCH64_OPND_SVE_SIMM6:
4262 case AARCH64_OPND_SVE_SIMM8:
4263 case AARCH64_OPND_SVE_UIMM3:
4264 case AARCH64_OPND_SVE_UIMM7:
4265 case AARCH64_OPND_SVE_UIMM8:
4266 case AARCH64_OPND_SVE_UIMM8_53:
4267 case AARCH64_OPND_IMM_ROT1:
4268 case AARCH64_OPND_IMM_ROT2:
4269 case AARCH64_OPND_IMM_ROT3:
4270 case AARCH64_OPND_SVE_IMM_ROT1:
4271 case AARCH64_OPND_SVE_IMM_ROT2:
4272 case AARCH64_OPND_SVE_IMM_ROT3:
4273 case AARCH64_OPND_CSSC_SIMM8:
4274 case AARCH64_OPND_CSSC_UIMM8:
4275 snprintf (buf, size, "%s",
4276 style_imm (styler, "#%" PRIi64, opnd->imm.value));
4277 break;
4279 case AARCH64_OPND_SVE_I1_HALF_ONE:
4280 case AARCH64_OPND_SVE_I1_HALF_TWO:
4281 case AARCH64_OPND_SVE_I1_ZERO_ONE:
4283 single_conv_t c;
4284 c.i = opnd->imm.value;
4285 snprintf (buf, size, "%s", style_imm (styler, "#%.1f", c.f));
4286 break;
4289 case AARCH64_OPND_SVE_PATTERN:
4290 if (optional_operand_p (opcode, idx)
4291 && opnd->imm.value == get_optional_operand_default_value (opcode))
4292 break;
4293 enum_value = opnd->imm.value;
4294 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
4295 if (aarch64_sve_pattern_array[enum_value])
4296 snprintf (buf, size, "%s",
4297 style_reg (styler, aarch64_sve_pattern_array[enum_value]));
4298 else
4299 snprintf (buf, size, "%s",
4300 style_imm (styler, "#%" PRIi64, opnd->imm.value));
4301 break;
4303 case AARCH64_OPND_SVE_PATTERN_SCALED:
4304 if (optional_operand_p (opcode, idx)
4305 && !opnd->shifter.operator_present
4306 && opnd->imm.value == get_optional_operand_default_value (opcode))
4307 break;
4308 enum_value = opnd->imm.value;
4309 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
4310 if (aarch64_sve_pattern_array[opnd->imm.value])
4311 snprintf (buf, size, "%s",
4312 style_reg (styler,
4313 aarch64_sve_pattern_array[opnd->imm.value]));
4314 else
4315 snprintf (buf, size, "%s",
4316 style_imm (styler, "#%" PRIi64, opnd->imm.value));
4317 if (opnd->shifter.operator_present)
4319 size_t len = strlen (buf);
4320 const char *shift_name
4321 = aarch64_operand_modifiers[opnd->shifter.kind].name;
4322 snprintf (buf + len, size - len, ", %s %s",
4323 style_sub_mnem (styler, shift_name),
4324 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
4326 break;
4328 case AARCH64_OPND_SVE_PRFOP:
4329 enum_value = opnd->imm.value;
4330 assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
4331 if (aarch64_sve_prfop_array[enum_value])
4332 snprintf (buf, size, "%s",
4333 style_reg (styler, aarch64_sve_prfop_array[enum_value]));
4334 else
4335 snprintf (buf, size, "%s",
4336 style_imm (styler, "#%" PRIi64, opnd->imm.value));
4337 break;
4339 case AARCH64_OPND_IMM_MOV:
4340 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
4342 case 4: /* e.g. MOV Wd, #<imm32>. */
4344 int imm32 = opnd->imm.value;
4345 snprintf (buf, size, "%s",
4346 style_imm (styler, "#0x%-20x", imm32));
4347 snprintf (comment, comment_size, "#%d", imm32);
4349 break;
4350 case 8: /* e.g. MOV Xd, #<imm64>. */
4351 snprintf (buf, size, "%s", style_imm (styler, "#0x%-20" PRIx64,
4352 opnd->imm.value));
4353 snprintf (comment, comment_size, "#%" PRIi64, opnd->imm.value);
4354 break;
4355 default:
4356 snprintf (buf, size, "<invalid>");
4357 break;
4359 break;
4361 case AARCH64_OPND_FPIMM0:
4362 snprintf (buf, size, "%s", style_imm (styler, "#0.0"));
4363 break;
4365 case AARCH64_OPND_LIMM:
4366 case AARCH64_OPND_AIMM:
4367 case AARCH64_OPND_HALF:
4368 case AARCH64_OPND_SVE_INV_LIMM:
4369 case AARCH64_OPND_SVE_LIMM:
4370 case AARCH64_OPND_SVE_LIMM_MOV:
4371 if (opnd->shifter.amount)
4372 snprintf (buf, size, "%s, %s %s",
4373 style_imm (styler, "#0x%" PRIx64, opnd->imm.value),
4374 style_sub_mnem (styler, "lsl"),
4375 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
4376 else
4377 snprintf (buf, size, "%s",
4378 style_imm (styler, "#0x%" PRIx64, opnd->imm.value));
4379 break;
4381 case AARCH64_OPND_SIMD_IMM:
4382 case AARCH64_OPND_SIMD_IMM_SFT:
4383 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
4384 || opnd->shifter.kind == AARCH64_MOD_NONE)
4385 snprintf (buf, size, "%s",
4386 style_imm (styler, "#0x%" PRIx64, opnd->imm.value));
4387 else
4388 snprintf (buf, size, "%s, %s %s",
4389 style_imm (styler, "#0x%" PRIx64, opnd->imm.value),
4390 style_sub_mnem (styler, aarch64_operand_modifiers[opnd->shifter.kind].name),
4391 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
4392 break;
4394 case AARCH64_OPND_SVE_AIMM:
4395 case AARCH64_OPND_SVE_ASIMM:
4396 if (opnd->shifter.amount)
4397 snprintf (buf, size, "%s, %s %s",
4398 style_imm (styler, "#%" PRIi64, opnd->imm.value),
4399 style_sub_mnem (styler, "lsl"),
4400 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
4401 else
4402 snprintf (buf, size, "%s",
4403 style_imm (styler, "#%" PRIi64, opnd->imm.value));
4404 break;
4406 case AARCH64_OPND_FPIMM:
4407 case AARCH64_OPND_SIMD_FPIMM:
4408 case AARCH64_OPND_SVE_FPIMM8:
4409 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
4411 case 2: /* e.g. FMOV <Hd>, #<imm>. */
4413 half_conv_t c;
4414 c.i = expand_fp_imm (2, opnd->imm.value);
4415 snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.f));
4417 break;
4418 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
4420 single_conv_t c;
4421 c.i = expand_fp_imm (4, opnd->imm.value);
4422 snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.f));
4424 break;
4425 case 8: /* e.g. FMOV <Sd>, #<imm>. */
4427 double_conv_t c;
4428 c.i = expand_fp_imm (8, opnd->imm.value);
4429 snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.d));
4431 break;
4432 default:
4433 snprintf (buf, size, "<invalid>");
4434 break;
4436 break;
4438 case AARCH64_OPND_CCMP_IMM:
4439 case AARCH64_OPND_NZCV:
4440 case AARCH64_OPND_EXCEPTION:
4441 case AARCH64_OPND_UIMM4:
4442 case AARCH64_OPND_UIMM4_ADDG:
4443 case AARCH64_OPND_UIMM7:
4444 case AARCH64_OPND_UIMM10:
4445 if (optional_operand_p (opcode, idx)
4446 && (opnd->imm.value ==
4447 (int64_t) get_optional_operand_default_value (opcode)))
4448 /* Omit the operand, e.g. DCPS1. */
4449 break;
4450 snprintf (buf, size, "%s",
4451 style_imm (styler, "#0x%x", (unsigned int) opnd->imm.value));
4452 break;
4454 case AARCH64_OPND_COND:
4455 case AARCH64_OPND_COND1:
4456 snprintf (buf, size, "%s",
4457 style_sub_mnem (styler, opnd->cond->names[0]));
4458 num_conds = ARRAY_SIZE (opnd->cond->names);
4459 for (i = 1; i < num_conds && opnd->cond->names[i]; ++i)
4461 size_t len = comment != NULL ? strlen (comment) : 0;
4462 if (i == 1)
4463 snprintf (comment + len, comment_size - len, "%s = %s",
4464 opnd->cond->names[0], opnd->cond->names[i]);
4465 else
4466 snprintf (comment + len, comment_size - len, ", %s",
4467 opnd->cond->names[i]);
4469 break;
4471 case AARCH64_OPND_ADDR_ADRP:
4472 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
4473 + opnd->imm.value;
4474 if (pcrel_p)
4475 *pcrel_p = 1;
4476 if (address)
4477 *address = addr;
4478 /* This is not necessary during the disassembling, as print_address_func
4479 in the disassemble_info will take care of the printing. But some
4480 other callers may be still interested in getting the string in *STR,
4481 so here we do snprintf regardless. */
4482 snprintf (buf, size, "%s", style_addr (styler, "#0x%" PRIx64 , addr));
4483 break;
4485 case AARCH64_OPND_ADDR_PCREL14:
4486 case AARCH64_OPND_ADDR_PCREL19:
4487 case AARCH64_OPND_ADDR_PCREL21:
4488 case AARCH64_OPND_ADDR_PCREL26:
4489 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
4490 if (pcrel_p)
4491 *pcrel_p = 1;
4492 if (address)
4493 *address = addr;
4494 /* This is not necessary during the disassembling, as print_address_func
4495 in the disassemble_info will take care of the printing. But some
4496 other callers may be still interested in getting the string in *STR,
4497 so here we do snprintf regardless. */
4498 snprintf (buf, size, "%s", style_addr (styler, "#0x%" PRIx64, addr));
4499 break;
4501 case AARCH64_OPND_ADDR_SIMPLE:
4502 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
4503 case AARCH64_OPND_SIMD_ADDR_POST:
4504 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
4505 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
4507 if (opnd->addr.offset.is_reg)
4508 snprintf (buf, size, "[%s], %s",
4509 style_reg (styler, name),
4510 style_reg (styler, "x%d", opnd->addr.offset.regno));
4511 else
4512 snprintf (buf, size, "[%s], %s",
4513 style_reg (styler, name),
4514 style_imm (styler, "#%d", opnd->addr.offset.imm));
4516 else
4517 snprintf (buf, size, "[%s]", style_reg (styler, name));
4518 break;
4520 case AARCH64_OPND_ADDR_REGOFF:
4521 case AARCH64_OPND_SVE_ADDR_R:
4522 case AARCH64_OPND_SVE_ADDR_RR:
4523 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
4524 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
4525 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
4526 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
4527 case AARCH64_OPND_SVE_ADDR_RX:
4528 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
4529 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
4530 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
4531 print_register_offset_address
4532 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
4533 get_offset_int_reg_name (opnd), styler);
4534 break;
4536 case AARCH64_OPND_SVE_ADDR_ZX:
4537 print_register_offset_address
4538 (buf, size, opnd,
4539 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
4540 get_64bit_int_reg_name (opnd->addr.offset.regno, 0), styler);
4541 break;
4543 case AARCH64_OPND_SVE_ADDR_RZ:
4544 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
4545 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
4546 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
4547 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
4548 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
4549 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
4550 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
4551 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
4552 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
4553 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
4554 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
4555 print_register_offset_address
4556 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
4557 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier),
4558 styler);
4559 break;
4561 case AARCH64_OPND_ADDR_SIMM7:
4562 case AARCH64_OPND_ADDR_SIMM9:
4563 case AARCH64_OPND_ADDR_SIMM9_2:
4564 case AARCH64_OPND_ADDR_SIMM10:
4565 case AARCH64_OPND_ADDR_SIMM11:
4566 case AARCH64_OPND_ADDR_SIMM13:
4567 case AARCH64_OPND_ADDR_OFFSET:
4568 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
4569 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
4570 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
4571 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
4572 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
4573 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
4574 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
4575 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
4576 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
4577 case AARCH64_OPND_SVE_ADDR_RI_U6:
4578 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
4579 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
4580 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
4581 print_immediate_offset_address
4582 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
4583 styler);
4584 break;
4586 case AARCH64_OPND_SVE_ADDR_ZI_U5:
4587 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
4588 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
4589 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
4590 print_immediate_offset_address
4591 (buf, size, opnd,
4592 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
4593 styler);
4594 break;
4596 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
4597 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
4598 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
4599 print_register_offset_address
4600 (buf, size, opnd,
4601 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
4602 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier),
4603 styler);
4604 break;
4606 case AARCH64_OPND_ADDR_UIMM12:
4607 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
4608 if (opnd->addr.offset.imm)
4609 snprintf (buf, size, "[%s, %s]",
4610 style_reg (styler, name),
4611 style_imm (styler, "#%d", opnd->addr.offset.imm));
4612 else
4613 snprintf (buf, size, "[%s]", style_reg (styler, name));
4614 break;
4616 case AARCH64_OPND_SYSREG:
4617 case AARCH64_OPND_SYSREG128:
4618 for (i = 0; aarch64_sys_regs[i].name; ++i)
4620 const aarch64_sys_reg *sr = aarch64_sys_regs + i;
4622 bool exact_match
4623 = (!(sr->flags & (F_REG_READ | F_REG_WRITE))
4624 || (sr->flags & opnd->sysreg.flags) == opnd->sysreg.flags)
4625 && AARCH64_CPU_HAS_ALL_FEATURES (features, sr->features);
4627 /* Try and find an exact match, But if that fails, return the first
4628 partial match that was found. */
4629 if (aarch64_sys_regs[i].value == opnd->sysreg.value
4630 && ! aarch64_sys_reg_deprecated_p (aarch64_sys_regs[i].flags)
4631 && ! aarch64_sys_reg_alias_p (aarch64_sys_regs[i].flags)
4632 && (name == NULL || exact_match))
4634 name = aarch64_sys_regs[i].name;
4635 if (exact_match)
4637 if (notes)
4638 *notes = NULL;
4639 break;
4642 /* If we didn't match exactly, that means the presense of a flag
4643 indicates what we didn't want for this instruction. e.g. If
4644 F_REG_READ is there, that means we were looking for a write
4645 register. See aarch64_ext_sysreg. */
4646 if (aarch64_sys_regs[i].flags & F_REG_WRITE)
4647 *notes = _("reading from a write-only register");
4648 else if (aarch64_sys_regs[i].flags & F_REG_READ)
4649 *notes = _("writing to a read-only register");
4653 if (name)
4654 snprintf (buf, size, "%s", style_reg (styler, name));
4655 else
4657 /* Implementation defined system register. */
4658 unsigned int value = opnd->sysreg.value;
4659 snprintf (buf, size, "%s",
4660 style_reg (styler, "s%u_%u_c%u_c%u_%u",
4661 (value >> 14) & 0x3, (value >> 11) & 0x7,
4662 (value >> 7) & 0xf, (value >> 3) & 0xf,
4663 value & 0x7));
4665 break;
4667 case AARCH64_OPND_PSTATEFIELD:
4668 for (i = 0; aarch64_pstatefields[i].name; ++i)
4669 if (aarch64_pstatefields[i].value == opnd->pstatefield)
4671 /* PSTATEFIELD name is encoded partially in CRm[3:1] for SVCRSM,
4672 SVCRZA and SVCRSMZA. */
4673 uint32_t flags = aarch64_pstatefields[i].flags;
4674 if (flags & F_REG_IN_CRM
4675 && (PSTATE_DECODE_CRM (opnd->sysreg.flags)
4676 != PSTATE_DECODE_CRM (flags)))
4677 continue;
4678 break;
4680 assert (aarch64_pstatefields[i].name);
4681 snprintf (buf, size, "%s",
4682 style_reg (styler, aarch64_pstatefields[i].name));
4683 break;
4685 case AARCH64_OPND_SYSREG_AT:
4686 case AARCH64_OPND_SYSREG_DC:
4687 case AARCH64_OPND_SYSREG_IC:
4688 case AARCH64_OPND_SYSREG_TLBI:
4689 case AARCH64_OPND_SYSREG_SR:
4690 snprintf (buf, size, "%s", style_reg (styler, opnd->sysins_op->name));
4691 break;
4693 case AARCH64_OPND_BARRIER:
4694 case AARCH64_OPND_BARRIER_DSB_NXS:
4696 if (opnd->barrier->name[0] == '#')
4697 snprintf (buf, size, "%s", style_imm (styler, opnd->barrier->name));
4698 else
4699 snprintf (buf, size, "%s",
4700 style_sub_mnem (styler, opnd->barrier->name));
4702 break;
4704 case AARCH64_OPND_BARRIER_ISB:
4705 /* Operand can be omitted, e.g. in DCPS1. */
4706 if (! optional_operand_p (opcode, idx)
4707 || (opnd->barrier->value
4708 != get_optional_operand_default_value (opcode)))
4709 snprintf (buf, size, "%s",
4710 style_imm (styler, "#0x%x", opnd->barrier->value));
4711 break;
4713 case AARCH64_OPND_PRFOP:
4714 if (opnd->prfop->name != NULL)
4715 snprintf (buf, size, "%s", style_sub_mnem (styler, opnd->prfop->name));
4716 else
4717 snprintf (buf, size, "%s", style_imm (styler, "#0x%02x",
4718 opnd->prfop->value));
4719 break;
4721 case AARCH64_OPND_RPRFMOP:
4722 enum_value = opnd->imm.value;
4723 if (enum_value < ARRAY_SIZE (aarch64_rprfmop_array)
4724 && aarch64_rprfmop_array[enum_value])
4725 snprintf (buf, size, "%s",
4726 style_reg (styler, aarch64_rprfmop_array[enum_value]));
4727 else
4728 snprintf (buf, size, "%s",
4729 style_imm (styler, "#%" PRIi64, opnd->imm.value));
4730 break;
4732 case AARCH64_OPND_BARRIER_PSB:
4733 snprintf (buf, size, "%s", style_sub_mnem (styler, "csync"));
4734 break;
4736 case AARCH64_OPND_X16:
4737 snprintf (buf, size, "%s", style_reg (styler, "x16"));
4738 break;
4740 case AARCH64_OPND_SME_ZT0:
4741 snprintf (buf, size, "%s", style_reg (styler, "zt0"));
4742 break;
4744 case AARCH64_OPND_SME_ZT0_INDEX:
4745 snprintf (buf, size, "%s[%s]", style_reg (styler, "zt0"),
4746 style_imm (styler, "%d", (int) opnd->imm.value));
4747 break;
4749 case AARCH64_OPND_SME_ZT0_LIST:
4750 snprintf (buf, size, "{%s}", style_reg (styler, "zt0"));
4751 break;
4753 case AARCH64_OPND_BARRIER_GCSB:
4754 snprintf (buf, size, "%s", style_sub_mnem (styler, "dsync"));
4755 break;
4757 case AARCH64_OPND_BTI_TARGET:
4758 if ((HINT_FLAG (opnd->hint_option->value) & HINT_OPD_F_NOPRINT) == 0)
4759 snprintf (buf, size, "%s",
4760 style_sub_mnem (styler, opnd->hint_option->name));
4761 break;
4763 case AARCH64_OPND_MOPS_ADDR_Rd:
4764 case AARCH64_OPND_MOPS_ADDR_Rs:
4765 snprintf (buf, size, "[%s]!",
4766 style_reg (styler,
4767 get_int_reg_name (opnd->reg.regno,
4768 AARCH64_OPND_QLF_X, 0)));
4769 break;
4771 case AARCH64_OPND_MOPS_WB_Rn:
4772 snprintf (buf, size, "%s!",
4773 style_reg (styler, get_int_reg_name (opnd->reg.regno,
4774 AARCH64_OPND_QLF_X, 0)));
4775 break;
4777 default:
4778 snprintf (buf, size, "<invalid>");
4779 break;
/* Pack a system-register encoding from its op0, op1, CRn, CRm and op2
   fields.  The fields are OR'd together left-to-right and the final
   right-shift by 5 discards the unused low bits of the packed value.  */
#define CPENC(op0,op1,crn,crm,op2) \
  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
/* for 3.9.3 Instructions for Accessing Special Purpose Registers */
#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
/* for 3.9.10 System Instructions */
#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))

/* Shorthand names for the CRn/CRm field values used in the tables below.  */
#define C0 0
#define C1 1
#define C2 2
#define C3 3
#define C4 4
#define C5 5
#define C6 6
#define C7 7
#define C8 8
#define C9 9
#define C10 10
#define C11 11
#define C12 12
#define C13 13
#define C14 14
#define C15 15
/* TODO: there is one more issue that needs to be resolved:
   1. handle cpu-implementation-defined system registers.

   Note that the F_REG_{READ,WRITE} flags mean read-only and write-only
   respectively.  If neither of these are set then the register is read-write.  */
/* Master table of AArch64 system registers.  The entries are generated
   from aarch64-sys-regs.def via the SYSREG macro; the table is
   terminated by a sentinel entry with a null name.  */
const aarch64_sys_reg aarch64_sys_regs [] =
{
#define SYSREG(name, encoding, flags, features) \
  { name, encoding, flags, features },
#include "aarch64-sys-regs.def"
  /* Sentinel: null name marks the end of the table.  */
  { 0, CPENC (0,0,0,0,0), 0, AARCH64_NO_FEATURES }
#undef SYSREG
};
4821 bool
4822 aarch64_sys_reg_deprecated_p (const uint32_t reg_flags)
4824 return (reg_flags & F_DEPRECATED) != 0;
4827 bool
4828 aarch64_sys_reg_128bit_p (const uint32_t reg_flags)
4830 return (reg_flags & F_REG_128) != 0;
4833 bool
4834 aarch64_sys_reg_alias_p (const uint32_t reg_flags)
4836 return (reg_flags & F_REG_ALIAS) != 0;
/* The CPENC below is fairly misleading: the fields here are not in CPENC
   form.  They are in op2op1 form.  The fields are encoded by
   ins_pstatefield, which just shifts the value by the width of the fields
   in a loop.  So if you CPENC them, only the first value will be set; the
   rest are masked out to 0.  As an example, with op2 = 3 and op1 = 2,
   CPENC would produce a value of 0b110000000001000000 (0x30040) while
   what you want is 0b011010 (0x1a).  */
/* PSTATE fields accessible via MSR (immediate).  The second column is the
   op2op1 encoding (see the comment above); F_REG_MAX_VALUE gives the
   largest immediate accepted and F_ARCHEXT entries are gated on the
   feature set in the last column.  */
const aarch64_sys_reg aarch64_pstatefields [] =
{
  { "spsel", 0x05, F_REG_MAX_VALUE (1), AARCH64_NO_FEATURES },
  { "daifset", 0x1e, F_REG_MAX_VALUE (15), AARCH64_NO_FEATURES },
  { "daifclr", 0x1f, F_REG_MAX_VALUE (15), AARCH64_NO_FEATURES },
  { "pan", 0x04, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (PAN) },
  { "uao", 0x03, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (V8_2A) },
  { "ssbs", 0x19, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (SSBS) },
  { "dit", 0x1a, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (V8_4A) },
  { "tco", 0x1c, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (MEMTAG) },
  /* The SVCR* fields additionally encode part of the name in CRm[3:1]
     via PSTATE_ENCODE_CRM_AND_IMM (see AARCH64_OPND_PSTATEFIELD
     printing above).  */
  { "svcrsm", 0x1b, PSTATE_ENCODE_CRM_AND_IMM (0x2,0x1) | F_REG_MAX_VALUE (1)
    | F_ARCHEXT, AARCH64_FEATURE (SME) },
  { "svcrza", 0x1b, PSTATE_ENCODE_CRM_AND_IMM (0x4,0x1) | F_REG_MAX_VALUE (1)
    | F_ARCHEXT, AARCH64_FEATURE (SME) },
  { "svcrsmza", 0x1b, PSTATE_ENCODE_CRM_AND_IMM (0x6,0x1) | F_REG_MAX_VALUE (1)
    | F_ARCHEXT, AARCH64_FEATURE (SME) },
  { "allint", 0x08, F_REG_MAX_VALUE (1) | F_ARCHEXT, AARCH64_FEATURE (V8_8A) },
  /* Sentinel: null name marks the end of the table.  */
  { 0, CPENC (0,0,0,0,0), 0, AARCH64_NO_FEATURES },
};
4866 bool
4867 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
4868 const aarch64_sys_reg *reg)
4870 if (!(reg->flags & F_ARCHEXT))
4871 return true;
4873 return AARCH64_CPU_HAS_ALL_FEATURES (features, reg->features);
/* IC (instruction-cache maintenance) operations.  F_HASXT marks forms
   that take an Xt register operand (see aarch64_sys_ins_reg_has_xt).  */
const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
{
    { "ialluis", CPENS(0,C7,C1,0), 0 },
    { "iallu", CPENS(0,C7,C5,0), 0 },
    { "ivau", CPENS (3, C7, C5, 1), F_HASXT },
    /* Sentinel: null name marks the end of the table.  */
    { 0, CPENS(0,0,0,0), 0 }
};
/* DC (data-cache maintenance) operations.  F_HASXT marks forms taking an
   Xt operand; F_ARCHEXT marks forms gated on an architecture extension.  */
const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
{
    { "zva", CPENS (3, C7, C4, 1), F_HASXT },
    { "gva", CPENS (3, C7, C4, 3), F_HASXT | F_ARCHEXT },
    { "gzva", CPENS (3, C7, C4, 4), F_HASXT | F_ARCHEXT },
    { "ivac", CPENS (0, C7, C6, 1), F_HASXT },
    { "igvac", CPENS (0, C7, C6, 3), F_HASXT | F_ARCHEXT },
    { "igsw", CPENS (0, C7, C6, 4), F_HASXT | F_ARCHEXT },
    { "isw", CPENS (0, C7, C6, 2), F_HASXT },
    { "igdvac", CPENS (0, C7, C6, 5), F_HASXT | F_ARCHEXT },
    { "igdsw", CPENS (0, C7, C6, 6), F_HASXT | F_ARCHEXT },
    { "cvac", CPENS (3, C7, C10, 1), F_HASXT },
    { "cgvac", CPENS (3, C7, C10, 3), F_HASXT | F_ARCHEXT },
    { "cgdvac", CPENS (3, C7, C10, 5), F_HASXT | F_ARCHEXT },
    { "csw", CPENS (0, C7, C10, 2), F_HASXT },
    { "cgsw", CPENS (0, C7, C10, 4), F_HASXT | F_ARCHEXT },
    { "cgdsw", CPENS (0, C7, C10, 6), F_HASXT | F_ARCHEXT },
    { "cvau", CPENS (3, C7, C11, 1), F_HASXT },
    { "cvap", CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
    { "cgvap", CPENS (3, C7, C12, 3), F_HASXT | F_ARCHEXT },
    { "cgdvap", CPENS (3, C7, C12, 5), F_HASXT | F_ARCHEXT },
    { "cvadp", CPENS (3, C7, C13, 1), F_HASXT | F_ARCHEXT },
    { "cgvadp", CPENS (3, C7, C13, 3), F_HASXT | F_ARCHEXT },
    { "cgdvadp", CPENS (3, C7, C13, 5), F_HASXT | F_ARCHEXT },
    { "civac", CPENS (3, C7, C14, 1), F_HASXT },
    { "cigvac", CPENS (3, C7, C14, 3), F_HASXT | F_ARCHEXT },
    { "cigdvac", CPENS (3, C7, C14, 5), F_HASXT | F_ARCHEXT },
    { "cisw", CPENS (0, C7, C14, 2), F_HASXT },
    { "cigsw", CPENS (0, C7, C14, 4), F_HASXT | F_ARCHEXT },
    { "cigdsw", CPENS (0, C7, C14, 6), F_HASXT | F_ARCHEXT },
    { "cipapa", CPENS (6, C7, C14, 1), F_HASXT },
    { "cigdpapa", CPENS (6, C7, C14, 5), F_HASXT },
    /* Sentinel: null name marks the end of the table.  */
    { 0, CPENS(0,0,0,0), 0 }
};
/* AT (address-translation) operations.  All take an Xt operand
   (F_HASXT); F_ARCHEXT entries are gated on an architecture extension.  */
const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
{
    { "s1e1r", CPENS (0, C7, C8, 0), F_HASXT },
    { "s1e1w", CPENS (0, C7, C8, 1), F_HASXT },
    { "s1e0r", CPENS (0, C7, C8, 2), F_HASXT },
    { "s1e0w", CPENS (0, C7, C8, 3), F_HASXT },
    { "s12e1r", CPENS (4, C7, C8, 4), F_HASXT },
    { "s12e1w", CPENS (4, C7, C8, 5), F_HASXT },
    { "s12e0r", CPENS (4, C7, C8, 6), F_HASXT },
    { "s12e0w", CPENS (4, C7, C8, 7), F_HASXT },
    { "s1e2r", CPENS (4, C7, C8, 0), F_HASXT },
    { "s1e2w", CPENS (4, C7, C8, 1), F_HASXT },
    { "s1e3r", CPENS (6, C7, C8, 0), F_HASXT },
    { "s1e3w", CPENS (6, C7, C8, 1), F_HASXT },
    { "s1e1rp", CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
    { "s1e1wp", CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
    { "s1e1a", CPENS (0, C7, C9, 2), F_HASXT | F_ARCHEXT },
    { "s1e2a", CPENS (4, C7, C9, 2), F_HASXT | F_ARCHEXT },
    { "s1e3a", CPENS (6, C7, C9, 2), F_HASXT | F_ARCHEXT },
    /* Sentinel: null name marks the end of the table.  */
    { 0, CPENS(0,0,0,0), 0 }
};
/* TLBI (TLB-invalidation) operations.  F_HASXT marks forms taking an Xt
   operand; F_ARCHEXT marks forms gated on an architecture extension.  */
const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
{
    { "vmalle1", CPENS(0,C8,C7,0), 0 },
    { "vae1", CPENS (0, C8, C7, 1), F_HASXT },
    { "aside1", CPENS (0, C8, C7, 2), F_HASXT },
    { "vaae1", CPENS (0, C8, C7, 3), F_HASXT },
    { "vmalle1is", CPENS(0,C8,C3,0), 0 },
    { "vae1is", CPENS (0, C8, C3, 1), F_HASXT },
    { "aside1is", CPENS (0, C8, C3, 2), F_HASXT },
    { "vaae1is", CPENS (0, C8, C3, 3), F_HASXT },
    { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
    { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
    { "ipas2e1", CPENS (4, C8, C4, 1), F_HASXT },
    { "ipas2le1", CPENS (4, C8, C4, 5), F_HASXT },
    { "vae2", CPENS (4, C8, C7, 1), F_HASXT },
    { "vae2is", CPENS (4, C8, C3, 1), F_HASXT },
    { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
    { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
    { "vae3", CPENS (6, C8, C7, 1), F_HASXT },
    { "vae3is", CPENS (6, C8, C3, 1), F_HASXT },
    { "alle2", CPENS(4,C8,C7,0), 0 },
    { "alle2is", CPENS(4,C8,C3,0), 0 },
    { "alle1", CPENS(4,C8,C7,4), 0 },
    { "alle1is", CPENS(4,C8,C3,4), 0 },
    { "alle3", CPENS(6,C8,C7,0), 0 },
    { "alle3is", CPENS(6,C8,C3,0), 0 },
    { "vale1is", CPENS (0, C8, C3, 5), F_HASXT },
    { "vale2is", CPENS (4, C8, C3, 5), F_HASXT },
    { "vale3is", CPENS (6, C8, C3, 5), F_HASXT },
    { "vaale1is", CPENS (0, C8, C3, 7), F_HASXT },
    { "vale1", CPENS (0, C8, C7, 5), F_HASXT },
    { "vale2", CPENS (4, C8, C7, 5), F_HASXT },
    { "vale3", CPENS (6, C8, C7, 5), F_HASXT },
    { "vaale1", CPENS (0, C8, C7, 7), F_HASXT },

    /* Outer-shareable ("os"-suffixed) forms; all gated by F_ARCHEXT.  */
    { "vmalle1os", CPENS (0, C8, C1, 0), F_ARCHEXT },
    { "vae1os", CPENS (0, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "aside1os", CPENS (0, C8, C1, 2), F_HASXT | F_ARCHEXT },
    { "vaae1os", CPENS (0, C8, C1, 3), F_HASXT | F_ARCHEXT },
    { "vale1os", CPENS (0, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "vaale1os", CPENS (0, C8, C1, 7), F_HASXT | F_ARCHEXT },
    { "ipas2e1os", CPENS (4, C8, C4, 0), F_HASXT | F_ARCHEXT },
    { "ipas2le1os", CPENS (4, C8, C4, 4), F_HASXT | F_ARCHEXT },
    { "vae2os", CPENS (4, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "vale2os", CPENS (4, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "vmalls12e1os", CPENS (4, C8, C1, 6), F_ARCHEXT },
    { "vae3os", CPENS (6, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "vale3os", CPENS (6, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "alle2os", CPENS (4, C8, C1, 0), F_ARCHEXT },
    { "alle1os", CPENS (4, C8, C1, 4), F_ARCHEXT },
    { "alle3os", CPENS (6, C8, C1, 0), F_ARCHEXT },

    /* Range ("r"-prefixed) forms; all gated by F_ARCHEXT.  */
    { "rvae1", CPENS (0, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1", CPENS (0, C8, C6, 3), F_HASXT | F_ARCHEXT },
    { "rvale1", CPENS (0, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1", CPENS (0, C8, C6, 7), F_HASXT | F_ARCHEXT },
    { "rvae1is", CPENS (0, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1is", CPENS (0, C8, C2, 3), F_HASXT | F_ARCHEXT },
    { "rvale1is", CPENS (0, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1is", CPENS (0, C8, C2, 7), F_HASXT | F_ARCHEXT },
    { "rvae1os", CPENS (0, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1os", CPENS (0, C8, C5, 3), F_HASXT | F_ARCHEXT },
    { "rvale1os", CPENS (0, C8, C5, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1os", CPENS (0, C8, C5, 7), F_HASXT | F_ARCHEXT },
    { "ripas2e1is", CPENS (4, C8, C0, 2), F_HASXT | F_ARCHEXT },
    { "ripas2le1is",CPENS (4, C8, C0, 6), F_HASXT | F_ARCHEXT },
    { "ripas2e1", CPENS (4, C8, C4, 2), F_HASXT | F_ARCHEXT },
    { "ripas2le1", CPENS (4, C8, C4, 6), F_HASXT | F_ARCHEXT },
    { "ripas2e1os", CPENS (4, C8, C4, 3), F_HASXT | F_ARCHEXT },
    { "ripas2le1os",CPENS (4, C8, C4, 7), F_HASXT | F_ARCHEXT },
    { "rvae2", CPENS (4, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvale2", CPENS (4, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvae2is", CPENS (4, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvale2is", CPENS (4, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvae2os", CPENS (4, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvale2os", CPENS (4, C8, C5, 5), F_HASXT | F_ARCHEXT },
    { "rvae3", CPENS (6, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvale3", CPENS (6, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvae3is", CPENS (6, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvale3is", CPENS (6, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvae3os", CPENS (6, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvale3os", CPENS (6, C8, C5, 5), F_HASXT | F_ARCHEXT },

    { "rpaos", CPENS (6, C8, C4, 3), F_HASXT },
    { "rpalos", CPENS (6, C8, C4, 7), F_HASXT },
    { "paallos", CPENS (6, C8, C1, 4), 0},
    { "paall", CPENS (6, C8, C7, 4), 0},

    /* "nxs"-suffixed variants (encoded with CRn = C9).  */
    { "vae1osnxs", CPENS (0, C9, C1, 1), F_HASXT | F_ARCHEXT },
    { "vaae1osnxs", CPENS (0, C9, C1, 3), F_HASXT | F_ARCHEXT },
    { "vale1osnxs", CPENS (0, C9, C1, 5), F_HASXT | F_ARCHEXT },
    { "vaale1osnxs", CPENS (0, C9, C1, 7), F_HASXT | F_ARCHEXT },
    { "rvae1isnxs", CPENS (0, C9, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1isnxs", CPENS (0, C9, C2, 3), F_HASXT | F_ARCHEXT },
    { "rvale1isnxs", CPENS (0, C9, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1isnxs", CPENS (0, C9, C2, 7), F_HASXT | F_ARCHEXT },
    { "vae1isnxs", CPENS (0, C9, C3, 1), F_HASXT },
    { "vaae1isnxs", CPENS (0, C9, C3, 3), F_HASXT },
    { "vale1isnxs", CPENS (0, C9, C3, 5), F_HASXT },
    { "vaale1isnxs", CPENS (0, C9, C3, 7), F_HASXT },
    { "rvae1osnxs", CPENS (0, C9, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1osnxs", CPENS (0, C9, C5, 3), F_HASXT | F_ARCHEXT },
    { "rvale1osnxs", CPENS (0, C9, C5, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1osnxs", CPENS (0, C9, C5, 7), F_HASXT | F_ARCHEXT },
    { "rvae1nxs", CPENS (0, C9, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1nxs", CPENS (0, C9, C6, 3), F_HASXT | F_ARCHEXT },
    { "rvale1nxs", CPENS (0, C9, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1nxs", CPENS (0, C9, C6, 7), F_HASXT | F_ARCHEXT },
    { "vae1nxs", CPENS (0, C9, C7, 1), F_HASXT },
    { "vaae1nxs", CPENS (0, C9, C7, 3), F_HASXT },
    { "vale1nxs", CPENS (0, C9, C7, 5), F_HASXT },
    { "vaale1nxs", CPENS (0, C9, C7, 7), F_HASXT },
    { "ipas2e1isnxs", CPENS (4, C9, C0, 1), F_HASXT },
    { "ripas2e1isnxs", CPENS (4, C9, C0, 2), F_HASXT | F_ARCHEXT },
    { "ipas2le1isnxs", CPENS (4, C9, C0, 5), F_HASXT },
    { "ripas2le1isnxs", CPENS (4, C9, C0, 6), F_HASXT | F_ARCHEXT },
    { "vae2osnxs", CPENS (4, C9, C1, 1), F_HASXT | F_ARCHEXT },
    { "vale2osnxs", CPENS (4, C9, C1, 5), F_HASXT | F_ARCHEXT },
    { "rvae2isnxs", CPENS (4, C9, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvale2isnxs", CPENS (4, C9, C2, 5), F_HASXT | F_ARCHEXT },
    { "vae2isnxs", CPENS (4, C9, C3, 1), F_HASXT },
    { "vale2isnxs", CPENS (4, C9, C3, 5), F_HASXT },
    { "ipas2e1osnxs", CPENS (4, C9, C4, 0), F_HASXT | F_ARCHEXT },
    { "ipas2e1nxs", CPENS (4, C9, C4, 1), F_HASXT },
    { "ripas2e1nxs", CPENS (4, C9, C4, 2), F_HASXT | F_ARCHEXT },
    { "ripas2e1osnxs", CPENS (4, C9, C4, 3), F_HASXT | F_ARCHEXT },
    { "ipas2le1osnxs", CPENS (4, C9, C4, 4), F_HASXT | F_ARCHEXT },
    { "ipas2le1nxs", CPENS (4, C9, C4, 5), F_HASXT },
    { "ripas2le1nxs", CPENS (4, C9, C4, 6), F_HASXT | F_ARCHEXT },
    { "ripas2le1osnxs", CPENS (4, C9, C4, 7), F_HASXT | F_ARCHEXT },
    { "rvae2osnxs", CPENS (4, C9, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvale2osnxs", CPENS (4, C9, C5, 5), F_HASXT | F_ARCHEXT },
    { "rvae2nxs", CPENS (4, C9, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvale2nxs", CPENS (4, C9, C6, 5), F_HASXT | F_ARCHEXT },
    { "vae2nxs", CPENS (4, C9, C7, 1), F_HASXT },
    { "vale2nxs", CPENS (4, C9, C7, 5), F_HASXT },
    { "vae3osnxs", CPENS (6, C9, C1, 1), F_HASXT | F_ARCHEXT },
    { "vale3osnxs", CPENS (6, C9, C1, 5), F_HASXT | F_ARCHEXT },
    { "rvae3isnxs", CPENS (6, C9, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvale3isnxs", CPENS (6, C9, C2, 5), F_HASXT | F_ARCHEXT },
    { "vae3isnxs", CPENS (6, C9, C3, 1), F_HASXT },
    { "vale3isnxs", CPENS (6, C9, C3, 5), F_HASXT },
    { "rvae3osnxs", CPENS (6, C9, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvale3osnxs", CPENS (6, C9, C5, 5), F_HASXT | F_ARCHEXT },
    { "rvae3nxs", CPENS (6, C9, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvale3nxs", CPENS (6, C9, C6, 5), F_HASXT | F_ARCHEXT },
    { "vae3nxs", CPENS (6, C9, C7, 1), F_HASXT },
    { "vale3nxs", CPENS (6, C9, C7, 5), F_HASXT },

    /* Sentinel: null name marks the end of the table.  */
    { 0, CPENS(0,0,0,0), 0 }
};
/* System-register operands for the speculation-restriction instructions
   (CFP, DVP, CPP).  */
const aarch64_sys_ins_reg aarch64_sys_regs_sr[] =
{
    /* RCTX is somewhat unique in a way that it has different values
       (op2) based on the instruction in which it is used (cfp/dvp/cpp).
       Thus op2 is masked out and instead encoded directly in the
       aarch64_opcode_table entries for the respective instructions.  */
    { "rctx",   CPENS(3,C7,C3,0), F_HASXT | F_ARCHEXT | F_REG_WRITE}, /* WO */

    { 0,       CPENS(0,0,0,0), 0 }  /* Table terminator.  */
};
5104 bool
5105 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
5107 return (sys_ins_reg->flags & F_HASXT) != 0;
/* Return true if the system instruction operand described by REG_NAME /
   REG_VALUE / REG_FLAGS / REG_FEATURES is available on a CPU with the
   feature set FEATURES.  Operands without F_ARCHEXT are always available
   (modulo the Armv8-R EL3 restriction); the rest are matched either by
   REG_FEATURES or by the explicit encoding lists below.  */
extern bool
aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
				 const char *reg_name,
				 aarch64_insn reg_value,
				 uint32_t reg_flags,
				 const aarch64_feature_set *reg_features)
{
  /* Armv8-R has no EL3.  */
  if (AARCH64_CPU_HAS_FEATURE (features, V8R))
    {
      const char *suffix = strrchr (reg_name, '_');
      if (suffix && !strcmp (suffix, "_el3"))
	return false;
    }

  /* Operands not tied to an architecture extension are always OK.  */
  if (!(reg_flags & F_ARCHEXT))
    return true;

  /* Operands whose required feature set is recorded in the table.  */
  if (reg_features
      && AARCH64_CPU_HAS_ALL_FEATURES (features, *reg_features))
    return true;

  /* ARMv8.4 TLB instructions.  */
  if ((reg_value == CPENS (0, C8, C1, 0)
       || reg_value == CPENS (0, C8, C1, 1)
       || reg_value == CPENS (0, C8, C1, 2)
       || reg_value == CPENS (0, C8, C1, 3)
       || reg_value == CPENS (0, C8, C1, 5)
       || reg_value == CPENS (0, C8, C1, 7)
       || reg_value == CPENS (4, C8, C4, 0)
       || reg_value == CPENS (4, C8, C4, 4)
       || reg_value == CPENS (4, C8, C1, 1)
       || reg_value == CPENS (4, C8, C1, 5)
       || reg_value == CPENS (4, C8, C1, 6)
       || reg_value == CPENS (6, C8, C1, 1)
       || reg_value == CPENS (6, C8, C1, 5)
       || reg_value == CPENS (4, C8, C1, 0)
       || reg_value == CPENS (4, C8, C1, 4)
       || reg_value == CPENS (6, C8, C1, 0)
       || reg_value == CPENS (0, C8, C6, 1)
       || reg_value == CPENS (0, C8, C6, 3)
       || reg_value == CPENS (0, C8, C6, 5)
       || reg_value == CPENS (0, C8, C6, 7)
       || reg_value == CPENS (0, C8, C2, 1)
       || reg_value == CPENS (0, C8, C2, 3)
       || reg_value == CPENS (0, C8, C2, 5)
       || reg_value == CPENS (0, C8, C2, 7)
       || reg_value == CPENS (0, C8, C5, 1)
       || reg_value == CPENS (0, C8, C5, 3)
       || reg_value == CPENS (0, C8, C5, 5)
       || reg_value == CPENS (0, C8, C5, 7)
       || reg_value == CPENS (4, C8, C0, 2)
       || reg_value == CPENS (4, C8, C0, 6)
       || reg_value == CPENS (4, C8, C4, 2)
       || reg_value == CPENS (4, C8, C4, 6)
       || reg_value == CPENS (4, C8, C4, 3)
       || reg_value == CPENS (4, C8, C4, 7)
       || reg_value == CPENS (4, C8, C6, 1)
       || reg_value == CPENS (4, C8, C6, 5)
       || reg_value == CPENS (4, C8, C2, 1)
       || reg_value == CPENS (4, C8, C2, 5)
       || reg_value == CPENS (4, C8, C5, 1)
       || reg_value == CPENS (4, C8, C5, 5)
       || reg_value == CPENS (6, C8, C6, 1)
       || reg_value == CPENS (6, C8, C6, 5)
       || reg_value == CPENS (6, C8, C2, 1)
       || reg_value == CPENS (6, C8, C2, 5)
       || reg_value == CPENS (6, C8, C5, 1)
       || reg_value == CPENS (6, C8, C5, 5))
      && AARCH64_CPU_HAS_FEATURE (features, V8_4A))
    return true;

  /* DC CVAP.  Values are from aarch64_sys_regs_dc.  */
  if (reg_value == CPENS (3, C7, C12, 1)
      && AARCH64_CPU_HAS_FEATURE (features, V8_2A))
    return true;

  /* DC CVADP.  Values are from aarch64_sys_regs_dc.  */
  if (reg_value == CPENS (3, C7, C13, 1)
      && AARCH64_CPU_HAS_FEATURE (features, CVADP))
    return true;

  /* DC <dc_op> for ARMv8.5-A Memory Tagging Extension.  */
  if ((reg_value == CPENS (0, C7, C6, 3)
       || reg_value == CPENS (0, C7, C6, 4)
       || reg_value == CPENS (0, C7, C10, 4)
       || reg_value == CPENS (0, C7, C14, 4)
       || reg_value == CPENS (3, C7, C10, 3)
       || reg_value == CPENS (3, C7, C12, 3)
       || reg_value == CPENS (3, C7, C13, 3)
       || reg_value == CPENS (3, C7, C14, 3)
       || reg_value == CPENS (3, C7, C4, 3)
       || reg_value == CPENS (0, C7, C6, 5)
       || reg_value == CPENS (0, C7, C6, 6)
       || reg_value == CPENS (0, C7, C10, 6)
       || reg_value == CPENS (0, C7, C14, 6)
       || reg_value == CPENS (3, C7, C10, 5)
       || reg_value == CPENS (3, C7, C12, 5)
       || reg_value == CPENS (3, C7, C13, 5)
       || reg_value == CPENS (3, C7, C14, 5)
       || reg_value == CPENS (3, C7, C4, 4))
      && AARCH64_CPU_HAS_FEATURE (features, MEMTAG))
    return true;

  /* TLBI instructions with the nXS qualifier (C9 encoding space);
     these require FEAT_XS.  */
  if ((reg_value == CPENS (0, C9, C1, 1)
       || reg_value == CPENS (0, C9, C1, 3)
       || reg_value == CPENS (0, C9, C1, 5)
       || reg_value == CPENS (0, C9, C1, 7)
       || reg_value == CPENS (0, C9, C2, 1)
       || reg_value == CPENS (0, C9, C2, 3)
       || reg_value == CPENS (0, C9, C2, 5)
       || reg_value == CPENS (0, C9, C2, 7)
       || reg_value == CPENS (0, C9, C3, 1)
       || reg_value == CPENS (0, C9, C3, 3)
       || reg_value == CPENS (0, C9, C3, 5)
       || reg_value == CPENS (0, C9, C3, 7)
       || reg_value == CPENS (0, C9, C5, 1)
       || reg_value == CPENS (0, C9, C5, 3)
       || reg_value == CPENS (0, C9, C5, 5)
       || reg_value == CPENS (0, C9, C5, 7)
       || reg_value == CPENS (0, C9, C6, 1)
       || reg_value == CPENS (0, C9, C6, 3)
       || reg_value == CPENS (0, C9, C6, 5)
       || reg_value == CPENS (0, C9, C6, 7)
       || reg_value == CPENS (0, C9, C7, 1)
       || reg_value == CPENS (0, C9, C7, 3)
       || reg_value == CPENS (0, C9, C7, 5)
       || reg_value == CPENS (0, C9, C7, 7)
       || reg_value == CPENS (4, C9, C0, 1)
       || reg_value == CPENS (4, C9, C0, 2)
       || reg_value == CPENS (4, C9, C0, 5)
       || reg_value == CPENS (4, C9, C0, 6)
       || reg_value == CPENS (4, C9, C1, 1)
       || reg_value == CPENS (4, C9, C1, 5)
       || reg_value == CPENS (4, C9, C2, 1)
       || reg_value == CPENS (4, C9, C2, 5)
       || reg_value == CPENS (4, C9, C3, 1)
       || reg_value == CPENS (4, C9, C3, 5)
       || reg_value == CPENS (4, C9, C4, 0)
       || reg_value == CPENS (4, C9, C4, 1)
       || reg_value == CPENS (4, C9, C4, 2)
       || reg_value == CPENS (4, C9, C4, 3)
       || reg_value == CPENS (4, C9, C4, 4)
       || reg_value == CPENS (4, C9, C4, 5)
       || reg_value == CPENS (4, C9, C4, 6)
       || reg_value == CPENS (4, C9, C4, 7)
       || reg_value == CPENS (4, C9, C5, 1)
       || reg_value == CPENS (4, C9, C5, 5)
       || reg_value == CPENS (4, C9, C6, 1)
       || reg_value == CPENS (4, C9, C6, 5)
       || reg_value == CPENS (4, C9, C7, 1)
       || reg_value == CPENS (4, C9, C7, 5)
       || reg_value == CPENS (6, C9, C1, 1)
       || reg_value == CPENS (6, C9, C1, 5)
       || reg_value == CPENS (6, C9, C2, 1)
       || reg_value == CPENS (6, C9, C2, 5)
       || reg_value == CPENS (6, C9, C3, 1)
       || reg_value == CPENS (6, C9, C3, 5)
       || reg_value == CPENS (6, C9, C5, 1)
       || reg_value == CPENS (6, C9, C5, 5)
       || reg_value == CPENS (6, C9, C6, 1)
       || reg_value == CPENS (6, C9, C6, 5)
       || reg_value == CPENS (6, C9, C7, 1)
       || reg_value == CPENS (6, C9, C7, 5))
      && AARCH64_CPU_HAS_FEATURE (features, XS))
    return true;

  /* AT S1E1RP, AT S1E1WP.  Values are from aarch64_sys_regs_at.  */
  if ((reg_value == CPENS (0, C7, C9, 0)
       || reg_value == CPENS (0, C7, C9, 1))
      && AARCH64_CPU_HAS_FEATURE (features, V8_2A))
    return true;

  /* CFP/DVP/CPP RCTX : Value are from aarch64_sys_regs_sr. */
  if (reg_value == CPENS (3, C7, C3, 0)
      && AARCH64_CPU_HAS_FEATURE (features, PREDRES))
    return true;

  /* Registers in the C13 encoding space guarded by FEAT_THE.  */
  if ((reg_value == CPENC (3,0,13,0,3)
       || reg_value == CPENC (3,0,13,0,6))
      && AARCH64_CPU_HAS_FEATURE (features, THE))
    return true;

  /* AT instructions guarded by FEAT_ATS1A; values are from
     aarch64_sys_regs_at.  */
  if ((reg_value == CPENS (0, C7, C9, 2)
       || reg_value == CPENS (4, C7, C9, 2)
       || reg_value == CPENS (6, C7, C9, 2))
      && AARCH64_CPU_HAS_FEATURE (features, ATS1A))
    return true;

  return false;
}
/* The CPENC/CPENS helper constants are no longer needed.  */
#undef C0
#undef C1
#undef C2
#undef C3
#undef C4
#undef C5
#undef C6
#undef C7
#undef C8
#undef C9
#undef C10
#undef C11
#undef C12
#undef C13
#undef C14
#undef C15

/* Extract bit number BT of encoded instruction INSN.  */
#define BIT(INSN,BT)     (((INSN) >> (BT)) & 1)
/* Extract the inclusive bit-field [HI:LO] of encoded instruction INSN.  */
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
5322 static enum err_type
5323 verify_ldpsw (const struct aarch64_inst *inst ATTRIBUTE_UNUSED,
5324 const aarch64_insn insn, bfd_vma pc ATTRIBUTE_UNUSED,
5325 bool encoding ATTRIBUTE_UNUSED,
5326 aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
5327 aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
5329 int t = BITS (insn, 4, 0);
5330 int n = BITS (insn, 9, 5);
5331 int t2 = BITS (insn, 14, 10);
5333 if (BIT (insn, 23))
5335 /* Write back enabled. */
5336 if ((t == n || t2 == n) && n != 31)
5337 return ERR_UND;
5340 if (BIT (insn, 22))
5342 /* Load */
5343 if (t == t2)
5344 return ERR_UND;
5347 return ERR_OK;
5350 /* Verifier for vector by element 3 operands functions where the
5351 conditions `if sz:L == 11 then UNDEFINED` holds. */
5353 static enum err_type
5354 verify_elem_sd (const struct aarch64_inst *inst, const aarch64_insn insn,
5355 bfd_vma pc ATTRIBUTE_UNUSED, bool encoding,
5356 aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
5357 aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
5359 const aarch64_insn undef_pattern = 0x3;
5360 aarch64_insn value;
5362 assert (inst->opcode);
5363 assert (inst->opcode->operands[2] == AARCH64_OPND_Em);
5364 value = encoding ? inst->value : insn;
5365 assert (value);
5367 if (undef_pattern == extract_fields (value, 0, 2, FLD_sz, FLD_L))
5368 return ERR_UND;
5370 return ERR_OK;
5373 /* Check an instruction that takes three register operands and that
5374 requires the register numbers to be distinct from one another. */
5376 static enum err_type
5377 verify_three_different_regs (const struct aarch64_inst *inst,
5378 const aarch64_insn insn ATTRIBUTE_UNUSED,
5379 bfd_vma pc ATTRIBUTE_UNUSED,
5380 bool encoding ATTRIBUTE_UNUSED,
5381 aarch64_operand_error *mismatch_detail
5382 ATTRIBUTE_UNUSED,
5383 aarch64_instr_sequence *insn_sequence
5384 ATTRIBUTE_UNUSED)
5386 int rd, rs, rn;
5388 rd = inst->operands[0].reg.regno;
5389 rs = inst->operands[1].reg.regno;
5390 rn = inst->operands[2].reg.regno;
5391 if (rd == rs || rd == rn || rs == rn)
5393 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5394 mismatch_detail->error
5395 = _("the three register operands must be distinct from one another");
5396 mismatch_detail->index = -1;
5397 return ERR_UND;
5400 return ERR_OK;
5403 /* Add INST to the end of INSN_SEQUENCE. */
5405 static void
5406 add_insn_to_sequence (const struct aarch64_inst *inst,
5407 aarch64_instr_sequence *insn_sequence)
5409 insn_sequence->instr[insn_sequence->num_added_insns++] = *inst;
5412 /* Initialize an instruction sequence insn_sequence with the instruction INST.
5413 If INST is NULL the given insn_sequence is cleared and the sequence is left
5414 uninitialized. */
5416 void
5417 init_insn_sequence (const struct aarch64_inst *inst,
5418 aarch64_instr_sequence *insn_sequence)
5420 int num_req_entries = 0;
5422 if (insn_sequence->instr)
5424 XDELETE (insn_sequence->instr);
5425 insn_sequence->instr = NULL;
5428 /* Handle all the cases here. May need to think of something smarter than
5429 a giant if/else chain if this grows. At that time, a lookup table may be
5430 best. */
5431 if (inst && inst->opcode->constraints & C_SCAN_MOVPRFX)
5432 num_req_entries = 1;
5433 if (inst && (inst->opcode->constraints & C_SCAN_MOPS_PME) == C_SCAN_MOPS_P)
5434 num_req_entries = 2;
5436 insn_sequence->num_added_insns = 0;
5437 insn_sequence->num_allocated_insns = num_req_entries;
5439 if (num_req_entries != 0)
5441 insn_sequence->instr = XCNEWVEC (aarch64_inst, num_req_entries);
5442 add_insn_to_sequence (inst, insn_sequence);
/* Subroutine of verify_constraints.  Check whether the instruction
   is part of a MOPS P/M/E sequence and, if so, whether sequencing
   expectations are met.  Return true if the check passes, otherwise
   describe the problem in MISMATCH_DETAIL.

   IS_NEW_SECTION is true if INST is assumed to start a new section.
   The other arguments are as for verify_constraints.

   This relies on the P, M and E variants of a MOPS operation being
   consecutive entries in the opcode table, so that `opcode - 1' /
   `opcode[1]' name the expected predecessor/successor.  */

static bool
verify_mops_pme_sequence (const struct aarch64_inst *inst,
			  bool is_new_section,
			  aarch64_operand_error *mismatch_detail,
			  aarch64_instr_sequence *insn_sequence)
{
  const struct aarch64_opcode *opcode;
  const struct aarch64_inst *prev_insn;
  int i;

  opcode = inst->opcode;
  /* The most recently added instruction, if any, is the one INST must
     follow on from.  */
  if (insn_sequence->instr)
    prev_insn = insn_sequence->instr + (insn_sequence->num_added_insns - 1);
  else
    prev_insn = NULL;

  /* A P/M instruction was pending but INST is not the next opcode in
     the P/M/E triple.  */
  if (prev_insn
      && (prev_insn->opcode->constraints & C_SCAN_MOPS_PME)
      && prev_insn->opcode != opcode - 1)
    {
      mismatch_detail->kind = AARCH64_OPDE_EXPECTED_A_AFTER_B;
      mismatch_detail->error = NULL;
      mismatch_detail->index = -1;
      mismatch_detail->data[0].s = prev_insn->opcode[1].name;
      mismatch_detail->data[1].s = prev_insn->opcode->name;
      mismatch_detail->non_fatal = true;
      return false;
    }

  if (opcode->constraints & C_SCAN_MOPS_PME)
    {
      /* INST is an M or E instruction: it must directly follow the
	 previous member of the triple, within the same section.  */
      if (is_new_section || !prev_insn || prev_insn->opcode != opcode - 1)
	{
	  mismatch_detail->kind = AARCH64_OPDE_A_SHOULD_FOLLOW_B;
	  mismatch_detail->error = NULL;
	  mismatch_detail->index = -1;
	  mismatch_detail->data[0].s = opcode->name;
	  mismatch_detail->data[1].s = opcode[-1].name;
	  mismatch_detail->non_fatal = true;
	  return false;
	}

      /* The address and size registers must be stable across the
	 sequence.  */
      for (i = 0; i < 3; ++i)
	/* There's no specific requirement for the data register to be
	   the same between consecutive SET* instructions.  */
	if ((opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rd
	     || opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rs
	     || opcode->operands[i] == AARCH64_OPND_MOPS_WB_Rn)
	    && prev_insn->operands[i].reg.regno != inst->operands[i].reg.regno)
	  {
	    mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	    if (opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rd)
	      mismatch_detail->error = _("destination register differs from "
					 "preceding instruction");
	    else if (opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rs)
	      mismatch_detail->error = _("source register differs from "
					 "preceding instruction");
	    else
	      mismatch_detail->error = _("size register differs from "
					 "preceding instruction");
	    mismatch_detail->index = i;
	    mismatch_detail->non_fatal = true;
	    return false;
	  }
    }

  return true;
}
/* This function verifies that the instruction INST adheres to its specified
   constraints.  If it does then ERR_OK is returned, if not then ERR_VFI is
   returned and MISMATCH_DETAIL contains the reason why verification failed.

   The function is called both during assembly and disassembly.  If assembling
   then ENCODING will be TRUE, else FALSE.  If dissassembling PC will be set
   and will contain the PC of the current instruction w.r.t to the section.

   If ENCODING and PC=0 then you are at a start of a section.  The constraints
   are verified against the given state insn_sequence which is updated as it
   transitions through the verification.  */

enum err_type
verify_constraints (const struct aarch64_inst *inst,
		    const aarch64_insn insn ATTRIBUTE_UNUSED,
		    bfd_vma pc,
		    bool encoding,
		    aarch64_operand_error *mismatch_detail,
		    aarch64_instr_sequence *insn_sequence)
{
  assert (inst);
  assert (inst->opcode);

  const struct aarch64_opcode *opcode = inst->opcode;
  /* Fast path: no constraints on INST and no open sequence to check.  */
  if (!opcode->constraints && !insn_sequence->instr)
    return ERR_OK;

  assert (insn_sequence);

  enum err_type res = ERR_OK;

  /* This instruction puts a constraint on the insn_sequence.  */
  if (opcode->flags & F_SCAN)
    {
      /* Opening a new sequence while another is still pending is itself
	 a (non-fatal) violation.  */
      if (insn_sequence->instr)
	{
	  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	  mismatch_detail->error = _("instruction opens new dependency "
				     "sequence without ending previous one");
	  mismatch_detail->index = -1;
	  mismatch_detail->non_fatal = true;
	  res = ERR_VFI;
	}

      init_insn_sequence (inst, insn_sequence);
      return res;
    }

  /* Only when disassembling and PC is 0 do we know a new section starts.  */
  bool is_new_section = (!encoding && pc == 0);
  if (!verify_mops_pme_sequence (inst, is_new_section, mismatch_detail,
				 insn_sequence))
    {
      res = ERR_VFI;
      /* An M instruction may still be followed by its E partner, so only
	 reset the sequence for non-M members of the triple.  */
      if ((opcode->constraints & C_SCAN_MOPS_PME) != C_SCAN_MOPS_M)
	init_insn_sequence (NULL, insn_sequence);
    }

  /* Verify constraints on an existing sequence.  */
  if (insn_sequence->instr)
    {
      const struct aarch64_opcode* inst_opcode = insn_sequence->instr->opcode;
      /* If we're decoding and we hit PC=0 with an open sequence then we haven't
	 closed a previous one that we should have.  */
      if (is_new_section && res == ERR_OK)
	{
	  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	  mismatch_detail->error = _("previous `movprfx' sequence not closed");
	  mismatch_detail->index = -1;
	  mismatch_detail->non_fatal = true;
	  res = ERR_VFI;
	  /* Reset the sequence.  */
	  init_insn_sequence (NULL, insn_sequence);
	  return res;
	}

      /* Validate C_SCAN_MOVPRFX constraints.  Move this to a lookup table.  */
      if (inst_opcode->constraints & C_SCAN_MOVPRFX)
	{
	  /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
	     instruction for better error messages.  */
	  if (!opcode->avariant
	      || (!AARCH64_CPU_HAS_FEATURE (*opcode->avariant, SVE)
		  && !AARCH64_CPU_HAS_FEATURE (*opcode->avariant, SVE2)))
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("SVE instruction expected after "
					 "`movprfx'");
	      mismatch_detail->index = -1;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
	     instruction that is allowed to be used with a MOVPRFX.  */
	  if (!(opcode->constraints & C_SCAN_MOVPRFX))
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("SVE `movprfx' compatible instruction "
					 "expected");
	      mismatch_detail->index = -1;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Next check for usage of the predicate register.  */
	  aarch64_opnd_info blk_dest = insn_sequence->instr->operands[0];
	  aarch64_opnd_info blk_pred, inst_pred;
	  memset (&blk_pred, 0, sizeof (aarch64_opnd_info));
	  memset (&inst_pred, 0, sizeof (aarch64_opnd_info));
	  bool predicated = false;
	  assert (blk_dest.type == AARCH64_OPND_SVE_Zd);

	  /* Determine if the movprfx instruction used is predicated or not.  */
	  if (insn_sequence->instr->operands[1].type == AARCH64_OPND_SVE_Pg3)
	    {
	      predicated = true;
	      blk_pred = insn_sequence->instr->operands[1];
	    }

	  unsigned char max_elem_size = 0;
	  unsigned char current_elem_size;
	  int num_op_used = 0, last_op_usage = 0;
	  int i, inst_pred_idx = -1;
	  int num_ops = aarch64_num_of_operands (opcode);
	  /* Scan INST's operands: count uses of the movprfx destination
	     register, track the widest element size seen and remember any
	     governing predicate operand.  */
	  for (i = 0; i < num_ops; i++)
	    {
	      aarch64_opnd_info inst_op = inst->operands[i];
	      switch (inst_op.type)
		{
		  case AARCH64_OPND_SVE_Zd:
		  case AARCH64_OPND_SVE_Zm_5:
		  case AARCH64_OPND_SVE_Zm_16:
		  case AARCH64_OPND_SVE_Zn:
		  case AARCH64_OPND_SVE_Zt:
		  case AARCH64_OPND_SVE_Vm:
		  case AARCH64_OPND_SVE_Vn:
		  case AARCH64_OPND_Va:
		  case AARCH64_OPND_Vn:
		  case AARCH64_OPND_Vm:
		  case AARCH64_OPND_Sn:
		  case AARCH64_OPND_Sm:
		    if (inst_op.reg.regno == blk_dest.reg.regno)
		      {
			num_op_used++;
			last_op_usage = i;
		      }
		    current_elem_size
		      = aarch64_get_qualifier_esize (inst_op.qualifier);
		    if (current_elem_size > max_elem_size)
		      max_elem_size = current_elem_size;
		    break;
		  case AARCH64_OPND_SVE_Pd:
		  case AARCH64_OPND_SVE_Pg3:
		  case AARCH64_OPND_SVE_Pg4_5:
		  case AARCH64_OPND_SVE_Pg4_10:
		  case AARCH64_OPND_SVE_Pg4_16:
		  case AARCH64_OPND_SVE_Pm:
		  case AARCH64_OPND_SVE_Pn:
		  case AARCH64_OPND_SVE_Pt:
		  case AARCH64_OPND_SME_Pm:
		    inst_pred = inst_op;
		    inst_pred_idx = i;
		    break;
		  default:
		    break;
		}
	    }

	  assert (max_elem_size != 0);
	  aarch64_opnd_info inst_dest = inst->operands[0];
	  /* Determine the size that should be used to compare against the
	     movprfx size.  */
	  current_elem_size
	    = opcode->constraints & C_MAX_ELEM
	      ? max_elem_size
	      : aarch64_get_qualifier_esize (inst_dest.qualifier);

	  /* If movprfx is predicated do some extra checks.  */
	  if (predicated)
	    {
	      /* The instruction must be predicated.  */
	      if (inst_pred_idx < 0)
		{
		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
		  mismatch_detail->error = _("predicated instruction expected "
					     "after `movprfx'");
		  mismatch_detail->index = -1;
		  mismatch_detail->non_fatal = true;
		  res = ERR_VFI;
		  goto done;
		}

	      /* The instruction must have a merging predicate.  */
	      if (inst_pred.qualifier != AARCH64_OPND_QLF_P_M)
		{
		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
		  mismatch_detail->error = _("merging predicate expected due "
					     "to preceding `movprfx'");
		  mismatch_detail->index = inst_pred_idx;
		  mismatch_detail->non_fatal = true;
		  res = ERR_VFI;
		  goto done;
		}

	      /* The same register must be used in instruction.  */
	      if (blk_pred.reg.regno != inst_pred.reg.regno)
		{
		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
		  mismatch_detail->error = _("predicate register differs "
					     "from that in preceding "
					     "`movprfx'");
		  mismatch_detail->index = inst_pred_idx;
		  mismatch_detail->non_fatal = true;
		  res = ERR_VFI;
		  goto done;
		}
	    }

	  /* Destructive operations by definition must allow one usage of the
	     same register.  */
	  int allowed_usage
	    = aarch64_is_destructive_by_operands (opcode) ? 2 : 1;

	  /* Operand is not used at all.  */
	  if (num_op_used == 0)
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("output register of preceding "
					 "`movprfx' not used in current "
					 "instruction");
	      mismatch_detail->index = 0;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* We now know it's used, now determine exactly where it's used.  */
	  if (blk_dest.reg.regno != inst_dest.reg.regno)
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("output register of preceding "
					 "`movprfx' expected as output");
	      mismatch_detail->index = 0;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Operand used more than allowed for the specific opcode type.  */
	  if (num_op_used > allowed_usage)
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("output register of preceding "
					 "`movprfx' used as input");
	      mismatch_detail->index = last_op_usage;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Now the only thing left is the qualifiers checks.  The register
	     must have the same maximum element size.  */
	  if (inst_dest.qualifier
	      && blk_dest.qualifier
	      && current_elem_size
		 != aarch64_get_qualifier_esize (blk_dest.qualifier))
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("register size not compatible with "
					 "previous `movprfx'");
	      mismatch_detail->index = 0;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }
	}

    done:
      if (insn_sequence->num_added_insns == insn_sequence->num_allocated_insns)
	/* We've checked the last instruction in the sequence and so
	   don't need the sequence any more.  */
	init_insn_sequence (NULL, insn_sequence);
      else
	add_insn_to_sequence (inst, insn_sequence);
    }

  return res;
}
/* Return true if VALUE cannot be moved into an SVE register using DUP
   (with any element size, not just ESIZE) and if using DUPM would
   therefore be OK.  ESIZE is the number of bytes in the immediate.  */

bool
aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
{
  int64_t narrowed = (int64_t) uvalue;
  /* Bits above the low ESIZE bytes (two half-shifts avoid an UB shift
     by 64 when ESIZE is 8).  */
  uint64_t high_mask = (uint64_t) -1 << (esize * 4) << (esize * 4);

  /* The value must be zero-extended or sign-extended from ESIZE bytes.  */
  if ((uvalue & ~high_mask) != uvalue && (uvalue | high_mask) != uvalue)
    return false;

  /* Narrow the value as far as its replication pattern allows; a value
     that replicates down to a single byte is always handled by DUP.  */
  if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
    {
      narrowed = (int32_t) uvalue;
      if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
	{
	  narrowed = (int16_t) uvalue;
	  if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
	    return false;
	}
    }

  /* DUP can also take an 8-bit immediate shifted left by 8.  */
  if ((narrowed & 0xff) == 0)
    narrowed /= 256;
  return narrowed < -128 || narrowed >= 128;
}
5842 /* Return true if a CPU with the AARCH64_FEATURE_* bits in CPU_VARIANT
5843 supports the instruction described by INST. */
5845 bool
5846 aarch64_cpu_supports_inst_p (aarch64_feature_set cpu_variant,
5847 aarch64_inst *inst)
5849 if (!inst->opcode->avariant
5850 || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *inst->opcode->avariant))
5851 return false;
5853 if (inst->opcode->iclass == sme_fp_sd
5854 && inst->operands[0].qualifier == AARCH64_OPND_QLF_S_D
5855 && !AARCH64_CPU_HAS_FEATURE (cpu_variant, SME_F64F64))
5856 return false;
5858 if (inst->opcode->iclass == sme_int_sd
5859 && inst->operands[0].qualifier == AARCH64_OPND_QLF_S_D
5860 && !AARCH64_CPU_HAS_FEATURE (cpu_variant, SME_I16I64))
5861 return false;
5863 return true;
5866 /* Include the opcode description table as well as the operand description
5867 table. */
5868 #define VERIFIER(x) verify_##x
5869 #include "aarch64-tbl.h"