# AArch32 VFP instruction descriptions (conditional insns)
#
# Copyright (c) 2019 Linaro, Ltd
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <http://www.gnu.org/licenses/>.

# This file is processed by scripts/decodetree.py
#
# Encodings for the conditional VFP instructions are here:
# generally anything matching A32
#  cccc 11.. .... .... .... 101. .... ....
# and T32
#  1110 110. .... .... .... 101. .... ....
#  1110 1110 .... .... .... 101. .... ....
# (but those patterns might also cover some Neon instructions,
# which do not live in this file.)
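
# For example, the A32 encoding of "vadd.f64 d0, d1, d2" (0xee310b02,
# given purely as an illustration) has cccc=1110, bits [27:26]=11 and
# bits [11:9]=101, so it falls into this space and is matched by the
# VADD_dp pattern below.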

# VFP registers have an odd encoding with a four-bit field
# and a one-bit field which are assembled in different orders
# depending on whether the register is double or single precision.
# Each individual instruction function must do the checks for
# "double register selected but CPU does not have double support"
# and "double register number has bit 4 set but CPU does not
# support D16-D31" (which should UNDEF).
# Fields for the Vm/Vn/Vd register numbers and the VMOV scalar
# index and immediate.
%vm_dp  5:1 0:4
%vm_sp  0:4 5:1
%vn_dp  7:1 16:4
%vn_sp  16:4 7:1
%vd_dp  22:1 12:4
%vd_sp  12:4 22:1

%vmov_idx_b 21:1 5:2
%vmov_idx_h 21:1 6:1

%vmov_imm 16:4 0:4
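
# As a worked example: S19 (0b10011) is encoded with Vd=1001 in
# bits [15:12] and 1 in bit [22] (%vd_sp puts the four-bit field in
# the high bits), while D19 puts 1 in bit [22] and Vd=0011 in
# bits [15:12] (%vd_dp puts the one-bit field on top), so the
# extracted %vd_dp value is (insn[22] << 4) | insn[15:12].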

@vfp_dnm_s ................................ vm=%vm_sp vn=%vn_sp vd=%vd_sp
@vfp_dnm_d ................................ vm=%vm_dp vn=%vn_dp vd=%vd_dp

@vfp_dm_ss ................................ vm=%vm_sp vd=%vd_sp
@vfp_dm_dd ................................ vm=%vm_dp vd=%vd_dp
@vfp_dm_ds ................................ vm=%vm_sp vd=%vd_dp
@vfp_dm_sd ................................ vm=%vm_dp vd=%vd_sp

# VMOV scalar to general-purpose register; note that this does
# include some Neon cases.
VMOV_to_gp ---- 1110 u:1 1. 1 .... rt:4 1011 ... 1 0000 \
             vn=%vn_dp size=0 index=%vmov_idx_b
VMOV_to_gp ---- 1110 u:1 0. 1 .... rt:4 1011 ..1 1 0000 \
             vn=%vn_dp size=1 index=%vmov_idx_h
VMOV_to_gp ---- 1110 0 0 index:1 1 .... rt:4 1011 .00 1 0000 \
             vn=%vn_dp size=2

VMOV_from_gp ---- 1110 0 1. 0 .... rt:4 1011 ... 1 0000 \
             vn=%vn_dp size=0 index=%vmov_idx_b
VMOV_from_gp ---- 1110 0 0. 0 .... rt:4 1011 ..1 1 0000 \
             vn=%vn_dp size=1 index=%vmov_idx_h
VMOV_from_gp ---- 1110 0 0 index:1 0 .... rt:4 1011 .00 1 0000 \
             vn=%vn_dp size=2
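
# The index field above is assembled from the scattered opc1/opc2
# bits: e.g. an 8-bit access to element 7 has insn[21]=1 and
# insn[6:5]=0b11, so %vmov_idx_b yields index=7.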

VDUP ---- 1110 1 b:1 q:1 0 .... rt:4 1011 . 0 e:1 1 0000 \
             vn=%vn_dp

VMSR_VMRS ---- 1110 111 l:1 reg:4 rt:4 1010 0001 0000
VMOV_half ---- 1110 000 l:1 .... rt:4 1001 . 001 0000 vn=%vn_sp
VMOV_single ---- 1110 000 l:1 .... rt:4 1010 . 001 0000 vn=%vn_sp
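
# As a worked example, "vmrs APSR_nzcv, FPSCR" (0xeef1fa10) matches
# VMSR_VMRS with l=1, reg=0001 (FPSCR) and rt=1111.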

VMOV_64_sp ---- 1100 010 op:1 rt2:4 rt:4 1010 00.1 .... vm=%vm_sp
VMOV_64_dp ---- 1100 010 op:1 rt2:4 rt:4 1011 00.1 .... vm=%vm_dp

VLDR_VSTR_hp ---- 1101 u:1 .0 l:1 rn:4 .... 1001 imm:8 vd=%vd_sp
VLDR_VSTR_sp ---- 1101 u:1 .0 l:1 rn:4 .... 1010 imm:8 vd=%vd_sp
VLDR_VSTR_dp ---- 1101 u:1 .0 l:1 rn:4 .... 1011 imm:8 vd=%vd_dp
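
# Architecturally the imm field above is an offset in units of the
# access size (scaled by 2 for hp and by 4 for sp/dp before use), and
# u selects whether it is added to or subtracted from rn.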

# We split the load/store multiple up into two patterns to avoid
# overlap with other insns in the "Advanced SIMD load/store and 64-bit move"
# grouping:
#   P=0 U=0 W=0 is 64-bit VMOV
#   P=1 W=0 is VLDR/VSTR
#   P=1 U=1 W=1 is UNDEF
# leaving P=0 U=1 W=x and P=1 U=0 W=1 for load/store multiple.
# These include FSTM/FLDM.
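# In other words, P=0 U=1 is the increment-after form (VLDMIA/VSTMIA,
# with writeback per w) and P=1 U=0 W=1 is the decrement-before form
# (VLDMDB/VSTMDB).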
VLDM_VSTM_sp ---- 1100 1 . w:1 l:1 rn:4 .... 1010 imm:8 \
             vd=%vd_sp p=0 u=1
VLDM_VSTM_dp ---- 1100 1 . w:1 l:1 rn:4 .... 1011 imm:8 \
             vd=%vd_dp p=0 u=1

VLDM_VSTM_sp ---- 1101 0.1 l:1 rn:4 .... 1010 imm:8 \
             vd=%vd_sp p=1 u=0 w=1
VLDM_VSTM_dp ---- 1101 0.1 l:1 rn:4 .... 1011 imm:8 \
             vd=%vd_dp p=1 u=0 w=1

# 3-register VFP data-processing; bits [23,21:20,6] identify the operation.
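# For example, VMLA below has [23,21:20,6] = 0,00,0; VMLS is the same
# encoding with bit 6 set; VDIV is 1,00,0.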
VMLA_hp ---- 1110 0.00 .... .... 1001 .0.0 .... @vfp_dnm_s
VMLA_sp ---- 1110 0.00 .... .... 1010 .0.0 .... @vfp_dnm_s
VMLA_dp ---- 1110 0.00 .... .... 1011 .0.0 .... @vfp_dnm_d

VMLS_hp ---- 1110 0.00 .... .... 1001 .1.0 .... @vfp_dnm_s
VMLS_sp ---- 1110 0.00 .... .... 1010 .1.0 .... @vfp_dnm_s
VMLS_dp ---- 1110 0.00 .... .... 1011 .1.0 .... @vfp_dnm_d

VNMLS_hp ---- 1110 0.01 .... .... 1001 .0.0 .... @vfp_dnm_s
VNMLS_sp ---- 1110 0.01 .... .... 1010 .0.0 .... @vfp_dnm_s
VNMLS_dp ---- 1110 0.01 .... .... 1011 .0.0 .... @vfp_dnm_d

VNMLA_hp ---- 1110 0.01 .... .... 1001 .1.0 .... @vfp_dnm_s
VNMLA_sp ---- 1110 0.01 .... .... 1010 .1.0 .... @vfp_dnm_s
VNMLA_dp ---- 1110 0.01 .... .... 1011 .1.0 .... @vfp_dnm_d

VMUL_hp ---- 1110 0.10 .... .... 1001 .0.0 .... @vfp_dnm_s
VMUL_sp ---- 1110 0.10 .... .... 1010 .0.0 .... @vfp_dnm_s
VMUL_dp ---- 1110 0.10 .... .... 1011 .0.0 .... @vfp_dnm_d

VNMUL_hp ---- 1110 0.10 .... .... 1001 .1.0 .... @vfp_dnm_s
VNMUL_sp ---- 1110 0.10 .... .... 1010 .1.0 .... @vfp_dnm_s
VNMUL_dp ---- 1110 0.10 .... .... 1011 .1.0 .... @vfp_dnm_d

VADD_hp ---- 1110 0.11 .... .... 1001 .0.0 .... @vfp_dnm_s
VADD_sp ---- 1110 0.11 .... .... 1010 .0.0 .... @vfp_dnm_s
VADD_dp ---- 1110 0.11 .... .... 1011 .0.0 .... @vfp_dnm_d

VSUB_hp ---- 1110 0.11 .... .... 1001 .1.0 .... @vfp_dnm_s
VSUB_sp ---- 1110 0.11 .... .... 1010 .1.0 .... @vfp_dnm_s
VSUB_dp ---- 1110 0.11 .... .... 1011 .1.0 .... @vfp_dnm_d

VDIV_hp ---- 1110 1.00 .... .... 1001 .0.0 .... @vfp_dnm_s
VDIV_sp ---- 1110 1.00 .... .... 1010 .0.0 .... @vfp_dnm_s
VDIV_dp ---- 1110 1.00 .... .... 1011 .0.0 .... @vfp_dnm_d

VFMA_hp ---- 1110 1.10 .... .... 1001 .0. 0 .... @vfp_dnm_s
VFMS_hp ---- 1110 1.10 .... .... 1001 .1. 0 .... @vfp_dnm_s
VFNMA_hp ---- 1110 1.01 .... .... 1001 .0. 0 .... @vfp_dnm_s
VFNMS_hp ---- 1110 1.01 .... .... 1001 .1. 0 .... @vfp_dnm_s

VFMA_sp ---- 1110 1.10 .... .... 1010 .0. 0 .... @vfp_dnm_s
VFMS_sp ---- 1110 1.10 .... .... 1010 .1. 0 .... @vfp_dnm_s
VFNMA_sp ---- 1110 1.01 .... .... 1010 .0. 0 .... @vfp_dnm_s
VFNMS_sp ---- 1110 1.01 .... .... 1010 .1. 0 .... @vfp_dnm_s

VFMA_dp ---- 1110 1.10 .... .... 1011 .0.0 .... @vfp_dnm_d
VFMS_dp ---- 1110 1.10 .... .... 1011 .1.0 .... @vfp_dnm_d
VFNMA_dp ---- 1110 1.01 .... .... 1011 .0.0 .... @vfp_dnm_d
VFNMS_dp ---- 1110 1.01 .... .... 1011 .1.0 .... @vfp_dnm_d

VMOV_imm_hp ---- 1110 1.11 .... .... 1001 0000 .... \
             vd=%vd_sp imm=%vmov_imm
VMOV_imm_sp ---- 1110 1.11 .... .... 1010 0000 .... \
             vd=%vd_sp imm=%vmov_imm
VMOV_imm_dp ---- 1110 1.11 .... .... 1011 0000 .... \
             vd=%vd_dp imm=%vmov_imm

VMOV_reg_sp ---- 1110 1.11 0000 .... 1010 01.0 .... @vfp_dm_ss
VMOV_reg_dp ---- 1110 1.11 0000 .... 1011 01.0 .... @vfp_dm_dd

VABS_hp ---- 1110 1.11 0000 .... 1001 11.0 .... @vfp_dm_ss
VABS_sp ---- 1110 1.11 0000 .... 1010 11.0 .... @vfp_dm_ss
VABS_dp ---- 1110 1.11 0000 .... 1011 11.0 .... @vfp_dm_dd

VNEG_hp ---- 1110 1.11 0001 .... 1001 01.0 .... @vfp_dm_ss
VNEG_sp ---- 1110 1.11 0001 .... 1010 01.0 .... @vfp_dm_ss
VNEG_dp ---- 1110 1.11 0001 .... 1011 01.0 .... @vfp_dm_dd

VSQRT_hp ---- 1110 1.11 0001 .... 1001 11.0 .... @vfp_dm_ss
VSQRT_sp ---- 1110 1.11 0001 .... 1010 11.0 .... @vfp_dm_ss
VSQRT_dp ---- 1110 1.11 0001 .... 1011 11.0 .... @vfp_dm_dd
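
# For the VCMP patterns below, z=1 encodes the compare-with-zero form
# (the Vm field should then be zero) and e=1 selects the signalling
# VCMPE variant.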
VCMP_hp ---- 1110 1.11 010 z:1 .... 1001 e:1 1.0 .... \
             vd=%vd_sp vm=%vm_sp
VCMP_sp ---- 1110 1.11 010 z:1 .... 1010 e:1 1.0 .... \
             vd=%vd_sp vm=%vm_sp
VCMP_dp ---- 1110 1.11 010 z:1 .... 1011 e:1 1.0 .... \
             vd=%vd_dp vm=%vm_dp

# VCVTT and VCVTB from f16: Vd format depends on size bit; Vm is always vm_sp
VCVT_f32_f16 ---- 1110 1.11 0010 .... 1010 t:1 1.0 .... \
             vd=%vd_sp vm=%vm_sp
VCVT_f64_f16 ---- 1110 1.11 0010 .... 1011 t:1 1.0 .... \
             vd=%vd_dp vm=%vm_sp

# VCVTB and VCVTT to f16: Vd format is always vd_sp;
# Vm format depends on size bit
VCVT_b16_f32 ---- 1110 1.11 0011 .... 1001 t:1 1.0 .... \
             vd=%vd_sp vm=%vm_sp
VCVT_f16_f32 ---- 1110 1.11 0011 .... 1010 t:1 1.0 .... \
             vd=%vd_sp vm=%vm_sp
VCVT_f16_f64 ---- 1110 1.11 0011 .... 1011 t:1 1.0 .... \
             vd=%vd_sp vm=%vm_dp
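
# In the five half-precision conversions above, t selects the top
# (t=1, VCVTT) or bottom (t=0, VCVTB) 16 bits of the single-precision
# register holding the f16/bf16 value.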

VRINTR_hp ---- 1110 1.11 0110 .... 1001 01.0 .... @vfp_dm_ss
VRINTR_sp ---- 1110 1.11 0110 .... 1010 01.0 .... @vfp_dm_ss
VRINTR_dp ---- 1110 1.11 0110 .... 1011 01.0 .... @vfp_dm_dd

VRINTZ_hp ---- 1110 1.11 0110 .... 1001 11.0 .... @vfp_dm_ss
VRINTZ_sp ---- 1110 1.11 0110 .... 1010 11.0 .... @vfp_dm_ss
VRINTZ_dp ---- 1110 1.11 0110 .... 1011 11.0 .... @vfp_dm_dd

VRINTX_hp ---- 1110 1.11 0111 .... 1001 01.0 .... @vfp_dm_ss
VRINTX_sp ---- 1110 1.11 0111 .... 1010 01.0 .... @vfp_dm_ss
VRINTX_dp ---- 1110 1.11 0111 .... 1011 01.0 .... @vfp_dm_dd

# VCVT between single and double:
# Vm precision depends on size; Vd is its reverse
VCVT_sp ---- 1110 1.11 0111 .... 1010 11.0 .... @vfp_dm_ds
VCVT_dp ---- 1110 1.11 0111 .... 1011 11.0 .... @vfp_dm_sd

# VCVT from integer to floating point: Vm always single; Vd depends on size
VCVT_int_hp ---- 1110 1.11 1000 .... 1001 s:1 1.0 .... \
             vd=%vd_sp vm=%vm_sp
VCVT_int_sp ---- 1110 1.11 1000 .... 1010 s:1 1.0 .... \
             vd=%vd_sp vm=%vm_sp
VCVT_int_dp ---- 1110 1.11 1000 .... 1011 s:1 1.0 .... \
             vd=%vd_dp vm=%vm_sp

# VJCVT is always dp to sp
VJCVT ---- 1110 1.11 1001 .... 1011 11.0 .... @vfp_dm_sd

# VCVT between floating-point and fixed-point. The immediate value
# is in the same format as a Vm single-precision register number.
# We assemble bits 18 (op), 16 (u) and 7 (sx) into a single opc field
# for the convenience of the trans_VCVT_fix functions.
%vcvt_fix_op 18:1 16:1 7:1
VCVT_fix_hp ---- 1110 1.11 1.1. .... 1001 .1.0 .... \
             vd=%vd_sp imm=%vm_sp opc=%vcvt_fix_op
VCVT_fix_sp ---- 1110 1.11 1.1. .... 1010 .1.0 .... \
             vd=%vd_sp imm=%vm_sp opc=%vcvt_fix_op
VCVT_fix_dp ---- 1110 1.11 1.1. .... 1011 .1.0 .... \
             vd=%vd_dp imm=%vm_sp opc=%vcvt_fix_op
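
# Architecturally the fixed-point value has 16 or 32 bits (chosen by
# sx, the low bit of opc), and the number of fraction bits is that
# size minus the immediate.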

# VCVT float to integer (VCVT and VCVTR): Vd always single; Vm depends on size
VCVT_hp_int ---- 1110 1.11 110 s:1 .... 1001 rz:1 1.0 .... \
             vd=%vd_sp vm=%vm_sp
VCVT_sp_int ---- 1110 1.11 110 s:1 .... 1010 rz:1 1.0 .... \
             vd=%vd_sp vm=%vm_sp
VCVT_dp_int ---- 1110 1.11 110 s:1 .... 1011 rz:1 1.0 .... \
             vd=%vd_dp vm=%vm_dp
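
# In the patterns above, rz=1 is the round-towards-zero VCVT form and
# rz=0 is VCVTR, which rounds using the current FPSCR rounding mode.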