;; Instruction Classification for ARM for GNU compiler.

;; Copyright (C) 1991-2018 Free Software Foundation, Inc.
;; Contributed by ARM Ltd.

;; This file is part of GCC.

;; GCC is free software; you can redistribute it and/or modify it
;; under the terms of the GNU General Public License as published
;; by the Free Software Foundation; either version 3, or (at your
;; option) any later version.

;; GCC is distributed in the hope that it will be useful, but WITHOUT
;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
;; License for more details.

;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3. If not see
;; <http://www.gnu.org/licenses/>.
; TYPE attribute is used to classify instructions for use in scheduling.
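
; As a hedged illustration of how this attribute is consumed (the pattern and
; reservation below are sketches with hypothetical names, not definitions
; copied from arm.md or any tuning file): an insn pattern assigns one of the
; classifications listed in this file through the "type" attribute, and a
; pipeline description then keys its reservations off that attribute.
;
;   (define_insn "*sketch_addsi3"
;     [(set (match_operand:SI 0 "s_register_operand" "=r")
;           (plus:SI (match_operand:SI 1 "s_register_operand" "r")
;                    (match_operand:SI 2 "s_register_operand" "r")))]
;     "TARGET_32BIT"
;     "add%?\\t%0, %1, %2"
;     [(set_attr "type" "alu_sreg")])
;
;   (define_insn_reservation "sketch_alu" 2
;     (and (eq_attr "tune" "sketchcpu")        ; hypothetical tune value
;          (eq_attr "type" "alu_sreg,alu_imm"))
;     "sketch_pipe")                           ; hypothetical functional unit
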
; Instruction classification:

; adc_imm add/subtract with carry and with an immediate operand.
; adc_reg add/subtract with carry and no immediate operand.
; adcs_imm as adc_imm, setting condition flags.
; adcs_reg as adc_reg, setting condition flags.
; adr calculate address.
; alu_ext From ARMv8-A: any arithmetic instruction that has a
; sign- or zero-extended source operand.
; alu_imm any arithmetic instruction that doesn't have a shifted
; operand and has an immediate operand. This
; excludes MOV, MVN and RSB(S) immediate.
; alu_sreg any arithmetic instruction that doesn't have a shifted
; or an immediate operand. This excludes
; MOV and MVN but includes MOVT. This also excludes
; DSP-kind instructions. This is also the default.
; alu_shift_imm any arithmetic instruction that has a source operand
; shifted by a constant. This excludes simple shifts.
; alu_shift_reg as alu_shift_imm, with the shift amount specified in a
; register.
; alu_dsp_reg any DSP-kind instruction like QSUB8.
; alus_ext From ARMv8-A: as alu_ext, setting condition flags.
; alus_imm as alu_imm, setting condition flags.
; alus_sreg as alu_sreg, setting condition flags.
; alus_shift_imm as alu_shift_imm, setting condition flags.
; alus_shift_reg as alu_shift_reg, setting condition flags.
; bfm bitfield move operation.
; bfx bitfield extract operation.
; block blockage insn; this blocks all functional units.
; call subroutine call.
; clz count leading zeros (CLZ).
; csel From ARMv8-A: conditional select.
; extend extend instruction (SXTB, SXTH, UXTB, UXTH).
; f_cvt conversion between float representations.
; f_cvtf2i conversion between float and integral types.
; f_cvti2f conversion between integral and float types.
; f_flag transfer of co-processor flags to the CPSR.
; f_load[d,s] double/single load from memory. Used for VFP unit.
; f_mcr transfer ARM reg to VFP reg.
; f_mcrr transfer two ARM regs to VFP reg.
; f_minmax[d,s] double/single floating point minimum/maximum.
; f_mrc transfer VFP reg to ARM reg.
; f_mrrc transfer VFP reg to two ARM regs.
; f_rint[d,s] double/single floating point round to integral.
; f_store[d,s] double/single store to memory. Used for VFP unit.
; fadd[d,s] double/single floating-point scalar addition.
; fccmp[d,s] From ARMv8-A: floating-point conditional compare.
; fcmp[d,s] double/single floating-point compare.
; fconst[d,s] double/single load immediate.
; fcsel From ARMv8-A: Floating-point conditional select.
; fdiv[d,s] double/single precision floating point division.
; ffarith[d,s] double/single floating point abs/neg/cpy.
; ffma[d,s] double/single floating point fused multiply-accumulate.
; float floating point arithmetic operation.
; fmac[d,s] double/single floating point multiply-accumulate.
; fmov floating point to floating point register move.
; fmul[d,s] double/single floating point multiply.
; fsqrt[d,s] double/single precision floating point square root.
; load_acq load-acquire.
; load_byte load 1 byte from memory.
; load_4 load 4 bytes from memory.
; load_8 load 8 bytes from memory.
; load_12 load 12 bytes from memory.
; load_16 load 16 bytes from memory.
; logic_imm any logical instruction that doesn't have a shifted
; operand and has an immediate operand.
; logic_reg any logical instruction that doesn't have a shifted
; operand or an immediate operand.
; logic_shift_imm any logical instruction that has a source operand
; shifted by a constant. This excludes simple shifts.
; logic_shift_reg as logic_shift_imm, with the shift amount specified in a
; register.
; logics_imm as logic_imm, setting condition flags.
; logics_reg as logic_reg, setting condition flags.
; logics_shift_imm as logic_shift_imm, setting condition flags.
; logics_shift_reg as logic_shift_reg, setting condition flags.
; mla integer multiply accumulate.
; mlas integer multiply accumulate, flag setting.
; mov_imm simple MOV instruction that moves an immediate to
; register. This includes MOVW, but not MOVT.
; mov_reg simple MOV instruction that moves a register to another
; register. This includes MOVW, but not MOVT.
; mov_shift simple MOV instruction, shifted operand by a constant.
; mov_shift_reg simple MOV instruction, shifted operand by a register.
; mrs system/special/co-processor register move.
; mul integer multiply.
; muls integer multiply, flag setting.
; multiple more than one instruction, candidate for future
; splitting, or better modeling.
; mvn_imm inverting move instruction, immediate.
; mvn_reg inverting move instruction, register.
; mvn_shift inverting move instruction, shifted operand by a constant.
; mvn_shift_reg inverting move instruction, shifted operand by a register.
; no_insn an insn which does not represent an instruction in the
; final output, thus having no impact on scheduling.
; rotate_imm rotate by immediate.
; sdiv signed division.
; shift_imm simple shift operation (LSL, LSR, ASR, ROR) with an
; immediate operand.
; shift_reg simple shift by a register.
; smlad signed multiply accumulate dual.
; smladx signed multiply accumulate dual reverse.
; smlal signed multiply accumulate long.
; smlald signed multiply accumulate long dual.
; smlals signed multiply accumulate long, flag setting.
; smlalxy signed multiply accumulate, 16x16-bit, 64-bit accumulate.
; smlawx signed multiply accumulate, 32x16-bit, 32-bit accumulate.
; smlawy signed multiply accumulate wide, 32x16-bit,
; 32-bit accumulate.
; smlaxy signed multiply accumulate, 16x16-bit, 32-bit accumulate.
; smlsd signed multiply subtract dual.
; smlsdx signed multiply subtract dual reverse.
; smlsld signed multiply subtract long dual.
; smmla signed most significant word multiply accumulate.
; smmul signed most significant word multiply.
; smmulr signed most significant word multiply, rounded.
; smuad signed dual multiply add.
; smuadx signed dual multiply add reverse.
; smull signed multiply long.
; smulls signed multiply long, flag setting.
; smulwy signed multiply wide, 32x16-bit, 32-bit result.
; smulxy signed multiply, 16x16-bit, 32-bit result.
; smusd signed dual multiply subtract.
; smusdx signed dual multiply subtract reverse.
; store_rel store-release.
; store_4 store 4 bytes to memory.
; store_8 store 8 bytes to memory.
; store_12 store 12 bytes to memory.
; store_16 store 16 bytes (or more) to memory.
; trap cause a trap in the kernel.
; udiv unsigned division.
; umaal unsigned multiply accumulate accumulate long.
; umlal unsigned multiply accumulate long.
; umlals unsigned multiply accumulate long, flag setting.
; umull unsigned multiply long.
; umulls unsigned multiply long, flag setting.
; untyped insn without type information; this is the default and an
; error case.
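
; As a hedged sketch (an illustrative pattern, not one copied from arm.md) of
; how the classifications above are attached to a multi-alternative pattern:
; each constraint alternative can carry its own value, so a register move, an
; immediate move and an inverted-immediate move of the same pattern are
; distinguished for scheduling purposes.
;
;   (define_insn "*sketch_movsi"
;     [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
;           (match_operand:SI 1 "reg_or_int_operand" "r,I,K"))]
;     "TARGET_32BIT"
;     "@
;      mov%?\\t%0, %1
;      mov%?\\t%0, %1
;      mvn%?\\t%0, #%B1"
;     [(set_attr "type" "mov_reg,mov_imm,mvn_imm")])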
; The classification below is for instructions used by the Wireless MMX
; Technology. Each attribute value is used to classify an instruction of the
; same name or family.
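; For instance (a hedged, illustrative fragment rather than a pattern copied
; from iwmmxt.md), an insn emitting the WADDW instruction would carry the
; classification of the corresponding name:
;
;   (define_insn "*sketch_iwmmxt_addv2si3"
;     [(set (match_operand:V2SI 0 "register_operand" "=y")
;           (plus:V2SI (match_operand:V2SI 1 "register_operand" "y")
;                      (match_operand:V2SI 2 "register_operand" "y")))]
;     "TARGET_REALLY_IWMMXT"
;     "waddw%?\\t%0, %1, %2"
;     [(set_attr "type" "wmmx_wadd")])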
; The classification below is for NEON instructions.

; neon_add_halve_narrow_q
; neon_sub_halve_narrow_q
; neon_compare_zero_q
; neon_reduc_add_long
; neon_reduc_add_acc_q
; neon_reduc_minmax_q
; neon_shift_imm_narrow_q
; neon_shift_imm_long
; neon_sat_shift_imm_q
; neon_sat_shift_imm_narrow_q
; neon_sat_shift_reg_q
; neon_mul_h_scalar_q
; neon_mul_s_scalar_q
; neon_mul_h_scalar_long
; neon_mul_s_scalar_long
; neon_sat_mul_b_long
; neon_sat_mul_h_long
; neon_sat_mul_s_long
; neon_sat_mul_h_scalar
; neon_sat_mul_h_scalar_q
; neon_sat_mul_s_scalar
; neon_sat_mul_s_scalar_q
; neon_sat_mul_h_scalar_long
; neon_sat_mul_s_scalar_long
; neon_mla_h_scalar_q
; neon_mla_s_scalar_q
; neon_mla_h_scalar_long
; neon_mla_s_scalar_long
; neon_sat_mla_b_long
; neon_sat_mla_h_long
; neon_sat_mla_s_long
; neon_sat_mla_h_scalar_long
; neon_sat_mla_s_scalar_long
; neon_load1_all_lanes
; neon_load1_all_lanes_q
; neon_load1_one_lane
; neon_load1_one_lane_q
; neon_load2_all_lanes
; neon_load2_all_lanes_q
; neon_load2_one_lane
; neon_load2_one_lane_q
; neon_load3_all_lanes
; neon_load3_all_lanes_q
; neon_load3_one_lane
; neon_load3_one_lane_q
; neon_load4_all_lanes
; neon_load4_all_lanes_q
; neon_load4_one_lane
; neon_load4_one_lane_q
; neon_store1_one_lane
; neon_store1_one_lane_q
; neon_store2_one_lane
; neon_store2_one_lane_q
; neon_store3_one_lane
; neon_store3_one_lane_q
; neon_store4_one_lane
; neon_store4_one_lane_q
; neon_fp_compare_s_q
; neon_fp_compare_d_q
; neon_fp_reduc_add_s
; neon_fp_reduc_add_s_q
; neon_fp_reduc_add_d
; neon_fp_reduc_add_d_q
; neon_fp_reduc_minmax_s
; neon_fp_reduc_minmax_s_q
; neon_fp_reduc_minmax_d
; neon_fp_reduc_minmax_d_q
; neon_fp_cvt_narrow_s_q
; neon_fp_cvt_narrow_d_q
; neon_fp_cvt_widen_h
; neon_fp_cvt_widen_s
; neon_fp_mul_s_scalar
; neon_fp_mul_s_scalar_q
; neon_fp_mul_d_scalar_q
; neon_fp_mla_s_scalar
; neon_fp_mla_s_scalar_q
; neon_fp_mla_d_scalar_q

; The classification below is for Crypto instructions.

; The classification below is for coprocessor instructions.
  neon_add_halve_narrow_q,\
  neon_sub_halve_narrow_q,\
  neon_compare_zero_q,\
  neon_reduc_add_long,\
  neon_reduc_add_acc_q,\
  neon_reduc_minmax_q,\
  neon_shift_imm_narrow_q,\
  neon_shift_imm_long,\
  neon_sat_shift_imm_q,\
  neon_sat_shift_imm_narrow_q,\
  neon_sat_shift_reg_q,\
  neon_mul_h_scalar_q,\
  neon_mul_s_scalar_q,\
  neon_mul_h_scalar_long,\
  neon_mul_s_scalar_long,\
  neon_sat_mul_b_long,\
  neon_sat_mul_h_long,\
  neon_sat_mul_s_long,\
  neon_sat_mul_h_scalar,\
  neon_sat_mul_h_scalar_q,\
  neon_sat_mul_s_scalar,\
  neon_sat_mul_s_scalar_q,\
  neon_sat_mul_h_scalar_long,\
  neon_sat_mul_s_scalar_long,\
  neon_mla_h_scalar_q,\
  neon_mla_s_scalar_q,\
  neon_mla_h_scalar_long,\
  neon_mla_s_scalar_long,\
  neon_sat_mla_b_long,\
  neon_sat_mla_h_long,\
  neon_sat_mla_s_long,\
  neon_sat_mla_h_scalar_long,\
  neon_sat_mla_s_scalar_long,\
  neon_load1_all_lanes,\
  neon_load1_all_lanes_q,\
  neon_load1_one_lane,\
  neon_load1_one_lane_q,\
  neon_load2_all_lanes,\
  neon_load2_all_lanes_q,\
  neon_load2_one_lane,\
  neon_load2_one_lane_q,\
  neon_load3_all_lanes,\
  neon_load3_all_lanes_q,\
  neon_load3_one_lane,\
  neon_load3_one_lane_q,\
  neon_load4_all_lanes,\
  neon_load4_all_lanes_q,\
  neon_load4_one_lane,\
  neon_load4_one_lane_q,\
  neon_store1_one_lane,\
  neon_store1_one_lane_q,\
  neon_store2_one_lane,\
  neon_store2_one_lane_q,\
  neon_store3_one_lane,\
  neon_store3_one_lane_q,\
  neon_store4_one_lane,\
  neon_store4_one_lane_q,\
  neon_fp_compare_s_q,\
  neon_fp_compare_d_q,\
  neon_fp_minmax_s_q,\
  neon_fp_minmax_d_q,\
  neon_fp_reduc_add_s,\
  neon_fp_reduc_add_s_q,\
  neon_fp_reduc_add_d,\
  neon_fp_reduc_add_d_q,\
  neon_fp_reduc_minmax_s,\
  neon_fp_reduc_minmax_s_q,\
  neon_fp_reduc_minmax_d,\
  neon_fp_reduc_minmax_d_q,\
  neon_fp_cvt_narrow_s_q,\
  neon_fp_cvt_narrow_d_q,\
  neon_fp_cvt_widen_h,\
  neon_fp_cvt_widen_s,\
  neon_fp_to_int_s_q,\
  neon_fp_to_int_d_q,\
  neon_int_to_fp_s_q,\
  neon_int_to_fp_d_q,\
  neon_fp_rsqrte_s_q,\
  neon_fp_rsqrte_d_q,\
  neon_fp_rsqrts_s_q,\
  neon_fp_rsqrts_d_q,\
  neon_fp_mul_s_scalar,\
  neon_fp_mul_s_scalar_q,\
  neon_fp_mul_d_scalar_q,\
  neon_fp_mla_s_scalar,\
  neon_fp_mla_s_scalar_q,\
  neon_fp_mla_d_scalar_q,\
  crypto_sha256_fast,\
  crypto_sha256_slow,\
  (const_string "untyped"))

; Is this an (integer side) multiply with a 32-bit (or smaller) result?
(define_attr "mul32" "no,yes"
  (if_then_else
    (eq_attr "type"
     "smulxy,smlaxy,smulwy,smlawx,mul,muls,mla,mlas,smlawy,smuad,smuadx,\
      smlad,smladx,smusd,smusdx,smlsd,smlsdx,smmul,smmulr,smmla,smlald,smlsld")
    (const_string "yes")
    (const_string "no")))

; Is this an (integer side) multiply with a 64-bit result?
(define_attr "mul64" "no,yes"
  (if_then_else
    (eq_attr "type"
     "smlalxy,umull,umulls,umaal,umlal,umlals,smull,smulls,smlal,smlals")
    (const_string "yes")
    (const_string "no")))
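
; As a hedged example of how these summary attributes are meant to be used
; (the reservation below is a sketch with hypothetical names, not taken from
; a real tuning description): a pipeline model can reserve its multiplier for
; every 32-bit-result multiply with a single rule instead of enumerating the
; individual "type" values again.
;
;   (define_insn_reservation "sketch_mul32" 3
;     (and (eq_attr "tune" "sketchcpu")        ; hypothetical tune value
;          (eq_attr "mul32" "yes"))
;     "sketch_pipe*2")                         ; hypothetical multiplier unit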

; YES if the "type" attribute assigned to the insn denotes an
; Advanced SIMD instruction, NO otherwise.
(define_attr "is_neon_type" "yes,no"
  (if_then_else (eq_attr "type"
   "neon_add, neon_add_q, neon_add_widen, neon_add_long,\
    neon_qadd, neon_qadd_q, neon_add_halve, neon_add_halve_q,\
    neon_add_halve_narrow_q,\
    neon_sub, neon_sub_q, neon_sub_widen, neon_sub_long, neon_qsub,\
    neon_qsub_q, neon_sub_halve, neon_sub_halve_q,\
    neon_sub_halve_narrow_q,\
    neon_abs, neon_abs_q, neon_dot, neon_dot_q, neon_neg, neon_neg_q,\
    neon_qneg, neon_qneg_q, neon_qabs, neon_qabs_q, neon_abd, neon_abd_q,\
    neon_abd_long, neon_minmax, neon_minmax_q, neon_compare,\
    neon_compare_q, neon_compare_zero, neon_compare_zero_q,\
    neon_arith_acc, neon_arith_acc_q, neon_reduc_add,\
    neon_reduc_add_q, neon_reduc_add_long, neon_reduc_add_acc,\
    neon_reduc_add_acc_q, neon_reduc_minmax, neon_reduc_minmax_q,\
    neon_logic, neon_logic_q, neon_tst, neon_tst_q,\
    neon_shift_imm, neon_shift_imm_q, neon_shift_imm_narrow_q,\
    neon_shift_imm_long, neon_shift_reg, neon_shift_reg_q,\
    neon_shift_acc, neon_shift_acc_q, neon_sat_shift_imm,\
    neon_sat_shift_imm_q, neon_sat_shift_imm_narrow_q,\
    neon_sat_shift_reg, neon_sat_shift_reg_q,\
    neon_ins, neon_ins_q, neon_move, neon_move_q, neon_move_narrow_q,\
    neon_permute, neon_permute_q, neon_zip, neon_zip_q, neon_tbl1,\
    neon_tbl1_q, neon_tbl2, neon_tbl2_q, neon_tbl3, neon_tbl3_q,\
    neon_tbl4, neon_tbl4_q, neon_bsl, neon_bsl_q, neon_cls,\
    neon_cls_q, neon_cnt, neon_cnt_q, neon_dup, neon_dup_q,\
    neon_ext, neon_ext_q, neon_rbit, neon_rbit_q,\
    neon_rev, neon_rev_q, neon_mul_b, neon_mul_b_q, neon_mul_h,\
    neon_mul_h_q, neon_mul_s, neon_mul_s_q, neon_mul_b_long,\
    neon_mul_h_long, neon_mul_s_long, neon_mul_d_long, neon_mul_h_scalar,\
    neon_mul_h_scalar_q, neon_mul_s_scalar, neon_mul_s_scalar_q,\
    neon_mul_h_scalar_long, neon_mul_s_scalar_long, neon_sat_mul_b,\
    neon_sat_mul_b_q, neon_sat_mul_h, neon_sat_mul_h_q,\
    neon_sat_mul_s, neon_sat_mul_s_q, neon_sat_mul_b_long,\
    neon_sat_mul_h_long, neon_sat_mul_s_long, neon_sat_mul_h_scalar,\
    neon_sat_mul_h_scalar_q, neon_sat_mul_s_scalar,\
    neon_sat_mul_s_scalar_q, neon_sat_mul_h_scalar_long,\
    neon_sat_mul_s_scalar_long, neon_mla_b, neon_mla_b_q, neon_mla_h,\
    neon_mla_h_q, neon_mla_s, neon_mla_s_q, neon_mla_b_long,\
    neon_mla_h_long, neon_mla_s_long, neon_mla_h_scalar,\
    neon_mla_h_scalar_q, neon_mla_s_scalar, neon_mla_s_scalar_q,\
    neon_mla_h_scalar_long, neon_mla_s_scalar_long,\
    neon_sat_mla_b_long, neon_sat_mla_h_long,\
    neon_sat_mla_s_long, neon_sat_mla_h_scalar_long,\
    neon_sat_mla_s_scalar_long,\
    neon_to_gp, neon_to_gp_q, neon_from_gp, neon_from_gp_q,\
    neon_ldr, neon_ldp, neon_ldp_q,\
    neon_load1_1reg, neon_load1_1reg_q, neon_load1_2reg,\
    neon_load1_2reg_q, neon_load1_3reg, neon_load1_3reg_q,\
    neon_load1_4reg, neon_load1_4reg_q, neon_load1_all_lanes,\
    neon_load1_all_lanes_q, neon_load1_one_lane, neon_load1_one_lane_q,\
    neon_load2_2reg, neon_load2_2reg_q, neon_load2_4reg,\
    neon_load2_4reg_q, neon_load2_all_lanes, neon_load2_all_lanes_q,\
    neon_load2_one_lane, neon_load2_one_lane_q,\
    neon_load3_3reg, neon_load3_3reg_q, neon_load3_all_lanes,\
    neon_load3_all_lanes_q, neon_load3_one_lane, neon_load3_one_lane_q,\
    neon_load4_4reg, neon_load4_4reg_q, neon_load4_all_lanes,\
    neon_load4_all_lanes_q, neon_load4_one_lane, neon_load4_one_lane_q,\
    neon_str, neon_stp, neon_stp_q,\
    neon_store1_1reg, neon_store1_1reg_q, neon_store1_2reg,\
    neon_store1_2reg_q, neon_store1_3reg, neon_store1_3reg_q,\
    neon_store1_4reg, neon_store1_4reg_q, neon_store1_one_lane,\
    neon_store1_one_lane_q, neon_store2_2reg, neon_store2_2reg_q,\
    neon_store2_4reg, neon_store2_4reg_q, neon_store2_one_lane,\
    neon_store2_one_lane_q, neon_store3_3reg, neon_store3_3reg_q,\
    neon_store3_one_lane, neon_store3_one_lane_q, neon_store4_4reg,\
    neon_store4_4reg_q, neon_store4_one_lane, neon_store4_one_lane_q,\
    neon_fp_abd_s, neon_fp_abd_s_q, neon_fp_abd_d, neon_fp_abd_d_q,\
    neon_fp_abs_s, neon_fp_abs_s_q, neon_fp_abs_d, neon_fp_abs_d_q,\
    neon_fp_addsub_s, neon_fp_addsub_s_q, neon_fp_addsub_d,\
    neon_fp_addsub_d_q, neon_fp_compare_s, neon_fp_compare_s_q,\
    neon_fp_compare_d, neon_fp_compare_d_q, neon_fp_minmax_s,\
    neon_fp_minmax_s_q, neon_fp_minmax_d, neon_fp_minmax_d_q,\
    neon_fp_neg_s, neon_fp_neg_s_q, neon_fp_neg_d, neon_fp_neg_d_q,\
    neon_fp_reduc_add_s, neon_fp_reduc_add_s_q, neon_fp_reduc_add_d,\
    neon_fp_reduc_add_d_q, neon_fp_reduc_minmax_s,\
    neon_fp_reduc_minmax_s_q, neon_fp_reduc_minmax_d,\
    neon_fp_reduc_minmax_d_q,\
    neon_fp_cvt_narrow_s_q, neon_fp_cvt_narrow_d_q,\
    neon_fp_cvt_widen_h, neon_fp_cvt_widen_s, neon_fp_to_int_s,\
    neon_fp_to_int_s_q, neon_int_to_fp_s, neon_int_to_fp_s_q,\
    neon_fp_to_int_d, neon_fp_to_int_d_q,\
    neon_int_to_fp_d, neon_int_to_fp_d_q,\
    neon_fp_round_s, neon_fp_round_s_q, neon_fp_recpe_s,\
    neon_fp_recpe_s_q,\
    neon_fp_recpe_d, neon_fp_recpe_d_q, neon_fp_recps_s,\
    neon_fp_recps_s_q, neon_fp_recps_d, neon_fp_recps_d_q,\
    neon_fp_recpx_s, neon_fp_recpx_s_q, neon_fp_recpx_d,\
    neon_fp_recpx_d_q, neon_fp_rsqrte_s, neon_fp_rsqrte_s_q,\
    neon_fp_rsqrte_d, neon_fp_rsqrte_d_q, neon_fp_rsqrts_s,\
    neon_fp_rsqrts_s_q, neon_fp_rsqrts_d, neon_fp_rsqrts_d_q,\
    neon_fp_mul_s, neon_fp_mul_s_q, neon_fp_mul_s_scalar,\
    neon_fp_mul_s_scalar_q, neon_fp_mul_d, neon_fp_mul_d_q,\
    neon_fp_mul_d_scalar_q, neon_fp_mla_s, neon_fp_mla_s_q,\
    neon_fp_mla_s_scalar, neon_fp_mla_s_scalar_q, neon_fp_mla_d,\
    neon_fp_mla_d_q, neon_fp_mla_d_scalar_q, neon_fp_sqrt_s,\
    neon_fp_sqrt_s_q, neon_fp_sqrt_d, neon_fp_sqrt_d_q,\
    neon_fp_div_s, neon_fp_div_s_q, neon_fp_div_d, neon_fp_div_d_q, crypto_aese,\
    crypto_aesmc, crypto_sha1_xor, crypto_sha1_fast, crypto_sha1_slow,\
    crypto_sha256_fast, crypto_sha256_slow")
   (const_string "yes")
   (const_string "no")))
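
; As a hedged illustration of how this summary attribute is used (the
; reservation below is a sketch with hypothetical names, not copied from a
; real tuning file): a core model without a detailed Advanced SIMD pipeline
; can route every NEON-typed insn through one reservation by testing
; is_neon_type instead of repeating the long list of values above.
;
;   (define_insn_reservation "sketch_neon" 4
;     (and (eq_attr "tune" "sketchcpu")        ; hypothetical tune value
;          (eq_attr "is_neon_type" "yes"))
;     "sketch_neon_pipe")                      ; hypothetical NEON unit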