/* { dg-do compile } */
/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */

#include "riscv_vector.h"
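
/* Each test function below calls the type-overloaded __riscv_vmacc_tu
   intrinsic (tail-undisturbed multiply-accumulate, vd = vs1 * vs2 + vd)
   with the constant vector length 32.  Because 32 exceeds the 5-bit
   immediate range of vsetivli, the compiler is expected to materialize
   the length in a scalar register and emit a vsetvli.  The vl parameter
   is deliberately unused, and the functions overload on the argument
   vector type, which relies on this file being compiled as C++.  */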

vint8mf8_t test___riscv_vmacc_tu(vint8mf8_t vd,vint8mf8_t vs1,vint8mf8_t vs2,size_t vl)
{
    return __riscv_vmacc_tu(vd,vs1,vs2,32);
}

vint8mf4_t test___riscv_vmacc_tu(vint8mf4_t vd,vint8mf4_t vs1,vint8mf4_t vs2,size_t vl)
{
    return __riscv_vmacc_tu(vd,vs1,vs2,32);
}

vint8mf2_t test___riscv_vmacc_tu(vint8mf2_t vd,vint8mf2_t vs1,vint8mf2_t vs2,size_t vl)
{
    return __riscv_vmacc_tu(vd,vs1,vs2,32);
}

vint8m1_t test___riscv_vmacc_tu(vint8m1_t vd,vint8m1_t vs1,vint8m1_t vs2,size_t vl)
{
    return __riscv_vmacc_tu(vd,vs1,vs2,32);
}

vint8m2_t test___riscv_vmacc_tu(vint8m2_t vd,vint8m2_t vs1,vint8m2_t vs2,size_t vl)
{
    return __riscv_vmacc_tu(vd,vs1,vs2,32);
}

vint8m4_t test___riscv_vmacc_tu(vint8m4_t vd,vint8m4_t vs1,vint8m4_t vs2,size_t vl)
{
    return __riscv_vmacc_tu(vd,vs1,vs2,32);
}

vint8m8_t test___riscv_vmacc_tu(vint8m8_t vd,vint8m8_t vs1,vint8m8_t vs2,size_t vl)
{
    return __riscv_vmacc_tu(vd,vs1,vs2,32);
}

vint16mf4_t test___riscv_vmacc_tu(vint16mf4_t vd,vint16mf4_t vs1,vint16mf4_t vs2,size_t vl)
{
    return __riscv_vmacc_tu(vd,vs1,vs2,32);
}

vint16mf2_t test___riscv_vmacc_tu(vint16mf2_t vd,vint16mf2_t vs1,vint16mf2_t vs2,size_t vl)
{
    return __riscv_vmacc_tu(vd,vs1,vs2,32);
}

vint16m1_t test___riscv_vmacc_tu(vint16m1_t vd,vint16m1_t vs1,vint16m1_t vs2,size_t vl)
{
    return __riscv_vmacc_tu(vd,vs1,vs2,32);
}

vint16m2_t test___riscv_vmacc_tu(vint16m2_t vd,vint16m2_t vs1,vint16m2_t vs2,size_t vl)
{
    return __riscv_vmacc_tu(vd,vs1,vs2,32);
}

vint16m4_t test___riscv_vmacc_tu(vint16m4_t vd,vint16m4_t vs1,vint16m4_t vs2,size_t vl)
{
    return __riscv_vmacc_tu(vd,vs1,vs2,32);
}

vint16m8_t test___riscv_vmacc_tu(vint16m8_t vd,vint16m8_t vs1,vint16m8_t vs2,size_t vl)
{
    return __riscv_vmacc_tu(vd,vs1,vs2,32);
}

vint32mf2_t test___riscv_vmacc_tu(vint32mf2_t vd,vint32mf2_t vs1,vint32mf2_t vs2,size_t vl)
{
    return __riscv_vmacc_tu(vd,vs1,vs2,32);
}

vint32m1_t test___riscv_vmacc_tu(vint32m1_t vd,vint32m1_t vs1,vint32m1_t vs2,size_t vl)
{
    return __riscv_vmacc_tu(vd,vs1,vs2,32);
}

vint32m2_t test___riscv_vmacc_tu(vint32m2_t vd,vint32m2_t vs1,vint32m2_t vs2,size_t vl)
{
    return __riscv_vmacc_tu(vd,vs1,vs2,32);
}

vint32m4_t test___riscv_vmacc_tu(vint32m4_t vd,vint32m4_t vs1,vint32m4_t vs2,size_t vl)
{
    return __riscv_vmacc_tu(vd,vs1,vs2,32);
}

vint32m8_t test___riscv_vmacc_tu(vint32m8_t vd,vint32m8_t vs1,vint32m8_t vs2,size_t vl)
{
    return __riscv_vmacc_tu(vd,vs1,vs2,32);
}

vint64m1_t test___riscv_vmacc_tu(vint64m1_t vd,vint64m1_t vs1,vint64m1_t vs2,size_t vl)
{
    return __riscv_vmacc_tu(vd,vs1,vs2,32);
}

vint64m2_t test___riscv_vmacc_tu(vint64m2_t vd,vint64m2_t vs1,vint64m2_t vs2,size_t vl)
{
    return __riscv_vmacc_tu(vd,vs1,vs2,32);
}

vint64m4_t test___riscv_vmacc_tu(vint64m4_t vd,vint64m4_t vs1,vint64m4_t vs2,size_t vl)
{
    return __riscv_vmacc_tu(vd,vs1,vs2,32);
}

vint64m8_t test___riscv_vmacc_tu(vint64m8_t vd,vint64m8_t vs1,vint64m8_t vs2,size_t vl)
{
    return __riscv_vmacc_tu(vd,vs1,vs2,32);
}

vuint8mf8_t test___riscv_vmacc_tu(vuint8mf8_t vd,vuint8mf8_t vs1,vuint8mf8_t vs2,size_t vl)
{
    return __riscv_vmacc_tu(vd,vs1,vs2,32);
}

vuint8mf4_t test___riscv_vmacc_tu(vuint8mf4_t vd,vuint8mf4_t vs1,vuint8mf4_t vs2,size_t vl)
{
    return __riscv_vmacc_tu(vd,vs1,vs2,32);
}

vuint8mf2_t test___riscv_vmacc_tu(vuint8mf2_t vd,vuint8mf2_t vs1,vuint8mf2_t vs2,size_t vl)
{
    return __riscv_vmacc_tu(vd,vs1,vs2,32);
}

vuint8m1_t test___riscv_vmacc_tu(vuint8m1_t vd,vuint8m1_t vs1,vuint8m1_t vs2,size_t vl)
{
    return __riscv_vmacc_tu(vd,vs1,vs2,32);
}

vuint8m2_t test___riscv_vmacc_tu(vuint8m2_t vd,vuint8m2_t vs1,vuint8m2_t vs2,size_t vl)
{
    return __riscv_vmacc_tu(vd,vs1,vs2,32);
}

vuint8m4_t test___riscv_vmacc_tu(vuint8m4_t vd,vuint8m4_t vs1,vuint8m4_t vs2,size_t vl)
{
    return __riscv_vmacc_tu(vd,vs1,vs2,32);
}

vuint8m8_t test___riscv_vmacc_tu(vuint8m8_t vd,vuint8m8_t vs1,vuint8m8_t vs2,size_t vl)
{
    return __riscv_vmacc_tu(vd,vs1,vs2,32);
}

vuint16mf4_t test___riscv_vmacc_tu(vuint16mf4_t vd,vuint16mf4_t vs1,vuint16mf4_t vs2,size_t vl)
{
    return __riscv_vmacc_tu(vd,vs1,vs2,32);
}

vuint16mf2_t test___riscv_vmacc_tu(vuint16mf2_t vd,vuint16mf2_t vs1,vuint16mf2_t vs2,size_t vl)
{
    return __riscv_vmacc_tu(vd,vs1,vs2,32);
}

vuint16m1_t test___riscv_vmacc_tu(vuint16m1_t vd,vuint16m1_t vs1,vuint16m1_t vs2,size_t vl)
{
    return __riscv_vmacc_tu(vd,vs1,vs2,32);
}

vuint16m2_t test___riscv_vmacc_tu(vuint16m2_t vd,vuint16m2_t vs1,vuint16m2_t vs2,size_t vl)
{
    return __riscv_vmacc_tu(vd,vs1,vs2,32);
}

vuint16m4_t test___riscv_vmacc_tu(vuint16m4_t vd,vuint16m4_t vs1,vuint16m4_t vs2,size_t vl)
{
    return __riscv_vmacc_tu(vd,vs1,vs2,32);
}

vuint16m8_t test___riscv_vmacc_tu(vuint16m8_t vd,vuint16m8_t vs1,vuint16m8_t vs2,size_t vl)
{
    return __riscv_vmacc_tu(vd,vs1,vs2,32);
}

vuint32mf2_t test___riscv_vmacc_tu(vuint32mf2_t vd,vuint32mf2_t vs1,vuint32mf2_t vs2,size_t vl)
{
    return __riscv_vmacc_tu(vd,vs1,vs2,32);
}

vuint32m1_t test___riscv_vmacc_tu(vuint32m1_t vd,vuint32m1_t vs1,vuint32m1_t vs2,size_t vl)
{
    return __riscv_vmacc_tu(vd,vs1,vs2,32);
}

vuint32m2_t test___riscv_vmacc_tu(vuint32m2_t vd,vuint32m2_t vs1,vuint32m2_t vs2,size_t vl)
{
    return __riscv_vmacc_tu(vd,vs1,vs2,32);
}

vuint32m4_t test___riscv_vmacc_tu(vuint32m4_t vd,vuint32m4_t vs1,vuint32m4_t vs2,size_t vl)
{
    return __riscv_vmacc_tu(vd,vs1,vs2,32);
}

vuint32m8_t test___riscv_vmacc_tu(vuint32m8_t vd,vuint32m8_t vs1,vuint32m8_t vs2,size_t vl)
{
    return __riscv_vmacc_tu(vd,vs1,vs2,32);
}

vuint64m1_t test___riscv_vmacc_tu(vuint64m1_t vd,vuint64m1_t vs1,vuint64m1_t vs2,size_t vl)
{
    return __riscv_vmacc_tu(vd,vs1,vs2,32);
}

vuint64m2_t test___riscv_vmacc_tu(vuint64m2_t vd,vuint64m2_t vs1,vuint64m2_t vs2,size_t vl)
{
    return __riscv_vmacc_tu(vd,vs1,vs2,32);
}

vuint64m4_t test___riscv_vmacc_tu(vuint64m4_t vd,vuint64m4_t vs1,vuint64m4_t vs2,size_t vl)
{
    return __riscv_vmacc_tu(vd,vs1,vs2,32);
}

vuint64m8_t test___riscv_vmacc_tu(vuint64m8_t vd,vuint64m8_t vs1,vuint64m8_t vs2,size_t vl)
{
    return __riscv_vmacc_tu(vd,vs1,vs2,32);
}
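
/* Each pattern below is expected to match exactly twice: once for the
   signed and once for the unsigned function with the same SEW/LMUL pair.
   The vma[c-d][c-d]\.vv alternation also accepts vmadd.vv, which the
   compiler may choose instead of vmacc.vv by commuting the operands.  */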

/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vma[c-d][c-d]\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vma[c-d][c-d]\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vma[c-d][c-d]\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vma[c-d][c-d]\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vma[c-d][c-d]\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vma[c-d][c-d]\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vma[c-d][c-d]\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vma[c-d][c-d]\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vma[c-d][c-d]\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vma[c-d][c-d]\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vma[c-d][c-d]\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vma[c-d][c-d]\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vma[c-d][c-d]\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vma[c-d][c-d]\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vma[c-d][c-d]\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vma[c-d][c-d]\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vma[c-d][c-d]\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vma[c-d][c-d]\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*tu,\s*m[au]\s+vma[c-d][c-d]\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*tu,\s*m[au]\s+vma[c-d][c-d]\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*tu,\s*m[au]\s+vma[c-d][c-d]\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*tu,\s*m[au]\s+vma[c-d][c-d]\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */