1 /* { dg-do compile } */
2 /* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d --param=riscv-autovec-lmul=m8 -O3 -fno-schedule-insns -fno-schedule-insns2" } */
3 /* { dg-final { check-function-bodies "**" "" } } */
9 ** vsetivli\s+zero,\s*1,\s*e32,\s*mf2,\s*t[au],\s*m[au]
10 ** vle32\.v\tv1,0\s*\([a-x0-9]+\)
/* NOTE(review): mov0 — moves a 1-element float vector through fixed RVV
   registers.  This chunk is truncated (embedded original line numbers,
   braces and the store back through `out` are on missing lines); the
   `v1sf` typedef is declared elsewhere — confirm against the full test.  */
16 void mov0 (float *in
, float *out
)
/* Bind v1 to hardware vector register v1 and load it from *in.  */
18 register v1sf v1
asm("v1") = *(v1sf
*)in
;
/* Empty asm with a "vr" input forces v1 to materialize in a vector reg.  */
19 asm volatile ("# %0"::"vr"(v1
));
/* Register-to-register copy into fixed register v2 (the vmv the
   check-function-bodies block above is expected to match).  */
20 register v1sf v2
asm("v2") = v1
;
19 asm volatile ("# %0"::"vr"(v2
));
27 ** vsetivli\s+zero,\s*2,\s*e32,\s*mf2,\s*t[au],\s*m[au]
28 ** vle32\.v\tv1,0\s*\([a-x0-9]+\)
/* NOTE(review): mov1 — same shape as mov0 but for a 2-element float
   vector (v2sf).  Interior lines (braces, store to `out`) are not
   visible in this chunk — verify against the original test file.  */
34 void mov1 (float *in
, float *out
)
/* Load *in into variable v1 pinned to hardware register v1.  */
36 register v2sf v1
asm("v1") = *(v2sf
*)in
;
/* Keep v1 live in a vector register via an empty asm "vr" input.  */
37 asm volatile ("# %0"::"vr"(v1
));
/* Copy into fixed register v2.  */
38 register v2sf v2
asm("v2") = v1
;
37 asm volatile ("# %0"::"vr"(v2
));
45 ** vsetivli\s+zero,\s*4,\s*e32,\s*mf2,\s*t[au],\s*m[au]
46 ** vle32\.v\tv1,0\s*\([a-x0-9]+\)
/* NOTE(review): mov2 — 4-element float vector (v4sf) moved through fixed
   registers v1/v2.  Chunk is truncated; braces and the store back to
   `out` are on missing lines.  */
52 void mov2 (float *in
, float *out
)
/* Load *in into variable v1 pinned to hardware register v1.  */
54 register v4sf v1
asm("v1") = *(v4sf
*)in
;
/* Keep v1 live in a vector register.  */
55 asm volatile ("# %0"::"vr"(v1
));
/* Copy into fixed register v2.  */
56 register v4sf v2
asm("v2") = v1
;
55 asm volatile ("# %0"::"vr"(v2
));
63 ** vsetivli\s+zero,\s*8,\s*e32,\s*mf2,\s*t[au],\s*m[au]
64 ** vle32\.v\tv1,0\s*\([a-x0-9]+\)
/* NOTE(review): mov3 — 8-element float vector (v8sf) through fixed
   registers v1/v2.  Chunk is truncated; confirm missing lines against
   the full test.  */
70 void mov3 (float *in
, float *out
)
/* Load *in into variable v1 pinned to hardware register v1.  */
72 register v8sf v1
asm("v1") = *(v8sf
*)in
;
/* Keep v1 live in a vector register.  */
73 asm volatile ("# %0"::"vr"(v1
));
/* Copy into fixed register v2.  */
74 register v8sf v2
asm("v2") = v1
;
73 asm volatile ("# %0"::"vr"(v2
));
81 ** vsetivli\s+zero,\s*16,\s*e32,\s*mf2,\s*t[au],\s*m[au]
82 ** vle32\.v\tv1,0\s*\([a-x0-9]+\)
/* NOTE(review): mov4 — 16-element float vector (v16sf) through fixed
   registers v1/v2.  Chunk is truncated; interior lines missing.  */
88 void mov4 (float *in
, float *out
)
/* Load *in into variable v1 pinned to hardware register v1.  */
90 register v16sf v1
asm("v1") = *(v16sf
*)in
;
/* Keep v1 live in a vector register.  */
91 asm volatile ("# %0"::"vr"(v1
));
/* Copy into fixed register v2.  */
92 register v16sf v2
asm("v2") = v1
;
91 asm volatile ("# %0"::"vr"(v2
));
100 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]
101 ** vle32\.v\tv1,0\s*\([a-x0-9]+\)
/* NOTE(review): mov5 — 32-element float vector (v32sf); the expectation
   block above switches from vsetivli to vsetvli, consistent with a VL
   too large for an immediate.  Chunk is truncated; interior lines
   missing.  */
107 void mov5 (float *in
, float *out
)
/* Load *in into variable v1 pinned to hardware register v1.  */
109 register v32sf v1
asm("v1") = *(v32sf
*)in
;
/* Keep v1 live in a vector register.  */
110 asm volatile ("# %0"::"vr"(v1
));
/* Copy into fixed register v2.  */
111 register v32sf v2
asm("v2") = v1
;
110 asm volatile ("# %0"::"vr"(v2
));
119 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]
120 ** vle32\.v\tv1,0\s*\([a-x0-9]+\)
/* NOTE(review): mov6 — 64-element float vector (v64sf) through fixed
   registers v1/v2.  Chunk is truncated; interior lines missing.  */
126 void mov6 (float *in
, float *out
)
/* Load *in into variable v1 pinned to hardware register v1.  */
128 register v64sf v1
asm("v1") = *(v64sf
*)in
;
/* Keep v1 live in a vector register.  */
129 asm volatile ("# %0"::"vr"(v1
));
/* Copy into fixed register v2.  */
130 register v64sf v2
asm("v2") = v1
;
129 asm volatile ("# %0"::"vr"(v2
));
137 ** li\s+[a-x0-9]+,128
138 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]
139 ** vle32\.v\tv1,0\s*\([a-x0-9]+\)
/* NOTE(review): mov7 — 128-element float vector (v128sf); the
   expectation block above now requires `li …,128` plus e32,m1,
   consistent with the larger VL.  Chunk is truncated; interior lines
   missing.  */
145 void mov7 (float *in
, float *out
)
/* Load *in into variable v1 pinned to hardware register v1.  */
147 register v128sf v1
asm("v1") = *(v128sf
*)in
;
/* Keep v1 live in a vector register.  */
148 asm volatile ("# %0"::"vr"(v1
));
/* Copy into fixed register v2.  */
149 register v128sf v2
asm("v2") = v1
;
148 asm volatile ("# %0"::"vr"(v2
));
156 ** li\s+[a-x0-9]+,256
157 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*m[au]
158 ** vle32\.v\tv2,0\s*\([a-x0-9]+\)
/* NOTE(review): mov8 — 256-element float vector (v256sf).  The variables
   are deliberately bound to v2/v4 (not v1/v2): with the m2 grouping the
   expectation block above anticipates, register numbers must be
   even-aligned.  Chunk is truncated; interior lines missing.  */
164 void mov8 (float *in
, float *out
)
/* Variable named v1 but pinned to hardware register group starting at v2.  */
166 register v256sf v1
asm("v2") = *(v256sf
*)in
;
/* Keep v1 live in a vector register.  */
167 asm volatile ("# %0"::"vr"(v1
));
/* Second variable pinned to the group starting at v4.  */
168 register v256sf v2
asm("v4") = v1
;
167 asm volatile ("# %0"::"vr"(v2
));
175 ** li\s+[a-x0-9]+,512
176 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]
177 ** vle32\.v\tv4,0\s*\([a-x0-9]+\)
/* NOTE(review): mov9 — 512-element float vector (v512sf) bound to
   register groups v4/v8, matching the m4 grouping the expectation block
   above anticipates (group base must be a multiple of 4).  Chunk is
   truncated; interior lines missing.  */
183 void mov9 (float *in
, float *out
)
/* Variable v1 pinned to the register group starting at v4.  */
185 register v512sf v1
asm("v4") = *(v512sf
*)in
;
/* Keep v1 live in a vector register.  */
186 asm volatile ("# %0"::"vr"(v1
));
/* Second variable pinned to the group starting at v8.  */
187 register v512sf v2
asm("v8") = v1
;
186 asm volatile ("# %0"::"vr"(v2
));
194 ** li\s+[a-x0-9]+,1024
195 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*m[au]
196 ** vle32\.v\tv8,0\s*\([a-x0-9]+\)
/* NOTE(review): mov10 — 1024-element float vector (v1024sf) bound to
   register groups v8/v16, matching the m8 grouping the expectation
   block above anticipates (group base must be a multiple of 8).  The
   local variable names here are v2/v4, diverging from the v1/v2 naming
   of the earlier functions.  Chunk is truncated; interior lines
   missing.  */
202 void mov10 (float *in
, float *out
)
/* Variable v2 pinned to the register group starting at v8.  */
204 register v1024sf v2
asm("v8") = *(v1024sf
*)in
;
/* Keep v2 live in a vector register.  */
205 asm volatile ("# %0"::"vr"(v2
));
/* Second variable pinned to the group starting at v16.  */
206 register v1024sf v4
asm("v16") = v2
;
205 asm volatile ("# %0"::"vr"(v4
));