RISC-V: Block VLSmodes according to TARGET_MAX_LMUL and BITS_PER_RISCV_VECTOR
[official-gcc.git] / gcc / testsuite / gcc.target / riscv / rvv / autovec / vls / mov-16.c
blob06ab31b3094746de85774997824bda93ed868378
/* { dg-do compile } */
/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d --param=riscv-autovec-lmul=m8 -O3 -fno-schedule-insns -fno-schedule-insns2" } */
/* { dg-final { check-function-bodies "**" "" } } */

#include "def.h"
7 /*
8 ** mov0:
9 ** vsetivli\s+zero,\s*1,\s*e32,\s*mf2,\s*t[au],\s*m[au]
10 ** vle32\.v\tv1,0\s*\([a-x0-9]+\)
11 ** ...
12 ** vmv1r\.v\tv2,v1
13 ** ...
14 ** ret
16 void mov0 (float *in, float *out)
18 register v1sf v1 asm("v1") = *(v1sf*)in;
19 asm volatile ("# %0"::"vr"(v1));
20 register v1sf v2 asm("v2") = v1;
21 *(v1sf*)out = v2;
22 asm volatile ("# %0"::"vr"(v2));
26 ** mov1:
27 ** vsetivli\s+zero,\s*2,\s*e32,\s*mf2,\s*t[au],\s*m[au]
28 ** vle32\.v\tv1,0\s*\([a-x0-9]+\)
29 ** ...
30 ** vmv1r\.v\tv2,v1
31 ** ...
32 ** ret
34 void mov1 (float *in, float *out)
36 register v2sf v1 asm("v1") = *(v2sf*)in;
37 asm volatile ("# %0"::"vr"(v1));
38 register v2sf v2 asm("v2") = v1;
39 *(v2sf*)out = v2;
40 asm volatile ("# %0"::"vr"(v2));
44 ** mov2:
45 ** vsetivli\s+zero,\s*4,\s*e32,\s*mf2,\s*t[au],\s*m[au]
46 ** vle32\.v\tv1,0\s*\([a-x0-9]+\)
47 ** ...
48 ** vmv1r\.v\tv2,v1
49 ** ...
50 ** ret
52 void mov2 (float *in, float *out)
54 register v4sf v1 asm("v1") = *(v4sf*)in;
55 asm volatile ("# %0"::"vr"(v1));
56 register v4sf v2 asm("v2") = v1;
57 *(v4sf*)out = v2;
58 asm volatile ("# %0"::"vr"(v2));
62 ** mov3:
63 ** vsetivli\s+zero,\s*8,\s*e32,\s*mf2,\s*t[au],\s*m[au]
64 ** vle32\.v\tv1,0\s*\([a-x0-9]+\)
65 ** ...
66 ** vmv1r\.v\tv2,v1
67 ** ...
68 ** ret
70 void mov3 (float *in, float *out)
72 register v8sf v1 asm("v1") = *(v8sf*)in;
73 asm volatile ("# %0"::"vr"(v1));
74 register v8sf v2 asm("v2") = v1;
75 *(v8sf*)out = v2;
76 asm volatile ("# %0"::"vr"(v2));
80 ** mov4:
81 ** vsetivli\s+zero,\s*16,\s*e32,\s*mf2,\s*t[au],\s*m[au]
82 ** vle32\.v\tv1,0\s*\([a-x0-9]+\)
83 ** ...
84 ** vmv1r\.v\tv2,v1
85 ** ...
86 ** ret
88 void mov4 (float *in, float *out)
90 register v16sf v1 asm("v1") = *(v16sf*)in;
91 asm volatile ("# %0"::"vr"(v1));
92 register v16sf v2 asm("v2") = v1;
93 *(v16sf*)out = v2;
94 asm volatile ("# %0"::"vr"(v2));
98 ** mov5:
99 ** li\s+[a-x0-9]+,32
100 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]
101 ** vle32\.v\tv1,0\s*\([a-x0-9]+\)
102 ** ...
103 ** vmv1r\.v\tv2,v1
104 ** ...
105 ** ret
107 void mov5 (float *in, float *out)
109 register v32sf v1 asm("v1") = *(v32sf*)in;
110 asm volatile ("# %0"::"vr"(v1));
111 register v32sf v2 asm("v2") = v1;
112 *(v32sf*)out = v2;
113 asm volatile ("# %0"::"vr"(v2));
117 ** mov6:
118 ** li\s+[a-x0-9]+,64
119 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]
120 ** vle32\.v\tv1,0\s*\([a-x0-9]+\)
121 ** ...
122 ** vmv1r\.v\tv2,v1
123 ** ...
124 ** ret
126 void mov6 (float *in, float *out)
128 register v64sf v1 asm("v1") = *(v64sf*)in;
129 asm volatile ("# %0"::"vr"(v1));
130 register v64sf v2 asm("v2") = v1;
131 *(v64sf*)out = v2;
132 asm volatile ("# %0"::"vr"(v2));
136 ** mov7:
137 ** li\s+[a-x0-9]+,128
138 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]
139 ** vle32\.v\tv1,0\s*\([a-x0-9]+\)
140 ** ...
141 ** vmv1r\.v\tv2,v1
142 ** ...
143 ** ret
145 void mov7 (float *in, float *out)
147 register v128sf v1 asm("v1") = *(v128sf*)in;
148 asm volatile ("# %0"::"vr"(v1));
149 register v128sf v2 asm("v2") = v1;
150 *(v128sf*)out = v2;
151 asm volatile ("# %0"::"vr"(v2));
155 ** mov8:
156 ** li\s+[a-x0-9]+,256
157 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*m[au]
158 ** vle32\.v\tv2,0\s*\([a-x0-9]+\)
159 ** ...
160 ** vmv2r\.v\tv4,v2
161 ** ...
162 ** ret
164 void mov8 (float *in, float *out)
166 register v256sf v1 asm("v2") = *(v256sf*)in;
167 asm volatile ("# %0"::"vr"(v1));
168 register v256sf v2 asm("v4") = v1;
169 *(v256sf*)out = v2;
170 asm volatile ("# %0"::"vr"(v2));
174 ** mov9:
175 ** li\s+[a-x0-9]+,512
176 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]
177 ** vle32\.v\tv4,0\s*\([a-x0-9]+\)
178 ** ...
179 ** vmv4r\.v\tv8,v4
180 ** ...
181 ** ret
183 void mov9 (float *in, float *out)
185 register v512sf v1 asm("v4") = *(v512sf*)in;
186 asm volatile ("# %0"::"vr"(v1));
187 register v512sf v2 asm("v8") = v1;
188 *(v512sf*)out = v2;
189 asm volatile ("# %0"::"vr"(v2));
193 ** mov10:
194 ** li\s+[a-x0-9]+,1024
195 ** vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*m[au]
196 ** vle32\.v\tv8,0\s*\([a-x0-9]+\)
197 ** ...
198 ** vmv8r\.v\tv16,v8
199 ** ...
200 ** ret
202 void mov10 (float *in, float *out)
204 register v1024sf v2 asm("v8") = *(v1024sf*)in;
205 asm volatile ("# %0"::"vr"(v2));
206 register v1024sf v4 asm("v16") = v2;
207 *(v1024sf*)out = v4;
208 asm volatile ("# %0"::"vr"(v4));