/* { dg-do compile } */
/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
/* { dg-final { check-function-bodies "**" "" } } */

#include "def.h"
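
/* Fixed-length (VLS) whole-vector copies of int64_t data, from 1 up to
   512 elements, compiled for a 4096-bit minimum vector length (zvl4096b).
   The v1di ... v512di typedefs are expected to come from def.h;
   check-function-bodies verifies the assembly emitted for each mov*
   function below.  */
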
/*
** mov0:
** ld\s+[a-x0-9]+,0\s*\([a-x0-9]+\)
** sd\s+[a-x0-9]+,0\s*\([a-x0-9]+\)
** ret
*/
void mov0 (int64_t *in, int64_t *out)
{
  v1di v = *(v1di*)in;
  *(v1di*)out = v;
}

/*
** mov1:
** vsetivli\s+zero,\s*2,\s*e64,\s*m1,\s*t[au],\s*m[au]
** vle64\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),0\s*\([a-x0-9]+\)
** vse64\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),0\s*\([a-x0-9]+\)
** ret
*/
void mov1 (int64_t *in, int64_t *out)
{
  v2di v = *(v2di*)in;
  *(v2di*)out = v;
}

/*
** mov2:
** vsetivli\s+zero,\s*4,\s*e64,\s*m1,\s*t[au],\s*m[au]
** vle64\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),0\s*\([a-x0-9]+\)
** vse64\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),0\s*\([a-x0-9]+\)
** ret
*/
void mov2 (int64_t *in, int64_t *out)
{
  v4di v = *(v4di*)in;
  *(v4di*)out = v;
}

/*
** mov3:
** vsetivli\s+zero,\s*8,\s*e64,\s*m1,\s*t[au],\s*m[au]
** vle64\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),0\s*\([a-x0-9]+\)
** vse64\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),0\s*\([a-x0-9]+\)
** ret
*/
void mov3 (int64_t *in, int64_t *out)
{
  v8di v = *(v8di*)in;
  *(v8di*)out = v;
}

/*
** mov4:
** vsetivli\s+zero,\s*16,\s*e64,\s*m1,\s*t[au],\s*m[au]
** vle64\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),0\s*\([a-x0-9]+\)
** vse64\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),0\s*\([a-x0-9]+\)
** ret
*/
void mov4 (int64_t *in, int64_t *out)
{
  v16di v = *(v16di*)in;
  *(v16di*)out = v;
}
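
/* From 32 elements upward the expected sequence switches from vsetivli to
   li + vsetvli, presumably because vsetivli only encodes a 5-bit immediate
   AVL (0-31), so larger element counts must be materialized in a register.  */
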
/*
** mov5:
** li\s+[a-x0-9]+,32
** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*t[au],\s*m[au]
** vle64\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),0\s*\([a-x0-9]+\)
** vse64\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),0\s*\([a-x0-9]+\)
** ret
*/
void mov5 (int64_t *in, int64_t *out)
{
  v32di v = *(v32di*)in;
  *(v32di*)out = v;
}

/*
** mov6:
** li\s+[a-x0-9]+,64
** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*t[au],\s*m[au]
** vle64\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),0\s*\([a-x0-9]+\)
** vse64\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),0\s*\([a-x0-9]+\)
** ret
*/
void mov6 (int64_t *in, int64_t *out)
{
  v64di v = *(v64di*)in;
  *(v64di*)out = v;
}
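
/* A 64-element vector of int64_t is 4096 bits, which still fits a single
   register group (m1) under zvl4096b; the larger copies below are expected
   to raise LMUL to m2, m4 and m8 as the vector size reaches 2x, 4x and 8x
   the minimum vector length.  */
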
/*
** mov7:
** li\s+[a-x0-9]+,128
** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*t[au],\s*m[au]
** vle64\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),0\s*\([a-x0-9]+\)
** vse64\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),0\s*\([a-x0-9]+\)
** ret
*/
void mov7 (int64_t *in, int64_t *out)
{
  v128di v = *(v128di*)in;
  *(v128di*)out = v;
}

/*
** mov8:
** li\s+[a-x0-9]+,256
** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*t[au],\s*m[au]
** vle64\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),0\s*\([a-x0-9]+\)
** vse64\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),0\s*\([a-x0-9]+\)
** ret
*/
void mov8 (int64_t *in, int64_t *out)
{
  v256di v = *(v256di*)in;
  *(v256di*)out = v;
}

/*
** mov9:
** li\s+[a-x0-9]+,512
** vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*t[au],\s*m[au]
** vle64\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),0\s*\([a-x0-9]+\)
** vse64\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),0\s*\([a-x0-9]+\)
** ret
*/
void mov9 (int64_t *in, int64_t *out)
{
  v512di v = *(v512di*)in;
  *(v512di*)out = v;
}