testcase: Add testcase for PR 117330 [PR117330]
gcc/testsuite/gcc.target/aarch64/vec-init-9.c
/* { dg-do compile } */
/* { dg-options "-O" } */
/* { dg-final { check-function-bodies "**" "" "" { target lp64 } } } */

#include <arm_neon.h>

void ext();
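
/* The functions below build 128-bit vectors from scalar, pointer and
   64-bit vector operands.  check-function-bodies verifies that each
   initialization is emitted as the expected short instruction sequence
   (fmov/ins, ld1 lane loads, stp stores, uzp1 concatenations) rather
   than being spilled through the stack.  */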

/*
** s64q_1:
**	fmov	d0, x0
**	ins	v0\.d\[1\], x1
**	ret
*/
int64x2_t s64q_1(int64_t a0, int64_t a1) {
  if (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
    return (int64x2_t) { a1, a0 };
  else
    return (int64x2_t) { a0, a1 };
}

/*
** s64q_2:
**	fmov	d0, x0
**	ld1	{v0\.d}\[1\], \[x1\]
**	ret
*/
int64x2_t s64q_2(int64_t a0, int64_t *ptr) {
  if (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
    return (int64x2_t) { ptr[0], a0 };
  else
    return (int64x2_t) { a0, ptr[0] };
}

/*
** s64q_3:
**	ldr	d0, \[x0\]
**	ins	v0\.d\[1\], x1
**	ret
*/
int64x2_t s64q_3(int64_t *ptr, int64_t a1) {
  if (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
    return (int64x2_t) { a1, ptr[0] };
  else
    return (int64x2_t) { ptr[0], a1 };
}

/*
** s64q_4:
**	stp	x1, x2, \[x0\]
**	ret
*/
void s64q_4(int64x2_t *res, int64_t a0, int64_t a1) {
  res[0] = (int64x2_t) { a0, a1 };
}

/*
** s64q_5:
**	stp	x1, x2, \[x0, #?8\]
**	ret
*/
void s64q_5(uintptr_t res, int64_t a0, int64_t a1) {
  *(int64x2_t *)(res + 8) = (int64x2_t) { a0, a1 };
}
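
/* In the *_6 variants the call to ext() forces the newly built vector to
   live across a call, so it is expected to be spilled with stp and
   reloaded into q0 with ldr rather than rebuilt after the call.  */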

/*
** s64q_6:
**	...
**	stp	x0, x1, .*
**	...
**	ldr	q0, .*
**	...
**	ret
*/
int64x2_t s64q_6(int64_t a0, int64_t a1) {
  int64x2_t res = { a0, a1 };
  ext ();
  return res;
}
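
/* For floating-point elements the arguments already arrive in SIMD
   registers (d0, d1), so combining them is expected to use uzp1 on the
   doubleword halves instead of fmov/ins from general registers.  */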

/*
** f64q_1:
**	uzp1	v0\.2d, v0\.2d, v1\.2d
**	ret
*/
float64x2_t f64q_1(float64_t a0, float64_t a1) {
  if (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
    return (float64x2_t) { a1, a0 };
  else
    return (float64x2_t) { a0, a1 };
}

/*
** f64q_2:
**	ld1	{v0\.d}\[1\], \[x0\]
**	ret
*/
float64x2_t f64q_2(float64_t a0, float64_t *ptr) {
  if (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
    return (float64x2_t) { ptr[0], a0 };
  else
    return (float64x2_t) { a0, ptr[0] };
}

/*
** f64q_3:
**	ldr	d0, \[x0\]
**	uzp1	v0\.2d, v0\.2d, v1\.2d
**	ret
*/
float64x2_t f64q_3(float64_t a0, float64_t a1, float64_t *ptr) {
  if (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
    return (float64x2_t) { a1, ptr[0] };
  else
    return (float64x2_t) { ptr[0], a1 };
}

/*
** f64q_4:
**	stp	d0, d1, \[x0\]
**	ret
*/
void f64q_4(float64x2_t *res, float64_t a0, float64_t a1) {
  res[0] = (float64x2_t) { a0, a1 };
}

/*
** f64q_5:
**	stp	d0, d1, \[x0, #?8\]
**	ret
*/
void f64q_5(uintptr_t res, float64_t a0, float64_t a1) {
  *(float64x2_t *)(res + 8) = (float64x2_t) { a0, a1 };
}

/*
** f64q_6:
**	...
**	stp	d0, d1, .*
**	...
**	ldr	q0, .*
**	...
**	ret
*/
float64x2_t f64q_6(float64_t a0, float64_t a1) {
  float64x2_t res = { a0, a1 };
  ext ();
  return res;
}
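
/* The remaining functions combine two 64-bit input vectors with
   vcombine_*; these too arrive in d0 and d1, so the same uzp1, ld1 and
   stp patterns are expected.  */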

/*
** s32q_1:
**	uzp1	v0\.2d, v0\.2d, v1\.2d
**	ret
*/
int32x4_t s32q_1(int32x2_t a0, int32x2_t a1) {
  return vcombine_s32 (a0, a1);
}

/*
** s32q_2:
**	ld1	{v0\.d}\[1\], \[x0\]
**	ret
*/
int32x4_t s32q_2(int32x2_t a0, int32x2_t *ptr) {
  return vcombine_s32 (a0, ptr[0]);
}

/*
** s32q_3:
**	ldr	d0, \[x0\]
**	uzp1	v0\.2d, v0\.2d, v1\.2d
**	ret
*/
int32x4_t s32q_3(int32x2_t a0, int32x2_t a1, int32x2_t *ptr) {
  return vcombine_s32 (ptr[0], a1);
}

/*
** s32q_4:
**	stp	d0, d1, \[x0\]
**	ret
*/
void s32q_4(int32x4_t *res, int32x2_t a0, int32x2_t a1) {
  if (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
    res[0] = vcombine_s32 (a1, a0);
  else
    res[0] = vcombine_s32 (a0, a1);
}

/*
** s32q_5:
**	stp	d0, d1, \[x0, #?8\]
**	ret
*/
void s32q_5(uintptr_t res, int32x2_t a0, int32x2_t a1) {
  if (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
    *(int32x4_t *)(res + 8) = vcombine_s32 (a1, a0);
  else
    *(int32x4_t *)(res + 8) = vcombine_s32 (a0, a1);
}

/*
** s32q_6:
**	...
**	stp	d0, d1, .*
**	...
**	ldr	q0, .*
**	...
**	ret
*/
int32x4_t s32q_6(int32x2_t a0, int32x2_t a1) {
  int32x4_t res = (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
		   ? vcombine_s32 (a1, a0)
		   : vcombine_s32 (a0, a1));
  ext ();
  return res;
}

/*
** f32q_1:
**	uzp1	v0\.2d, v0\.2d, v1\.2d
**	ret
*/
float32x4_t f32q_1(float32x2_t a0, float32x2_t a1) {
  return vcombine_f32 (a0, a1);
}

/*
** f32q_2:
**	ld1	{v0\.d}\[1\], \[x0\]
**	ret
*/
float32x4_t f32q_2(float32x2_t a0, float32x2_t *ptr) {
  return vcombine_f32 (a0, ptr[0]);
}

/*
** f32q_3:
**	ldr	d0, \[x0\]
**	uzp1	v0\.2d, v0\.2d, v1\.2d
**	ret
*/
float32x4_t f32q_3(float32x2_t a0, float32x2_t a1, float32x2_t *ptr) {
  return vcombine_f32 (ptr[0], a1);
}

/*
** f32q_4:
**	stp	d0, d1, \[x0\]
**	ret
*/
void f32q_4(float32x4_t *res, float32x2_t a0, float32x2_t a1) {
  if (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
    res[0] = vcombine_f32 (a1, a0);
  else
    res[0] = vcombine_f32 (a0, a1);
}

/*
** f32q_5:
**	stp	d0, d1, \[x0, #?8\]
**	ret
*/
void f32q_5(uintptr_t res, float32x2_t a0, float32x2_t a1) {
  if (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
    *(float32x4_t *)(res + 8) = vcombine_f32 (a1, a0);
  else
    *(float32x4_t *)(res + 8) = vcombine_f32 (a0, a1);
}

/*
** f32q_6:
**	...
**	stp	d0, d1, .*
**	...
**	ldr	q0, .*
**	...
**	ret
*/
float32x4_t f32q_6(float32x2_t a0, float32x2_t a1) {
  float32x4_t res = (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
		     ? vcombine_f32 (a1, a0)
		     : vcombine_f32 (a0, a1));
  ext ();
  return res;
}