/* { dg-do run { target powerpc*-*-* } } */
/* { dg-xfail-if "" { "powerpc-*-eabispe*" "powerpc-ibm-aix*" } { "*" } { "" } } */
/* { dg-options "-maltivec -mabi=altivec -O2" } */
/* Check that "easy" AltiVec constants are correctly synthesized.  */
#include <string.h>

#include "altivec_check.h"

extern void abort (void);
/* Unsigned 16-byte vector types, one per AltiVec element width
   (16 x QI, 8 x HI, 4 x SI).  */
typedef __attribute__ ((vector_size (16))) unsigned char v16qi;
typedef __attribute__ ((vector_size (16))) unsigned short v8hi;
typedef __attribute__ ((vector_size (16))) unsigned int v4si;

/* Scratch byte image the vspltis? emulators write into; 16-byte
   aligned to match vector alignment.  */
char w[16] __attribute__((aligned(16)));
/* Emulate the vspltis? instructions on a 16-byte array of chars.  */
/* Emulate vspltisb: splat VAL into each of the 16 bytes of V.  */
void vspltisb (char *v, int val)
{
  int i;
  for (i = 0; i < 16; i++)
    v[i] = val;
}
/* Emulate vspltish: splat VAL into each big-endian halfword of V
   (high byte gets the sign extension, low byte the value).  */
void vspltish (char *v, int val)
{
  int i;
  for (i = 0; i < 16; i += 2)
    v[i] = val >> 7, v[i + 1] = val;
}
/* Emulate vspltisw: splat VAL into each big-endian word of V
   (three sign-extension bytes, then the value byte).  */
void vspltisw (char *v, int val)
{
  int i;
  for (i = 0; i < 16; i += 4)
    v[i] = v[i + 1] = v[i + 2] = val >> 7, v[i + 3] = val;
}
/* Use three different check functions for each mode-instruction pair.
   The callers have no typecasting and no addressable vectors, to make
   the test more robust.  */
46 void __attribute__ ((noinline
)) check_v16qi (v16qi v1
, char *v2
)
48 if (memcmp (&v1
, v2
, 16))
52 void __attribute__ ((noinline
)) check_v8hi (v8hi v1
, char *v2
)
54 if (memcmp (&v1
, v2
, 16))
58 void __attribute__ ((noinline
)) check_v4si (v4si v1
, char *v2
)
60 if (memcmp (&v1
, v2
, 16))
67 void v16qi_vspltisb ()
69 v16qi v
= { 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15 };
74 void v16qi_vspltisb_neg ()
76 v16qi v
= { -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5 };
81 void v16qi_vspltisb_addself ()
83 v16qi v
= { 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30 };
88 void v16qi_vspltisb_neg_addself ()
90 v16qi v
= { -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24 };
95 void v16qi_vspltish ()
97 v16qi v
= { 0, 15, 0, 15, 0, 15, 0, 15, 0, 15, 0, 15, 0, 15, 0, 15 };
102 void v16qi_vspltish_addself ()
104 v16qi v
= { 0, 30, 0, 30, 0, 30, 0, 30, 0, 30, 0, 30, 0, 30, 0, 30 };
109 void v16qi_vspltish_neg ()
111 v16qi v
= { -1, -5, -1, -5, -1, -5, -1, -5, -1, -5, -1, -5, -1, -5, -1, -5 };
116 void v16qi_vspltisw ()
118 v16qi v
= { 0, 0, 0, 15, 0, 0, 0, 15, 0, 0, 0, 15, 0, 0, 0, 15 };
123 void v16qi_vspltisw_addself ()
125 v16qi v
= { 0, 0, 0, 30, 0, 0, 0, 30, 0, 0, 0, 30, 0, 0, 0, 30 };
130 void v16qi_vspltisw_neg ()
132 v16qi v
= { -1, -1, -1, -5, -1, -1, -1, -5, -1, -1, -1, -5, -1, -1, -1, -5 };
140 void v8hi_vspltisb ()
142 v8hi v
= { 0x0F0F, 0x0F0F, 0x0F0F, 0x0F0F, 0x0F0F, 0x0F0F, 0x0F0F, 0x0F0F };
147 void v8hi_vspltisb_addself ()
149 v8hi v
= { 0x1E1E, 0x1E1E, 0x1E1E, 0x1E1E, 0x1E1E, 0x1E1E, 0x1E1E, 0x1E1E };
154 void v8hi_vspltisb_neg ()
156 v8hi v
= { 0xFBFB, 0xFBFB, 0xFBFB, 0xFBFB, 0xFBFB, 0xFBFB, 0xFBFB, 0xFBFB };
161 void v8hi_vspltish ()
163 v8hi v
= { 15, 15, 15, 15, 15, 15, 15, 15 };
168 void v8hi_vspltish_neg ()
170 v8hi v
= { -5, -5, -5, -5, -5, -5, -5, -5 };
175 void v8hi_vspltish_addself ()
177 v8hi v
= { 30, 30, 30, 30, 30, 30, 30, 30 };
182 void v8hi_vspltish_neg_addself ()
184 v8hi v
= { -24, -24, -24, -24, -24, -24, -24, -24 };
189 void v8hi_vspltisw ()
191 v8hi v
= { 0, 15, 0, 15, 0, 15, 0, 15 };
196 void v8hi_vspltisw_addself ()
198 v8hi v
= { 0, 30, 0, 30, 0, 30, 0, 30 };
203 void v8hi_vspltisw_neg ()
205 v8hi v
= { -1, -5, -1, -5, -1, -5, -1, -5 };
212 void v4si_vspltisb ()
214 v4si v
= { 0x0F0F0F0F, 0x0F0F0F0F, 0x0F0F0F0F, 0x0F0F0F0F };
219 void v4si_vspltisb_addself ()
221 v4si v
= { 0x1E1E1E1E, 0x1E1E1E1E, 0x1E1E1E1E, 0x1E1E1E1E };
226 void v4si_vspltisb_neg ()
228 v4si v
= { 0xFBFBFBFB, 0xFBFBFBFB, 0xFBFBFBFB, 0xFBFBFBFB };
233 void v4si_vspltish ()
235 v4si v
= { 0x000F000F, 0x000F000F, 0x000F000F, 0x000F000F };
240 void v4si_vspltish_addself ()
242 v4si v
= { 0x001E001E, 0x001E001E, 0x001E001E, 0x001E001E };
247 void v4si_vspltish_neg ()
249 v4si v
= { 0xFFFBFFFB, 0xFFFBFFFB, 0xFFFBFFFB, 0xFFFBFFFB };
254 void v4si_vspltisw ()
256 v4si v
= { 15, 15, 15, 15 };
261 void v4si_vspltisw_neg ()
263 v4si v
= { -5, -5, -5, -5 };
268 void v4si_vspltisw_addself ()
270 v4si v
= { 30, 30, 30, 30 };
275 void v4si_vspltisw_neg_addself ()
277 v4si v
= { -24, -24, -24, -24 };
/* Run every constant-synthesis check; each caller aborts on mismatch.
   NOTE(review): the function header and nine interleaved calls (the
   plain v*_vspltisb/ish/isw entries) were lost in extraction and are
   restored here in the order the surviving calls imply — verify.  */
int main ()
{
  altivec_check (); /* Exit if hardware doesn't support AltiVec.  */

  v16qi_vspltisb ();
  v16qi_vspltisb_neg ();
  v16qi_vspltisb_addself ();
  v16qi_vspltisb_neg_addself ();
  v16qi_vspltish ();
  v16qi_vspltish_addself ();
  v16qi_vspltish_neg ();
  v16qi_vspltisw ();
  v16qi_vspltisw_addself ();
  v16qi_vspltisw_neg ();

  v8hi_vspltisb ();
  v8hi_vspltisb_addself ();
  v8hi_vspltisb_neg ();
  v8hi_vspltish ();
  v8hi_vspltish_neg ();
  v8hi_vspltish_addself ();
  v8hi_vspltish_neg_addself ();
  v8hi_vspltisw ();
  v8hi_vspltisw_addself ();
  v8hi_vspltisw_neg ();

  v4si_vspltisb ();
  v4si_vspltisb_addself ();
  v4si_vspltisb_neg ();
  v4si_vspltish ();
  v4si_vspltish_addself ();
  v4si_vspltish_neg ();
  v4si_vspltisw ();
  v4si_vspltisw_neg ();
  v4si_vspltisw_addself ();
  v4si_vspltisw_neg_addself ();

  return 0;
}