c++: normalizing ttp constraints [PR115656]
[official-gcc.git] / gcc / testsuite / gcc.target / powerpc / ssse3-palignr.c
blob e76bbefdc52448daff9b83a45f2d05658268bffb
1 /* { dg-do run } */
2 /* { dg-options "-O3 -mvsx -Wno-psabi" } */
3 /* { dg-additional-options "-mdejagnu-cpu=power8" { target { ! has_arch_pwr8 } } } */
4 /* { dg-require-effective-target p8vector_hw } */
6 #ifndef CHECK_H
7 #define CHECK_H "ssse3-check.h"
8 #endif
10 #ifndef TEST
11 #define TEST ssse3_test
12 #endif
14 #include CHECK_H
16 #include "ssse3-vals.h"
18 #include <tmmintrin.h>
19 #include <string.h>
#ifndef __AVX__
/* Test the 64-bit form.

   _mm_alignr_pi8 concatenates I1 (high 8 bytes) and I2 (low 8 bytes)
   and extracts 8 bytes starting at byte IMM into *R.  The intrinsic's
   shift count must be a compile-time constant, so a run-time IMM is
   dispatched through a switch to a call with the matching literal.  */
static void
ssse3_test_palignr (__m64 *i1, __m64 *i2, unsigned int imm, __m64 *r)
{
  /* Generate one `case N:` arm per legal literal shift.  */
#define PALIGNR64_CASE(N) \
    case N: \
      *r = _mm_alignr_pi8 (*i1, *i2, N); \
      break;

  switch (imm)
    {
      PALIGNR64_CASE (0)
      PALIGNR64_CASE (1)
      PALIGNR64_CASE (2)
      PALIGNR64_CASE (3)
      PALIGNR64_CASE (4)
      PALIGNR64_CASE (5)
      PALIGNR64_CASE (6)
      PALIGNR64_CASE (7)
      PALIGNR64_CASE (8)
      PALIGNR64_CASE (9)
      PALIGNR64_CASE (10)
      PALIGNR64_CASE (11)
      PALIGNR64_CASE (12)
      PALIGNR64_CASE (13)
      PALIGNR64_CASE (14)
      PALIGNR64_CASE (15)
    default:
      /* Any shift of 16 or more bytes yields an all-zero result, so a
	 single representative literal covers every larger IMM.  */
      *r = _mm_alignr_pi8 (*i1, *i2, 16);
      break;
    }
#undef PALIGNR64_CASE

  /* Clear the MMX state after using __m64 values.  */
  _mm_empty ();
}
#endif
/* Test the 128-bit form.

   _mm_alignr_epi8 concatenates I1 (high 16 bytes) and I2 (low 16
   bytes) and extracts 16 bytes starting at byte IMM into *R.  As with
   the 64-bit form, the shift count must be an immediate, so the
   run-time IMM is dispatched through a switch.  */
static void
ssse3_test_palignr128 (__m128i *i1, __m128i *i2, unsigned int imm, __m128i *r)
{
  /* Assumes incoming pointers are 16-byte aligned */

  /* Generate one `case N:` arm per legal literal shift.  */
#define PALIGNR128_CASE(N) \
    case N: \
      *r = _mm_alignr_epi8 (*i1, *i2, N); \
      break;

  switch (imm)
    {
      PALIGNR128_CASE (0)
      PALIGNR128_CASE (1)
      PALIGNR128_CASE (2)
      PALIGNR128_CASE (3)
      PALIGNR128_CASE (4)
      PALIGNR128_CASE (5)
      PALIGNR128_CASE (6)
      PALIGNR128_CASE (7)
      PALIGNR128_CASE (8)
      PALIGNR128_CASE (9)
      PALIGNR128_CASE (10)
      PALIGNR128_CASE (11)
      PALIGNR128_CASE (12)
      PALIGNR128_CASE (13)
      PALIGNR128_CASE (14)
      PALIGNR128_CASE (15)
      PALIGNR128_CASE (16)
      PALIGNR128_CASE (17)
      PALIGNR128_CASE (18)
      PALIGNR128_CASE (19)
      PALIGNR128_CASE (20)
      PALIGNR128_CASE (21)
      PALIGNR128_CASE (22)
      PALIGNR128_CASE (23)
      PALIGNR128_CASE (24)
      PALIGNR128_CASE (25)
      PALIGNR128_CASE (26)
      PALIGNR128_CASE (27)
      PALIGNR128_CASE (28)
      PALIGNR128_CASE (29)
      PALIGNR128_CASE (30)
      PALIGNR128_CASE (31)
    default:
      /* Any shift of 32 or more bytes yields an all-zero result, so a
	 single representative literal covers every larger IMM.  */
      *r = _mm_alignr_epi8 (*i1, *i2, 32);
      break;
    }
#undef PALIGNR128_CASE
}
/* Routine to manually compute the results.

   Scalar reference for the 128-bit palignr: conceptually concatenate
   I2 (low 16 bytes) and I1 (high 16 bytes) into a 32-byte buffer and
   copy the 16 bytes starting at byte IMM into R.  Bytes shifted in
   from beyond the buffer (IMM + i >= 32) are zero.  */
static void
compute_correct_result_128 (signed char *i1, signed char *i2, unsigned int imm,
			    signed char *r)
{
  signed char buf [32];
  int i;

  memcpy (&buf[0], i2, 16);
  memcpy (&buf[16], i1, 16);

  for (i = 0; i < 16; i++)
    /* The IMM >= 32 test also protects IMM + I against unsigned
       wraparound for very large IMM values.  */
    if (imm >= 32 || imm + i >= 32)
      r[i] = 0;
    else
      r[i] = buf[imm + i];
}
#ifndef __AVX__
/* Scalar reference for the 64-bit palignr pair used by TEST: the
   intrinsic operates on each 8-byte half independently, concatenating
   the corresponding halves of I2 (low) and I1 (high) into a 16-byte
   buffer and extracting the 8 bytes starting at byte IMM.  Bytes
   beyond the buffer (IMM + i >= 16) are zero.  */
static void
compute_correct_result_64 (signed char *i1, signed char *i2, unsigned int imm,
			   signed char *r)
{
  signed char buf [16];
  int half, i;

  /* The two 8-byte halves are processed identically.  */
  for (half = 0; half < 2; half++)
    {
      memcpy (&buf[0], &i2[half * 8], 8);
      memcpy (&buf[8], &i1[half * 8], 8);

      for (i = 0; i < 8; i++)
	/* The IMM >= 16 test also protects IMM + I against unsigned
	   wraparound for very large IMM values.  */
	if (imm >= 16 || imm + i >= 16)
	  r[half * 8 + i] = 0;
	else
	  r[half * 8 + i] = buf[imm + i];
    }
}
#endif
243 static void
244 TEST (void)
246 int i;
247 union data r __attribute__ ((aligned(16)));
248 union data ck;
249 unsigned int imm;
250 int fail = 0;
252 for (i = 0; i < ARRAY_SIZE (vals) - 1; i++)
253 for (imm = 0; imm < 100; imm++)
255 #ifndef __AVX__
256 /* Manually compute the result */
257 compute_correct_result_64 (&vals[i + 0].b[0],
258 &vals[i + 1].b[0], imm, &ck.b[0]);
260 /* Run the 64-bit tests */
261 ssse3_test_palignr (&vals[i + 0].ll[0],
262 &vals[i + 1].ll[0], imm, &r.ll[0]);
263 ssse3_test_palignr (&vals[i + 0].ll[1],
264 &vals[i + 1].ll[1], imm, &r.ll[1]);
265 fail += chk_128 (ck.m[0], r.m[0]);
266 #endif
268 /* Recompute the results for 128-bits */
269 compute_correct_result_128 (&vals[i + 0].b[0],
270 &vals[i + 1].b[0], imm, &ck.b[0]);
272 /* Run the 128-bit tests */
273 ssse3_test_palignr128 (&vals[i + 0].m[0],
274 &vals[i + 1].m[0], imm, &r.m[0]);
275 fail += chk_128 (ck.m[0], r.m[0]);
278 if (fail != 0)
279 abort ();