PR middle-end/77357 - strlen of constant strings not folded
[official-gcc.git] / gcc / testsuite / gcc.dg / tree-ssa / forwprop-20.c
/* { dg-do compile } */
/* { dg-require-effective-target double64 } */
/* { dg-options "-O -fdump-tree-forwprop1" } */

#include <stdint.h>

/* All of these optimizations happen for unsupported vector modes as a
   consequence of the lowering pass.  We need to test with a vector mode
   that is supported by default on at least some architectures, or make
   the test target specific so we can pass a flag like -mavx.  */

typedef double vecf __attribute__ ((vector_size (2 * sizeof (double))));
typedef int64_t veci __attribute__ ((vector_size (2 * sizeof (int64_t))));

void f (double d, vecf* r)
{
  vecf x = { -d, 5 };
  vecf y = { 1, 4 };
  veci m = { 2, 0 };
  *r = __builtin_shuffle (x, y, m); // { 1, -d }
}

void g (float d, vecf* r)
{
  vecf x = { d, 5 };
  vecf y = { 1, 4 };
  veci m = { 2, 1 };
  *r = __builtin_shuffle (x, y, m); // { 1, 5 }
}

void h (double d, vecf* r)
{
  vecf x = { d + 1, 5 };
  vecf y = { 1, 4 };
  veci m = { 2, 0 };
  *r = __builtin_shuffle (y, x, m); // { d + 1, 1 }
}

void i (float d, vecf* r)
{
  vecf x = { d, 5 };
  veci m = { 1, 0 };
  *r = __builtin_shuffle (x, m); // { 5, d }
}

void j (vecf* r)
{
  vecf y = { 1, 2 };
  veci m = { 0, 0 };
  *r = __builtin_shuffle (y, m); // { 1, 1 }
}

void k (vecf* r)
{
  vecf x = { 3, 4 };
  vecf y = { 1, 2 };
  veci m = { 3, 0 };
  *r = __builtin_shuffle (x, y, m); // { 2, 3 }
}

void l (double d, vecf* r)
{
  vecf x = { -d, 5 };
  vecf y = { d, 4 };
  veci m = { 2, 0 };
  *r = __builtin_shuffle (x, y, m); // { d, -d }
}

/* { dg-final { scan-tree-dump-not "VEC_PERM_EXPR" "forwprop1" } } */
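
For illustration only, and not part of the committed test: based on the inline result comments, each store above is expected to be folded by forwprop1 into a direct construction of the permuted vector, so no VEC_PERM_EXPR survives in the dump. A minimal sketch of the expected folded form for two of the cases is below; the names f_folded and k_folded are invented here for illustration.

/* Hypothetical folded equivalents, for illustration only.  */
void f_folded (double d, vecf* r)
{
  vecf t = { 1, -d };  /* what f's shuffle is expected to reduce to */
  *r = t;
}

void k_folded (vecf* r)
{
  vecf t = { 2, 3 };   /* both lanes constant, so the permutation folds away entirely */
  *r = t;
}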