/* { dg-require-effective-target vect_float } */
8 float fa
[N
] __attribute__ ((__aligned__(__BIGGEST_ALIGNMENT__
)));
9 float fb
[N
+4] __attribute__ ((__aligned__(__BIGGEST_ALIGNMENT__
)));
10 float fc
[N
] __attribute__ ((__aligned__(__BIGGEST_ALIGNMENT__
)));
/* Check handling of accesses for which the "initial condition" -
   the expression that represents the first location accessed - is
   more involved than just an ssa_name.  */
/* NOTE(review): this function is truncated/garbled in this chunk --
   the opening brace, the declarations of `i` and `q`, the loop bodies,
   the abort() calls and the closing brace are not visible here.  The
   comments below describe only what is visible; do not treat this span
   as a complete definition.  */
/* noinline keeps the loops in a separate function so the vectorizer
   processes them independently of main.  */
16 __attribute__ ((noinline
)) int
/* Three restrict-qualified float pointers: restrict promises no
   aliasing, which the vectorizer needs to vectorize the accesses.  */
17 main1 (float * __restrict__ pa
, float * __restrict__ pb
, float *__restrict__ pc
)
/* First loop over N elements (body not visible in this chunk).  */
21 for (i
= 0; i
< N
; i
++)
/* Compiler barriers: prevent the loops on either side from being
   merged or the stores/loads from being optimized away.  */
25 asm volatile ("" ::: "memory");
30 asm volatile ("" ::: "memory");
/* Second loop over N elements (body not visible in this chunk).  */
33 for (i
= 0; i
< N
; i
++)
/* Verification loop: checks pa[i] against q[i] * pc[i].
   NOTE(review): `q` is declared in a line missing from this chunk --
   presumably a pointer offset into pb; confirm against the full file.  */
38 for (i
= 0; i
< N
; i
++)
40 if (pa
[i
] != q
[i
] * pc
[i
])
/* For targets that don't support misaligned loads we version all
   three accesses (peeling to align the store will not force the
   two loads to be aligned).  */
/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" } } */
62 /* Uncomment when this testcase gets vectorized again:
63 dg-final { scan-tree-dump-times "Vectorizing an unaligned access" 2 "vect" { xfail vect_no_align } }
64 dg-final { scan-tree-dump-times "Alignment of access forced using peeling" 1 "vect" { xfail vect_no_align } }
65 dg-final { scan-tree-dump-times "Alignment of access forced using versioning." 3 "vect" { target vect_no_align } }