/* { dg-require-effective-target vect_float } */
8 __attribute__ ((noinline
))
9 void bar (float *pd
, float *pa
, float *pb
, float *pc
)
14 for (i
= 0; i
< N
; i
++)
16 if (pa
[i
] != (pb
[i
] * pc
[i
]))
/* Store the elementwise product pb[i] * pc[i] into pa[i] for the first
   n elements.  All pointer parameters are __restrict__-qualified so the
   vectorizer does not have to version the loop for aliasing.
   Returns 0.

   NOTE(review): garbled extract -- the braces, the declaration of `i'
   and the return statement were dropped from this chunk, and stray
   source line numbers were fused into the text.  The dg-final comment
   at the end of the file mentions "two unaligned stores", so the
   dropped lines most likely also stored through `pd' (unused in what
   is visible) -- TODO restore from the upstream testsuite file.  */
__attribute__ ((noinline)) int
main1 (int n, float * __restrict__ pd, float * __restrict__ pa,
       float * __restrict__ pb, float * __restrict__ pc)
{
  int i;

  for (i = 0; i < n; i++)
    pa[i] = pb[i] * pc[i];

  return 0;
}
/* NOTE(review): garbled extract of the interior of main() -- the
   `int main' header, any setup call (e.g. check_vect ()) and the final
   return (original lines 43-44, 49-51, 54-57, per the fused numbers)
   are missing from this chunk, and stray source line numbers
   (45..48, 52, 53) were fused into the text.  Code kept byte-identical;
   only comments added.  */
/* Aligned destination for the product store in main1.  */
45 float a
[N
] __attribute__ ((__aligned__(__BIGGEST_ALIGNMENT__
)));
/* d has N+1 elements so that &d[1] passed below is deliberately
   misaligned despite the aligned attribute on d itself.  */
46 float d
[N
+1] __attribute__ ((__aligned__(__BIGGEST_ALIGNMENT__
)));
/* Aligned inputs: b[i] = 3*i, c[i] = i (20 initializers -- presumably
   N == 20; confirm against the missing #define).  */
47 float b
[N
] = {0,3,6,9,12,15,18,21,24,27,30,33,36,39,42,45,48,51,54,57};
48 float c
[N
] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19};
/* Run main1 with the misaligned pd (&d[1]): once over all N elements,
   then over N-2 so the trip count is not a multiple of the vector
   factor.  */
52 main1 (N
,&d
[1],a
,b
,c
);
53 main1 (N
-2,&d
[1],a
,b
,c
);
/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" } } */
/* { dg-final { scan-tree-dump-times "Alignment of access forced using peeling" 0 "vect" { xfail {vect_element_align} } } } */

/* For targets that support unaligned loads we version for the two unaligned
   stores and generate misaligned accesses for the loads. For targets that
   don't support unaligned loads we version for all four accesses. */

/* { dg-final { scan-tree-dump-times "Vectorizing an unaligned access" 2 "vect" { xfail { vect_no_align || vect_element_align} } } } */
/* { dg-final { scan-tree-dump-times "Alignment of access forced using versioning" 2 "vect" { xfail { vect_no_align || vect_element_align } } } } */
/* { dg-final { scan-tree-dump-times "Vectorizing an unaligned access" 0 "vect" { target { vect_no_align && { ! vect_hw_misalign } } } } } */
/* { dg-final { scan-tree-dump-times "Alignment of access forced using versioning" 4 "vect" { target { vect_no_align && { ! vect_hw_misalign } } } } } */