gcc/testsuite/gcc.target/i386/avx512bw-vpalignr-2.c
/* { dg-do run } */
/* { dg-options "-O2 -mavx512bw" } */
/* { dg-require-effective-target avx512bw } */

#define AVX512BW
#include "avx512f-helper.h"

#include <string.h>

#define SIZE (AVX512F_LEN / 8)
#include "avx512f-mask-type.h"
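
/* Byte shift count passed as the immediate operand of the alignr
   intrinsics below.  */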
#define N 0x3

void
CALC (char *src1, char *src2, char *dst)
{
  /* The result for the EVEX.U1.512 version consists of four blocks,
     each 128 bits long.  */
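  /* Illustrative note (not in the original test): within each 128-bit
     lane, buf holds the src2 lane in bytes 0..15 and the src1 lane in
     bytes 16..31, so bout[i] = buf[N + i] shifts that concatenation
     right by N bytes.  For N = 3: bout[0] = src2[3], bout[12] = src2[15],
     bout[13] = src1[0], bout[15] = src1[2].  */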
  unsigned block_len = 16;
  unsigned double_block_len = 32;
  unsigned shift = 0;
  char buf[double_block_len];
  char *bout = dst;
  int bits, i;

  for (bits = 0; bits < AVX512F_LEN; bits += 128)
    {
      memcpy (&buf[0], src2 + shift, block_len);
      memcpy (&buf[block_len], src1 + shift, block_len);

      for (i = 0; i < block_len; i++)
        /* A shift count of 32 or more, or a read past the 32-byte
           buffer, produces a zero byte.  */
        if (N >= 32 || N + i >= 32)
          bout[i] = 0;
        else
          bout[i] = buf[N + i];

      shift += block_len;
      bout += block_len;
    }
}

void
TEST (void)
{
  UNION_TYPE (AVX512F_LEN, i_b) s1, s2, res1, res2, res3;
  MASK_TYPE mask = MASK_VALUE;
  char res_ref[SIZE];
  int i;

  for (i = 0; i < SIZE; i++)
    {
      s1.a[i] = i;
      s2.a[i] = i * 2;
      res2.a[i] = DEFAULT_VALUE;
    }
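
  /* Exercise the plain, merge-masking and zero-masking forms of the
     intrinsic.  INTRINSIC () pastes the vector-length-specific name
     (e.g. _mm512_alignr_epi8 when AVX512F_LEN is 512), as set up by
     avx512f-helper.h.  */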
  res1.x = INTRINSIC (_alignr_epi8) (s1.x, s2.x, N);
  res2.x = INTRINSIC (_mask_alignr_epi8) (res2.x, mask, s1.x, s2.x, N);
  res3.x = INTRINSIC (_maskz_alignr_epi8) (mask, s1.x, s2.x, N);
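
  /* Compute the expected result with the scalar reference
     implementation above.  */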
  CALC (s1.a, s2.a, res_ref);

  if (UNION_CHECK (AVX512F_LEN, i_b) (res1, res_ref))
    abort ();
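
  /* Per the helpers in avx512f-mask-type.h, MASK_MERGE resets elements
     whose mask bit is clear to DEFAULT_VALUE, mirroring merge-masking.  */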
  MASK_MERGE (i_b) (res_ref, mask, SIZE);
  if (UNION_CHECK (AVX512F_LEN, i_b) (res2, res_ref))
    abort ();
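
  /* MASK_ZERO likewise zeroes elements whose mask bit is clear,
     mirroring zero-masking.  */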
  MASK_ZERO (i_b) (res_ref, mask, SIZE);
  if (UNION_CHECK (AVX512F_LEN, i_b) (res3, res_ref))
    abort ();
}