/* gcc/testsuite/gcc.target/aarch64/sve/store_scalar_offset_1.c
   (scraped from commit "Add support for conditional reductions using SVE CLASTB").  */
/* { dg-do assemble { target aarch64_asm_sve_ok } } */
/* { dg-options "-O3 -msve-vector-bits=256 --save-temps" } */

#include <stdint.h>
/* 256-bit fixed-length vector types, matching -msve-vector-bits=256:
   4 x int64, 8 x int32, 16 x int16 and 32 x int8 lanes respectively.  */
typedef int64_t vnx2di __attribute__((vector_size (32)));
typedef int32_t vnx4si __attribute__((vector_size (32)));
typedef int16_t vnx8hi __attribute__((vector_size (32)));
typedef int8_t vnx16qi __attribute__((vector_size (32)));
11 void sve_store_64_z_lsl (uint64_t *a, unsigned long i)
13 asm volatile ("" : "=w" (*(vnx2di *) &a[i]));
16 void sve_store_64_s_lsl (int64_t *a, signed long i)
18 asm volatile ("" : "=w" (*(vnx2di *) &a[i]));
21 void sve_store_32_z_lsl (uint32_t *a, unsigned long i)
23 asm volatile ("" : "=w" (*(vnx4si *) &a[i]));
26 void sve_store_32_s_lsl (int32_t *a, signed long i)
28 asm volatile ("" : "=w" (*(vnx4si *) &a[i]));
31 void sve_store_16_z_lsl (uint16_t *a, unsigned long i)
33 asm volatile ("" : "=w" (*(vnx8hi *) &a[i]));
36 void sve_store_16_s_lsl (int16_t *a, signed long i)
38 asm volatile ("" : "=w" (*(vnx8hi *) &a[i]));
41 /* ??? The other argument order leads to a redundant move. */
42 void sve_store_8_z (unsigned long i, uint8_t *a)
44 asm volatile ("" : "=w" (*(vnx16qi *) &a[i]));
47 void sve_store_8_s (signed long i, int8_t *a)
49 asm volatile ("" : "=w" (*(vnx16qi *) &a[i]));
/* { dg-final { scan-assembler-times {\tst1d\tz0\.d, p[0-7], \[x0, x1, lsl 3\]\n} 2 } } */
/* { dg-final { scan-assembler-times {\tst1w\tz0\.s, p[0-7], \[x0, x1, lsl 2\]\n} 2 } } */
/* { dg-final { scan-assembler-times {\tst1h\tz0\.h, p[0-7], \[x0, x1, lsl 1\]\n} 2 } } */
/* { dg-final { scan-assembler-times {\tst1b\tz0\.b, p[0-7], \[x1, x0\]\n} 2 } } */