From 8614cbb253484e28c3eb20cde4d1067aad56de58 Mon Sep 17 00:00:00 2001 From: Juzhe-Zhong Date: Thu, 30 Nov 2023 10:36:30 +0800 Subject: [PATCH] RISC-V: Support highpart overlap for floating-point widen instructions This patch leverages the approach of vwcvt/vext.vf2 which has been approved. Their approaches are totally the same. Tested no regression and committed. PR target/112431 gcc/ChangeLog: * config/riscv/vector.md: Add widening overlap. gcc/testsuite/ChangeLog: * gcc.target/riscv/rvv/base/pr112431-10.c: New test. * gcc.target/riscv/rvv/base/pr112431-11.c: New test. * gcc.target/riscv/rvv/base/pr112431-12.c: New test. * gcc.target/riscv/rvv/base/pr112431-13.c: New test. * gcc.target/riscv/rvv/base/pr112431-14.c: New test. * gcc.target/riscv/rvv/base/pr112431-15.c: New test. * gcc.target/riscv/rvv/base/pr112431-7.c: New test. * gcc.target/riscv/rvv/base/pr112431-8.c: New test. * gcc.target/riscv/rvv/base/pr112431-9.c: New test. --- gcc/config/riscv/vector.md | 78 +++++---- .../gcc.target/riscv/rvv/base/pr112431-10.c | 104 ++++++++++++ .../gcc.target/riscv/rvv/base/pr112431-11.c | 68 ++++++++ .../gcc.target/riscv/rvv/base/pr112431-12.c | 51 ++++++ .../gcc.target/riscv/rvv/base/pr112431-13.c | 188 +++++++++++++++++++++ .../gcc.target/riscv/rvv/base/pr112431-14.c | 119 +++++++++++++ .../gcc.target/riscv/rvv/base/pr112431-15.c | 86 ++++++++++ .../gcc.target/riscv/rvv/base/pr112431-7.c | 106 ++++++++++++ .../gcc.target/riscv/rvv/base/pr112431-8.c | 68 ++++++++ .../gcc.target/riscv/rvv/base/pr112431-9.c | 51 ++++++ 10 files changed, 882 insertions(+), 37 deletions(-) create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-10.c create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-11.c create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-12.c create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-13.c create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-14.c create mode 100644 
gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-15.c create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-7.c create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-8.c create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-9.c diff --git a/gcc/config/riscv/vector.md b/gcc/config/riscv/vector.md index 74716c73e98..6b891c11324 100644 --- a/gcc/config/riscv/vector.md +++ b/gcc/config/riscv/vector.md @@ -7622,84 +7622,88 @@ ;; ------------------------------------------------------------------------------- (define_insn "@pred_widen_fcvt_x_f" - [(set (match_operand:VWCONVERTI 0 "register_operand" "=&vr, &vr") + [(set (match_operand:VWCONVERTI 0 "register_operand" "=vr, vr, vr, vr, vr, vr, ?&vr, ?&vr") (if_then_else:VWCONVERTI (unspec: - [(match_operand: 1 "vector_mask_operand" "vmWc1,vmWc1") - (match_operand 4 "vector_length_operand" " rK, rK") - (match_operand 5 "const_int_operand" " i, i") - (match_operand 6 "const_int_operand" " i, i") - (match_operand 7 "const_int_operand" " i, i") - (match_operand 8 "const_int_operand" " i, i") + [(match_operand: 1 "vector_mask_operand" "vmWc1,vmWc1,vmWc1,vmWc1,vmWc1,vmWc1,vmWc1,vmWc1") + (match_operand 4 "vector_length_operand" " rK, rK, rK, rK, rK, rK, rK, rK") + (match_operand 5 "const_int_operand" " i, i, i, i, i, i, i, i") + (match_operand 6 "const_int_operand" " i, i, i, i, i, i, i, i") + (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i, i") + (match_operand 8 "const_int_operand" " i, i, i, i, i, i, i, i") (reg:SI VL_REGNUM) (reg:SI VTYPE_REGNUM) (reg:SI FRM_REGNUM)] UNSPEC_VPREDICATE) (unspec:VWCONVERTI - [(match_operand: 3 "register_operand" " vr, vr")] VFCVTS) - (match_operand:VWCONVERTI 2 "vector_merge_operand" " vu, 0")))] + [(match_operand: 3 "register_operand" " W21, W21, W42, W42, W84, W84, vr, vr")] VFCVTS) + (match_operand:VWCONVERTI 2 "vector_merge_operand" " vu, 0, vu, 0, vu, 0, vu, 0")))] "TARGET_VECTOR" "vfwcvt.x.f.v\t%0,%3%p1" [(set_attr "type" "vfwcvtftoi") 
(set_attr "mode" "") (set (attr "frm_mode") - (symbol_ref "riscv_vector::get_frm_mode (operands[8])"))]) + (symbol_ref "riscv_vector::get_frm_mode (operands[8])")) + (set_attr "group_overlap" "W21,W21,W42,W42,W84,W84,none,none")]) (define_insn "@pred_widen_" - [(set (match_operand:VWCONVERTI 0 "register_operand" "=&vr, &vr") + [(set (match_operand:VWCONVERTI 0 "register_operand" "=vr, vr, vr, vr, vr, vr, ?&vr, ?&vr") (if_then_else:VWCONVERTI (unspec: - [(match_operand: 1 "vector_mask_operand" "vmWc1,vmWc1") - (match_operand 4 "vector_length_operand" " rK, rK") - (match_operand 5 "const_int_operand" " i, i") - (match_operand 6 "const_int_operand" " i, i") - (match_operand 7 "const_int_operand" " i, i") + [(match_operand: 1 "vector_mask_operand" "vmWc1,vmWc1,vmWc1,vmWc1,vmWc1,vmWc1,vmWc1,vmWc1") + (match_operand 4 "vector_length_operand" " rK, rK, rK, rK, rK, rK, rK, rK") + (match_operand 5 "const_int_operand" " i, i, i, i, i, i, i, i") + (match_operand 6 "const_int_operand" " i, i, i, i, i, i, i, i") + (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i, i") (reg:SI VL_REGNUM) (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) (any_fix:VWCONVERTI - (match_operand: 3 "register_operand" " vr, vr")) - (match_operand:VWCONVERTI 2 "vector_merge_operand" " vu, 0")))] + (match_operand: 3 "register_operand" " W21, W21, W42, W42, W84, W84, vr, vr")) + (match_operand:VWCONVERTI 2 "vector_merge_operand" " vu, 0, vu, 0, vu, 0, vu, 0")))] "TARGET_VECTOR" "vfwcvt.rtz.x.f.v\t%0,%3%p1" [(set_attr "type" "vfwcvtftoi") - (set_attr "mode" "")]) + (set_attr "mode" "") + (set_attr "group_overlap" "W21,W21,W42,W42,W84,W84,none,none")]) (define_insn "@pred_widen_" - [(set (match_operand:V_VLSF 0 "register_operand" "=&vr, &vr") + [(set (match_operand:V_VLSF 0 "register_operand" "=vr, vr, vr, vr, vr, vr, ?&vr, ?&vr") (if_then_else:V_VLSF (unspec: - [(match_operand: 1 "vector_mask_operand" "vmWc1,vmWc1") - (match_operand 4 "vector_length_operand" " rK, rK") - (match_operand 5 
"const_int_operand" " i, i") - (match_operand 6 "const_int_operand" " i, i") - (match_operand 7 "const_int_operand" " i, i") + [(match_operand: 1 "vector_mask_operand" "vmWc1,vmWc1,vmWc1,vmWc1,vmWc1,vmWc1,vmWc1,vmWc1") + (match_operand 4 "vector_length_operand" " rK, rK, rK, rK, rK, rK, rK, rK") + (match_operand 5 "const_int_operand" " i, i, i, i, i, i, i, i") + (match_operand 6 "const_int_operand" " i, i, i, i, i, i, i, i") + (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i, i") (reg:SI VL_REGNUM) (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) (any_float:V_VLSF - (match_operand: 3 "register_operand" " vr, vr")) - (match_operand:V_VLSF 2 "vector_merge_operand" " vu, 0")))] + (match_operand: 3 "register_operand" " W21, W21, W42, W42, W84, W84, vr, vr")) + (match_operand:V_VLSF 2 "vector_merge_operand" " vu, 0, vu, 0, vu, 0, vu, 0")))] "TARGET_VECTOR" "vfwcvt.f.x.v\t%0,%3%p1" [(set_attr "type" "vfwcvtitof") - (set_attr "mode" "")]) + (set_attr "mode" "") + (set_attr "group_overlap" "W21,W21,W42,W42,W84,W84,none,none")]) (define_insn "@pred_extend" - [(set (match_operand:VWEXTF_ZVFHMIN 0 "register_operand" "=&vr, &vr") + [(set (match_operand:VWEXTF_ZVFHMIN 0 "register_operand" "=vr, vr, vr, vr, vr, vr, ?&vr, ?&vr") (if_then_else:VWEXTF_ZVFHMIN (unspec: - [(match_operand: 1 "vector_mask_operand" "vmWc1,vmWc1") - (match_operand 4 "vector_length_operand" " rK, rK") - (match_operand 5 "const_int_operand" " i, i") - (match_operand 6 "const_int_operand" " i, i") - (match_operand 7 "const_int_operand" " i, i") + [(match_operand: 1 "vector_mask_operand" "vmWc1,vmWc1,vmWc1,vmWc1,vmWc1,vmWc1,vmWc1,vmWc1") + (match_operand 4 "vector_length_operand" " rK, rK, rK, rK, rK, rK, rK, rK") + (match_operand 5 "const_int_operand" " i, i, i, i, i, i, i, i") + (match_operand 6 "const_int_operand" " i, i, i, i, i, i, i, i") + (match_operand 7 "const_int_operand" " i, i, i, i, i, i, i, i") (reg:SI VL_REGNUM) (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) (float_extend:VWEXTF_ZVFHMIN - 
(match_operand: 3 "register_operand" " vr, vr")) - (match_operand:VWEXTF_ZVFHMIN 2 "vector_merge_operand" " vu, 0")))] + (match_operand: 3 "register_operand" " W21, W21, W42, W42, W84, W84, vr, vr")) + (match_operand:VWEXTF_ZVFHMIN 2 "vector_merge_operand" " vu, 0, vu, 0, vu, 0, vu, 0")))] "TARGET_VECTOR" "vfwcvt.f.f.v\t%0,%3%p1" [(set_attr "type" "vfwcvtftof") - (set_attr "mode" "")]) + (set_attr "mode" "") + (set_attr "group_overlap" "W21,W21,W42,W42,W84,W84,none,none")]) ;; ------------------------------------------------------------------------------- ;; ---- Predicated floating-point narrow conversions diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-10.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-10.c new file mode 100644 index 00000000000..5f161b31fa1 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-10.c @@ -0,0 +1,104 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */ + +#include "riscv_vector.h" + +double __attribute__ ((noinline)) +sumation (double sum0, double sum1, double sum2, double sum3, double sum4, + double sum5, double sum6, double sum7, double sum8, double sum9, + double sum10, double sum11, double sum12, double sum13, double sum14, + double sum15) +{ + return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7 + sum8 + sum9 + + sum10 + sum11 + sum12 + sum13 + sum14 + sum15; +} + +double +foo (char const *buf, size_t len) +{ + double sum = 0; + size_t vl = __riscv_vsetvlmax_e8m8 (); + size_t step = vl * 4; + const char *it = buf, *end = buf + len; + for (; it + step <= end;) + { + vint32m1_t v0 = __riscv_vle32_v_i32m1 ((void *) it, vl); + it += vl; + vint32m1_t v1 = __riscv_vle32_v_i32m1 ((void *) it, vl); + it += vl; + vint32m1_t v2 = __riscv_vle32_v_i32m1 ((void *) it, vl); + it += vl; + vint32m1_t v3 = __riscv_vle32_v_i32m1 ((void *) it, vl); + it += vl; + vint32m1_t v4 = __riscv_vle32_v_i32m1 ((void *) it, vl); + it += vl; + vint32m1_t v5 = __riscv_vle32_v_i32m1 ((void 
*) it, vl); + it += vl; + vint32m1_t v6 = __riscv_vle32_v_i32m1 ((void *) it, vl); + it += vl; + vint32m1_t v7 = __riscv_vle32_v_i32m1 ((void *) it, vl); + it += vl; + vint32m1_t v8 = __riscv_vle32_v_i32m1 ((void *) it, vl); + it += vl; + vint32m1_t v9 = __riscv_vle32_v_i32m1 ((void *) it, vl); + it += vl; + vint32m1_t v10 = __riscv_vle32_v_i32m1 ((void *) it, vl); + it += vl; + vint32m1_t v11 = __riscv_vle32_v_i32m1 ((void *) it, vl); + it += vl; + vint32m1_t v12 = __riscv_vle32_v_i32m1 ((void *) it, vl); + it += vl; + vint32m1_t v13 = __riscv_vle32_v_i32m1 ((void *) it, vl); + it += vl; + vint32m1_t v14 = __riscv_vle32_v_i32m1 ((void *) it, vl); + it += vl; + vint32m1_t v15 = __riscv_vle32_v_i32m1 ((void *) it, vl); + it += vl; + + asm volatile("nop" ::: "memory"); + vfloat64m2_t vw0 = __riscv_vfwcvt_f_x_v_f64m2 (v0, vl); + vfloat64m2_t vw1 = __riscv_vfwcvt_f_x_v_f64m2 (v1, vl); + vfloat64m2_t vw2 = __riscv_vfwcvt_f_x_v_f64m2 (v2, vl); + vfloat64m2_t vw3 = __riscv_vfwcvt_f_x_v_f64m2 (v3, vl); + vfloat64m2_t vw4 = __riscv_vfwcvt_f_x_v_f64m2 (v4, vl); + vfloat64m2_t vw5 = __riscv_vfwcvt_f_x_v_f64m2 (v5, vl); + vfloat64m2_t vw6 = __riscv_vfwcvt_f_x_v_f64m2 (v6, vl); + vfloat64m2_t vw7 = __riscv_vfwcvt_f_x_v_f64m2 (v7, vl); + vfloat64m2_t vw8 = __riscv_vfwcvt_f_x_v_f64m2 (v8, vl); + vfloat64m2_t vw9 = __riscv_vfwcvt_f_x_v_f64m2 (v9, vl); + vfloat64m2_t vw10 = __riscv_vfwcvt_f_x_v_f64m2 (v10, vl); + vfloat64m2_t vw11 = __riscv_vfwcvt_f_x_v_f64m2 (v11, vl); + vfloat64m2_t vw12 = __riscv_vfwcvt_f_x_v_f64m2 (v12, vl); + vfloat64m2_t vw13 = __riscv_vfwcvt_f_x_v_f64m2 (v13, vl); + vfloat64m2_t vw14 = __riscv_vfwcvt_f_x_v_f64m2 (v14, vl); + vfloat64m2_t vw15 = __riscv_vfwcvt_f_x_v_f64m2 (v15, vl); + + asm volatile("nop" ::: "memory"); + double sum0 = __riscv_vfmv_f_s_f64m2_f64 (vw0); + double sum1 = __riscv_vfmv_f_s_f64m2_f64 (vw1); + double sum2 = __riscv_vfmv_f_s_f64m2_f64 (vw2); + double sum3 = __riscv_vfmv_f_s_f64m2_f64 (vw3); + double sum4 = __riscv_vfmv_f_s_f64m2_f64 
(vw4); + double sum5 = __riscv_vfmv_f_s_f64m2_f64 (vw5); + double sum6 = __riscv_vfmv_f_s_f64m2_f64 (vw6); + double sum7 = __riscv_vfmv_f_s_f64m2_f64 (vw7); + double sum8 = __riscv_vfmv_f_s_f64m2_f64 (vw8); + double sum9 = __riscv_vfmv_f_s_f64m2_f64 (vw9); + double sum10 = __riscv_vfmv_f_s_f64m2_f64 (vw10); + double sum11 = __riscv_vfmv_f_s_f64m2_f64 (vw11); + double sum12 = __riscv_vfmv_f_s_f64m2_f64 (vw12); + double sum13 = __riscv_vfmv_f_s_f64m2_f64 (vw13); + double sum14 = __riscv_vfmv_f_s_f64m2_f64 (vw14); + double sum15 = __riscv_vfmv_f_s_f64m2_f64 (vw15); + + sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8, + sum9, sum10, sum11, sum12, sum13, sum14, sum15); + } + return sum; +} + +/* { dg-final { scan-assembler-not {vmv1r} } } */ +/* { dg-final { scan-assembler-not {vmv2r} } } */ +/* { dg-final { scan-assembler-not {vmv4r} } } */ +/* { dg-final { scan-assembler-not {vmv8r} } } */ +/* { dg-final { scan-assembler-not {csrr} } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-11.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-11.c new file mode 100644 index 00000000000..82827d14e34 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-11.c @@ -0,0 +1,68 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */ + +#include "riscv_vector.h" + +double __attribute__ ((noinline)) +sumation (double sum0, double sum1, double sum2, double sum3, double sum4, + double sum5, double sum6, double sum7) +{ + return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7; +} + +double +foo (char const *buf, size_t len) +{ + double sum = 0; + size_t vl = __riscv_vsetvlmax_e8m8 (); + size_t step = vl * 4; + const char *it = buf, *end = buf + len; + for (; it + step <= end;) + { + vint32m2_t v0 = __riscv_vle32_v_i32m2 ((void *) it, vl); + it += vl; + vint32m2_t v1 = __riscv_vle32_v_i32m2 ((void *) it, vl); + it += vl; + vint32m2_t v2 = __riscv_vle32_v_i32m2 ((void *) it, vl); + it += vl; + 
vint32m2_t v3 = __riscv_vle32_v_i32m2 ((void *) it, vl); + it += vl; + vint32m2_t v4 = __riscv_vle32_v_i32m2 ((void *) it, vl); + it += vl; + vint32m2_t v5 = __riscv_vle32_v_i32m2 ((void *) it, vl); + it += vl; + vint32m2_t v6 = __riscv_vle32_v_i32m2 ((void *) it, vl); + it += vl; + vint32m2_t v7 = __riscv_vle32_v_i32m2 ((void *) it, vl); + it += vl; + + asm volatile("nop" ::: "memory"); + vfloat64m4_t vw0 = __riscv_vfwcvt_f_x_v_f64m4 (v0, vl); + vfloat64m4_t vw1 = __riscv_vfwcvt_f_x_v_f64m4 (v1, vl); + vfloat64m4_t vw2 = __riscv_vfwcvt_f_x_v_f64m4 (v2, vl); + vfloat64m4_t vw3 = __riscv_vfwcvt_f_x_v_f64m4 (v3, vl); + vfloat64m4_t vw4 = __riscv_vfwcvt_f_x_v_f64m4 (v4, vl); + vfloat64m4_t vw5 = __riscv_vfwcvt_f_x_v_f64m4 (v5, vl); + vfloat64m4_t vw6 = __riscv_vfwcvt_f_x_v_f64m4 (v6, vl); + vfloat64m4_t vw7 = __riscv_vfwcvt_f_x_v_f64m4 (v7, vl); + + asm volatile("nop" ::: "memory"); + double sum0 = __riscv_vfmv_f_s_f64m4_f64 (vw0); + double sum1 = __riscv_vfmv_f_s_f64m4_f64 (vw1); + double sum2 = __riscv_vfmv_f_s_f64m4_f64 (vw2); + double sum3 = __riscv_vfmv_f_s_f64m4_f64 (vw3); + double sum4 = __riscv_vfmv_f_s_f64m4_f64 (vw4); + double sum5 = __riscv_vfmv_f_s_f64m4_f64 (vw5); + double sum6 = __riscv_vfmv_f_s_f64m4_f64 (vw6); + double sum7 = __riscv_vfmv_f_s_f64m4_f64 (vw7); + + sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7); + } + return sum; +} + +/* { dg-final { scan-assembler-not {vmv1r} } } */ +/* { dg-final { scan-assembler-not {vmv2r} } } */ +/* { dg-final { scan-assembler-not {vmv4r} } } */ +/* { dg-final { scan-assembler-not {vmv8r} } } */ +/* { dg-final { scan-assembler-not {csrr} } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-12.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-12.c new file mode 100644 index 00000000000..c4ae60755ea --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-12.c @@ -0,0 +1,51 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */ + +#include 
"riscv_vector.h" + +double __attribute__ ((noinline)) +sumation (double sum0, double sum1, double sum2, double sum3) +{ + return sum0 + sum1 + sum2 + sum3; +} + +double +foo (char const *buf, size_t len) +{ + double sum = 0; + size_t vl = __riscv_vsetvlmax_e8m8 (); + size_t step = vl * 4; + const char *it = buf, *end = buf + len; + for (; it + step <= end;) + { + vint32m4_t v0 = __riscv_vle32_v_i32m4 ((void *) it, vl); + it += vl; + vint32m4_t v1 = __riscv_vle32_v_i32m4 ((void *) it, vl); + it += vl; + vint32m4_t v2 = __riscv_vle32_v_i32m4 ((void *) it, vl); + it += vl; + vint32m4_t v3 = __riscv_vle32_v_i32m4 ((void *) it, vl); + it += vl; + + asm volatile("nop" ::: "memory"); + vfloat64m8_t vw0 = __riscv_vfwcvt_f_x_v_f64m8 (v0, vl); + vfloat64m8_t vw1 = __riscv_vfwcvt_f_x_v_f64m8 (v1, vl); + vfloat64m8_t vw2 = __riscv_vfwcvt_f_x_v_f64m8 (v2, vl); + vfloat64m8_t vw3 = __riscv_vfwcvt_f_x_v_f64m8 (v3, vl); + + asm volatile("nop" ::: "memory"); + double sum0 = __riscv_vfmv_f_s_f64m8_f64 (vw0); + double sum1 = __riscv_vfmv_f_s_f64m8_f64 (vw1); + double sum2 = __riscv_vfmv_f_s_f64m8_f64 (vw2); + double sum3 = __riscv_vfmv_f_s_f64m8_f64 (vw3); + + sum += sumation (sum0, sum1, sum2, sum3); + } + return sum; +} + +/* { dg-final { scan-assembler-not {vmv1r} } } */ +/* { dg-final { scan-assembler-not {vmv2r} } } */ +/* { dg-final { scan-assembler-not {vmv4r} } } */ +/* { dg-final { scan-assembler-not {vmv8r} } } */ +/* { dg-final { scan-assembler-not {csrr} } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-13.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-13.c new file mode 100644 index 00000000000..fde7076d34f --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-13.c @@ -0,0 +1,188 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */ + +#include "riscv_vector.h" + +double __attribute__ ((noinline)) +sumation (double sum0, double sum1, double sum2, double sum3, double sum4, + double sum5, double sum6, 
double sum7, double sum8, double sum9, + double sum10, double sum11, double sum12, double sum13, double sum14, + double sum15) +{ + return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7 + sum8 + sum9 + + sum10 + sum11 + sum12 + sum13 + sum14 + sum15; +} + +double +foo (char const *buf, size_t len) +{ + double sum = 0; + size_t vl = __riscv_vsetvlmax_e8m8 (); + size_t step = vl * 4; + const char *it = buf, *end = buf + len; + for (; it + step <= end;) + { + vfloat32m1_t v0 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v1 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v2 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v3 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v4 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v5 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v6 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v7 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v8 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v9 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v10 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v11 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v12 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v13 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v14 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v15 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + + asm volatile("nop" ::: "memory"); + vint64m2_t vw0 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v0, vl); + vint64m2_t vw1 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v1, vl); + vint64m2_t vw2 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v2, vl); + vint64m2_t vw3 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v3, vl); + vint64m2_t vw4 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v4, vl); + vint64m2_t vw5 = 
__riscv_vfwcvt_rtz_x_f_v_i64m2 (v5, vl); + vint64m2_t vw6 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v6, vl); + vint64m2_t vw7 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v7, vl); + vint64m2_t vw8 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v8, vl); + vint64m2_t vw9 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v9, vl); + vint64m2_t vw10 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v10, vl); + vint64m2_t vw11 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v11, vl); + vint64m2_t vw12 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v12, vl); + vint64m2_t vw13 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v13, vl); + vint64m2_t vw14 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v14, vl); + vint64m2_t vw15 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v15, vl); + + asm volatile("nop" ::: "memory"); + double sum0 = __riscv_vmv_x_s_i64m2_i64 (vw0); + double sum1 = __riscv_vmv_x_s_i64m2_i64 (vw1); + double sum2 = __riscv_vmv_x_s_i64m2_i64 (vw2); + double sum3 = __riscv_vmv_x_s_i64m2_i64 (vw3); + double sum4 = __riscv_vmv_x_s_i64m2_i64 (vw4); + double sum5 = __riscv_vmv_x_s_i64m2_i64 (vw5); + double sum6 = __riscv_vmv_x_s_i64m2_i64 (vw6); + double sum7 = __riscv_vmv_x_s_i64m2_i64 (vw7); + double sum8 = __riscv_vmv_x_s_i64m2_i64 (vw8); + double sum9 = __riscv_vmv_x_s_i64m2_i64 (vw9); + double sum10 = __riscv_vmv_x_s_i64m2_i64 (vw10); + double sum11 = __riscv_vmv_x_s_i64m2_i64 (vw11); + double sum12 = __riscv_vmv_x_s_i64m2_i64 (vw12); + double sum13 = __riscv_vmv_x_s_i64m2_i64 (vw13); + double sum14 = __riscv_vmv_x_s_i64m2_i64 (vw14); + double sum15 = __riscv_vmv_x_s_i64m2_i64 (vw15); + + sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8, + sum9, sum10, sum11, sum12, sum13, sum14, sum15); + } + return sum; +} + +double +foo2 (char const *buf, size_t len) +{ + double sum = 0; + size_t vl = __riscv_vsetvlmax_e8m8 (); + size_t step = vl * 4; + const char *it = buf, *end = buf + len; + for (; it + step <= end;) + { + vfloat32m1_t v0 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v1 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + 
vfloat32m1_t v2 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v3 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v4 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v5 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v6 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v7 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v8 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v9 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v10 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v11 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v12 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v13 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v14 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v15 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + + asm volatile("nop" ::: "memory"); + vint64m2_t vw0 = __riscv_vfwcvt_x_f_v_i64m2 (v0, vl); + vint64m2_t vw1 = __riscv_vfwcvt_x_f_v_i64m2 (v1, vl); + vint64m2_t vw2 = __riscv_vfwcvt_x_f_v_i64m2 (v2, vl); + vint64m2_t vw3 = __riscv_vfwcvt_x_f_v_i64m2 (v3, vl); + vint64m2_t vw4 = __riscv_vfwcvt_x_f_v_i64m2 (v4, vl); + vint64m2_t vw5 = __riscv_vfwcvt_x_f_v_i64m2 (v5, vl); + vint64m2_t vw6 = __riscv_vfwcvt_x_f_v_i64m2 (v6, vl); + vint64m2_t vw7 = __riscv_vfwcvt_x_f_v_i64m2 (v7, vl); + vint64m2_t vw8 = __riscv_vfwcvt_x_f_v_i64m2 (v8, vl); + vint64m2_t vw9 = __riscv_vfwcvt_x_f_v_i64m2 (v9, vl); + vint64m2_t vw10 = __riscv_vfwcvt_x_f_v_i64m2 (v10, vl); + vint64m2_t vw11 = __riscv_vfwcvt_x_f_v_i64m2 (v11, vl); + vint64m2_t vw12 = __riscv_vfwcvt_x_f_v_i64m2 (v12, vl); + vint64m2_t vw13 = __riscv_vfwcvt_x_f_v_i64m2 (v13, vl); + vint64m2_t vw14 = __riscv_vfwcvt_x_f_v_i64m2 (v14, vl); + vint64m2_t vw15 = __riscv_vfwcvt_x_f_v_i64m2 (v15, vl); + + asm volatile("nop" ::: 
"memory"); + double sum0 = __riscv_vmv_x_s_i64m2_i64 (vw0); + double sum1 = __riscv_vmv_x_s_i64m2_i64 (vw1); + double sum2 = __riscv_vmv_x_s_i64m2_i64 (vw2); + double sum3 = __riscv_vmv_x_s_i64m2_i64 (vw3); + double sum4 = __riscv_vmv_x_s_i64m2_i64 (vw4); + double sum5 = __riscv_vmv_x_s_i64m2_i64 (vw5); + double sum6 = __riscv_vmv_x_s_i64m2_i64 (vw6); + double sum7 = __riscv_vmv_x_s_i64m2_i64 (vw7); + double sum8 = __riscv_vmv_x_s_i64m2_i64 (vw8); + double sum9 = __riscv_vmv_x_s_i64m2_i64 (vw9); + double sum10 = __riscv_vmv_x_s_i64m2_i64 (vw10); + double sum11 = __riscv_vmv_x_s_i64m2_i64 (vw11); + double sum12 = __riscv_vmv_x_s_i64m2_i64 (vw12); + double sum13 = __riscv_vmv_x_s_i64m2_i64 (vw13); + double sum14 = __riscv_vmv_x_s_i64m2_i64 (vw14); + double sum15 = __riscv_vmv_x_s_i64m2_i64 (vw15); + + sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8, + sum9, sum10, sum11, sum12, sum13, sum14, sum15); + } + return sum; +} + +/* { dg-final { scan-assembler-not {vmv1r} } } */ +/* { dg-final { scan-assembler-not {vmv2r} } } */ +/* { dg-final { scan-assembler-not {vmv4r} } } */ +/* { dg-final { scan-assembler-not {vmv8r} } } */ +/* { dg-final { scan-assembler-not {csrr} } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-14.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-14.c new file mode 100644 index 00000000000..535ea7ce34b --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-14.c @@ -0,0 +1,119 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */ + +#include "riscv_vector.h" + +double __attribute__ ((noinline)) +sumation (double sum0, double sum1, double sum2, double sum3, double sum4, + double sum5, double sum6, double sum7) +{ + return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7; +} + +double +foo (char const *buf, size_t len) +{ + double sum = 0; + size_t vl = __riscv_vsetvlmax_e8m8 (); + size_t step = vl * 4; + const char *it = buf, *end = buf + len; + for (; it + 
step <= end;) + { + vfloat32m1_t v0 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v1 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v2 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v3 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v4 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v5 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v6 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v7 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + + asm volatile("nop" ::: "memory"); + vint64m2_t vw0 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v0, vl); + vint64m2_t vw1 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v1, vl); + vint64m2_t vw2 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v2, vl); + vint64m2_t vw3 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v3, vl); + vint64m2_t vw4 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v4, vl); + vint64m2_t vw5 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v5, vl); + vint64m2_t vw6 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v6, vl); + vint64m2_t vw7 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v7, vl); + + asm volatile("nop" ::: "memory"); + double sum0 = __riscv_vmv_x_s_i64m2_i64 (vw0); + double sum1 = __riscv_vmv_x_s_i64m2_i64 (vw1); + double sum2 = __riscv_vmv_x_s_i64m2_i64 (vw2); + double sum3 = __riscv_vmv_x_s_i64m2_i64 (vw3); + double sum4 = __riscv_vmv_x_s_i64m2_i64 (vw4); + double sum5 = __riscv_vmv_x_s_i64m2_i64 (vw5); + double sum6 = __riscv_vmv_x_s_i64m2_i64 (vw6); + double sum7 = __riscv_vmv_x_s_i64m2_i64 (vw7); + + sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7); + } + return sum; +} + +double +foo2 (char const *buf, size_t len) +{ + double sum = 0; + size_t vl = __riscv_vsetvlmax_e8m8 (); + size_t step = vl * 4; + const char *it = buf, *end = buf + len; + for (; it + step <= end;) + { + vfloat32m1_t v0 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v1 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + 
vfloat32m1_t v2 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v3 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v4 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v5 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v6 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v7 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + + asm volatile("nop" ::: "memory"); + vint64m2_t vw0 = __riscv_vfwcvt_x_f_v_i64m2 (v0, vl); + vint64m2_t vw1 = __riscv_vfwcvt_x_f_v_i64m2 (v1, vl); + vint64m2_t vw2 = __riscv_vfwcvt_x_f_v_i64m2 (v2, vl); + vint64m2_t vw3 = __riscv_vfwcvt_x_f_v_i64m2 (v3, vl); + vint64m2_t vw4 = __riscv_vfwcvt_x_f_v_i64m2 (v4, vl); + vint64m2_t vw5 = __riscv_vfwcvt_x_f_v_i64m2 (v5, vl); + vint64m2_t vw6 = __riscv_vfwcvt_x_f_v_i64m2 (v6, vl); + vint64m2_t vw7 = __riscv_vfwcvt_x_f_v_i64m2 (v7, vl); + + asm volatile("nop" ::: "memory"); + double sum0 = __riscv_vmv_x_s_i64m2_i64 (vw0); + double sum1 = __riscv_vmv_x_s_i64m2_i64 (vw1); + double sum2 = __riscv_vmv_x_s_i64m2_i64 (vw2); + double sum3 = __riscv_vmv_x_s_i64m2_i64 (vw3); + double sum4 = __riscv_vmv_x_s_i64m2_i64 (vw4); + double sum5 = __riscv_vmv_x_s_i64m2_i64 (vw5); + double sum6 = __riscv_vmv_x_s_i64m2_i64 (vw6); + double sum7 = __riscv_vmv_x_s_i64m2_i64 (vw7); + + sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7); + } + return sum; +} + +/* { dg-final { scan-assembler-not {vmv1r} } } */ +/* { dg-final { scan-assembler-not {vmv2r} } } */ +/* { dg-final { scan-assembler-not {vmv4r} } } */ +/* { dg-final { scan-assembler-not {vmv8r} } } */ +/* { dg-final { scan-assembler-not {csrr} } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-15.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-15.c new file mode 100644 index 00000000000..3d46e4a829a --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-15.c @@ -0,0 +1,86 @@ +/* { dg-do compile } */ +/* { 
dg-options "-march=rv64gcv -mabi=lp64d -O3" } */ + +#include "riscv_vector.h" + +double __attribute__ ((noinline)) +sumation (double sum0, double sum1, double sum2, double sum3) +{ + return sum0 + sum1 + sum2 + sum3; +} + +double +foo (char const *buf, size_t len) +{ + double sum = 0; + size_t vl = __riscv_vsetvlmax_e8m8 (); + size_t step = vl * 4; + const char *it = buf, *end = buf + len; + for (; it + step <= end;) + { + vfloat32m1_t v0 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v1 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v2 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v3 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + + asm volatile("nop" ::: "memory"); + vint64m2_t vw0 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v0, vl); + vint64m2_t vw1 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v1, vl); + vint64m2_t vw2 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v2, vl); + vint64m2_t vw3 = __riscv_vfwcvt_rtz_x_f_v_i64m2 (v3, vl); + + asm volatile("nop" ::: "memory"); + double sum0 = __riscv_vmv_x_s_i64m2_i64 (vw0); + double sum1 = __riscv_vmv_x_s_i64m2_i64 (vw1); + double sum2 = __riscv_vmv_x_s_i64m2_i64 (vw2); + double sum3 = __riscv_vmv_x_s_i64m2_i64 (vw3); + + sum += sumation (sum0, sum1, sum2, sum3); + } + return sum; +} + +double +foo2 (char const *buf, size_t len) +{ + double sum = 0; + size_t vl = __riscv_vsetvlmax_e8m8 (); + size_t step = vl * 4; + const char *it = buf, *end = buf + len; + for (; it + step <= end;) + { + vfloat32m1_t v0 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v1 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v2 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v3 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + + asm volatile("nop" ::: "memory"); + vint64m2_t vw0 = __riscv_vfwcvt_x_f_v_i64m2 (v0, vl); + vint64m2_t vw1 = __riscv_vfwcvt_x_f_v_i64m2 (v1, vl); + vint64m2_t vw2 = __riscv_vfwcvt_x_f_v_i64m2 (v2, vl); 
+ vint64m2_t vw3 = __riscv_vfwcvt_x_f_v_i64m2 (v3, vl); + + asm volatile("nop" ::: "memory"); + double sum0 = __riscv_vmv_x_s_i64m2_i64 (vw0); + double sum1 = __riscv_vmv_x_s_i64m2_i64 (vw1); + double sum2 = __riscv_vmv_x_s_i64m2_i64 (vw2); + double sum3 = __riscv_vmv_x_s_i64m2_i64 (vw3); + + sum += sumation (sum0, sum1, sum2, sum3); + } + return sum; +} + +/* { dg-final { scan-assembler-not {vmv1r} } } */ +/* { dg-final { scan-assembler-not {vmv2r} } } */ +/* { dg-final { scan-assembler-not {vmv4r} } } */ +/* { dg-final { scan-assembler-not {vmv8r} } } */ +/* { dg-final { scan-assembler-not {csrr} } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-7.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-7.c new file mode 100644 index 00000000000..7064471496c --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-7.c @@ -0,0 +1,106 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */ + +#include "riscv_vector.h" + +double __attribute__ ((noinline)) +sumation (double sum0, double sum1, double sum2, double sum3, double sum4, + double sum5, double sum6, double sum7, double sum8, double sum9, + double sum10, double sum11, double sum12, double sum13, double sum14, + double sum15) +{ + return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7 + sum8 + sum9 + + sum10 + sum11 + sum12 + sum13 + sum14 + sum15; +} + +double +foo (char const *buf, size_t len) +{ + double sum = 0; + size_t vl = __riscv_vsetvlmax_e8m8 (); + size_t step = vl * 4; + const char *it = buf, *end = buf + len; + for (; it + step <= end;) + { + vfloat32m1_t v0 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v1 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v2 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v3 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v4 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v5 = __riscv_vle32_v_f32m1 
((void *) it, vl); + it += vl; + vfloat32m1_t v6 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v7 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v8 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v9 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v10 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v11 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v12 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v13 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v14 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + vfloat32m1_t v15 = __riscv_vle32_v_f32m1 ((void *) it, vl); + it += vl; + + asm volatile("nop" ::: "memory"); + vfloat64m2_t vw0 = __riscv_vfwcvt_f_f_v_f64m2 (v0, vl); + vfloat64m2_t vw1 = __riscv_vfwcvt_f_f_v_f64m2 (v1, vl); + vfloat64m2_t vw2 = __riscv_vfwcvt_f_f_v_f64m2 (v2, vl); + vfloat64m2_t vw3 = __riscv_vfwcvt_f_f_v_f64m2 (v3, vl); + vfloat64m2_t vw4 = __riscv_vfwcvt_f_f_v_f64m2 (v4, vl); + vfloat64m2_t vw5 = __riscv_vfwcvt_f_f_v_f64m2 (v5, vl); + vfloat64m2_t vw6 = __riscv_vfwcvt_f_f_v_f64m2 (v6, vl); + vfloat64m2_t vw7 = __riscv_vfwcvt_f_f_v_f64m2 (v7, vl); + vfloat64m2_t vw8 = __riscv_vfwcvt_f_f_v_f64m2 (v8, vl); + vfloat64m2_t vw9 = __riscv_vfwcvt_f_f_v_f64m2 (v9, vl); + vfloat64m2_t vw10 = __riscv_vfwcvt_f_f_v_f64m2 (v10, vl); + vfloat64m2_t vw11 = __riscv_vfwcvt_f_f_v_f64m2 (v11, vl); + vfloat64m2_t vw12 = __riscv_vfwcvt_f_f_v_f64m2 (v12, vl); + vfloat64m2_t vw13 = __riscv_vfwcvt_f_f_v_f64m2 (v13, vl); + vfloat64m2_t vw14 = __riscv_vfwcvt_f_f_v_f64m2 (v14, vl); + vfloat64m2_t vw15 = __riscv_vfwcvt_f_f_v_f64m2 (v15, vl); + + asm volatile("nop" ::: "memory"); + double sum0 = __riscv_vfmv_f_s_f64m2_f64 (vw0); + double sum1 = __riscv_vfmv_f_s_f64m2_f64 (vw1); + double sum2 = __riscv_vfmv_f_s_f64m2_f64 (vw2); + double sum3 = __riscv_vfmv_f_s_f64m2_f64 (vw3); + double sum4 = 
__riscv_vfmv_f_s_f64m2_f64 (vw4); + double sum5 = __riscv_vfmv_f_s_f64m2_f64 (vw5); + double sum6 = __riscv_vfmv_f_s_f64m2_f64 (vw6); + double sum7 = __riscv_vfmv_f_s_f64m2_f64 (vw7); + double sum8 = __riscv_vfmv_f_s_f64m2_f64 (vw8); + double sum9 = __riscv_vfmv_f_s_f64m2_f64 (vw9); + double sum10 = __riscv_vfmv_f_s_f64m2_f64 (vw10); + double sum11 = __riscv_vfmv_f_s_f64m2_f64 (vw11); + double sum12 = __riscv_vfmv_f_s_f64m2_f64 (vw12); + double sum13 = __riscv_vfmv_f_s_f64m2_f64 (vw13); + double sum14 = __riscv_vfmv_f_s_f64m2_f64 (vw14); + double sum15 = __riscv_vfmv_f_s_f64m2_f64 (vw15); + + sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8, + sum9, sum10, sum11, sum12, sum13, sum14, sum15); + } + return sum; +} + +/* { dg-final { scan-assembler-not {vmv1r} } } */ +/* { dg-final { scan-assembler-not {vmv2r} } } */ +/* { dg-final { scan-assembler-not {vmv4r} } } */ +/* { dg-final { scan-assembler-not {vmv8r} } } */ +/* { dg-final { scan-assembler-not {csrr} } } */ + + diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-8.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-8.c new file mode 100644 index 00000000000..ab56d0d69af --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-8.c @@ -0,0 +1,68 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */ + +#include "riscv_vector.h" + +double __attribute__ ((noinline)) +sumation (double sum0, double sum1, double sum2, double sum3, double sum4, + double sum5, double sum6, double sum7) +{ + return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7; +} + +double +foo (char const *buf, size_t len) +{ + double sum = 0; + size_t vl = __riscv_vsetvlmax_e8m8 (); + size_t step = vl * 4; + const char *it = buf, *end = buf + len; + for (; it + step <= end;) + { + vfloat32m2_t v0 = __riscv_vle32_v_f32m2 ((void *) it, vl); + it += vl; + vfloat32m2_t v1 = __riscv_vle32_v_f32m2 ((void *) it, vl); + it += vl; + vfloat32m2_t v2 = __riscv_vle32_v_f32m2 ((void 
*) it, vl); + it += vl; + vfloat32m2_t v3 = __riscv_vle32_v_f32m2 ((void *) it, vl); + it += vl; + vfloat32m2_t v4 = __riscv_vle32_v_f32m2 ((void *) it, vl); + it += vl; + vfloat32m2_t v5 = __riscv_vle32_v_f32m2 ((void *) it, vl); + it += vl; + vfloat32m2_t v6 = __riscv_vle32_v_f32m2 ((void *) it, vl); + it += vl; + vfloat32m2_t v7 = __riscv_vle32_v_f32m2 ((void *) it, vl); + it += vl; + + asm volatile("nop" ::: "memory"); + vfloat64m4_t vw0 = __riscv_vfwcvt_f_f_v_f64m4 (v0, vl); + vfloat64m4_t vw1 = __riscv_vfwcvt_f_f_v_f64m4 (v1, vl); + vfloat64m4_t vw2 = __riscv_vfwcvt_f_f_v_f64m4 (v2, vl); + vfloat64m4_t vw3 = __riscv_vfwcvt_f_f_v_f64m4 (v3, vl); + vfloat64m4_t vw4 = __riscv_vfwcvt_f_f_v_f64m4 (v4, vl); + vfloat64m4_t vw5 = __riscv_vfwcvt_f_f_v_f64m4 (v5, vl); + vfloat64m4_t vw6 = __riscv_vfwcvt_f_f_v_f64m4 (v6, vl); + vfloat64m4_t vw7 = __riscv_vfwcvt_f_f_v_f64m4 (v7, vl); + + asm volatile("nop" ::: "memory"); + double sum0 = __riscv_vfmv_f_s_f64m4_f64 (vw0); + double sum1 = __riscv_vfmv_f_s_f64m4_f64 (vw1); + double sum2 = __riscv_vfmv_f_s_f64m4_f64 (vw2); + double sum3 = __riscv_vfmv_f_s_f64m4_f64 (vw3); + double sum4 = __riscv_vfmv_f_s_f64m4_f64 (vw4); + double sum5 = __riscv_vfmv_f_s_f64m4_f64 (vw5); + double sum6 = __riscv_vfmv_f_s_f64m4_f64 (vw6); + double sum7 = __riscv_vfmv_f_s_f64m4_f64 (vw7); + + sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7); + } + return sum; +} + +/* { dg-final { scan-assembler-not {vmv1r} } } */ +/* { dg-final { scan-assembler-not {vmv2r} } } */ +/* { dg-final { scan-assembler-not {vmv4r} } } */ +/* { dg-final { scan-assembler-not {vmv8r} } } */ +/* { dg-final { scan-assembler-not {csrr} } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-9.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-9.c new file mode 100644 index 00000000000..82f369c0cd9 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-9.c @@ -0,0 +1,51 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv 
-mabi=lp64d -O3" } */ + +#include "riscv_vector.h" + +double __attribute__ ((noinline)) +sumation (double sum0, double sum1, double sum2, double sum3) +{ + return sum0 + sum1 + sum2 + sum3; +} + +double +foo (char const *buf, size_t len) +{ + double sum = 0; + size_t vl = __riscv_vsetvlmax_e8m8 (); + size_t step = vl * 4; + const char *it = buf, *end = buf + len; + for (; it + step <= end;) + { + vfloat32m4_t v0 = __riscv_vle32_v_f32m4 ((void *) it, vl); + it += vl; + vfloat32m4_t v1 = __riscv_vle32_v_f32m4 ((void *) it, vl); + it += vl; + vfloat32m4_t v2 = __riscv_vle32_v_f32m4 ((void *) it, vl); + it += vl; + vfloat32m4_t v3 = __riscv_vle32_v_f32m4 ((void *) it, vl); + it += vl; + + asm volatile("nop" ::: "memory"); + vfloat64m8_t vw0 = __riscv_vfwcvt_f_f_v_f64m8 (v0, vl); + vfloat64m8_t vw1 = __riscv_vfwcvt_f_f_v_f64m8 (v1, vl); + vfloat64m8_t vw2 = __riscv_vfwcvt_f_f_v_f64m8 (v2, vl); + vfloat64m8_t vw3 = __riscv_vfwcvt_f_f_v_f64m8 (v3, vl); + + asm volatile("nop" ::: "memory"); + double sum0 = __riscv_vfmv_f_s_f64m8_f64 (vw0); + double sum1 = __riscv_vfmv_f_s_f64m8_f64 (vw1); + double sum2 = __riscv_vfmv_f_s_f64m8_f64 (vw2); + double sum3 = __riscv_vfmv_f_s_f64m8_f64 (vw3); + + sum += sumation (sum0, sum1, sum2, sum3); + } + return sum; +} + +/* { dg-final { scan-assembler-not {vmv1r} } } */ +/* { dg-final { scan-assembler-not {vmv2r} } } */ +/* { dg-final { scan-assembler-not {vmv4r} } } */ +/* { dg-final { scan-assembler-not {vmv8r} } } */ +/* { dg-final { scan-assembler-not {csrr} } } */ -- 2.11.4.GIT