qemu-riscv
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [PATCH v8 46/62] target/riscv: vector single-width integer reduction instructions


From: Alistair Francis
Subject: Re: [PATCH v8 46/62] target/riscv: vector single-width integer reduction instructions
Date: Fri, 29 May 2020 13:58:14 -0700

On Thu, May 21, 2020 at 4:17 AM LIU Zhiwei <zhiwei_liu@c-sky.com> wrote:
>
> Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
> Reviewed-by: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Alistair Francis <alistair.francis@wdc.com>

Alistair

> ---
>  target/riscv/helper.h                   | 33 +++++++++++
>  target/riscv/insn32.decode              |  8 +++
>  target/riscv/insn_trans/trans_rvv.inc.c | 18 ++++++
>  target/riscv/vector_helper.c            | 74 +++++++++++++++++++++++++
>  4 files changed, 133 insertions(+)
>
> diff --git a/target/riscv/helper.h b/target/riscv/helper.h
> index 82c5d1129e..93a7a303ee 100644
> --- a/target/riscv/helper.h
> +++ b/target/riscv/helper.h
> @@ -1033,3 +1033,36 @@ DEF_HELPER_5(vfncvt_f_x_v_h, void, ptr, ptr, ptr, env, i32)
>  DEF_HELPER_5(vfncvt_f_x_v_w, void, ptr, ptr, ptr, env, i32)
>  DEF_HELPER_5(vfncvt_f_f_v_h, void, ptr, ptr, ptr, env, i32)
>  DEF_HELPER_5(vfncvt_f_f_v_w, void, ptr, ptr, ptr, env, i32)
> +
> +DEF_HELPER_6(vredsum_vs_b, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vredsum_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vredsum_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vredsum_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vredmaxu_vs_b, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vredmaxu_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vredmaxu_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vredmaxu_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vredmax_vs_b, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vredmax_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vredmax_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vredmax_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vredminu_vs_b, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vredminu_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vredminu_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vredminu_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vredmin_vs_b, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vredmin_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vredmin_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vredmin_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vredand_vs_b, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vredand_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vredand_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vredand_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vredor_vs_b, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vredor_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vredor_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vredor_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vredxor_vs_b, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vredxor_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vredxor_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vredxor_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
> diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
> index 57ac4de1c2..773b32f0b4 100644
> --- a/target/riscv/insn32.decode
> +++ b/target/riscv/insn32.decode
> @@ -529,6 +529,14 @@ vfncvt_x_f_v    100010 . ..... 10001 001 ..... 1010111 @r2_vm
>  vfncvt_f_xu_v   100010 . ..... 10010 001 ..... 1010111 @r2_vm
>  vfncvt_f_x_v    100010 . ..... 10011 001 ..... 1010111 @r2_vm
>  vfncvt_f_f_v    100010 . ..... 10100 001 ..... 1010111 @r2_vm
> +vredsum_vs      000000 . ..... ..... 010 ..... 1010111 @r_vm
> +vredand_vs      000001 . ..... ..... 010 ..... 1010111 @r_vm
> +vredor_vs       000010 . ..... ..... 010 ..... 1010111 @r_vm
> +vredxor_vs      000011 . ..... ..... 010 ..... 1010111 @r_vm
> +vredminu_vs     000100 . ..... ..... 010 ..... 1010111 @r_vm
> +vredmin_vs      000101 . ..... ..... 010 ..... 1010111 @r_vm
> +vredmaxu_vs     000110 . ..... ..... 010 ..... 1010111 @r_vm
> +vredmax_vs      000111 . ..... ..... 010 ..... 1010111 @r_vm
>
>  vsetvli         0 ........... ..... 111 ..... 1010111  @r2_zimm
>  vsetvl          1000000 ..... ..... 111 ..... 1010111  @r
> diff --git a/target/riscv/insn_trans/trans_rvv.inc.c b/target/riscv/insn_trans/trans_rvv.inc.c
> index e63b88a4cc..9dfb9358a2 100644
> --- a/target/riscv/insn_trans/trans_rvv.inc.c
> +++ b/target/riscv/insn_trans/trans_rvv.inc.c
> @@ -2315,3 +2315,21 @@ GEN_OPFV_NARROW_TRANS(vfncvt_x_f_v)
>  GEN_OPFV_NARROW_TRANS(vfncvt_f_xu_v)
>  GEN_OPFV_NARROW_TRANS(vfncvt_f_x_v)
>  GEN_OPFV_NARROW_TRANS(vfncvt_f_f_v)
> +
> +/*
> + *** Vector Reduction Operations
> + */
> +/* Vector Single-Width Integer Reduction Instructions */
> +static bool reduction_check(DisasContext *s, arg_rmrr *a)
> +{
> +    return vext_check_isa_ill(s) && vext_check_reg(s, a->rs2, false);
> +}
> +
> +GEN_OPIVV_TRANS(vredsum_vs, reduction_check)
> +GEN_OPIVV_TRANS(vredmaxu_vs, reduction_check)
> +GEN_OPIVV_TRANS(vredmax_vs, reduction_check)
> +GEN_OPIVV_TRANS(vredminu_vs, reduction_check)
> +GEN_OPIVV_TRANS(vredmin_vs, reduction_check)
> +GEN_OPIVV_TRANS(vredand_vs, reduction_check)
> +GEN_OPIVV_TRANS(vredor_vs, reduction_check)
> +GEN_OPIVV_TRANS(vredxor_vs, reduction_check)
> diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
> index 8e525720d1..00ed6a75a5 100644
> --- a/target/riscv/vector_helper.c
> +++ b/target/riscv/vector_helper.c
> @@ -4331,3 +4331,77 @@ RVVCALL(OPFVV1, vfncvt_f_f_v_h, NOP_UU_H, H2, H4, vfncvtffv16)
>  RVVCALL(OPFVV1, vfncvt_f_f_v_w, NOP_UU_W, H4, H8, float64_to_float32)
>  GEN_VEXT_V_ENV(vfncvt_f_f_v_h, 2, 2, clearh)
>  GEN_VEXT_V_ENV(vfncvt_f_f_v_w, 4, 4, clearl)
> +
> +/*
> + *** Vector Reduction Operations
> + */
> +/* Vector Single-Width Integer Reduction Instructions */
> +#define GEN_VEXT_RED(NAME, TD, TS2, HD, HS2, OP, CLEAR_FN)\
> +void HELPER(NAME)(void *vd, void *v0, void *vs1,          \
> +        void *vs2, CPURISCVState *env, uint32_t desc)     \
> +{                                                         \
> +    uint32_t mlen = vext_mlen(desc);                      \
> +    uint32_t vm = vext_vm(desc);                          \
> +    uint32_t vl = env->vl;                                \
> +    uint32_t i;                                           \
> +    uint32_t tot = env_archcpu(env)->cfg.vlen / 8;        \
> +    TD s1 =  *((TD *)vs1 + HD(0));                        \
> +                                                          \
> +    for (i = 0; i < vl; i++) {                            \
> +        TS2 s2 = *((TS2 *)vs2 + HS2(i));                  \
> +        if (!vm && !vext_elem_mask(v0, mlen, i)) {        \
> +            continue;                                     \
> +        }                                                 \
> +        s1 = OP(s1, (TD)s2);                              \
> +    }                                                     \
> +    *((TD *)vd + HD(0)) = s1;                             \
> +    CLEAR_FN(vd, 1, sizeof(TD), tot);                     \
> +}
> +
> +/* vd[0] = sum(vs1[0], vs2[*]) */
> +GEN_VEXT_RED(vredsum_vs_b, int8_t, int8_t, H1, H1, DO_ADD, clearb)
> +GEN_VEXT_RED(vredsum_vs_h, int16_t, int16_t, H2, H2, DO_ADD, clearh)
> +GEN_VEXT_RED(vredsum_vs_w, int32_t, int32_t, H4, H4, DO_ADD, clearl)
> +GEN_VEXT_RED(vredsum_vs_d, int64_t, int64_t, H8, H8, DO_ADD, clearq)
> +
> +/* vd[0] = maxu(vs1[0], vs2[*]) */
> +GEN_VEXT_RED(vredmaxu_vs_b, uint8_t, uint8_t, H1, H1, DO_MAX, clearb)
> +GEN_VEXT_RED(vredmaxu_vs_h, uint16_t, uint16_t, H2, H2, DO_MAX, clearh)
> +GEN_VEXT_RED(vredmaxu_vs_w, uint32_t, uint32_t, H4, H4, DO_MAX, clearl)
> +GEN_VEXT_RED(vredmaxu_vs_d, uint64_t, uint64_t, H8, H8, DO_MAX, clearq)
> +
> +/* vd[0] = max(vs1[0], vs2[*]) */
> +GEN_VEXT_RED(vredmax_vs_b, int8_t, int8_t, H1, H1, DO_MAX, clearb)
> +GEN_VEXT_RED(vredmax_vs_h, int16_t, int16_t, H2, H2, DO_MAX, clearh)
> +GEN_VEXT_RED(vredmax_vs_w, int32_t, int32_t, H4, H4, DO_MAX, clearl)
> +GEN_VEXT_RED(vredmax_vs_d, int64_t, int64_t, H8, H8, DO_MAX, clearq)
> +
> +/* vd[0] = minu(vs1[0], vs2[*]) */
> +GEN_VEXT_RED(vredminu_vs_b, uint8_t, uint8_t, H1, H1, DO_MIN, clearb)
> +GEN_VEXT_RED(vredminu_vs_h, uint16_t, uint16_t, H2, H2, DO_MIN, clearh)
> +GEN_VEXT_RED(vredminu_vs_w, uint32_t, uint32_t, H4, H4, DO_MIN, clearl)
> +GEN_VEXT_RED(vredminu_vs_d, uint64_t, uint64_t, H8, H8, DO_MIN, clearq)
> +
> +/* vd[0] = min(vs1[0], vs2[*]) */
> +GEN_VEXT_RED(vredmin_vs_b, int8_t, int8_t, H1, H1, DO_MIN, clearb)
> +GEN_VEXT_RED(vredmin_vs_h, int16_t, int16_t, H2, H2, DO_MIN, clearh)
> +GEN_VEXT_RED(vredmin_vs_w, int32_t, int32_t, H4, H4, DO_MIN, clearl)
> +GEN_VEXT_RED(vredmin_vs_d, int64_t, int64_t, H8, H8, DO_MIN, clearq)
> +
> +/* vd[0] = and(vs1[0], vs2[*]) */
> +GEN_VEXT_RED(vredand_vs_b, int8_t, int8_t, H1, H1, DO_AND, clearb)
> +GEN_VEXT_RED(vredand_vs_h, int16_t, int16_t, H2, H2, DO_AND, clearh)
> +GEN_VEXT_RED(vredand_vs_w, int32_t, int32_t, H4, H4, DO_AND, clearl)
> +GEN_VEXT_RED(vredand_vs_d, int64_t, int64_t, H8, H8, DO_AND, clearq)
> +
> +/* vd[0] = or(vs1[0], vs2[*]) */
> +GEN_VEXT_RED(vredor_vs_b, int8_t, int8_t, H1, H1, DO_OR, clearb)
> +GEN_VEXT_RED(vredor_vs_h, int16_t, int16_t, H2, H2, DO_OR, clearh)
> +GEN_VEXT_RED(vredor_vs_w, int32_t, int32_t, H4, H4, DO_OR, clearl)
> +GEN_VEXT_RED(vredor_vs_d, int64_t, int64_t, H8, H8, DO_OR, clearq)
> +
> +/* vd[0] = xor(vs1[0], vs2[*]) */
> +GEN_VEXT_RED(vredxor_vs_b, int8_t, int8_t, H1, H1, DO_XOR, clearb)
> +GEN_VEXT_RED(vredxor_vs_h, int16_t, int16_t, H2, H2, DO_XOR, clearh)
> +GEN_VEXT_RED(vredxor_vs_w, int32_t, int32_t, H4, H4, DO_XOR, clearl)
> +GEN_VEXT_RED(vredxor_vs_d, int64_t, int64_t, H8, H8, DO_XOR, clearq)
> --
> 2.23.0
>
>



reply via email to

[Prev in Thread] Current Thread [Next in Thread]