From: ~eopxd
Subject: [PATCH qemu v12 14/15] target/riscv: rvv: Add tail agnostic for vector permutation instructions
Date: Thu, 28 Apr 2022 06:40:06 -0000
From: eopXD <eop.chen@sifive.com>
Signed-off-by: eop Chen <eop.chen@sifive.com>
Reviewed-by: Frank Chang <frank.chang@sifive.com>
Reviewed-by: Weiwei Li <liweiwei@iscas.ac.cn>
---
target/riscv/insn_trans/trans_rvv.c.inc | 22 ++++++++++++++
target/riscv/vector_helper.c | 40 +++++++++++++++++++++++++
2 files changed, 62 insertions(+)
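For context, the helper-side changes below all apply the same idea: when the tail agnostic bit (vta) is set, every destination byte from vl * esz up to total_elems * esz is overwritten with 1s after the body elements have been written. A minimal stand-alone C sketch of that fill, using hypothetical names (set_tail_1s and its parameters) rather than QEMU's vext_set_elems_1s(), and ignoring the host byte-order handling the real helper needs:

#include <stdint.h>
#include <string.h>

/*
 * Sketch only: fill the tail of a vector destination with all 1s when the
 * tail agnostic policy is in effect.  cnt_bytes = vl * esz marks the end of
 * the body, tot_bytes = total_elems * esz the end of the register group.
 */
static void set_tail_1s(void *vd, uint32_t vta, uint32_t cnt_bytes,
                        uint32_t tot_bytes)
{
    if (!vta || cnt_bytes >= tot_bytes) {
        return;                       /* tail undisturbed, or no tail */
    }
    memset((uint8_t *)vd + cnt_bytes, 0xff, tot_bytes - cnt_bytes);
}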
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
index ab9f876c28..2c9993844a 100644
--- a/target/riscv/insn_trans/trans_rvv.c.inc
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
@@ -3744,6 +3744,16 @@ static bool trans_vrgather_vx(DisasContext *s, arg_rmrr *a)
}
if (a->vm && s->vl_eq_vlmax) {
+ if (s->vta && s->lmul < 0) {
+ /*
+ * tail elements may pass vlmax when lmul < 0
+ * set tail elements to 1s
+ */
+ uint32_t vlenb = s->cfg_ptr->vlen >> 3;
+ tcg_gen_gvec_ori(s->sew, vreg_ofs(s, a->rd),
+ vreg_ofs(s, a->rd), -1,
+ vlenb, vlenb);
+ }
int scale = s->lmul - (s->sew + 3);
int vlmax = scale < 0 ?
s->cfg_ptr->vlen >> -scale : s->cfg_ptr->vlen << scale;
@@ -3777,6 +3787,16 @@ static bool trans_vrgather_vi(DisasContext *s, arg_rmrr *a)
}
if (a->vm && s->vl_eq_vlmax) {
+ if (s->vta && s->lmul < 0) {
+ /*
+ * tail elements may pass vlmax when lmul < 0
+ * set tail elements to 1s
+ */
+ uint32_t vlenb = s->cfg_ptr->vlen >> 3;
+ tcg_gen_gvec_ori(s->sew, vreg_ofs(s, a->rd),
+ vreg_ofs(s, a->rd), -1,
+ vlenb, vlenb);
+ }
int scale = s->lmul - (s->sew + 3);
int vlmax = scale < 0 ?
s->cfg_ptr->vlen >> -scale : s->cfg_ptr->vlen << scale;
@@ -3829,6 +3849,7 @@ static bool trans_vcompress_vm(DisasContext *s, arg_r *a)
tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+ data = FIELD_DP32(data, VDATA, VTA, s->vta);
tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2),
cpu_env, s->cfg_ptr->vlen / 8,
@@ -3934,6 +3955,7 @@ static bool int_ext_op(DisasContext *s, arg_rmr *a, uint8_t seq)
}
data = FIELD_DP32(data, VDATA, VM, a->vm);
+ data = FIELD_DP32(data, VDATA, VTA, s->vta);
tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
vreg_ofs(s, a->rs2), cpu_env,
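The two trans_vrgather_v[xi] hunks above need the extra pre-fill because, for fractional LMUL, the gvec expansion only writes vlmax elements while the tail agnostic policy covers the whole destination register. A small worked example of that arithmetic, under my own assumed configuration (vlen = 128, e32 so the sew field is 2, lmul = -1 i.e. mf2), not code from the patch:

#include <stdio.h>

int main(void)
{
    /* assumed configuration: vlen = 128 bits, SEW = 32 (sew field 2), LMUL = 1/2 */
    int vlen = 128, sew = 2, lmul = -1;

    int scale = lmul - (sew + 3);                           /* -6 */
    int vlmax = scale < 0 ? vlen >> -scale : vlen << scale; /* 2 elements */
    int vlenb = vlen >> 3;                                  /* 16 bytes per register */
    int body  = vlmax << sew;                               /* 8 bytes the gather writes */

    /*
     * The remaining vlenb - body = 8 bytes are tail; only the
     * tcg_gen_gvec_ori(..., -1, vlenb, vlenb) call added by the patch
     * sets them to 1s before the gather runs.
     */
    printf("vlmax=%d body=%d tail=%d bytes\n", vlmax, body, vlenb - body);
    return 0;
}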
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index 9a663a406d..b6be95a7e4 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -4911,6 +4911,9 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
{ \
uint32_t vm = vext_vm(desc); \
uint32_t vl = env->vl; \
+ uint32_t esz = sizeof(ETYPE); \
+ uint32_t total_elems = vext_get_total_elems(env, desc, esz); \
+ uint32_t vta = vext_vta(desc); \
target_ulong offset = s1, i_min, i; \
\
i_min = MAX(env->vstart, offset); \
@@ -4920,6 +4923,8 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
} \
*((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i - offset)); \
} \
+ /* set tail elements to 1s */ \
+ vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
}
/* vslideup.vx vd, vs2, rs1, vm # vd[i+rs1] = vs2[i] */
@@ -4935,6 +4940,9 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
uint32_t vlmax = vext_max_elems(desc, ctzl(sizeof(ETYPE))); \
uint32_t vm = vext_vm(desc); \
uint32_t vl = env->vl; \
+ uint32_t esz = sizeof(ETYPE); \
+ uint32_t total_elems = vext_get_total_elems(env, desc, esz); \
+ uint32_t vta = vext_vta(desc); \
target_ulong i_max, i; \
\
i_max = MAX(MIN(s1 < vlmax ? vlmax - s1 : 0, vl), env->vstart); \
@@ -4951,6 +4959,8 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
} \
\
env->vstart = 0; \
+ /* set tail elements to 1s */ \
+ vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
}
/* vslidedown.vx vd, vs2, rs1, vm # vd[i] = vs2[i+rs1] */
@@ -4966,6 +4976,9 @@ static void vslide1up_##BITWIDTH(void *vd, void *v0, target_ulong s1, \
typedef uint##BITWIDTH##_t ETYPE; \
uint32_t vm = vext_vm(desc); \
uint32_t vl = env->vl; \
+ uint32_t esz = sizeof(ETYPE); \
+ uint32_t total_elems = vext_get_total_elems(env, desc, esz); \
+ uint32_t vta = vext_vta(desc); \
uint32_t i; \
\
for (i = env->vstart; i < vl; i++) { \
@@ -4979,6 +4992,8 @@ static void vslide1up_##BITWIDTH(void *vd, void *v0, target_ulong s1, \
} \
} \
env->vstart = 0; \
+ /* set tail elements to 1s */ \
+ vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
}
GEN_VEXT_VSLIE1UP(8, H1)
@@ -5006,6 +5021,9 @@ static void vslide1down_##BITWIDTH(void *vd, void *v0, target_ulong s1, \
typedef uint##BITWIDTH##_t ETYPE; \
uint32_t vm = vext_vm(desc); \
uint32_t vl = env->vl; \
+ uint32_t esz = sizeof(ETYPE); \
+ uint32_t total_elems = vext_get_total_elems(env, desc, esz); \
+ uint32_t vta = vext_vta(desc); \
uint32_t i; \
\
for (i = env->vstart; i < vl; i++) { \
@@ -5019,6 +5037,8 @@ static void vslide1down_##BITWIDTH(void *vd, void *v0, target_ulong s1, \
} \
} \
env->vstart = 0; \
+ /* set tail elements to 1s */ \
+ vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
}
GEN_VEXT_VSLIDE1DOWN(8, H1)
@@ -5072,6 +5092,9 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
uint32_t vlmax = vext_max_elems(desc, ctzl(sizeof(TS2))); \
uint32_t vm = vext_vm(desc); \
uint32_t vl = env->vl; \
+ uint32_t esz = sizeof(TS2); \
+ uint32_t total_elems = vext_get_total_elems(env, desc, esz); \
+ uint32_t vta = vext_vta(desc); \
uint64_t index; \
uint32_t i; \
\
@@ -5087,6 +5110,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
} \
} \
env->vstart = 0; \
+ /* set tail elements to 1s */ \
+ vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
}
/* vd[i] = (vs1[i] >= VLMAX) ? 0 : vs2[vs1[i]]; */
@@ -5107,6 +5132,9 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
uint32_t vlmax = vext_max_elems(desc, ctzl(sizeof(ETYPE))); \
uint32_t vm = vext_vm(desc); \
uint32_t vl = env->vl; \
+ uint32_t esz = sizeof(ETYPE); \
+ uint32_t total_elems = vext_get_total_elems(env, desc, esz); \
+ uint32_t vta = vext_vta(desc); \
uint64_t index = s1; \
uint32_t i; \
\
@@ -5121,6 +5149,8 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
} \
} \
env->vstart = 0; \
+ /* set tail elements to 1s */ \
+ vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
}
/* vd[i] = (x[rs1] >= VLMAX) ? 0 : vs2[rs1] */
@@ -5135,6 +5165,9 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
CPURISCVState *env, uint32_t desc) \
{ \
uint32_t vl = env->vl; \
+ uint32_t esz = sizeof(ETYPE); \
+ uint32_t total_elems = vext_get_total_elems(env, desc, esz); \
+ uint32_t vta = vext_vta(desc); \
uint32_t num = 0, i; \
\
for (i = env->vstart; i < vl; i++) { \
@@ -5145,6 +5178,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
num++; \
} \
env->vstart = 0; \
+ /* set tail elements to 1s */ \
+ vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
}
/* Compress into vd elements of vs2 where vs1 is enabled */
@@ -5181,6 +5216,9 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
{ \
uint32_t vl = env->vl; \
uint32_t vm = vext_vm(desc); \
+ uint32_t esz = sizeof(ETYPE); \
+ uint32_t total_elems = vext_get_total_elems(env, desc, esz); \
+ uint32_t vta = vext_vta(desc); \
uint32_t i; \
\
for (i = env->vstart; i < vl; i++) { \
@@ -5190,6 +5228,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
*((ETYPE *)vd + HD(i)) = *((DTYPE *)vs2 + HS1(i)); \
} \
env->vstart = 0; \
+ /* set tail elements to 1s */ \
+ vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
}
GEN_VEXT_INT_EXT(vzext_vf2_h, uint16_t, uint8_t, H2, H1)
--
2.34.2
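A note on the desc plumbing used throughout the helper hunks: the translator packs s->vta into the VDATA word with FIELD_DP32(data, VDATA, VTA, s->vta) and the helpers read it back through vext_vta(desc). A hedged sketch of that pack/extract pattern, using a made-up bit position (the real VDATA field layout is defined in QEMU's target/riscv sources and differs):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical field position for illustration only. */
#define VTA_SHIFT 9
#define VTA_MASK  (1u << VTA_SHIFT)

static uint32_t deposit_vta(uint32_t data, uint32_t vta)
{
    /* translator side: merge the vta bit into the descriptor word */
    return (data & ~VTA_MASK) | ((vta & 1u) << VTA_SHIFT);
}

static uint32_t extract_vta(uint32_t desc)
{
    /* helper side: recover the vta bit from the descriptor word */
    return (desc >> VTA_SHIFT) & 1u;
}

int main(void)
{
    uint32_t data = deposit_vta(0, 1);
    printf("vta = %u\n", extract_vta(data));   /* prints 1 */
    return 0;
}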