From: Max Chou
Subject: Re: [PATCH for-9.0 v14 4/8] target/riscv/vector_helpers: do early exit when vstart >= vl
Date: Thu, 14 Mar 2024 21:14:48 +0800
User-agent: Mozilla Thunderbird
According to the V spec, section 7.9 (Vector Load/Store Whole Register Instructions):
"The instructions operate with an effective vector length, evl=NFIELDS*VLEN/EEW, regardless of current settings in vtype and vl. The usual property that no elements are written if vstart ≥ vl does not apply to these instructions. Instead, no elements are written if vstart ≥ evl."
The VSTART_CHECK_EARLY_EXIT in the vext_ldst_whole function may therefore cause unexpected results. We may replace the VSTART_CHECK_EARLY_EXIT call with:
-    VSTART_CHECK_EARLY_EXIT(env);
+    if (env->vstart >= ((vlenb * nf) >> log2_esz)) {
+        env->vstart = 0;
+        return;
+    }
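For illustration only, here is a minimal standalone C sketch of what that check computes (whole_reg_early_exit() and the example numbers are hypothetical, not the QEMU helper itself): the whole-register effective length evl = NFIELDS*VLEN/EEW, expressed in elements, is (vlenb * nf) >> log2_esz, and only vstart >= evl should skip the instruction.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical sketch: decide whether a whole-register load/store still has
 * work left for a given vstart.  The names mirror the helper's locals.
 */
static bool whole_reg_early_exit(uint32_t vstart, uint32_t vlenb,
                                 uint32_t nf, uint32_t log2_esz)
{
    /* evl = NFIELDS * VLEN / EEW, in elements */
    uint32_t evl = (vlenb * nf) >> log2_esz;
    return vstart >= evl;   /* spec: no elements written when vstart >= evl */
}

int main(void)
{
    /* Example: vl8re32.v with VLEN=128 bits -> vlenb=16, nf=8, EEW=32 */
    uint32_t vlenb = 16, nf = 8, log2_esz = 2;
    uint32_t vl = 4;   /* current vl; irrelevant for whole-register insns */
    uint32_t vstart = 10;

    /* vstart >= vl would exit, but evl = (16 * 8) >> 2 = 32, so work remains */
    printf("exit on vl check: %d, exit on evl check: %d\n",
           vstart >= vl, whole_reg_early_exit(vstart, vlenb, nf, log2_esz));
    return 0;
}

With these example numbers the generic vstart >= vl check would skip the whole instruction, while the evl-based check correctly leaves elements 10..31 to be processed.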
@@ -572,6 +580,8 @@ vext_ldst_whole(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
     uint32_t vlenb = riscv_cpu_cfg(env)->vlenb;
     uint32_t max_elems = vlenb >> log2_esz;

+    VSTART_CHECK_EARLY_EXIT(env);
+
     k = env->vstart / max_elems;
     off = env->vstart % max_elems;

@@ -877,6 +887,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
     uint32_t vta = vext_vta(desc); \
     uint32_t i; \
     \
+    VSTART_CHECK_EARLY_EXIT(env); \
+    \
     for (i = env->vstart; i < vl; i++) { \
         ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
         ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
@@ -909,6 +921,8 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
     uint32_t vta = vext_vta(desc); \
     uint32_t i; \
     \
+    VSTART_CHECK_EARLY_EXIT(env); \
+    \
     for (i = env->vstart; i < vl; i++) { \
         ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
         ETYPE carry = vext_elem_mask(v0, i); \
@@ -944,6 +958,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
     uint32_t vta_all_1s = vext_vta_all_1s(desc); \
     uint32_t i; \
     \
+    VSTART_CHECK_EARLY_EXIT(env); \
+    \
     for (i = env->vstart; i < vl; i++) { \
         ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
         ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
@@ -982,6 +998,8 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
     uint32_t vta_all_1s = vext_vta_all_1s(desc); \
     uint32_t i; \
     \
+    VSTART_CHECK_EARLY_EXIT(env); \
+    \
     for (i = env->vstart; i < vl; i++) { \
         ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
         ETYPE carry = !vm && vext_elem_mask(v0, i); \
@@ -1078,6 +1096,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
     uint32_t vma = vext_vma(desc); \
     uint32_t i; \
     \
+    VSTART_CHECK_EARLY_EXIT(env); \
+    \
     for (i = env->vstart; i < vl; i++) { \
         if (!vm && !vext_elem_mask(v0, i)) { \
             /* set masked-off elements to 1s */ \
@@ -1125,6 +1145,8 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
     uint32_t vma = vext_vma(desc); \
     uint32_t i; \
     \
+    VSTART_CHECK_EARLY_EXIT(env); \
+    \
     for (i = env->vstart; i < vl; i++) { \
         if (!vm && !vext_elem_mask(v0, i)) { \
             /* set masked-off elements to 1s */ \
@@ -1187,6 +1209,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
     uint32_t vma = vext_vma(desc); \
     uint32_t i; \
     \
+    VSTART_CHECK_EARLY_EXIT(env); \
+    \
     for (i = env->vstart; i < vl; i++) { \
         ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
         ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
@@ -1252,6 +1276,8 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
     uint32_t vma = vext_vma(desc); \
     uint32_t i; \
     \
+    VSTART_CHECK_EARLY_EXIT(env); \
+    \
     for (i = env->vstart; i < vl; i++) { \
         ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
         if (!vm && !vext_elem_mask(v0, i)) { \
@@ -1799,6 +1825,8 @@ void HELPER(NAME)(void *vd, void *vs1, CPURISCVState *env, \
     uint32_t vta = vext_vta(desc); \
     uint32_t i; \
     \
+    VSTART_CHECK_EARLY_EXIT(env); \
+    \
     for (i = env->vstart; i < vl; i++) { \
         ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
         *((ETYPE *)vd + H(i)) = s1; \
@@ -1823,6 +1851,8 @@ void HELPER(NAME)(void *vd, uint64_t s1, CPURISCVState *env, \
     uint32_t vta = vext_vta(desc); \
     uint32_t i; \
     \
+    VSTART_CHECK_EARLY_EXIT(env); \
+    \
     for (i = env->vstart; i < vl; i++) { \
         *((ETYPE *)vd + H(i)) = (ETYPE)s1; \
     } \
@@ -1846,6 +1876,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
     uint32_t vta = vext_vta(desc); \
     uint32_t i; \
     \
+    VSTART_CHECK_EARLY_EXIT(env); \
+    \
     for (i = env->vstart; i < vl; i++) { \
         ETYPE *vt = (!vext_elem_mask(v0, i) ? vs2 : vs1); \
         *((ETYPE *)vd + H(i)) = *(vt + H(i)); \
@@ -1870,6 +1902,8 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
     uint32_t vta = vext_vta(desc); \
     uint32_t i; \
     \
+    VSTART_CHECK_EARLY_EXIT(env); \
+    \
     for (i = env->vstart; i < vl; i++) { \
         ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
         ETYPE d = (!vext_elem_mask(v0, i) ? s2 : \
@@ -1915,6 +1949,8 @@ vext_vv_rm_1(void *vd, void *v0, void *vs1, void *vs2,
              uint32_t vl, uint32_t vm, int vxrm,
              opivv2_rm_fn *fn, uint32_t vma, uint32_t esz)
 {
+    VSTART_CHECK_EARLY_EXIT(env);
+
     for (uint32_t i = env->vstart; i < vl; i++) {
         if (!vm && !vext_elem_mask(v0, i)) {
             /* set masked-off elements to 1s */
@@ -2040,6 +2076,8 @@ vext_vx_rm_1(void *vd, void *v0, target_long s1, void *vs2,
              uint32_t vl, uint32_t vm, int vxrm,
              opivx2_rm_fn *fn, uint32_t vma, uint32_t esz)
 {
+    VSTART_CHECK_EARLY_EXIT(env);
+
     for (uint32_t i = env->vstart; i < vl; i++) {
         if (!vm && !vext_elem_mask(v0, i)) {
             /* set masked-off elements to 1s */
@@ -2837,6 +2875,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
     uint32_t vma = vext_vma(desc); \
     uint32_t i; \
     \
+    VSTART_CHECK_EARLY_EXIT(env); \
+    \
     for (i = env->vstart; i < vl; i++) { \
         if (!vm && !vext_elem_mask(v0, i)) { \
             /* set masked-off elements to 1s */ \
@@ -2880,6 +2920,8 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, \
     uint32_t vma = vext_vma(desc); \
     uint32_t i; \
     \
+    VSTART_CHECK_EARLY_EXIT(env); \
+    \
     for (i = env->vstart; i < vl; i++) { \
         if (!vm && !vext_elem_mask(v0, i)) { \
             /* set masked-off elements to 1s */ \
@@ -3466,6 +3508,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
     uint32_t vma = vext_vma(desc); \
     uint32_t i; \
     \
+    VSTART_CHECK_EARLY_EXIT(env); \
+    \
     if (vl == 0) { \
         return; \
     } \
@@ -3987,6 +4031,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
     uint32_t vma = vext_vma(desc); \
     uint32_t i; \
     \
+    VSTART_CHECK_EARLY_EXIT(env); \
+    \
     for (i = env->vstart; i < vl; i++) { \
         ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
         ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
@@ -4027,6 +4073,8 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
     uint32_t vma = vext_vma(desc); \
     uint32_t i; \
     \
+    VSTART_CHECK_EARLY_EXIT(env); \
+    \
     for (i = env->vstart; i < vl; i++) { \
         ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
         if (!vm && !vext_elem_mask(v0, i)) { \
@@ -4220,6 +4268,8 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
     uint32_t vta = vext_vta(desc); \
     uint32_t i; \
     \
+    VSTART_CHECK_EARLY_EXIT(env); \
+    \
     for (i = env->vstart; i < vl; i++) { \
         ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
         *((ETYPE *)vd + H(i)) = \
@@ -4386,6 +4436,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
     uint32_t i; \
     TD s1 = *((TD *)vs1 + HD(0)); \
     \
+    VSTART_CHECK_EARLY_EXIT(env); \
+    \
     for (i = env->vstart; i < vl; i++) { \
         TS2 s2 = *((TS2 *)vs2 + HS2(i)); \
         if (!vm && !vext_elem_mask(v0, i)) { \
@@ -4472,6 +4524,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
     uint32_t i; \
     TD s1 = *((TD *)vs1 + HD(0)); \
     \
+    VSTART_CHECK_EARLY_EXIT(env); \
+    \
     for (i = env->vstart; i < vl; i++) { \
         TS2 s2 = *((TS2 *)vs2 + HS2(i)); \
         if (!vm && !vext_elem_mask(v0, i)) { \
@@ -4544,6 +4598,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
     uint32_t i; \
     int a, b; \
     \
+    VSTART_CHECK_EARLY_EXIT(env); \
+    \
     for (i = env->vstart; i < vl; i++) { \
         a = vext_elem_mask(vs1, i); \
         b = vext_elem_mask(vs2, i); \
@@ -4585,6 +4641,11 @@ target_ulong HELPER(vcpop_m)(void *v0, void *vs2, CPURISCVState *env,
     uint32_t vl = env->vl;
     int i;

+    if (env->vstart >= env->vl) {
+        env->vstart = 0;
+        return 0;
+    }
+
     for (i = env->vstart; i < vl; i++) {
         if (vm || vext_elem_mask(v0, i)) {
             if (vext_elem_mask(vs2, i)) {
@@ -4604,6 +4665,11 @@ target_ulong HELPER(vfirst_m)(void *v0, void *vs2, CPURISCVState *env,
     uint32_t vl = env->vl;
     int i;

+    if (env->vstart >= env->vl) {
+        env->vstart = 0;
+        return 0;
+    }
+
     for (i = env->vstart; i < vl; i++) {
         if (vm || vext_elem_mask(v0, i)) {
             if (vext_elem_mask(vs2, i)) {
@@ -4632,6 +4698,8 @@ static void vmsetm(void *vd, void *v0, void *vs2, CPURISCVState *env,
     int i;
     bool first_mask_bit = false;

+    VSTART_CHECK_EARLY_EXIT(env);
+
     for (i = env->vstart; i < vl; i++) {
         if (!vm && !vext_elem_mask(v0, i)) {
             /* set masked-off elements to 1s */
@@ -4704,6 +4772,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, CPURISCVState *env, \
     uint32_t sum = 0; \
     int i; \
     \
+    VSTART_CHECK_EARLY_EXIT(env); \
+    \
     for (i = env->vstart; i < vl; i++) { \
         if (!vm && !vext_elem_mask(v0, i)) { \
             /* set masked-off elements to 1s */ \
@@ -4737,6 +4807,8 @@ void HELPER(NAME)(void *vd, void *v0, CPURISCVState *env, uint32_t desc) \
     uint32_t vma = vext_vma(desc); \
     int i; \
     \
+    VSTART_CHECK_EARLY_EXIT(env); \
+    \
     for (i = env->vstart; i < vl; i++) { \
         if (!vm && !vext_elem_mask(v0, i)) { \
             /* set masked-off elements to 1s */ \
@@ -4772,6 +4844,8 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
     uint32_t vma = vext_vma(desc); \
     target_ulong offset = s1, i_min, i; \
     \
+    VSTART_CHECK_EARLY_EXIT(env); \
+    \
     i_min = MAX(env->vstart, offset); \
     for (i = i_min; i < vl; i++) { \
         if (!vm && !vext_elem_mask(v0, i)) { \
@@ -4805,6 +4879,8 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
     uint32_t vma = vext_vma(desc); \
     target_ulong i_max, i_min, i; \
     \
+    VSTART_CHECK_EARLY_EXIT(env); \
+    \
     i_min = MIN(s1 < vlmax ? vlmax - s1 : 0, vl); \
     i_max = MAX(i_min, env->vstart); \
     for (i = env->vstart; i < i_max; ++i) { \
@@ -4847,6 +4923,8 @@ static void vslide1up_##BITWIDTH(void *vd, void *v0, uint64_t s1, \
     uint32_t vma = vext_vma(desc); \
     uint32_t i; \
     \
+    VSTART_CHECK_EARLY_EXIT(env); \
+    \
     for (i = env->vstart; i < vl; i++) { \
         if (!vm && !vext_elem_mask(v0, i)) { \
             /* set masked-off elements to 1s */ \
@@ -4896,6 +4974,8 @@ static void vslide1down_##BITWIDTH(void *vd, void *v0, uint64_t s1, \
     uint32_t vma = vext_vma(desc); \
     uint32_t i; \
     \
+    VSTART_CHECK_EARLY_EXIT(env); \
+    \
     for (i = env->vstart; i < vl; i++) { \
         if (!vm && !vext_elem_mask(v0, i)) { \
             /* set masked-off elements to 1s */ \
@@ -4971,6 +5051,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
     uint64_t index; \
     uint32_t i; \
     \
+    VSTART_CHECK_EARLY_EXIT(env); \
+    \
     for (i = env->vstart; i < vl; i++) { \
         if (!vm && !vext_elem_mask(v0, i)) { \
             /* set masked-off elements to 1s */ \
@@ -5014,6 +5096,8 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
     uint64_t index = s1; \
     uint32_t i; \
     \
+    VSTART_CHECK_EARLY_EXIT(env); \
+    \
     for (i = env->vstart; i < vl; i++) { \
         if (!vm && !vext_elem_mask(v0, i)) { \
             /* set masked-off elements to 1s */ \
@@ -5048,6 +5132,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
     uint32_t vta = vext_vta(desc); \
     uint32_t num = 0, i; \
     \
+    VSTART_CHECK_EARLY_EXIT(env); \
+    \
     for (i = env->vstart; i < vl; i++) { \
         if (!vext_elem_mask(vs1, i)) { \
             continue; \
@@ -5100,6 +5186,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
     uint32_t vma = vext_vma(desc); \
     uint32_t i; \
     \
+    VSTART_CHECK_EARLY_EXIT(env); \
+    \
     for (i = env->vstart; i < vl; i++) { \
         if (!vm && !vext_elem_mask(v0, i)) { \
             /* set masked-off elements to 1s */ \
diff --git a/target/riscv/vector_internals.c b/target/riscv/vector_internals.c
index 12f5964fbb..996c21eb31 100644
--- a/target/riscv/vector_internals.c
+++ b/target/riscv/vector_internals.c
@@ -44,6 +44,8 @@ void do_vext_vv(void *vd, void *v0, void *vs1, void *vs2,
     uint32_t vma = vext_vma(desc);
     uint32_t i;

+    VSTART_CHECK_EARLY_EXIT(env);
+
     for (i = env->vstart; i < vl; i++) {
         if (!vm && !vext_elem_mask(v0, i)) {
             /* set masked-off elements to 1s */
@@ -68,6 +70,8 @@ void do_vext_vx(void *vd, void *v0, target_long s1, void *vs2,
     uint32_t vma = vext_vma(desc);
     uint32_t i;

+    VSTART_CHECK_EARLY_EXIT(env);
+
     for (i = env->vstart; i < vl; i++) {
         if (!vm && !vext_elem_mask(v0, i)) {
             /* set masked-off elements to 1s */
diff --git a/target/riscv/vector_internals.h b/target/riscv/vector_internals.h
index 842765f6c1..9e1e15b575 100644
--- a/target/riscv/vector_internals.h
+++ b/target/riscv/vector_internals.h
@@ -24,6 +24,13 @@
 #include "tcg/tcg-gvec-desc.h"
 #include "internals.h"

+#define VSTART_CHECK_EARLY_EXIT(env) do { \
+    if (env->vstart >= env->vl) { \
+        env->vstart = 0; \
+        return; \
+    } \
+} while (0)
+
 static inline uint32_t vext_nf(uint32_t desc)
 {
     return FIELD_EX32(simd_data(desc), VDATA, NF);
@@ -151,6 +158,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
     uint32_t vma = vext_vma(desc); \
     uint32_t i; \
     \
+    VSTART_CHECK_EARLY_EXIT(env); \
+    \
     for (i = env->vstart; i < vl; i++) { \
         if (!vm && !vext_elem_mask(v0, i)) { \
             /* set masked-off elements to 1s */ \