From: Alex Bennée
Subject: Re: [Qemu-arm] [Qemu-devel] [PATCH v6 07/35] target/arm: Implement SVE FP Multiply-Add Group
Date: Thu, 28 Jun 2018 11:53:40 +0100
User-agent: mu4e 1.1.0; emacs 26.1.50
Richard Henderson <address@hidden> writes:
> Signed-off-by: Richard Henderson <address@hidden>
>
> ---
> v6: Add some decode commentary.
> ---
> target/arm/helper-sve.h | 16 ++++
> target/arm/sve_helper.c | 158 +++++++++++++++++++++++++++++++++++++
> target/arm/translate-sve.c | 49 ++++++++++++
> target/arm/sve.decode | 18 +++++
> 4 files changed, 241 insertions(+)
>
> diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
> index 4097b55f0e..eb0645dd43 100644
> --- a/target/arm/helper-sve.h
> +++ b/target/arm/helper-sve.h
> @@ -827,6 +827,22 @@ DEF_HELPER_FLAGS_5(sve_ucvt_ds, TCG_CALL_NO_RWG,
> DEF_HELPER_FLAGS_5(sve_ucvt_dd, TCG_CALL_NO_RWG,
> void, ptr, ptr, ptr, ptr, i32)
>
> +DEF_HELPER_FLAGS_3(sve_fmla_zpzzz_h, TCG_CALL_NO_RWG, void, env, ptr, i32)
> +DEF_HELPER_FLAGS_3(sve_fmla_zpzzz_s, TCG_CALL_NO_RWG, void, env, ptr, i32)
> +DEF_HELPER_FLAGS_3(sve_fmla_zpzzz_d, TCG_CALL_NO_RWG, void, env, ptr, i32)
> +
> +DEF_HELPER_FLAGS_3(sve_fmls_zpzzz_h, TCG_CALL_NO_RWG, void, env, ptr, i32)
> +DEF_HELPER_FLAGS_3(sve_fmls_zpzzz_s, TCG_CALL_NO_RWG, void, env, ptr, i32)
> +DEF_HELPER_FLAGS_3(sve_fmls_zpzzz_d, TCG_CALL_NO_RWG, void, env, ptr, i32)
> +
> +DEF_HELPER_FLAGS_3(sve_fnmla_zpzzz_h, TCG_CALL_NO_RWG, void, env, ptr, i32)
> +DEF_HELPER_FLAGS_3(sve_fnmla_zpzzz_s, TCG_CALL_NO_RWG, void, env, ptr, i32)
> +DEF_HELPER_FLAGS_3(sve_fnmla_zpzzz_d, TCG_CALL_NO_RWG, void, env, ptr, i32)
> +
> +DEF_HELPER_FLAGS_3(sve_fnmls_zpzzz_h, TCG_CALL_NO_RWG, void, env, ptr, i32)
> +DEF_HELPER_FLAGS_3(sve_fnmls_zpzzz_s, TCG_CALL_NO_RWG, void, env, ptr, i32)
> +DEF_HELPER_FLAGS_3(sve_fnmls_zpzzz_d, TCG_CALL_NO_RWG, void, env, ptr, i32)
> +
> DEF_HELPER_FLAGS_4(sve_ld1bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
> DEF_HELPER_FLAGS_4(sve_ld2bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
> DEF_HELPER_FLAGS_4(sve_ld3bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
> diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
> index 3401662397..2f416e5e28 100644
> --- a/target/arm/sve_helper.c
> +++ b/target/arm/sve_helper.c
> @@ -2938,6 +2938,164 @@ DO_ZPZ_FP(sve_ucvt_dd, uint64_t, , uint64_to_float64)
>
> #undef DO_ZPZ_FP
>
> +/* 4-operand predicated multiply-add. This requires 7 operands to pass
> + * "properly", so we need to encode some of the registers into DESC.
> + */
How about:
With potential optimisations using movprfx we could end up with a
4-operand multiply-add (result = A * B + C) which, together with the
predicate, floating point status and vector description, adds up to 7
operands for the "proper" encoding. Instead we encode the source
register numbers in the spare space of DESC and index into env in the
helper, making for a more efficient call frame.
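
i.e. the resulting DESC layout would be something like this (offsets
relative to SIMD_DATA_SHIFT, which is why the build bug check below
insists SIMD_DATA_SHIFT + 20 fits in 32 bits):

  bits [+0  .. +4]   rd
  bits [+5  .. +9]   rn
  bits [+10 .. +14]  rm
  bits [+15 .. +19]  ra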
> +QEMU_BUILD_BUG_ON(SIMD_DATA_SHIFT + 20 > 32);
> +
> +static void do_fmla_zpzzz_h(CPUARMState *env, void *vg, uint32_t desc,
> + uint16_t neg1, uint16_t neg3)
> +{
> + intptr_t i = simd_oprsz(desc);
> + unsigned rd = extract32(desc, SIMD_DATA_SHIFT, 5);
> + unsigned rn = extract32(desc, SIMD_DATA_SHIFT + 5, 5);
> + unsigned rm = extract32(desc, SIMD_DATA_SHIFT + 10, 5);
> + unsigned ra = extract32(desc, SIMD_DATA_SHIFT + 15, 5);
> + void *vd = &env->vfp.zregs[rd];
> + void *vn = &env->vfp.zregs[rn];
> + void *vm = &env->vfp.zregs[rm];
> + void *va = &env->vfp.zregs[ra];
> + uint64_t *g = vg;
> +
> + do {
> + uint64_t pg = g[(i - 1) >> 6];
> + do {
> + i -= 2;
> + if (likely((pg >> (i & 63)) & 1)) {
> + float16 e1, e2, e3, r;
> +
> + e1 = *(uint16_t *)(vn + H1_2(i)) ^ neg1;
> + e2 = *(uint16_t *)(vm + H1_2(i));
> + e3 = *(uint16_t *)(va + H1_2(i)) ^ neg3;
> + r = float16_muladd(e1, e2, e3, 0, &env->vfp.fp_status);
> + *(uint16_t *)(vd + H1_2(i)) = r;
> + }
> + } while (i & 63);
> + } while (i != 0);
> +}
> +
> +void HELPER(sve_fmla_zpzzz_h)(CPUARMState *env, void *vg, uint32_t desc)
> +{
> + do_fmla_zpzzz_h(env, vg, desc, 0, 0);
> +}
> +
> +void HELPER(sve_fmls_zpzzz_h)(CPUARMState *env, void *vg, uint32_t desc)
> +{
> + do_fmla_zpzzz_h(env, vg, desc, 0x8000, 0);
> +}
> +
> +void HELPER(sve_fnmla_zpzzz_h)(CPUARMState *env, void *vg, uint32_t desc)
> +{
> + do_fmla_zpzzz_h(env, vg, desc, 0x8000, 0x8000);
> +}
> +
> +void HELPER(sve_fnmls_zpzzz_h)(CPUARMState *env, void *vg, uint32_t desc)
> +{
> + do_fmla_zpzzz_h(env, vg, desc, 0, 0x8000);
> +}
> +
> +static void do_fmla_zpzzz_s(CPUARMState *env, void *vg, uint32_t desc,
> + uint32_t neg1, uint32_t neg3)
> +{
> + intptr_t i = simd_oprsz(desc);
> + unsigned rd = extract32(desc, SIMD_DATA_SHIFT, 5);
> + unsigned rn = extract32(desc, SIMD_DATA_SHIFT + 5, 5);
> + unsigned rm = extract32(desc, SIMD_DATA_SHIFT + 10, 5);
> + unsigned ra = extract32(desc, SIMD_DATA_SHIFT + 15, 5);
> + void *vd = &env->vfp.zregs[rd];
> + void *vn = &env->vfp.zregs[rn];
> + void *vm = &env->vfp.zregs[rm];
> + void *va = &env->vfp.zregs[ra];
> + uint64_t *g = vg;
> +
> + do {
> + uint64_t pg = g[(i - 1) >> 6];
> + do {
> + i -= 4;
> + if (likely((pg >> (i & 63)) & 1)) {
> + float32 e1, e2, e3, r;
> +
> + e1 = *(uint32_t *)(vn + H1_4(i)) ^ neg1;
> + e2 = *(uint32_t *)(vm + H1_4(i));
> + e3 = *(uint32_t *)(va + H1_4(i)) ^ neg3;
> + r = float32_muladd(e1, e2, e3, 0, &env->vfp.fp_status);
> + *(uint32_t *)(vd + H1_4(i)) = r;
> + }
> + } while (i & 63);
> + } while (i != 0);
> +}
> +
> +void HELPER(sve_fmla_zpzzz_s)(CPUARMState *env, void *vg, uint32_t desc)
> +{
> + do_fmla_zpzzz_s(env, vg, desc, 0, 0);
> +}
> +
> +void HELPER(sve_fmls_zpzzz_s)(CPUARMState *env, void *vg, uint32_t desc)
> +{
> + do_fmla_zpzzz_s(env, vg, desc, 0x80000000, 0);
> +}
> +
> +void HELPER(sve_fnmla_zpzzz_s)(CPUARMState *env, void *vg, uint32_t desc)
> +{
> + do_fmla_zpzzz_s(env, vg, desc, 0x80000000, 0x80000000);
> +}
> +
> +void HELPER(sve_fnmls_zpzzz_s)(CPUARMState *env, void *vg, uint32_t desc)
> +{
> + do_fmla_zpzzz_s(env, vg, desc, 0, 0x80000000);
> +}
> +
> +static void do_fmla_zpzzz_d(CPUARMState *env, void *vg, uint32_t desc,
> + uint64_t neg1, uint64_t neg3)
> +{
> + intptr_t i = simd_oprsz(desc);
> + unsigned rd = extract32(desc, SIMD_DATA_SHIFT, 5);
> + unsigned rn = extract32(desc, SIMD_DATA_SHIFT + 5, 5);
> + unsigned rm = extract32(desc, SIMD_DATA_SHIFT + 10, 5);
> + unsigned ra = extract32(desc, SIMD_DATA_SHIFT + 15, 5);
> + void *vd = &env->vfp.zregs[rd];
> + void *vn = &env->vfp.zregs[rn];
> + void *vm = &env->vfp.zregs[rm];
> + void *va = &env->vfp.zregs[ra];
> + uint64_t *g = vg;
> +
> + do {
> + uint64_t pg = g[(i - 1) >> 6];
> + do {
> + i -= 8;
> + if (likely((pg >> (i & 63)) & 1)) {
> + float64 e1, e2, e3, r;
> +
> + e1 = *(uint64_t *)(vn + i) ^ neg1;
> + e2 = *(uint64_t *)(vm + i);
> + e3 = *(uint64_t *)(va + i) ^ neg3;
> + r = float64_muladd(e1, e2, e3, 0, &env->vfp.fp_status);
> + *(uint64_t *)(vd + i) = r;
> + }
> + } while (i & 63);
> + } while (i != 0);
> +}
> +
> +void HELPER(sve_fmla_zpzzz_d)(CPUARMState *env, void *vg, uint32_t desc)
> +{
> + do_fmla_zpzzz_d(env, vg, desc, 0, 0);
> +}
> +
> +void HELPER(sve_fmls_zpzzz_d)(CPUARMState *env, void *vg, uint32_t desc)
> +{
> + do_fmla_zpzzz_d(env, vg, desc, INT64_MIN, 0);
> +}
> +
> +void HELPER(sve_fnmla_zpzzz_d)(CPUARMState *env, void *vg, uint32_t desc)
> +{
> + do_fmla_zpzzz_d(env, vg, desc, INT64_MIN, INT64_MIN);
> +}
> +
> +void HELPER(sve_fnmls_zpzzz_d)(CPUARMState *env, void *vg, uint32_t desc)
> +{
> + do_fmla_zpzzz_d(env, vg, desc, 0, INT64_MIN);
> +}
> +
> /*
> * Load contiguous data, protected by a governing predicate.
> */
> diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
> index 4df5360da9..acad6374ef 100644
> --- a/target/arm/translate-sve.c
> +++ b/target/arm/translate-sve.c
> @@ -3472,6 +3472,55 @@ DO_FP3(FMULX, fmulx)
>
> #undef DO_FP3
>
> +typedef void gen_helper_sve_fmla(TCGv_env, TCGv_ptr, TCGv_i32);
> +
> +static bool do_fmla(DisasContext *s, arg_rprrr_esz *a, gen_helper_sve_fmla *fn)
> +{
> + if (fn == NULL) {
> + return false;
> + }
> + if (!sve_access_check(s)) {
> + return true;
> + }
> +
> + unsigned vsz = vec_full_reg_size(s);
> + unsigned desc;
> + TCGv_i32 t_desc;
> + TCGv_ptr pg = tcg_temp_new_ptr();
> +
> + /* We would need 7 operands to pass these arguments "properly".
> + * So we encode all the register numbers into the descriptor.
> + */
> + desc = deposit32(a->rd, 5, 5, a->rn);
> + desc = deposit32(desc, 10, 5, a->rm);
> + desc = deposit32(desc, 15, 5, a->ra);
> + desc = simd_desc(vsz, vsz, desc);
If this ends up being repeated in future, it might be worth having a
helper, maybe get_packed_desc()?
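
A rough sketch of what I mean (get_packed_desc is just a suggested
name, nothing like it exists yet; it only wraps the deposit32 and
simd_desc calls from the patch in one place):

  static uint32_t get_packed_desc(unsigned vsz, unsigned rd, unsigned rn,
                                  unsigned rm, unsigned ra)
  {
      unsigned desc = deposit32(rd, 5, 5, rn);
      desc = deposit32(desc, 10, 5, rm);
      desc = deposit32(desc, 15, 5, ra);
      return simd_desc(vsz, vsz, desc);
  }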
Anyway:
Reviewed-by: Alex Bennée <address@hidden>
--
Alex Bennée