From: Richard Henderson
Subject: [PATCH v2 100/100] target/arm: Implement SVE2 fp multiply-add long
Date: Wed, 17 Jun 2020 21:26:44 -0700

From: Stephen Long <steplong@quicinc.com>
Implements both the vectored and indexed forms of FMLALB, FMLALT,
FMLSLB and FMLSLT.
Signed-off-by: Stephen Long <steplong@quicinc.com>
Message-Id: <20200504171240.11220-1-steplong@quicinc.com>
[rth: Rearrange to use float16_to_float32_by_bits.]
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
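A note on the semantics: each fp32 destination element i accumulates the
product of fp16 elements 2*i+sel of zn and zm, widened to fp32, with the
product negated for FMLSL*; sel = 0 selects the even (Bottom) fp16
elements, sel = 1 the odd (Top) ones.  A minimal scalar model in C -- a
sketch only, assuming a compiler with the _Float16 extension, and not
modeling the fp16 flush-to-zero input handling that the helpers get via
fz16:

    #include <math.h>     /* fmaf: single-rounding multiply-add */
    #include <stddef.h>

    static void fmlal_ref(float *d, const _Float16 *n, const _Float16 *m,
                          const float *a, size_t lanes, int sub, int sel)
    {
        for (size_t i = 0; i < lanes; i++) {
            float nn = (float)n[2 * i + sel];  /* fp16 -> fp32 is exact */
            float mm = (float)m[2 * i + sel];
            if (sub) {
                nn = -nn;  /* the helpers flip the fp16 sign bit instead */
            }
            d[i] = fmaf(nn, mm, a[i]);   /* fused, like float32_muladd */
        }
    }

The indexed forms differ only in that zm contributes a single element per
128-bit segment, selected by the 3-bit immediate, e.g.
FMLALB z0.s, z1.h, z2.h[5].
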
target/arm/helper.h | 5 +++
target/arm/sve.decode | 12 ++++++
target/arm/translate-sve.c | 75 ++++++++++++++++++++++++++++++++++++++
target/arm/vec_helper.c | 51 ++++++++++++++++++++++++++
4 files changed, 143 insertions(+)
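
The translator packs its three immediates into simd_data as
(index << 2) | (sel << 1) | sub, and the helpers recover them with
extract32 at SIMD_DATA_SHIFT.  A worked example of the packing, using the
hypothetical operands FMLSLT z0.s, z1.h, z2.h[5] (so sub = 1, sel = 1,
index = 5); plain shifts stand in for extract32:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t data = (5u << 2) | (1u << 1) | 1u;  /* = 23 */

        assert((data & 1) == 1);         /* bit 0: sub  -> FMLSL */
        assert(((data >> 1) & 1) == 1);  /* bit 1: sel  -> Top elements */
        assert((data >> 2) == 5);        /* bits [4:2]: zm element index */
        return 0;
    }
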
diff --git a/target/arm/helper.h b/target/arm/helper.h
index bfeb327272..f471ff27d1 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -847,6 +847,11 @@ DEF_HELPER_FLAGS_4(sve2_sqrdmulh_idx_s, TCG_CALL_NO_RWG,
 DEF_HELPER_FLAGS_4(sve2_sqrdmulh_idx_d, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, i32)
 
+DEF_HELPER_FLAGS_6(sve2_fmlal_zzzw_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve2_fmlal_zzxw_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, i32)
+
 #ifdef TARGET_AARCH64
 #include "helper-a64.h"
 #include "helper-sve.h"
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index d2f33d96f3..6708c048e0 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -1601,3 +1601,15 @@ FCVTLT_sd 01100100 11 0010 11 101 ... ..... ..... @rd_pg_rn_e0
 
 ### SVE2 floating-point convert to integer
 FLOGB 01100101 00 011 esz:2 0101 pg:3 rn:5 rd:5 &rpr_esz
+
+### SVE2 floating-point multiply-add long (vectors)
+FMLALB_zzzw 01100100 .. 1 ..... 10 0 00 0 ..... ..... @rda_rn_rm
+FMLALT_zzzw 01100100 .. 1 ..... 10 0 00 1 ..... ..... @rda_rn_rm
+FMLSLB_zzzw 01100100 .. 1 ..... 10 1 00 0 ..... ..... @rda_rn_rm
+FMLSLT_zzzw 01100100 .. 1 ..... 10 1 00 1 ..... ..... @rda_rn_rm
+
+### SVE2 floating-point multiply-add long (indexed)
+FMLALB_zzxw 01100100 .. 1 ..... 0100.0 ..... ..... @rrxw_s
+FMLALT_zzxw 01100100 .. 1 ..... 0100.1 ..... ..... @rrxw_s
+FMLSLB_zzxw 01100100 .. 1 ..... 0110.0 ..... ..... @rrxw_s
+FMLSLT_zzxw 01100100 .. 1 ..... 0110.1 ..... ..... @rrxw_s
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index 2b2e186988..8b51b9f5a6 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -8059,3 +8059,78 @@ static bool trans_FLOGB(DisasContext *s, arg_rpr_esz *a)
     };
     return do_sve2_zpz_ool(s, a, fns[a->esz]);
 }
+
+static bool do_FMLAL_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sub, bool sel)
+{
+    if (a->esz != MO_32 || !dc_isar_feature(aa64_sve2, s)) {
+        return false;
+    }
+    if (sve_access_check(s)) {
+        unsigned vsz = vec_full_reg_size(s);
+        tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd),
+                           vec_full_reg_offset(s, a->rn),
+                           vec_full_reg_offset(s, a->rm),
+                           vec_full_reg_offset(s, a->ra),
+                           cpu_env, vsz, vsz, (sel << 1) | sub,
+                           gen_helper_sve2_fmlal_zzzw_s);
+    }
+    return true;
+}
+
+static bool trans_FMLALB_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+    return do_FMLAL_zzzw(s, a, false, false);
+}
+
+static bool trans_FMLALT_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+    return do_FMLAL_zzzw(s, a, false, true);
+}
+
+static bool trans_FMLSLB_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+    return do_FMLAL_zzzw(s, a, true, false);
+}
+
+static bool trans_FMLSLT_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+    return do_FMLAL_zzzw(s, a, true, true);
+}
+
+static bool do_FMLAL_zzxw(DisasContext *s, arg_rrxr_esz *a, bool sub, bool sel)
+{
+    if (a->esz != MO_32 || !dc_isar_feature(aa64_sve2, s)) {
+        return false;
+    }
+    if (sve_access_check(s)) {
+        unsigned vsz = vec_full_reg_size(s);
+        tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd),
+                           vec_full_reg_offset(s, a->rn),
+                           vec_full_reg_offset(s, a->rm),
+                           vec_full_reg_offset(s, a->ra),
+                           cpu_env, vsz, vsz,
+                           (a->index << 2) | (sel << 1) | sub,
+                           gen_helper_sve2_fmlal_zzxw_s);
+    }
+    return true;
+}
+
+static bool trans_FMLALB_zzxw(DisasContext *s, arg_rrxr_esz *a)
+{
+    return do_FMLAL_zzxw(s, a, false, false);
+}
+
+static bool trans_FMLALT_zzxw(DisasContext *s, arg_rrxr_esz *a)
+{
+    return do_FMLAL_zzxw(s, a, false, true);
+}
+
+static bool trans_FMLSLB_zzxw(DisasContext *s, arg_rrxr_esz *a)
+{
+    return do_FMLAL_zzxw(s, a, true, false);
+}
+
+static bool trans_FMLSLT_zzxw(DisasContext *s, arg_rrxr_esz *a)
+{
+    return do_FMLAL_zzxw(s, a, true, true);
+}
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
index a51cbf2c7e..09847788d9 100644
--- a/target/arm/vec_helper.c
+++ b/target/arm/vec_helper.c
@@ -29,10 +29,14 @@
    so addressing units smaller than that needs a host-endian fixup. */
 #ifdef HOST_WORDS_BIGENDIAN
 #define H1(x) ((x) ^ 7)
+#define H1_2(x) ((x) ^ 6)
+#define H1_4(x) ((x) ^ 4)
 #define H2(x) ((x) ^ 3)
 #define H4(x) ((x) ^ 1)
 #else
 #define H1(x) (x)
+#define H1_2(x) (x)
+#define H1_4(x) (x)
 #define H2(x) (x)
 #define H4(x) (x)
 #endif
@@ -1606,6 +1610,27 @@ void HELPER(gvec_fmlal_a64)(void *vd, void *vn, void *vm,
              get_flush_inputs_to_zero(&env->vfp.fp_status_f16));
 }
 
+void HELPER(sve2_fmlal_zzzw_s)(void *vd, void *vn, void *vm, void *va,
+                               void *venv, uint32_t desc)
+{
+    intptr_t i, oprsz = simd_oprsz(desc);
+    uint16_t negn = extract32(desc, SIMD_DATA_SHIFT, 1) << 15;
+    intptr_t sel = extract32(desc, SIMD_DATA_SHIFT + 1, 1) * sizeof(float16);
+    CPUARMState *env = venv;
+    float_status *status = &env->vfp.fp_status;
+    bool fz16 = get_flush_inputs_to_zero(&env->vfp.fp_status_f16);
+
+    for (i = 0; i < oprsz; i += sizeof(float32)) {
+        float16 nn_16 = *(float16 *)(vn + H1_2(i + sel)) ^ negn;
+        float16 mm_16 = *(float16 *)(vm + H1_2(i + sel));
+        float32 nn = float16_to_float32_by_bits(nn_16, fz16);
+        float32 mm = float16_to_float32_by_bits(mm_16, fz16);
+        float32 aa = *(float32 *)(va + H1_4(i));
+
+        *(float32 *)(vd + H1_4(i)) = float32_muladd(nn, mm, aa, 0, status);
+    }
+}
+
 static void do_fmlal_idx(float32 *d, void *vn, void *vm, float_status *fpst,
                          uint32_t desc, bool fz16)
 {
@@ -1650,6 +1675,32 @@ void HELPER(gvec_fmlal_idx_a64)(void *vd, void *vn, void *vm,
                  get_flush_inputs_to_zero(&env->vfp.fp_status_f16));
 }
 
+void HELPER(sve2_fmlal_zzxw_s)(void *vd, void *vn, void *vm, void *va,
+                               void *venv, uint32_t desc)
+{
+    intptr_t i, j, oprsz = simd_oprsz(desc);
+    uint16_t negn = extract32(desc, SIMD_DATA_SHIFT, 1) << 15;
+    intptr_t sel = extract32(desc, SIMD_DATA_SHIFT + 1, 1) * sizeof(float16);
+    intptr_t idx = extract32(desc, SIMD_DATA_SHIFT + 2, 3) * sizeof(float16);
+    CPUARMState *env = venv;
+    float_status *status = &env->vfp.fp_status;
+    bool fz16 = get_flush_inputs_to_zero(&env->vfp.fp_status_f16);
+
+    for (i = 0; i < oprsz; i += 16) {
+        float16 mm_16 = *(float16 *)(vm + i + idx);
+        float32 mm = float16_to_float32_by_bits(mm_16, fz16);
+
+        for (j = 0; j < 16; j += sizeof(float32)) {
+            float16 nn_16 = *(float16 *)(vn + H1_2(i + j + sel)) ^ negn;
+            float32 nn = float16_to_float32_by_bits(nn_16, fz16);
+            float32 aa = *(float32 *)(va + H1_4(i + j));
+
+            *(float32 *)(vd + H1_4(i + j)) =
+                float32_muladd(nn, mm, aa, 0, status);
+        }
+    }
+}
+
 void HELPER(gvec_sshl_b)(void *vd, void *vn, void *vm, uint32_t desc)
 {
     intptr_t i, opr_sz = simd_oprsz(desc);
--
2.25.1