[PATCH v4 25/78] target/arm: Implement SVE2 floating-point pairwise
From: Richard Henderson
Subject: [PATCH v4 25/78] target/arm: Implement SVE2 floating-point pairwise
Date: Tue, 9 Mar 2021 08:19:48 -0800
From: Stephen Long <steplong@quicinc.com>
Signed-off-by: Stephen Long <steplong@quicinc.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
v2: Load all inputs before writing any output (laurent desnogues)
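[Editorial note, not part of the patch: the load-before-store ordering above matters because the instruction is destructive and the destination register may alias either source (Zdn, and possibly Zm as well). A minimal standalone C sketch of the same idea, using a hypothetical pairwise_add() over plain 4-element arrays rather than QEMU's real helper signature:]

    #include <stdio.h>

    /* Illustration only, not QEMU code.  Even result lanes take adjacent
     * pairs from n, odd result lanes take adjacent pairs from m, mirroring
     * the helper added by this patch.  All four inputs of a pair are loaded
     * before either output is stored, so the result stays correct even when
     * d aliases n or m. */
    static void pairwise_add(float *d, const float *n, const float *m)
    {
        for (int i = 0; i < 4; i += 2) {
            float n0 = n[i], n1 = n[i + 1];
            float m0 = m[i], m1 = m[i + 1];  /* read before d[i] is written */
            d[i] = n0 + n1;
            d[i + 1] = m0 + m1;
        }
    }

    int main(void)
    {
        float z[4] = {1, 2, 3, 4};
        pairwise_add(z, z, z);               /* fully aliased case */
        for (int i = 0; i < 4; i++) {
            printf("%g ", z[i]);             /* prints: 3 3 7 7 */
        }
        printf("\n");
        return 0;
    }

[If m0/m1 were loaded after the store to d[i], the fully aliased call would read the freshly written value and produce 3 5 7 11 instead.]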
---
target/arm/helper-sve.h | 35 +++++++++++++++++++++++++++++
target/arm/sve.decode | 8 +++++++
target/arm/sve_helper.c | 46 ++++++++++++++++++++++++++++++++++++++
target/arm/translate-sve.c | 25 +++++++++++++++++++++
4 files changed, 114 insertions(+)
diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
index b302203ce8..a033b5f6b2 100644
--- a/target/arm/helper-sve.h
+++ b/target/arm/helper-sve.h
@@ -2443,3 +2443,38 @@ DEF_HELPER_FLAGS_3(sve2_uqxtnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(sve2_sqxtunt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(sve2_sqxtunt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(sve2_sqxtunt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve2_fmaxnmp_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve2_fmaxnmp_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve2_fmaxnmp_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve2_fminnmp_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve2_fminnmp_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve2_fminnmp_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve2_fmaxp_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve2_fmaxp_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve2_fmaxp_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve2_fminp_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve2_fminp_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve2_fminp_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index 19866ec4c6..9c75ac94c0 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -1284,3 +1284,11 @@ UQXTNB 01000101 .. 1 ..... 010 010 ..... ..... @rd_rn_tszimm_shl
UQXTNT 01000101 .. 1 ..... 010 011 ..... ..... @rd_rn_tszimm_shl
SQXTUNB 01000101 .. 1 ..... 010 100 ..... ..... @rd_rn_tszimm_shl
SQXTUNT 01000101 .. 1 ..... 010 101 ..... ..... @rd_rn_tszimm_shl
+
+## SVE2 floating-point pairwise operations
+
+FADDP 01100100 .. 010 00 0 100 ... ..... ..... @rdn_pg_rm
+FMAXNMP 01100100 .. 010 10 0 100 ... ..... ..... @rdn_pg_rm
+FMINNMP 01100100 .. 010 10 1 100 ... ..... ..... @rdn_pg_rm
+FMAXP 01100100 .. 010 11 0 100 ... ..... ..... @rdn_pg_rm
+FMINP 01100100 .. 010 11 1 100 ... ..... ..... @rdn_pg_rm
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index 115daa50a5..6da734e7b4 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -890,6 +890,52 @@ DO_ZPZZ_PAIR_D(sve2_sminp_zpzz_d, int64_t, DO_MIN)
#undef DO_ZPZZ_PAIR
#undef DO_ZPZZ_PAIR_D
+#define DO_ZPZZ_PAIR_FP(NAME, TYPE, H, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, \
+ void *status, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ for (i = 0; i < opr_sz; ) { \
+ uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \
+ do { \
+ TYPE n0 = *(TYPE *)(vn + H(i)); \
+ TYPE m0 = *(TYPE *)(vm + H(i)); \
+ TYPE n1 = *(TYPE *)(vn + H(i + sizeof(TYPE))); \
+ TYPE m1 = *(TYPE *)(vm + H(i + sizeof(TYPE))); \
+ if (pg & 1) { \
+ *(TYPE *)(vd + H(i)) = OP(n0, n1, status); \
+ } \
+ i += sizeof(TYPE), pg >>= sizeof(TYPE); \
+ if (pg & 1) { \
+ *(TYPE *)(vd + H(i)) = OP(m0, m1, status); \
+ } \
+ i += sizeof(TYPE), pg >>= sizeof(TYPE); \
+ } while (i & 15); \
+ } \
+}
+
+DO_ZPZZ_PAIR_FP(sve2_faddp_zpzz_h, float16, H1_2, float16_add)
+DO_ZPZZ_PAIR_FP(sve2_faddp_zpzz_s, float32, H1_4, float32_add)
+DO_ZPZZ_PAIR_FP(sve2_faddp_zpzz_d, float64, , float64_add)
+
+DO_ZPZZ_PAIR_FP(sve2_fmaxnmp_zpzz_h, float16, H1_2, float16_maxnum)
+DO_ZPZZ_PAIR_FP(sve2_fmaxnmp_zpzz_s, float32, H1_4, float32_maxnum)
+DO_ZPZZ_PAIR_FP(sve2_fmaxnmp_zpzz_d, float64, , float64_maxnum)
+
+DO_ZPZZ_PAIR_FP(sve2_fminnmp_zpzz_h, float16, H1_2, float16_minnum)
+DO_ZPZZ_PAIR_FP(sve2_fminnmp_zpzz_s, float32, H1_4, float32_minnum)
+DO_ZPZZ_PAIR_FP(sve2_fminnmp_zpzz_d, float64, , float64_minnum)
+
+DO_ZPZZ_PAIR_FP(sve2_fmaxp_zpzz_h, float16, H1_2, float16_max)
+DO_ZPZZ_PAIR_FP(sve2_fmaxp_zpzz_s, float32, H1_4, float32_max)
+DO_ZPZZ_PAIR_FP(sve2_fmaxp_zpzz_d, float64, , float64_max)
+
+DO_ZPZZ_PAIR_FP(sve2_fminp_zpzz_h, float16, H1_2, float16_min)
+DO_ZPZZ_PAIR_FP(sve2_fminp_zpzz_s, float32, H1_4, float32_min)
+DO_ZPZZ_PAIR_FP(sve2_fminp_zpzz_d, float64, , float64_min)
+
+#undef DO_ZPZZ_PAIR_FP
+
/* Three-operand expander, controlled by a predicate, in which the
* third operand is "wide". That is, for D = N op M, the same 64-bit
* value of M is used with all of the narrower values of N.
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index 65f1b11848..ae2ac20302 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -6698,3 +6698,28 @@ static bool trans_SQXTUNT(DisasContext *s, arg_rri_esz *a)
};
return do_sve2_narrow_extract(s, a, ops);
}
+
+static bool do_sve2_zpzz_fp(DisasContext *s, arg_rprr_esz *a,
+ gen_helper_gvec_4_ptr *fn)
+{
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ return do_zpzz_fp(s, a, fn);
+}
+
+#define DO_SVE2_ZPZZ_FP(NAME, name) \
+static bool trans_##NAME(DisasContext *s, arg_rprr_esz *a) \
+{ \
+ static gen_helper_gvec_4_ptr * const fns[4] = { \
+ NULL, gen_helper_sve2_##name##_zpzz_h, \
+ gen_helper_sve2_##name##_zpzz_s, gen_helper_sve2_##name##_zpzz_d \
+ }; \
+ return do_sve2_zpzz_fp(s, a, fns[a->esz]); \
+}
+
+DO_SVE2_ZPZZ_FP(FADDP, faddp)
+DO_SVE2_ZPZZ_FP(FMAXNMP, fmaxnmp)
+DO_SVE2_ZPZZ_FP(FMINNMP, fminnmp)
+DO_SVE2_ZPZZ_FP(FMAXP, fmaxp)
+DO_SVE2_ZPZZ_FP(FMINP, fminp)
--
2.25.1