[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH v4 62/78] target/arm: Implement SVE mixed sign dot product (indexed)
From: Richard Henderson
Subject: [PATCH v4 62/78] target/arm: Implement SVE mixed sign dot product (indexed)
Date: Tue, 9 Mar 2021 08:20:25 -0800
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/arm/cpu.h | 5 +++
target/arm/helper.h | 4 +++
target/arm/sve.decode | 4 +++
target/arm/translate-sve.c | 18 ++++++++++
target/arm/vec_helper.c | 68 ++++++++++++++++++++++++++++++++++++++
5 files changed, 99 insertions(+)
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 3f19c4cbed..8c84bfb62b 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -4213,6 +4213,11 @@ static inline bool isar_feature_aa64_sve2_bitperm(const ARMISARegisters *id)
return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, BITPERM) != 0;
}
+/* True when ID_AA64ZFR0.I8MM is nonzero, i.e. the Int8 matrix multiply
+ * (and mixed-sign dot product) instructions are implemented. */
+static inline bool isar_feature_aa64_sve2_i8mm(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, I8MM) != 0;
+}
+
static inline bool isar_feature_aa64_sve2_f32mm(const ARMISARegisters *id)
{
return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, F32MM) != 0;
diff --git a/target/arm/helper.h b/target/arm/helper.h
index e7c463fff5..e4c6458f98 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -621,6 +621,10 @@ DEF_HELPER_FLAGS_5(gvec_sdot_idx_h, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(gvec_udot_idx_h, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sudot_idx_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_usdot_idx_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(gvec_fcaddh, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, i32)
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index 9117196352..499d989087 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -815,6 +815,10 @@ SQRDMLSH_zzxz_h 01000100 .. 1 ..... 000101 ..... ..... @rrxr_h
SQRDMLSH_zzxz_s 01000100 .. 1 ..... 000101 ..... ..... @rrxr_s
SQRDMLSH_zzxz_d 01000100 .. 1 ..... 000101 ..... ..... @rrxr_d
+# SVE mixed sign dot product (indexed)
+USDOT_zzxw_s 01000100 .. 1 ..... 000110 ..... ..... @rrxr_s
+SUDOT_zzxw_s 01000100 .. 1 ..... 000111 ..... ..... @rrxr_s
+
# SVE2 saturating multiply-add (indexed)
SQDMLALB_zzxw_s 01000100 .. 1 ..... 0010.0 ..... ..... @rrxw_s
SQDMLALB_zzxw_d 01000100 .. 1 ..... 0010.0 ..... ..... @rrxw_d
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index 371e8cf05a..6f80c944d3 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -3838,6 +3838,24 @@ DO_RRXR(trans_SDOT_zzxw_d, gen_helper_gvec_sdot_idx_h)
DO_RRXR(trans_UDOT_zzxw_s, gen_helper_gvec_udot_idx_b)
DO_RRXR(trans_UDOT_zzxw_d, gen_helper_gvec_udot_idx_h)
+/*
+ * SUDOT (indexed): signed Zn bytes dotted with an indexed group of
+ * unsigned Zm bytes.  Gated on the I8MM feature bit; the byte index
+ * within the segment is passed to the helper via a->index.
+ */
+static bool trans_SUDOT_zzxw_s(DisasContext *s, arg_rrxr_esz *a)
+{
+ if (!dc_isar_feature(aa64_sve2_i8mm, s)) {
+ return false;
+ }
+ return do_zzxz_data(s, a->rd, a->rn, a->rm, a->ra,
+ gen_helper_gvec_sudot_idx_b, a->index);
+}
+
+/*
+ * USDOT (indexed): unsigned Zn bytes dotted with an indexed group of
+ * signed Zm bytes.  Mirrors trans_SUDOT_zzxw_s with the signedness of
+ * the two operands swapped (see the usdot vs sudot helpers).
+ */
+static bool trans_USDOT_zzxw_s(DisasContext *s, arg_rrxr_esz *a)
+{
+ if (!dc_isar_feature(aa64_sve2_i8mm, s)) {
+ return false;
+ }
+ return do_zzxz_data(s, a->rd, a->rn, a->rm, a->ra,
+ gen_helper_gvec_usdot_idx_b, a->index);
+}
+
#undef DO_RRXR
static bool do_sve2_zzx_data(DisasContext *s, arg_rrx_esz *a,
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
index 8b7269d8e1..98b707f4f5 100644
--- a/target/arm/vec_helper.c
+++ b/target/arm/vec_helper.c
@@ -677,6 +677,74 @@ void HELPER(gvec_udot_idx_b)(void *vd, void *vn, void *vm,
clear_tail(d, opr_sz, simd_maxsz(desc));
}
+/*
+ * SUDOT (indexed) helper: for each 32-bit element d[i], accumulate into
+ * a[i] the dot product of four signed bytes of Zn with four unsigned
+ * bytes of Zm, the latter taken from the 32-bit group selected by
+ * `index` within the element's 128-bit segment.
+ */
+void HELPER(gvec_sudot_idx_b)(void *vd, void *vn, void *vm,
+ void *va, uint32_t desc)
+{
+ intptr_t i, segend, opr_sz = simd_oprsz(desc), opr_sz_4 = opr_sz / 4;
+ intptr_t index = simd_data(desc);
+ int32_t *d = vd, *a = va;
+ int8_t *n = vn;
+ uint8_t *m_indexed = (uint8_t *)vm + index * 4;
+
+ /*
+ * Notice the special case of opr_sz == 8, from aa64/aa32 advsimd.
+ * Otherwise opr_sz is a multiple of 16.
+ */
+ segend = MIN(4, opr_sz_4);
+ i = 0;
+ do {
+ /* Load the indexed group of multiplicand bytes once per segment. */
+ uint8_t m0 = m_indexed[i * 4 + 0];
+ uint8_t m1 = m_indexed[i * 4 + 1];
+ uint8_t m2 = m_indexed[i * 4 + 2];
+ uint8_t m3 = m_indexed[i * 4 + 3];
+
+ /* Inner loop covers the (up to) four 32-bit elements of the segment. */
+ do {
+ d[i] = (a[i] +
+ n[i * 4 + 0] * m0 +
+ n[i * 4 + 1] * m1 +
+ n[i * 4 + 2] * m2 +
+ n[i * 4 + 3] * m3);
+ } while (++i < segend);
+ segend = i + 4;
+ } while (i < opr_sz_4);
+
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+/*
+ * USDOT (indexed) helper: identical structure to gvec_sudot_idx_b, but
+ * with signedness swapped — unsigned bytes from Zn, signed bytes from
+ * the 32-bit group of Zm selected by `index` within each segment.
+ */
+void HELPER(gvec_usdot_idx_b)(void *vd, void *vn, void *vm,
+ void *va, uint32_t desc)
+{
+ intptr_t i, segend, opr_sz = simd_oprsz(desc), opr_sz_4 = opr_sz / 4;
+ intptr_t index = simd_data(desc);
+ uint32_t *d = vd, *a = va;
+ uint8_t *n = vn;
+ int8_t *m_indexed = (int8_t *)vm + index * 4;
+
+ /*
+ * Notice the special case of opr_sz == 8, from aa64/aa32 advsimd.
+ * Otherwise opr_sz is a multiple of 16.
+ */
+ segend = MIN(4, opr_sz_4);
+ i = 0;
+ do {
+ /* Load the indexed group of multiplicand bytes once per segment. */
+ int8_t m0 = m_indexed[i * 4 + 0];
+ int8_t m1 = m_indexed[i * 4 + 1];
+ int8_t m2 = m_indexed[i * 4 + 2];
+ int8_t m3 = m_indexed[i * 4 + 3];
+
+ /* Inner loop covers the (up to) four 32-bit elements of the segment. */
+ do {
+ d[i] = (a[i] +
+ n[i * 4 + 0] * m0 +
+ n[i * 4 + 1] * m1 +
+ n[i * 4 + 2] * m2 +
+ n[i * 4 + 3] * m3);
+ } while (++i < segend);
+ segend = i + 4;
+ } while (i < opr_sz_4);
+
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
void HELPER(gvec_sdot_idx_h)(void *vd, void *vn, void *vm,
void *va, uint32_t desc)
{
--
2.25.1
- [PATCH v4 43/78] target/arm: Implement SVE2 XAR, (continued)
- [PATCH v4 43/78] target/arm: Implement SVE2 XAR, Richard Henderson, 2021/03/09
- [PATCH v4 44/78] target/arm: Implement SVE2 scatter store insns, Richard Henderson, 2021/03/09
- [PATCH v4 46/78] target/arm: Implement SVE2 FMMLA, Richard Henderson, 2021/03/09
- [PATCH v4 47/78] target/arm: Implement SVE2 SPLICE, EXT, Richard Henderson, 2021/03/09
- [PATCH v4 45/78] target/arm: Implement SVE2 gather load insns, Richard Henderson, 2021/03/09
- [PATCH v4 48/78] target/arm: Pass separate addend to {U, S}DOT helpers, Richard Henderson, 2021/03/09
- [PATCH v4 49/78] target/arm: Pass separate addend to FCMLA helpers, Richard Henderson, 2021/03/09
- [PATCH v4 54/78] target/arm: Implement SVE2 saturating multiply-add high (indexed), Richard Henderson, 2021/03/09
- [PATCH v4 50/78] target/arm: Split out formats for 2 vectors + 1 index, Richard Henderson, 2021/03/09
- [PATCH v4 55/78] target/arm: Implement SVE2 saturating multiply-add (indexed), Richard Henderson, 2021/03/09
- [PATCH v4 62/78] target/arm: Implement SVE mixed sign dot product (indexed), Richard Henderson <=
- [PATCH v4 51/78] target/arm: Split out formats for 3 vectors + 1 index, Richard Henderson, 2021/03/09
- [PATCH v4 52/78] target/arm: Implement SVE2 integer multiply (indexed), Richard Henderson, 2021/03/09
- [PATCH v4 56/78] target/arm: Implement SVE2 integer multiply long (indexed), Richard Henderson, 2021/03/09
- [PATCH v4 53/78] target/arm: Implement SVE2 integer multiply-add (indexed), Richard Henderson, 2021/03/09
- [PATCH v4 59/78] target/arm: Implement SVE2 saturating multiply high (indexed), Richard Henderson, 2021/03/09
- [PATCH v4 63/78] target/arm: Implement SVE mixed sign dot product, Richard Henderson, 2021/03/09
- [PATCH v4 57/78] target/arm: Implement SVE2 saturating multiply (indexed), Richard Henderson, 2021/03/09
- [PATCH v4 58/78] target/arm: Implement SVE2 signed saturating doubling multiply high, Richard Henderson, 2021/03/09
- [PATCH v4 65/78] target/arm: Implement SVE2 crypto destructive binary operations, Richard Henderson, 2021/03/09
- [PATCH v4 66/78] target/arm: Implement SVE2 crypto constructive binary operations, Richard Henderson, 2021/03/09