[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH v4 56/78] target/arm: Implement SVE2 integer multiply long (indexed)
From: Richard Henderson
Subject: [PATCH v4 56/78] target/arm: Implement SVE2 integer multiply long (indexed)
Date: Tue, 9 Mar 2021 08:20:19 -0800
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/arm/helper-sve.h | 5 +++++
target/arm/sve.decode | 16 ++++++++++++++++
target/arm/sve_helper.c | 23 +++++++++++++++++++++++
target/arm/translate-sve.c | 24 ++++++++++++++++++++----
4 files changed, 64 insertions(+), 4 deletions(-)
diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
index 08398800bd..a3690082af 100644
--- a/target/arm/helper-sve.h
+++ b/target/arm/helper-sve.h
@@ -2688,3 +2688,8 @@ DEF_HELPER_FLAGS_5(sve2_sqdmlsl_idx_s, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(sve2_sqdmlsl_idx_d, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_smull_idx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_smull_idx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_umull_idx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_umull_idx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index 6900e79492..ebb892748b 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -257,6 +257,12 @@
@rrx_d ........ 11 . index:1 rm:4 ...... rn:5 rd:5 \
&rrx_esz esz=3
+# Two registers and a scalar by index, wide
+@rrxl_s ........ 10 ... rm:3 ...... rn:5 rd:5 \
+ &rrx_esz index=%index3_19_11 esz=2
+@rrxl_d ........ 11 .. rm:4 ...... rn:5 rd:5 \
+ &rrx_esz index=%index2_20_11 esz=3
+
# Three registers and a scalar by index
@rrxr_h ........ 0. . .. rm:3 ...... rn:5 rd:5 \
&rrxr_esz ra=%reg_movprfx index=%index3_22_19 esz=1
@@ -819,6 +825,16 @@ SQDMLSLB_zzxw_d 01000100 .. 1 ..... 0011.0 ..... ..... @rrxw_d
SQDMLSLT_zzxw_s 01000100 .. 1 ..... 0011.1 ..... ..... @rrxw_s
SQDMLSLT_zzxw_d 01000100 .. 1 ..... 0011.1 ..... ..... @rrxw_d
+# SVE2 integer multiply long (indexed)
+SMULLB_zzx_s 01000100 .. 1 ..... 1100.0 ..... ..... @rrxl_s
+SMULLB_zzx_d 01000100 .. 1 ..... 1100.0 ..... ..... @rrxl_d
+SMULLT_zzx_s 01000100 .. 1 ..... 1100.1 ..... ..... @rrxl_s
+SMULLT_zzx_d 01000100 .. 1 ..... 1100.1 ..... ..... @rrxl_d
+UMULLB_zzx_s 01000100 .. 1 ..... 1101.0 ..... ..... @rrxl_s
+UMULLB_zzx_d 01000100 .. 1 ..... 1101.0 ..... ..... @rrxl_d
+UMULLT_zzx_s 01000100 .. 1 ..... 1101.1 ..... ..... @rrxl_s
+UMULLT_zzx_d 01000100 .. 1 ..... 1101.1 ..... ..... @rrxl_d
+
# SVE2 integer multiply (indexed)
MUL_zzx_h 01000100 .. 1 ..... 111110 ..... ..... @rrx_h
MUL_zzx_s 01000100 .. 1 ..... 111110 ..... ..... @rrx_s
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index 6ad1158455..b64e1f243d 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -1560,6 +1560,29 @@ DO_ZZXW(sve2_sqdmlsl_idx_d, int64_t, int32_t, , H1_4, DO_SQDMLSL_D)
#undef DO_ZZXW
+#define DO_ZZX(NAME, TYPEW, TYPEN, HW, HN, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
+{ \
+ intptr_t i, j, oprsz = simd_oprsz(desc); \
+ intptr_t sel = extract32(desc, SIMD_DATA_SHIFT, 1) * sizeof(TYPEN); \
+ intptr_t idx = extract32(desc, SIMD_DATA_SHIFT + 1, 3) * sizeof(TYPEN); \
+ for (i = 0; i < oprsz; i += 16) { \
+ TYPEW mm = *(TYPEN *)(vm + i + idx); \
+ for (j = 0; j < 16; j += sizeof(TYPEW)) { \
+ TYPEW nn = *(TYPEN *)(vn + HN(i + j + sel)); \
+ *(TYPEW *)(vd + HW(i + j)) = OP(nn, mm); \
+ } \
+ } \
+}
+
+DO_ZZX(sve2_smull_idx_s, int32_t, int16_t, H1_4, H1_2, DO_MUL)
+DO_ZZX(sve2_smull_idx_d, int64_t, int32_t, , H1_4, DO_MUL)
+
+DO_ZZX(sve2_umull_idx_s, uint32_t, uint16_t, H1_4, H1_2, DO_MUL)
+DO_ZZX(sve2_umull_idx_d, uint64_t, uint32_t, , H1_4, DO_MUL)
+
+#undef DO_ZZX
+
#define DO_BITPERM(NAME, TYPE, OP) \
void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
{ \
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index a3261561c0..6376a21b98 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -3840,8 +3840,8 @@ DO_RRXR(trans_UDOT_zzxw_d, gen_helper_gvec_udot_idx_h)
#undef DO_RRXR
-static bool do_sve2_zzx_ool(DisasContext *s, arg_rrx_esz *a,
- gen_helper_gvec_3 *fn)
+static bool do_sve2_zzx_data(DisasContext *s, arg_rrx_esz *a,
+ gen_helper_gvec_3 *fn, int data)
{
if (fn == NULL || !dc_isar_feature(aa64_sve2, s)) {
return false;
@@ -3851,14 +3851,14 @@ static bool do_sve2_zzx_ool(DisasContext *s, arg_rrx_esz *a,
tcg_gen_gvec_3_ool(vec_full_reg_offset(s, a->rd),
vec_full_reg_offset(s, a->rn),
vec_full_reg_offset(s, a->rm),
- vsz, vsz, a->index, fn);
+ vsz, vsz, data, fn);
}
return true;
}
#define DO_SVE2_RRX(NAME, FUNC) \
static bool NAME(DisasContext *s, arg_rrx_esz *a) \
- { return do_sve2_zzx_ool(s, a, FUNC); }
+ { return do_sve2_zzx_data(s, a, FUNC, a->index); }
DO_SVE2_RRX(trans_MUL_zzx_h, gen_helper_gvec_mul_idx_h)
DO_SVE2_RRX(trans_MUL_zzx_s, gen_helper_gvec_mul_idx_s)
@@ -3866,6 +3866,22 @@ DO_SVE2_RRX(trans_MUL_zzx_d, gen_helper_gvec_mul_idx_d)
#undef DO_SVE2_RRX
+#define DO_SVE2_RRX_TB(NAME, FUNC, TOP) \
+ static bool NAME(DisasContext *s, arg_rrx_esz *a) \
+ { return do_sve2_zzx_data(s, a, FUNC, (a->index << 1) | TOP); }
+
+DO_SVE2_RRX_TB(trans_SMULLB_zzx_s, gen_helper_sve2_smull_idx_s, false)
+DO_SVE2_RRX_TB(trans_SMULLB_zzx_d, gen_helper_sve2_smull_idx_d, false)
+DO_SVE2_RRX_TB(trans_SMULLT_zzx_s, gen_helper_sve2_smull_idx_s, true)
+DO_SVE2_RRX_TB(trans_SMULLT_zzx_d, gen_helper_sve2_smull_idx_d, true)
+
+DO_SVE2_RRX_TB(trans_UMULLB_zzx_s, gen_helper_sve2_umull_idx_s, false)
+DO_SVE2_RRX_TB(trans_UMULLB_zzx_d, gen_helper_sve2_umull_idx_d, false)
+DO_SVE2_RRX_TB(trans_UMULLT_zzx_s, gen_helper_sve2_umull_idx_s, true)
+DO_SVE2_RRX_TB(trans_UMULLT_zzx_d, gen_helper_sve2_umull_idx_d, true)
+
+#undef DO_SVE2_RRX_TB
+
static bool do_sve2_zzxz_data(DisasContext *s, arg_rrxr_esz *a,
gen_helper_gvec_4 *fn, int data)
{
--
2.25.1
- [PATCH v4 47/78] target/arm: Implement SVE2 SPLICE, EXT, (continued)
- [PATCH v4 47/78] target/arm: Implement SVE2 SPLICE, EXT, Richard Henderson, 2021/03/09
- [PATCH v4 45/78] target/arm: Implement SVE2 gather load insns, Richard Henderson, 2021/03/09
- [PATCH v4 48/78] target/arm: Pass separate addend to {U, S}DOT helpers, Richard Henderson, 2021/03/09
- [PATCH v4 49/78] target/arm: Pass separate addend to FCMLA helpers, Richard Henderson, 2021/03/09
- [PATCH v4 54/78] target/arm: Implement SVE2 saturating multiply-add high (indexed), Richard Henderson, 2021/03/09
- [PATCH v4 50/78] target/arm: Split out formats for 2 vectors + 1 index, Richard Henderson, 2021/03/09
- [PATCH v4 55/78] target/arm: Implement SVE2 saturating multiply-add (indexed), Richard Henderson, 2021/03/09
- [PATCH v4 62/78] target/arm: Implement SVE mixed sign dot product (indexed), Richard Henderson, 2021/03/09
- [PATCH v4 51/78] target/arm: Split out formats for 3 vectors + 1 index, Richard Henderson, 2021/03/09
- [PATCH v4 52/78] target/arm: Implement SVE2 integer multiply (indexed), Richard Henderson, 2021/03/09
- [PATCH v4 56/78] target/arm: Implement SVE2 integer multiply long (indexed), Richard Henderson <=
- [PATCH v4 53/78] target/arm: Implement SVE2 integer multiply-add (indexed), Richard Henderson, 2021/03/09
- [PATCH v4 59/78] target/arm: Implement SVE2 saturating multiply high (indexed), Richard Henderson, 2021/03/09
- [PATCH v4 63/78] target/arm: Implement SVE mixed sign dot product, Richard Henderson, 2021/03/09
- [PATCH v4 57/78] target/arm: Implement SVE2 saturating multiply (indexed), Richard Henderson, 2021/03/09
- [PATCH v4 58/78] target/arm: Implement SVE2 signed saturating doubling multiply high, Richard Henderson, 2021/03/09
- [PATCH v4 65/78] target/arm: Implement SVE2 crypto destructive binary operations, Richard Henderson, 2021/03/09
- [PATCH v4 66/78] target/arm: Implement SVE2 crypto constructive binary operations, Richard Henderson, 2021/03/09
- [PATCH v4 67/78] target/arm: Implement SVE2 TBL, TBX, Richard Henderson, 2021/03/09
- [PATCH v4 68/78] target/arm: Implement SVE2 FCVTNT, Richard Henderson, 2021/03/09
- [PATCH v4 64/78] target/arm: Implement SVE2 crypto unary operations, Richard Henderson, 2021/03/09