[PATCH for-6.2 25/53] target/arm: Implement MVE VMLADAV and VMLSLDAV
From: Peter Maydell
Subject: [PATCH for-6.2 25/53] target/arm: Implement MVE VMLADAV and VMLSLDAV
Date: Thu, 29 Jul 2021 12:14:44 +0100
Implement the MVE VMLADAV and VMLSDAV insns. Like the VMLALDAV and
VMLSLDAV insns already implemented, these accumulate multiplied
vector elements; but they accumulate a 32-bit result rather than a
64-bit one.
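
As a rough illustration (an editor's sketch, not QEMU code: the scalar
framing and all names here are invented, and predication and beat-wise
execution are ignored), a 16-bit signed VMLADAV computes:

    /*
     * Approximate scalar semantics of VMLADAV.S16 Rda, Qn, Qm.
     * "accumulate" corresponds to the insn's A bit.
     */
    #include <stdint.h>

    static int32_t vmladav_s16(int32_t rda, const int16_t qn[8],
                               const int16_t qm[8], int accumulate)
    {
        int32_t acc = accumulate ? rda : 0;   /* A=0 starts from zero */

        for (int e = 0; e < 8; e++) {
            /* widening multiply, wrap-around (non-saturating) add */
            acc += (int32_t)qn[e] * qm[e];
        }
        return acc;
    }

The real helpers below additionally apply the VPT/ECI element mask
before each per-element accumulation.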
Note that these encodings overlap with what would be RdaHi=0b111 for
VMLALDAV, VMLSLDAV, VRMLALDAVH and VRMLSLDAVH; the decode patterns
below resolve this with overlap groups, listing the more specific
VMLADAV/VMLSDAV patterns first so that they take priority.
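
The new insns also come in exchanging (X) and subtracting forms,
implemented by the DO_DAV macro in mve_helper.c below. A similarly
hedged sketch (again not QEMU code; names invented) of the combined
VMLSDAVX behaviour for 16-bit elements:

    /*
     * X form: each element of Qm is multiplied by the other element
     * of its even/odd pair in Qn; the subtracting (VMLSDAV) form
     * then subtracts the odd-numbered products.
     */
    static int32_t vmlsdavx_s16(int32_t acc, const int16_t qn[8],
                                const int16_t qm[8])
    {
        for (int e = 0; e < 8; e++) {
            int32_t prod = (int32_t)qn[e ^ 1] * qm[e];
            acc = (e & 1) ? acc - prod : acc + prod;
        }
        return acc;
    }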
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
target/arm/helper-mve.h | 17 ++++++++++
target/arm/mve.decode | 33 +++++++++++++++++---
target/arm/mve_helper.c | 41 ++++++++++++++++++++++++
target/arm/translate-mve.c | 64 ++++++++++++++++++++++++++++++++++++++
4 files changed, 150 insertions(+), 5 deletions(-)
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index 17484f74323..34d644a519c 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -392,6 +392,23 @@ DEF_HELPER_FLAGS_4(mve_vrmlaldavhuw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
DEF_HELPER_FLAGS_4(mve_vrmlsldavhsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
DEF_HELPER_FLAGS_4(mve_vrmlsldavhxsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
+DEF_HELPER_FLAGS_4(mve_vmladavsb, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmladavsh, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmladavsw, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmladavub, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmladavuh, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmladavuw, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmlsdavb, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmlsdavh, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmlsdavw, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vmladavsxb, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmladavsxh, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmladavsxw, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmlsdavxb, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmlsdavxh, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmlsdavxw, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+
DEF_HELPER_FLAGS_3(mve_vaddvsb, TCG_CALL_NO_WG, i32, env, ptr, i32)
DEF_HELPER_FLAGS_3(mve_vaddvub, TCG_CALL_NO_WG, i32, env, ptr, i32)
DEF_HELPER_FLAGS_3(mve_vaddvsh, TCG_CALL_NO_WG, i32, env, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index d295a693b18..cec5a51b0ee 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -320,32 +320,55 @@ VDUP 1110 1110 1 0 10 ... 0 .... 1011 . 0 0 1 0000 @vdup size=2
%size_16 16:1 !function=plus_1
&vmlaldav rdahi rdalo size qn qm x a
+&vmladav rda size qn qm x a
@vmlaldav .... .... . ... ... . ... x:1 .... .. a:1 . qm:3 . \
qn=%qn rdahi=%rdahi rdalo=%rdalo size=%size_16 &vmlaldav
@vmlaldav_nosz .... .... . ... ... . ... x:1 .... .. a:1 . qm:3 . \
qn=%qn rdahi=%rdahi rdalo=%rdalo size=0 &vmlaldav
-VMLALDAV_S 1110 1110 1 ... ... . ... . 1110 . 0 . 0 ... 0 @vmlaldav
-VMLALDAV_U 1111 1110 1 ... ... . ... . 1110 . 0 . 0 ... 0 @vmlaldav
+@vmladav .... .... .... ... . ... x:1 .... . . a:1 . qm:3 . \
+ qn=%qn rda=%rdalo size=%size_16 &vmladav
+@vmladav_nosz .... .... .... ... . ... x:1 .... . . a:1 . qm:3 . \
+ qn=%qn rda=%rdalo size=0 &vmladav
-VMLSLDAV 1110 1110 1 ... ... . ... . 1110 . 0 . 0 ... 1 @vmlaldav
+{
+ VMLADAV_S 1110 1110 1111 ... . ... . 1110 . 0 . 0 ... 0 @vmladav
+ VMLALDAV_S 1110 1110 1 ... ... . ... . 1110 . 0 . 0 ... 0 @vmlaldav
+}
+{
+ VMLADAV_U 1111 1110 1111 ... . ... . 1110 . 0 . 0 ... 0 @vmladav
+ VMLALDAV_U 1111 1110 1 ... ... . ... . 1110 . 0 . 0 ... 0 @vmlaldav
+}
+
+{
+ VMLSDAV 1110 1110 1111 ... . ... . 1110 . 0 . 0 ... 1 @vmladav
+ VMLSLDAV 1110 1110 1 ... ... . ... . 1110 . 0 . 0 ... 1 @vmlaldav
+}
+
+{
+ VMLSDAV 1111 1110 1111 ... 0 ... . 1110 . 0 . 0 ... 1 @vmladav_nosz
+ VRMLSLDAVH 1111 1110 1 ... ... 0 ... . 1110 . 0 . 0 ... 1 @vmlaldav_nosz
+}
+
+VMLADAV_S 1110 1110 1111 ... 0 ... . 1111 . 0 . 0 ... 1 @vmladav_nosz
+VMLADAV_U 1111 1110 1111 ... 0 ... . 1111 . 0 . 0 ... 1 @vmladav_nosz
{
VMAXV_S 1110 1110 1110 .. 10 .... 1111 0 0 . 0 ... 0 @vmaxv
VMINV_S 1110 1110 1110 .. 10 .... 1111 1 0 . 0 ... 0 @vmaxv
VMAXAV 1110 1110 1110 .. 00 .... 1111 0 0 . 0 ... 0 @vmaxv
VMINAV 1110 1110 1110 .. 00 .... 1111 1 0 . 0 ... 0 @vmaxv
+ VMLADAV_S 1110 1110 1111 ... 0 ... . 1111 . 0 . 0 ... 0 @vmladav_nosz
VRMLALDAVH_S 1110 1110 1 ... ... 0 ... . 1111 . 0 . 0 ... 0 @vmlaldav_nosz
}
{
VMAXV_U 1111 1110 1110 .. 10 .... 1111 0 0 . 0 ... 0 @vmaxv
VMINV_U 1111 1110 1110 .. 10 .... 1111 1 0 . 0 ... 0 @vmaxv
+ VMLADAV_U 1111 1110 1111 ... 0 ... . 1111 . 0 . 0 ... 0 @vmladav_nosz
VRMLALDAVH_U 1111 1110 1 ... ... 0 ... . 1111 . 0 . 0 ... 0 @vmlaldav_nosz
}
-VRMLSLDAVH 1111 1110 1 ... ... 0 ... . 1110 . 0 . 0 ... 1 @vmlaldav_nosz
-
# Scalar operations
VADD_scalar 1110 1110 0 . .. ... 1 ... 0 1111 . 100 .... @2scalar
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index 72c30f360ac..ea206c932bc 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -1189,6 +1189,47 @@ DO_LDAV(vmlsldavxsh, 2, int16_t, true, +=, -=)
DO_LDAV(vmlsldavsw, 4, int32_t, false, +=, -=)
DO_LDAV(vmlsldavxsw, 4, int32_t, true, +=, -=)
+/*
+ * Multiply add dual accumulate ops
+ */
+#define DO_DAV(OP, ESIZE, TYPE, XCHG, EVENACC, ODDACC) \
+ uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \
+ void *vm, uint32_t a) \
+ { \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned e; \
+ TYPE *n = vn, *m = vm; \
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+ if (mask & 1) { \
+ if (e & 1) { \
+ a ODDACC \
+ n[H##ESIZE(e - 1 * XCHG)] * m[H##ESIZE(e)]; \
+ } else { \
+ a EVENACC \
+ n[H##ESIZE(e + 1 * XCHG)] * m[H##ESIZE(e)]; \
+ } \
+ } \
+ } \
+ mve_advance_vpt(env); \
+ return a; \
+ }
+
+#define DO_DAV_S(INSN, XCHG, EVENACC, ODDACC) \
+ DO_DAV(INSN##b, 1, int8_t, XCHG, EVENACC, ODDACC) \
+ DO_DAV(INSN##h, 2, int16_t, XCHG, EVENACC, ODDACC) \
+ DO_DAV(INSN##w, 4, int32_t, XCHG, EVENACC, ODDACC)
+
+#define DO_DAV_U(INSN, XCHG, EVENACC, ODDACC) \
+ DO_DAV(INSN##b, 1, uint8_t, XCHG, EVENACC, ODDACC) \
+ DO_DAV(INSN##h, 2, uint16_t, XCHG, EVENACC, ODDACC) \
+ DO_DAV(INSN##w, 4, uint32_t, XCHG, EVENACC, ODDACC)
+
+DO_DAV_S(vmladavs, false, +=, +=)
+DO_DAV_U(vmladavu, false, +=, +=)
+DO_DAV_S(vmlsdav, false, +=, -=)
+DO_DAV_S(vmladavsx, true, +=, +=)
+DO_DAV_S(vmlsdavx, true, +=, -=)
+
/*
* Rounding multiply add long dual accumulate high. In the pseudocode
* this is implemented with a 72-bit internal accumulator value of which
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index 676411e05cb..92ed1be83e7 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -46,6 +46,7 @@ typedef void MVEGenVIWDUPFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_i32, TCGv_i32, TC
typedef void MVEGenCmpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
typedef void MVEGenScalarCmpFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
typedef void MVEGenVABAVFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
+typedef void MVEGenDualAccOpFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
/* Return the offset of a Qn register (same semantics as aa32_vfp_qreg()) */
static inline long mve_qreg_offset(unsigned reg)
@@ -765,6 +766,69 @@ static bool trans_VRMLSLDAVH(DisasContext *s, arg_vmlaldav *a)
return do_long_dual_acc(s, a, fns[a->x]);
}
+static bool do_dual_acc(DisasContext *s, arg_vmladav *a, MVEGenDualAccOpFn *fn)
+{
+ TCGv_ptr qn, qm;
+ TCGv_i32 rda;
+
+ if (!dc_isar_feature(aa32_mve, s) ||
+ !mve_check_qreg_bank(s, a->qn) ||
+ !fn) {
+ return false;
+ }
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
+ return true;
+ }
+
+ qn = mve_qreg_ptr(a->qn);
+ qm = mve_qreg_ptr(a->qm);
+
+ /*
+ * This insn is subject to beat-wise execution. Partial execution
+ * of an A=0 (no-accumulate) insn which does not execute the first
+ * beat must start with the current rda value, not 0.
+ */
+ if (a->a || mve_skip_first_beat(s)) {
+ rda = load_reg(s, a->rda);
+ } else {
+ rda = tcg_const_i32(0);
+ }
+
+ fn(rda, cpu_env, qn, qm, rda);
+ store_reg(s, a->rda, rda);
+ tcg_temp_free_ptr(qn);
+ tcg_temp_free_ptr(qm);
+
+ mve_update_eci(s);
+ return true;
+}
+
+#define DO_DUAL_ACC(INSN, FN) \
+ static bool trans_##INSN(DisasContext *s, arg_vmladav *a) \
+ { \
+ static MVEGenDualAccOpFn * const fns[4][2] = { \
+ { gen_helper_mve_##FN##b, gen_helper_mve_##FN##xb }, \
+ { gen_helper_mve_##FN##h, gen_helper_mve_##FN##xh }, \
+ { gen_helper_mve_##FN##w, gen_helper_mve_##FN##xw }, \
+ { NULL, NULL }, \
+ }; \
+ return do_dual_acc(s, a, fns[a->size][a->x]); \
+ }
+
+DO_DUAL_ACC(VMLADAV_S, vmladavs)
+DO_DUAL_ACC(VMLSDAV, vmlsdav)
+
+static bool trans_VMLADAV_U(DisasContext *s, arg_vmladav *a)
+{
+ static MVEGenDualAccOpFn * const fns[4][2] = {
+ { gen_helper_mve_vmladavub, NULL },
+ { gen_helper_mve_vmladavuh, NULL },
+ { gen_helper_mve_vmladavuw, NULL },
+ { NULL, NULL },
+ };
+ return do_dual_acc(s, a, fns[a->size][a->x]);
+}
+
static void gen_vpst(DisasContext *s, uint32_t mask)
{
/*
--
2.20.1