From: Richard Henderson
Subject: [PATCH v6 23/45] target/arm: Implement SME ADDHA, ADDVA
Date: Fri, 8 Jul 2022 20:45:18 +0530

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/arm/helper-sme.h | 5 +++
target/arm/sme.decode | 11 +++++
target/arm/sme_helper.c | 90 ++++++++++++++++++++++++++++++++++++++
target/arm/translate-sme.c | 31 +++++++++++++
4 files changed, 137 insertions(+)
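
For reference, the semantics being implemented: ADDHA adds element c of
Zn to every active element of column c of the ZA tile, while ADDVA adds
element r of Zn to every active element of row r; in both cases Pn gates
by row index and Pm by column index.  A minimal scalar model of the .S
form (illustrative only, with a flat tile layout and bool predicates in
place of the real representations):

#include <stdbool.h>
#include <stdint.h>

/* za is a dim x dim tile stored row-major; ADDVA_s would add zn[r]. */
static void addha_s_model(uint32_t *za, const uint32_t *zn,
                          const bool *pn, const bool *pm, int dim)
{
    for (int r = 0; r < dim; r++) {
        if (!pn[r]) {
            continue;
        }
        for (int c = 0; c < dim; c++) {
            if (pm[c]) {
                za[r * dim + c] += zn[c];
            }
        }
    }
}
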
diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h
index 95f6e88bdd..753e9e624c 100644
--- a/target/arm/helper-sme.h
+++ b/target/arm/helper-sme.h
@@ -115,3 +115,8 @@ DEF_HELPER_FLAGS_5(sme_st1q_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i
DEF_HELPER_FLAGS_5(sme_st1q_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_5(sme_st1q_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_5(sme_st1q_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_5(sme_addha_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sme_addva_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sme_addha_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sme_addva_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
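
A note on the declarations above: via helper-proto.h/helper-gen.h each
line should expand into a C prototype plus a translator-side emitter,
roughly of the shape sketched below (parameter names invented here),
and TCG_CALL_NO_RWG records that the helper neither reads nor writes
TCG globals, so the call does not force that tracked CPU state to be
synced around it.

/* Sketch of the expansion for the first entry: */
void helper_sme_addha_s(void *zda, void *zn, void *pn, void *pm,
                        uint32_t desc);
void gen_helper_sme_addha_s(TCGv_ptr zda, TCGv_ptr zn, TCGv_ptr pn,
                            TCGv_ptr pm, TCGv_i32 desc);
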
diff --git a/target/arm/sme.decode b/target/arm/sme.decode
index f1ebd857a5..8cb6c4053c 100644
--- a/target/arm/sme.decode
+++ b/target/arm/sme.decode
@@ -53,3 +53,14 @@ LDST1 1110000 111 st:1 rm:5 v:1 .. pg:3 rn:5 0 za_imm:4 \
LDR 1110000 100 0 000000 .. 000 ..... 0 .... @ldstr
STR 1110000 100 1 000000 .. 000 ..... 0 .... @ldstr
+
+### SME Add Vector to Array
+
+&adda zad zn pm pn
+@adda_32 ........ .. ..... . pm:3 pn:3 zn:5 ... zad:2 &adda
+@adda_64 ........ .. ..... . pm:3 pn:3 zn:5 .. zad:3 &adda
+
+ADDHA_s 11000000 10 01000 0 ... ... ..... 000 .. @adda_32
+ADDVA_s 11000000 10 01000 1 ... ... ..... 000 .. @adda_32
+ADDHA_d 11000000 11 01000 0 ... ... ..... 00 ... @adda_64
+ADDVA_d 11000000 11 01000 1 ... ... ..... 00 ... @adda_64
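
To make the new formats concrete: the 32 pattern characters map to bits
[31:0], so in @adda_32 pm is bits [15:13], pn is [12:10], zn is [9:5]
and zad is [1:0]; @adda_64 takes bit 2 as well, widening zad to [2:0]
for the eight .D tiles.  A standalone sketch of the equivalent field
extraction that decodetree generates (the instruction word here is made
up purely for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t insn = 0xc09068a1;         /* hypothetical ADDHA_s encoding */
    unsigned pm  = (insn >> 13) & 0x7;  /* bits [15:13] */
    unsigned pn  = (insn >> 10) & 0x7;  /* bits [12:10] */
    unsigned zn  = (insn >> 5) & 0x1f;  /* bits [9:5] */
    unsigned zad = insn & 0x3;          /* bits [1:0]; [2:0] for @adda_64 */

    /* For this example word: pm=3 pn=2 zn=5 zad=1. */
    printf("pm=%u pn=%u zn=%u zad=%u\n", pm, pn, zn, zad);
    return 0;
}
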
diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c
index 10fd1ed910..f1e924db74 100644
--- a/target/arm/sme_helper.c
+++ b/target/arm/sme_helper.c
@@ -828,3 +828,93 @@ DO_ST(q, _be, MO_128)
DO_ST(q, _le, MO_128)
#undef DO_ST
+
+void HELPER(sme_addha_s)(void *vzda, void *vzn, void *vpn,
+                         void *vpm, uint32_t desc)
+{
+    intptr_t row, col, oprsz = simd_oprsz(desc) / 4;
+    uint64_t *pn = vpn, *pm = vpm;
+    uint32_t *zda = vzda, *zn = vzn;
+
+    for (row = 0; row < oprsz; ) {
+        uint64_t pa = pn[row >> 4];
+        do {
+            if (pa & 1) {
+                for (col = 0; col < oprsz; ) {
+                    uint64_t pb = pm[col >> 4];
+                    do {
+                        if (pb & 1) {
+                            zda[tile_vslice_index(row) + H4(col)] += zn[H4(col)];
+                        }
+                        pb >>= 4;
+                    } while (++col & 15);
+                }
+            }
+            pa >>= 4;
+        } while (++row & 15);
+    }
+}
+
+void HELPER(sme_addha_d)(void *vzda, void *vzn, void *vpn,
+                         void *vpm, uint32_t desc)
+{
+    intptr_t row, col, oprsz = simd_oprsz(desc) / 8;
+    uint8_t *pn = vpn, *pm = vpm;
+    uint64_t *zda = vzda, *zn = vzn;
+
+    for (row = 0; row < oprsz; ++row) {
+        if (pn[H1(row)] & 1) {
+            for (col = 0; col < oprsz; ++col) {
+                if (pm[H1(col)] & 1) {
+                    zda[tile_vslice_index(row) + col] += zn[col];
+                }
+            }
+        }
+    }
+}
+
+void HELPER(sme_addva_s)(void *vzda, void *vzn, void *vpn,
+                         void *vpm, uint32_t desc)
+{
+    intptr_t row, col, oprsz = simd_oprsz(desc) / 4;
+    uint64_t *pn = vpn, *pm = vpm;
+    uint32_t *zda = vzda, *zn = vzn;
+
+    for (row = 0; row < oprsz; ) {
+        uint64_t pa = pn[row >> 4];
+        do {
+            if (pa & 1) {
+                uint32_t zn_row = zn[H4(row)];
+                for (col = 0; col < oprsz; ) {
+                    uint64_t pb = pm[col >> 4];
+                    do {
+                        if (pb & 1) {
+                            zda[tile_vslice_index(row) + H4(col)] += zn_row;
+                        }
+                        pb >>= 4;
+                    } while (++col & 15);
+                }
+            }
+            pa >>= 4;
+        } while (++row & 15);
+    }
+}
+
+void HELPER(sme_addva_d)(void *vzda, void *vzn, void *vpn,
+                         void *vpm, uint32_t desc)
+{
+    intptr_t row, col, oprsz = simd_oprsz(desc) / 8;
+    uint8_t *pn = vpn, *pm = vpm;
+    uint64_t *zda = vzda, *zn = vzn;
+
+    for (row = 0; row < oprsz; ++row) {
+        if (pn[H1(row)] & 1) {
+            uint64_t zn_row = zn[row];
+            for (col = 0; col < oprsz; ++col) {
+                if (pm[H1(col)] & 1) {
+                    zda[tile_vslice_index(row) + col] += zn_row;
+                }
+            }
+        }
+    }
+}
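
One note on the predicate indexing in these helpers: SVE/SME predicate
registers hold one bit per byte of the vector, so the governing bit for
element i of esz-byte elements is bit i*esz.  That is why the .S
helpers walk the predicate in uint64_t chunks of 16 elements
(pn[row >> 4], pb >>= 4, ++col & 15), while the .D helpers test bit 0
of predicate byte i (pn[H1(row)] & 1); tile_vslice_index(), defined
earlier in this file, turns a row number into the element offset of
that horizontal slice within the tile.  The same test written per
element, as a sketch assuming a little-endian host (the real code gets
host endianness right via the H1()/H4() macros):

#include <stdbool.h>
#include <stdint.h>

/* True if the predicate bit governing element i (esz bytes) is set. */
static inline bool pred_elt_active(const uint8_t *pg, int i, int esz)
{
    int bit = i * esz;
    return (pg[bit >> 3] >> (bit & 7)) & 1;
}
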
diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c
index 35c2644812..d3b9cdd5c4 100644
--- a/target/arm/translate-sme.c
+++ b/target/arm/translate-sme.c
@@ -267,3 +267,34 @@ static bool do_ldst_r(DisasContext *s, arg_ldstr *a, GenLdStR *fn)
TRANS_FEAT(LDR, aa64_sme, do_ldst_r, a, gen_sve_ldr)
TRANS_FEAT(STR, aa64_sme, do_ldst_r, a, gen_sve_str)
+
+static bool do_adda(DisasContext *s, arg_adda *a, MemOp esz,
+                    gen_helper_gvec_4 *fn)
+{
+    int svl = streaming_vec_reg_size(s);
+    uint32_t desc = simd_desc(svl, svl, 0);
+    TCGv_ptr za, zn, pn, pm;
+
+    if (!sme_smza_enabled_check(s)) {
+        return true;
+    }
+
+    /* Sum XZR+zad to find ZAd. */
+    za = get_tile_rowcol(s, esz, 31, a->zad, false);
+    zn = vec_full_reg_ptr(s, a->zn);
+    pn = pred_full_reg_ptr(s, a->pn);
+    pm = pred_full_reg_ptr(s, a->pm);
+
+    fn(za, zn, pn, pm, tcg_constant_i32(desc));
+
+    tcg_temp_free_ptr(za);
+    tcg_temp_free_ptr(zn);
+    tcg_temp_free_ptr(pn);
+    tcg_temp_free_ptr(pm);
+    return true;
+}
+
+TRANS_FEAT(ADDHA_s, aa64_sme, do_adda, a, MO_32, gen_helper_sme_addha_s)
+TRANS_FEAT(ADDVA_s, aa64_sme, do_adda, a, MO_32, gen_helper_sme_addva_s)
+TRANS_FEAT(ADDHA_d, aa64_sme_i16i64, do_adda, a, MO_64, gen_helper_sme_addha_d)
+TRANS_FEAT(ADDVA_d, aa64_sme_i16i64, do_adda, a, MO_64, gen_helper_sme_addva_d)
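
Two details worth calling out in the translation glue: simd_desc(svl,
svl, 0) packs the streaming vector length into the 32-bit desc operand,
which the helpers recover with simd_oprsz() and divide by the element
size to get the elements per row; and each TRANS_FEAT line generates
the trans_* hook that decodetree calls, with the .D forms gated on
FEAT_SME_I16I64.  Roughly, assuming the TRANS_FEAT definition in
target/arm/translate.h:

/* Sketch of what TRANS_FEAT(ADDHA_d, ...) expands to: */
static bool trans_ADDHA_d(DisasContext *s, arg_ADDHA_d *a)
{
    return dc_isar_feature(aa64_sme_i16i64, s) &&
           do_adda(s, a, MO_64, gen_helper_sme_addha_d);
}
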
--
2.34.1