[PULL 101/114] target/arm: Implement 128-bit ZIP, UZP, TRN
From: Peter Maydell
Subject: [PULL 101/114] target/arm: Implement 128-bit ZIP, UZP, TRN
Date: Tue, 25 May 2021 16:07:23 +0100
From: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210525010358.152808-80-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/helper-sve.h | 3 ++
target/arm/sve.decode | 8 ++++++
target/arm/sve_helper.c | 29 +++++++++++++------
target/arm/translate-sve.c | 58 ++++++++++++++++++++++++++++++++++++++
4 files changed, 90 insertions(+), 8 deletions(-)
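
As a reading aid, a minimal stand-alone C sketch of the architectural behaviour being implemented follows (an illustration only, not the QEMU helpers; the names SEG, seg_copy and zipq are invented for this example). A 128-bit "segment" plays the role that a single element plays in the existing _b/_h/_s/_d forms, and when the vector length is not a multiple of 32 bytes the leftover segment of the result is zeroed; that is what the new memset tails in sve_helper.c provide.

/*
 * Sketch of SVE2/F64MM ZIP1.Q / ZIP2.Q on a vector of 'vl' bytes.
 * For simplicity the destination must not alias the sources
 * (the real helpers copy an overlapping source to a temporary).
 */
#include <stdint.h>
#include <string.h>

#define SEG 16  /* one 128-bit segment */

static void seg_copy(uint8_t *d, const uint8_t *s)
{
    memcpy(d, s, SEG);
}

/* high = 0 for ZIP1 (low halves), high = 1 for ZIP2 (high halves). */
static void zipq(uint8_t *zd, const uint8_t *zn, const uint8_t *zm,
                 unsigned vl, int high)
{
    unsigned pairs = vl / (2 * SEG);        /* complete segment pairs  */
    unsigned ofs = high ? pairs * SEG : 0;  /* where the high half starts */

    for (unsigned i = 0; i < pairs; i++) {
        seg_copy(zd + (2 * i + 0) * SEG, zn + ofs + i * SEG);
        seg_copy(zd + (2 * i + 1) * SEG, zm + ofs + i * SEG);
    }
    /* A vector length that is not a multiple of 32 bytes leaves one
       segment without a partner; the result's final segment is zeroed. */
    if (vl % (2 * SEG)) {
        memset(zd + vl - SEG, 0, SEG);
    }
}

int main(void)
{
    uint8_t zn[48], zm[48], zd[48];   /* a 384-bit vector: 3 segments */

    for (int i = 0; i < 48; i++) {
        zn[i] = i;                    /* segments 0,1,2 of Zn */
        zm[i] = 0x80 + i;             /* segments 0,1,2 of Zm */
    }
    zipq(zd, zn, zm, 48, 0);  /* ZIP1.Q: Zn seg 0, Zm seg 0, zeros */
    zipq(zd, zn, zm, 48, 1);  /* ZIP2.Q: Zn seg 1, Zm seg 1, zeros */
    return 0;
}

UZP1_q/UZP2_q and TRN1_q/TRN2_q follow the same pattern at 16-byte granularity. Note that ZIP2_q reuses the same helper as ZIP1_q: do_zip_q() simply offsets the Zn/Zm register addresses by QEMU_ALIGN_DOWN(vsz, 32) / 2 so the helper reads from the high half.
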
diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
index 342bb837214..b43ffce23ac 100644
--- a/target/arm/helper-sve.h
+++ b/target/arm/helper-sve.h
@@ -689,16 +689,19 @@ DEF_HELPER_FLAGS_4(sve_zip_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_zip_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_zip_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_zip_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_zip_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_uzp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_uzp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_uzp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_uzp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_uzp_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_trn_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_trn_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_trn_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_trn_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_trn_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_compact_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_compact_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index 884c5358eb1..5469ce04143 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -590,6 +590,14 @@ UZP2_z 00000101 .. 1 ..... 011 011 ..... ..... @rd_rn_rm
TRN1_z 00000101 .. 1 ..... 011 100 ..... ..... @rd_rn_rm
TRN2_z 00000101 .. 1 ..... 011 101 ..... ..... @rd_rn_rm
+# SVE2 permute vector segments
+ZIP1_q 00000101 10 1 ..... 000 000 ..... ..... @rd_rn_rm_e0
+ZIP2_q 00000101 10 1 ..... 000 001 ..... ..... @rd_rn_rm_e0
+UZP1_q 00000101 10 1 ..... 000 010 ..... ..... @rd_rn_rm_e0
+UZP2_q 00000101 10 1 ..... 000 011 ..... ..... @rd_rn_rm_e0
+TRN1_q 00000101 10 1 ..... 000 110 ..... ..... @rd_rn_rm_e0
+TRN2_q 00000101 10 1 ..... 000 111 ..... ..... @rd_rn_rm_e0
+
### SVE Permute - Predicated Group
# SVE compress active elements
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index a0518549849..d088b1f74ce 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -3492,36 +3492,45 @@ void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
*(TYPE *)(vd + H(2 * i + 0)) = *(TYPE *)(vn + H(i)); \
*(TYPE *)(vd + H(2 * i + sizeof(TYPE))) = *(TYPE *)(vm + H(i)); \
} \
+ if (sizeof(TYPE) == 16 && unlikely(oprsz & 16)) { \
+ memset(vd + oprsz - 16, 0, 16); \
+ } \
}
DO_ZIP(sve_zip_b, uint8_t, H1)
DO_ZIP(sve_zip_h, uint16_t, H1_2)
DO_ZIP(sve_zip_s, uint32_t, H1_4)
DO_ZIP(sve_zip_d, uint64_t, )
+DO_ZIP(sve2_zip_q, Int128, )
#define DO_UZP(NAME, TYPE, H) \
void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
{ \
intptr_t oprsz = simd_oprsz(desc); \
- intptr_t oprsz_2 = oprsz / 2; \
intptr_t odd_ofs = simd_data(desc); \
- intptr_t i; \
+ intptr_t i, p; \
ARMVectorReg tmp_m; \
if (unlikely((vm - vd) < (uintptr_t)oprsz)) { \
vm = memcpy(&tmp_m, vm, oprsz); \
} \
- for (i = 0; i < oprsz_2; i += sizeof(TYPE)) { \
- *(TYPE *)(vd + H(i)) = *(TYPE *)(vn + H(2 * i + odd_ofs)); \
- } \
- for (i = 0; i < oprsz_2; i += sizeof(TYPE)) { \
- *(TYPE *)(vd + H(oprsz_2 + i)) = *(TYPE *)(vm + H(2 * i + odd_ofs)); \
- } \
+ i = 0, p = odd_ofs; \
+ do { \
+ *(TYPE *)(vd + H(i)) = *(TYPE *)(vn + H(p)); \
+ i += sizeof(TYPE), p += 2 * sizeof(TYPE); \
+ } while (p < oprsz); \
+ p -= oprsz; \
+ do { \
+ *(TYPE *)(vd + H(i)) = *(TYPE *)(vm + H(p)); \
+ i += sizeof(TYPE), p += 2 * sizeof(TYPE); \
+ } while (p < oprsz); \
+ tcg_debug_assert(i == oprsz); \
}
DO_UZP(sve_uzp_b, uint8_t, H1)
DO_UZP(sve_uzp_h, uint16_t, H1_2)
DO_UZP(sve_uzp_s, uint32_t, H1_4)
DO_UZP(sve_uzp_d, uint64_t, )
+DO_UZP(sve2_uzp_q, Int128, )
#define DO_TRN(NAME, TYPE, H) \
void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
@@ -3535,12 +3544,16 @@ void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
*(TYPE *)(vd + H(i + 0)) = ae; \
*(TYPE *)(vd + H(i + sizeof(TYPE))) = be; \
} \
+ if (sizeof(TYPE) == 16 && unlikely(oprsz & 16)) { \
+ memset(vd + oprsz - 16, 0, 16); \
+ } \
}
DO_TRN(sve_trn_b, uint8_t, H1)
DO_TRN(sve_trn_h, uint16_t, H1_2)
DO_TRN(sve_trn_s, uint32_t, H1_4)
DO_TRN(sve_trn_d, uint64_t, )
+DO_TRN(sve2_trn_q, Int128, )
#undef DO_ZIP
#undef DO_UZP
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index 1dcdbac0af0..b2aa9130b64 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -2624,6 +2624,32 @@ static bool trans_ZIP2_z(DisasContext *s, arg_rrr_esz *a)
return do_zip(s, a, true);
}
+static bool do_zip_q(DisasContext *s, arg_rrr_esz *a, bool high)
+{
+ if (!dc_isar_feature(aa64_sve_f64mm, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ unsigned high_ofs = high ? QEMU_ALIGN_DOWN(vsz, 32) / 2 : 0;
+ tcg_gen_gvec_3_ool(vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn) + high_ofs,
+ vec_full_reg_offset(s, a->rm) + high_ofs,
+ vsz, vsz, 0, gen_helper_sve2_zip_q);
+ }
+ return true;
+}
+
+static bool trans_ZIP1_q(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_zip_q(s, a, false);
+}
+
+static bool trans_ZIP2_q(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_zip_q(s, a, true);
+}
+
static gen_helper_gvec_3 * const uzp_fns[4] = {
gen_helper_sve_uzp_b, gen_helper_sve_uzp_h,
gen_helper_sve_uzp_s, gen_helper_sve_uzp_d,
@@ -2639,6 +2665,22 @@ static bool trans_UZP2_z(DisasContext *s, arg_rrr_esz *a)
return do_zzz_data_ool(s, a, 1 << a->esz, uzp_fns[a->esz]);
}
+static bool trans_UZP1_q(DisasContext *s, arg_rrr_esz *a)
+{
+ if (!dc_isar_feature(aa64_sve_f64mm, s)) {
+ return false;
+ }
+ return do_zzz_data_ool(s, a, 0, gen_helper_sve2_uzp_q);
+}
+
+static bool trans_UZP2_q(DisasContext *s, arg_rrr_esz *a)
+{
+ if (!dc_isar_feature(aa64_sve_f64mm, s)) {
+ return false;
+ }
+ return do_zzz_data_ool(s, a, 16, gen_helper_sve2_uzp_q);
+}
+
static gen_helper_gvec_3 * const trn_fns[4] = {
gen_helper_sve_trn_b, gen_helper_sve_trn_h,
gen_helper_sve_trn_s, gen_helper_sve_trn_d,
@@ -2654,6 +2696,22 @@ static bool trans_TRN2_z(DisasContext *s, arg_rrr_esz *a)
return do_zzz_data_ool(s, a, 1 << a->esz, trn_fns[a->esz]);
}
+static bool trans_TRN1_q(DisasContext *s, arg_rrr_esz *a)
+{
+ if (!dc_isar_feature(aa64_sve_f64mm, s)) {
+ return false;
+ }
+ return do_zzz_data_ool(s, a, 0, gen_helper_sve2_trn_q);
+}
+
+static bool trans_TRN2_q(DisasContext *s, arg_rrr_esz *a)
+{
+ if (!dc_isar_feature(aa64_sve_f64mm, s)) {
+ return false;
+ }
+ return do_zzz_data_ool(s, a, 16, gen_helper_sve2_trn_q);
+}
+
/*
*** SVE Permute Vector - Predicated Group
*/
--
2.20.1