From: Richard Henderson
Subject: [PATCH v7 59/92] target/arm: Implement SVE2 saturating multiply high (indexed)
Date: Mon, 24 May 2021 18:03:25 -0700
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
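For context on the helpers added below: within each 128-bit segment of the
vector, the multiplicand is the single element of Zm selected by the index
(the outer loops step in 16-byte segments and read m[i] once per segment),
and the saturation result computed by do_sqrdmlah_{h,s} is discarded (the
'discard' variable), as in the existing SVE2 sqdmulh helpers.  A minimal,
hypothetical C sketch of the 16-bit per-element operation follows; it is
illustrative only, not code from this patch, and the name sqdmulh16 is made
up -- the real helpers call do_sqrdmlah_h(n, mm, 0, false, round, &discard):

#include <stdint.h>
#include <stdbool.h>

/* SQDMULH when round == false, SQRDMULH when round == true. */
static int16_t sqdmulh16(int16_t n, int16_t m, bool round)
{
    /* Widen so the doubling product and rounding constant cannot overflow. */
    int64_t p = 2 * (int64_t)n * m;

    if (round) {
        p += 1 << 15;               /* round to nearest before dropping the low half */
    }
    p >>= 16;                       /* keep the high half */

    /* Only INT16_MIN * INT16_MIN overflows the doubling; saturate it. */
    return p > INT16_MAX ? INT16_MAX : p;
}

The 32- and 64-bit cases follow the same pattern with wider intermediates.
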
target/arm/helper.h | 14 ++++++
target/arm/sve.decode | 8 ++++
target/arm/translate-sve.c | 8 ++++
target/arm/vec_helper.c | 88 ++++++++++++++++++++++++++++++++++++++
4 files changed, 118 insertions(+)
diff --git a/target/arm/helper.h b/target/arm/helper.h
index eb94b6b1e6..e7c463fff5 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -967,6 +967,20 @@ DEF_HELPER_FLAGS_4(sve2_sqrdmulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve2_sqrdmulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve2_sqrdmulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqdmulh_idx_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqdmulh_idx_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqdmulh_idx_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_sqrdmulh_idx_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqrdmulh_idx_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqrdmulh_idx_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, i32)
+
DEF_HELPER_FLAGS_4(gvec_xar_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
#ifdef TARGET_AARCH64
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index 407d3019d1..35010d755f 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -829,6 +829,14 @@ SQDMULLB_zzx_d 01000100 11 1 ..... 1110.0 ..... ..... @rrx_2a esz=3
SQDMULLT_zzx_s 01000100 10 1 ..... 1110.1 ..... ..... @rrx_3a esz=2
SQDMULLT_zzx_d 01000100 11 1 ..... 1110.1 ..... ..... @rrx_2a esz=3
+# SVE2 saturating multiply high (indexed)
+SQDMULH_zzx_h 01000100 0. 1 ..... 111100 ..... ..... @rrx_3 esz=1
+SQDMULH_zzx_s 01000100 10 1 ..... 111100 ..... ..... @rrx_2 esz=2
+SQDMULH_zzx_d 01000100 11 1 ..... 111100 ..... ..... @rrx_1 esz=3
+SQRDMULH_zzx_h 01000100 0. 1 ..... 111101 ..... ..... @rrx_3 esz=1
+SQRDMULH_zzx_s 01000100 10 1 ..... 111101 ..... ..... @rrx_2 esz=2
+SQRDMULH_zzx_d 01000100 11 1 ..... 111101 ..... ..... @rrx_1 esz=3
+
# SVE2 integer multiply (indexed)
MUL_zzx_h 01000100 0. 1 ..... 111110 ..... ..... @rrx_3 esz=1
MUL_zzx_s 01000100 10 1 ..... 111110 ..... ..... @rrx_2 esz=2
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index 001432eccc..a03fce003e 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -3864,6 +3864,14 @@ DO_SVE2_RRX(trans_MUL_zzx_h, gen_helper_gvec_mul_idx_h)
DO_SVE2_RRX(trans_MUL_zzx_s, gen_helper_gvec_mul_idx_s)
DO_SVE2_RRX(trans_MUL_zzx_d, gen_helper_gvec_mul_idx_d)
+DO_SVE2_RRX(trans_SQDMULH_zzx_h, gen_helper_sve2_sqdmulh_idx_h)
+DO_SVE2_RRX(trans_SQDMULH_zzx_s, gen_helper_sve2_sqdmulh_idx_s)
+DO_SVE2_RRX(trans_SQDMULH_zzx_d, gen_helper_sve2_sqdmulh_idx_d)
+
+DO_SVE2_RRX(trans_SQRDMULH_zzx_h, gen_helper_sve2_sqrdmulh_idx_h)
+DO_SVE2_RRX(trans_SQRDMULH_zzx_s, gen_helper_sve2_sqrdmulh_idx_s)
+DO_SVE2_RRX(trans_SQRDMULH_zzx_d, gen_helper_sve2_sqrdmulh_idx_d)
+
#undef DO_SVE2_RRX
#define DO_SVE2_RRX_TB(NAME, FUNC, TOP) \
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
index 25061c15e1..8b7269d8e1 100644
--- a/target/arm/vec_helper.c
+++ b/target/arm/vec_helper.c
@@ -240,6 +240,36 @@ void HELPER(sve2_sqrdmulh_h)(void *vd, void *vn, void *vm, uint32_t desc)
}
}
+void HELPER(sve2_sqdmulh_idx_h)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+    intptr_t i, j, opr_sz = simd_oprsz(desc);
+    int idx = simd_data(desc);
+    int16_t *d = vd, *n = vn, *m = (int16_t *)vm + H2(idx);
+    uint32_t discard;
+
+    for (i = 0; i < opr_sz / 2; i += 16 / 2) {
+        int16_t mm = m[i];
+        for (j = 0; j < 16 / 2; ++j) {
+            d[i + j] = do_sqrdmlah_h(n[i + j], mm, 0, false, false, &discard);
+        }
+    }
+}
+
+void HELPER(sve2_sqrdmulh_idx_h)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+    intptr_t i, j, opr_sz = simd_oprsz(desc);
+    int idx = simd_data(desc);
+    int16_t *d = vd, *n = vn, *m = (int16_t *)vm + H2(idx);
+    uint32_t discard;
+
+    for (i = 0; i < opr_sz / 2; i += 16 / 2) {
+        int16_t mm = m[i];
+        for (j = 0; j < 16 / 2; ++j) {
+            d[i + j] = do_sqrdmlah_h(n[i + j], mm, 0, false, true, &discard);
+        }
+    }
+}
+
/* Signed saturating rounding doubling multiply-accumulate high half, 32-bit */
int32_t do_sqrdmlah_s(int32_t src1, int32_t src2, int32_t src3,
bool neg, bool round, uint32_t *sat)
@@ -373,6 +373,36 @@ void HELPER(sve2_sqrdmulh_s)(void *vd, void *vn, void *vm, uint32_t desc)
}
}
+void HELPER(sve2_sqdmulh_idx_s)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+    intptr_t i, j, opr_sz = simd_oprsz(desc);
+    int idx = simd_data(desc);
+    int32_t *d = vd, *n = vn, *m = (int32_t *)vm + H4(idx);
+    uint32_t discard;
+
+    for (i = 0; i < opr_sz / 4; i += 16 / 4) {
+        int32_t mm = m[i];
+        for (j = 0; j < 16 / 4; ++j) {
+            d[i + j] = do_sqrdmlah_s(n[i + j], mm, 0, false, false, &discard);
+        }
+    }
+}
+
+void HELPER(sve2_sqrdmulh_idx_s)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+    intptr_t i, j, opr_sz = simd_oprsz(desc);
+    int idx = simd_data(desc);
+    int32_t *d = vd, *n = vn, *m = (int32_t *)vm + H4(idx);
+    uint32_t discard;
+
+    for (i = 0; i < opr_sz / 4; i += 16 / 4) {
+        int32_t mm = m[i];
+        for (j = 0; j < 16 / 4; ++j) {
+            d[i + j] = do_sqrdmlah_s(n[i + j], mm, 0, false, true, &discard);
+        }
+    }
+}
+
/* Signed saturating rounding doubling multiply-accumulate high half, 64-bit */
static int64_t do_sat128_d(Int128 r)
{
@@ -452,6 +452,34 @@ void HELPER(sve2_sqrdmulh_d)(void *vd, void *vn, void *vm, uint32_t desc)
}
}
+void HELPER(sve2_sqdmulh_idx_d)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+    intptr_t i, j, opr_sz = simd_oprsz(desc);
+    int idx = simd_data(desc);
+    int64_t *d = vd, *n = vn, *m = (int64_t *)vm + idx;
+
+    for (i = 0; i < opr_sz / 8; i += 16 / 8) {
+        int64_t mm = m[i];
+        for (j = 0; j < 16 / 8; ++j) {
+            d[i + j] = do_sqrdmlah_d(n[i + j], mm, 0, false, false);
+        }
+    }
+}
+
+void HELPER(sve2_sqrdmulh_idx_d)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+    intptr_t i, j, opr_sz = simd_oprsz(desc);
+    int idx = simd_data(desc);
+    int64_t *d = vd, *n = vn, *m = (int64_t *)vm + idx;
+
+    for (i = 0; i < opr_sz / 8; i += 16 / 8) {
+        int64_t mm = m[i];
+        for (j = 0; j < 16 / 8; ++j) {
+            d[i + j] = do_sqrdmlah_d(n[i + j], mm, 0, false, true);
+        }
+    }
+}
+
/* Integer 8 and 16-bit dot-product.
*
* Note that for the loops herein, host endianness does not matter
--
2.25.1