From: Richard Henderson
Subject: [PATCH v4 29/78] target/arm: Implement SVE2 SQSHRN, SQRSHRN
Date: Tue, 9 Mar 2021 08:19:52 -0800
This completes the section "SVE2 bitwise shift right narrow".
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/arm/helper-sve.h | 16 ++++++
target/arm/sve.decode | 4 ++
target/arm/sve_helper.c | 24 +++++++++
target/arm/translate-sve.c | 105 +++++++++++++++++++++++++++++++++++++
4 files changed, 149 insertions(+)
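Aside, not part of the patch: a minimal standalone sketch of the arithmetic the new DO_SQSHRN_*/DO_SQRSHRN_* macros express, shown for the 16-bit to 8-bit bottom case. The names sat_s8 and sqshrnb_ref are made up for illustration and do not exist in QEMU; the "T" (top) forms differ only in writing the odd-numbered half-width elements and leaving the even ones unchanged.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Saturate a wider signed value to the int8_t range. */
static int8_t sat_s8(int64_t x)
{
    return x < INT8_MIN ? INT8_MIN : x > INT8_MAX ? INT8_MAX : x;
}

/*
 * Bottom narrow, 16 -> 8 bits: shift each source element right by
 * 'shr' (adding 1 << (shr - 1) first for the rounding SQRSHRN forms),
 * saturate to int8_t, store the result in the even byte of the
 * destination element and zero the odd byte.
 */
static void sqshrnb_ref(uint8_t *d, const int16_t *n, int elems,
                        unsigned shr, bool round)
{
    for (int i = 0; i < elems; i++) {
        int32_t v = n[i];
        if (round && shr > 0) {
            v += 1 << (shr - 1);
        }
        d[2 * i] = (uint8_t)sat_s8(v >> shr);
        d[2 * i + 1] = 0;
    }
}

int main(void)
{
    int16_t src[2] = { 1000, -1000 };
    uint8_t dst[4];

    sqshrnb_ref(dst, src, 2, 2, false);
    /* 1000 >> 2 = 250 saturates to 127; -1000 >> 2 = -250 to -128. */
    printf("%d %d\n", (int8_t)dst[0], (int8_t)dst[2]);
    return 0;
}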
diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
index ba6a24fc8b..1c7fe8e417 100644
--- a/target/arm/helper-sve.h
+++ b/target/arm/helper-sve.h
@@ -2476,6 +2476,22 @@ DEF_HELPER_FLAGS_3(sve2_sqrshrunt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(sve2_sqrshrunt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(sve2_sqrshrunt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqshrnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqshrnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqshrnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_sqshrnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqshrnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqshrnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_sqrshrnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqrshrnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqrshrnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_sqrshrnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqrshrnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqrshrnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
DEF_HELPER_FLAGS_3(sve2_uqshrnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(sve2_uqshrnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(sve2_uqshrnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index 13b5da0856..0674464695 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -1296,6 +1296,10 @@ SHRNB 01000101 .. 1 ..... 00 0100 ..... ..... @rd_rn_tszimm_shr
SHRNT 01000101 .. 1 ..... 00 0101 ..... ..... @rd_rn_tszimm_shr
RSHRNB 01000101 .. 1 ..... 00 0110 ..... ..... @rd_rn_tszimm_shr
RSHRNT 01000101 .. 1 ..... 00 0111 ..... ..... @rd_rn_tszimm_shr
+SQSHRNB 01000101 .. 1 ..... 00 1000 ..... ..... @rd_rn_tszimm_shr
+SQSHRNT 01000101 .. 1 ..... 00 1001 ..... ..... @rd_rn_tszimm_shr
+SQRSHRNB 01000101 .. 1 ..... 00 1010 ..... ..... @rd_rn_tszimm_shr
+SQRSHRNT 01000101 .. 1 ..... 00 1011 ..... ..... @rd_rn_tszimm_shr
UQSHRNB 01000101 .. 1 ..... 00 1100 ..... ..... @rd_rn_tszimm_shr
UQSHRNT 01000101 .. 1 ..... 00 1101 ..... ..... @rd_rn_tszimm_shr
UQRSHRNB 01000101 .. 1 ..... 00 1110 ..... ..... @rd_rn_tszimm_shr
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index 83d3547f67..4b487d6f5f 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -1971,6 +1971,30 @@ DO_SHRNT(sve2_sqrshrunt_h, int16_t, uint8_t, H1_2, H1, DO_SQRSHRUN_H)
DO_SHRNT(sve2_sqrshrunt_s, int32_t, uint16_t, H1_4, H1_2, DO_SQRSHRUN_S)
DO_SHRNT(sve2_sqrshrunt_d, int64_t, uint32_t, , H1_4, DO_SQRSHRUN_D)
+#define DO_SQSHRN_H(x, sh) do_sat_bhs(x >> sh, INT8_MIN, INT8_MAX)
+#define DO_SQSHRN_S(x, sh) do_sat_bhs(x >> sh, INT16_MIN, INT16_MAX)
+#define DO_SQSHRN_D(x, sh) do_sat_bhs(x >> sh, INT32_MIN, INT32_MAX)
+
+DO_SHRNB(sve2_sqshrnb_h, int16_t, uint8_t, DO_SQSHRN_H)
+DO_SHRNB(sve2_sqshrnb_s, int32_t, uint16_t, DO_SQSHRN_S)
+DO_SHRNB(sve2_sqshrnb_d, int64_t, uint32_t, DO_SQSHRN_D)
+
+DO_SHRNT(sve2_sqshrnt_h, int16_t, uint8_t, H1_2, H1, DO_SQSHRN_H)
+DO_SHRNT(sve2_sqshrnt_s, int32_t, uint16_t, H1_4, H1_2, DO_SQSHRN_S)
+DO_SHRNT(sve2_sqshrnt_d, int64_t, uint32_t, , H1_4, DO_SQSHRN_D)
+
+#define DO_SQRSHRN_H(x, sh) do_sat_bhs(do_srshr(x, sh), INT8_MIN, INT8_MAX)
+#define DO_SQRSHRN_S(x, sh) do_sat_bhs(do_srshr(x, sh), INT16_MIN, INT16_MAX)
+#define DO_SQRSHRN_D(x, sh) do_sat_bhs(do_srshr(x, sh), INT32_MIN, INT32_MAX)
+
+DO_SHRNB(sve2_sqrshrnb_h, int16_t, uint8_t, DO_SQRSHRN_H)
+DO_SHRNB(sve2_sqrshrnb_s, int32_t, uint16_t, DO_SQRSHRN_S)
+DO_SHRNB(sve2_sqrshrnb_d, int64_t, uint32_t, DO_SQRSHRN_D)
+
+DO_SHRNT(sve2_sqrshrnt_h, int16_t, uint8_t, H1_2, H1, DO_SQRSHRN_H)
+DO_SHRNT(sve2_sqrshrnt_s, int32_t, uint16_t, H1_4, H1_2, DO_SQRSHRN_S)
+DO_SHRNT(sve2_sqrshrnt_d, int64_t, uint32_t, , H1_4, DO_SQRSHRN_D)
+
#define DO_UQSHRN_H(x, sh) MIN(x >> sh, UINT8_MAX)
#define DO_UQSHRN_S(x, sh) MIN(x >> sh, UINT16_MAX)
#define DO_UQSHRN_D(x, sh) MIN(x >> sh, UINT32_MAX)
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index 6a3c34a4fc..5380ed26c1 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -6957,6 +6957,111 @@ static bool trans_SQRSHRUNT(DisasContext *s, arg_rri_esz *a)
return do_sve2_shr_narrow(s, a, ops);
}
+static void gen_sqshrnb_vec(unsigned vece, TCGv_vec d,
+ TCGv_vec n, int64_t shr)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ int halfbits = 4 << vece;
+ int64_t max = MAKE_64BIT_MASK(0, halfbits - 1);
+ int64_t min = -max - 1;
+
+ tcg_gen_sari_vec(vece, n, n, shr);
+ tcg_gen_dupi_vec(vece, t, min);
+ tcg_gen_smax_vec(vece, n, n, t);
+ tcg_gen_dupi_vec(vece, t, max);
+ tcg_gen_smin_vec(vece, n, n, t);
+ tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
+ tcg_gen_and_vec(vece, d, n, t);
+ tcg_temp_free_vec(t);
+}
+
+static bool trans_SQSHRNB(DisasContext *s, arg_rri_esz *a)
+{
+ static const TCGOpcode vec_list[] = {
+ INDEX_op_sari_vec, INDEX_op_smax_vec, INDEX_op_smin_vec, 0
+ };
+ static const GVecGen2i ops[3] = {
+ { .fniv = gen_sqshrnb_vec,
+ .opt_opc = vec_list,
+ .fno = gen_helper_sve2_sqshrnb_h,
+ .vece = MO_16 },
+ { .fniv = gen_sqshrnb_vec,
+ .opt_opc = vec_list,
+ .fno = gen_helper_sve2_sqshrnb_s,
+ .vece = MO_32 },
+ { .fniv = gen_sqshrnb_vec,
+ .opt_opc = vec_list,
+ .fno = gen_helper_sve2_sqshrnb_d,
+ .vece = MO_64 },
+ };
+ return do_sve2_shr_narrow(s, a, ops);
+}
+
+static void gen_sqshrnt_vec(unsigned vece, TCGv_vec d,
+ TCGv_vec n, int64_t shr)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ int halfbits = 4 << vece;
+ int64_t max = MAKE_64BIT_MASK(0, halfbits - 1);
+ int64_t min = -max - 1;
+
+ tcg_gen_sari_vec(vece, n, n, shr);
+ tcg_gen_dupi_vec(vece, t, min);
+ tcg_gen_smax_vec(vece, n, n, t);
+ tcg_gen_dupi_vec(vece, t, max);
+ tcg_gen_smin_vec(vece, n, n, t);
+ tcg_gen_shli_vec(vece, n, n, halfbits);
+ tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
+ tcg_gen_bitsel_vec(vece, d, t, d, n);
+ tcg_temp_free_vec(t);
+}
+
+static bool trans_SQSHRNT(DisasContext *s, arg_rri_esz *a)
+{
+ static const TCGOpcode vec_list[] = {
+ INDEX_op_shli_vec, INDEX_op_sari_vec,
+ INDEX_op_smax_vec, INDEX_op_smin_vec, 0
+ };
+ static const GVecGen2i ops[3] = {
+ { .fniv = gen_sqshrnt_vec,
+ .opt_opc = vec_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_sqshrnt_h,
+ .vece = MO_16 },
+ { .fniv = gen_sqshrnt_vec,
+ .opt_opc = vec_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_sqshrnt_s,
+ .vece = MO_32 },
+ { .fniv = gen_sqshrnt_vec,
+ .opt_opc = vec_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_sqshrnt_d,
+ .vece = MO_64 },
+ };
+ return do_sve2_shr_narrow(s, a, ops);
+}
+
+static bool trans_SQRSHRNB(DisasContext *s, arg_rri_esz *a)
+{
+ static const GVecGen2i ops[3] = {
+ { .fno = gen_helper_sve2_sqrshrnb_h },
+ { .fno = gen_helper_sve2_sqrshrnb_s },
+ { .fno = gen_helper_sve2_sqrshrnb_d },
+ };
+ return do_sve2_shr_narrow(s, a, ops);
+}
+
+static bool trans_SQRSHRNT(DisasContext *s, arg_rri_esz *a)
+{
+ static const GVecGen2i ops[3] = {
+ { .fno = gen_helper_sve2_sqrshrnt_h },
+ { .fno = gen_helper_sve2_sqrshrnt_s },
+ { .fno = gen_helper_sve2_sqrshrnt_d },
+ };
+ return do_sve2_shr_narrow(s, a, ops);
+}
+
static void gen_uqshrnb_vec(unsigned vece, TCGv_vec d,
TCGv_vec n, int64_t shr)
{
--
2.25.1