From: Richard Henderson
Subject: [PATCH v4 49/78] target/arm: Pass separate addend to FCMLA helpers
Date: Tue, 9 Mar 2021 08:20:12 -0800
For SVE, we potentially have a 4th argument coming from the
movprfx instruction. Currently we do not optimize movprfx,
so the problem is not visible.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
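
To see the shape of the change before reading the diff: only the source of
the addend moves. The sketch below is illustrative C, not the QEMU helpers
themselves; the names and the simplified (non-complex) multiply-add are
assumptions for exposition, whereas the real helpers do complex arithmetic
with rotations and take a float_status pointer.

    #include <stddef.h>

    /*
     * Illustrative only.  Old shape: dst[i] = n[i]*m[i] + dst[i],
     * where the destination doubles as the accumulator, so rd == ra
     * is baked into the helper.  New shape reads the addend from a
     * separate vector, so a movprfx'd destination can supply an
     * accumulator distinct from dst.
     */
    static void fcmla_shape(float *dst, const float *n, const float *m,
                            const float *acc, size_t len)
    {
        for (size_t i = 0; i < len; i++) {
            dst[i] = n[i] * m[i] + acc[i];  /* addend from acc, not dst */
        }
    }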
---
target/arm/helper.h | 20 ++++++-------
target/arm/translate-a64.c | 28 ++++++++++++++----
target/arm/translate-sve.c | 5 ++--
target/arm/vec_helper.c | 50 +++++++++++++--------------------
target/arm/translate-neon.c.inc | 10 ++++---
5 files changed, 62 insertions(+), 51 deletions(-)
diff --git a/target/arm/helper.h b/target/arm/helper.h
index f4b092ee1c..72c5bf6aca 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -629,16 +629,16 @@ DEF_HELPER_FLAGS_5(gvec_fcadds, TCG_CALL_NO_RWG,
DEF_HELPER_FLAGS_5(gvec_fcaddd, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fcmlah, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fcmlah_idx, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fcmlas, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fcmlas_idx, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_fcmlad, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(gvec_fcmlah, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(gvec_fcmlah_idx, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(gvec_fcmlas, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(gvec_fcmlas_idx, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(gvec_fcmlad, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(neon_paddh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(neon_pmaxh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index c221f1b4ea..d3a1368090 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -710,6 +710,23 @@ static void gen_gvec_op4_ool(DisasContext *s, bool is_q, int rd, int rn,
is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}
+/*
+ * Expand a 4-operand + fpstatus pointer + simd data value operation using
+ * an out-of-line helper.
+ */
+static void gen_gvec_op4_fpst(DisasContext *s, bool is_q, int rd, int rn,
+ int rm, int ra, bool is_fp16, int data,
+ gen_helper_gvec_4_ptr *fn)
+{
+ TCGv_ptr fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
+ tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
+ vec_full_reg_offset(s, rn),
+ vec_full_reg_offset(s, rm),
+ vec_full_reg_offset(s, ra), fpst,
+ is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
+ tcg_temp_free_ptr(fpst);
+}
+
/* Set ZF and NF based on a 64 bit result. This is alas fiddlier
* than the 32 bit equivalent.
*/
@@ -12211,15 +12228,15 @@ static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
rot = extract32(opcode, 0, 2);
switch (size) {
case 1:
- gen_gvec_op3_fpst(s, is_q, rd, rn, rm, true, rot,
+ gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, true, rot,
gen_helper_gvec_fcmlah);
break;
case 2:
- gen_gvec_op3_fpst(s, is_q, rd, rn, rm, false, rot,
+ gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, false, rot,
gen_helper_gvec_fcmlas);
break;
case 3:
- gen_gvec_op3_fpst(s, is_q, rd, rn, rm, false, rot,
+ gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, false, rot,
gen_helper_gvec_fcmlad);
break;
default:
@@ -13470,9 +13487,10 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
{
int rot = extract32(insn, 13, 2);
int data = (index << 2) | rot;
- tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
+ tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
vec_full_reg_offset(s, rn),
- vec_full_reg_offset(s, rm), fpst,
+ vec_full_reg_offset(s, rm),
+ vec_full_reg_offset(s, rd), fpst,
is_q ? 16 : 8, vec_full_reg_size(s), data,
size == MO_64
? gen_helper_gvec_fcmlas_idx
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index 37f14af35a..7439dd928b 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -4383,7 +4383,7 @@ static bool trans_FCMLA_zpzzz(DisasContext *s, arg_FCMLA_zpzzz *a)
static bool trans_FCMLA_zzxz(DisasContext *s, arg_FCMLA_zzxz *a)
{
- static gen_helper_gvec_3_ptr * const fns[2] = {
+ static gen_helper_gvec_4_ptr * const fns[2] = {
gen_helper_gvec_fcmlah_idx,
gen_helper_gvec_fcmlas_idx,
};
@@ -4393,9 +4393,10 @@ static bool trans_FCMLA_zzxz(DisasContext *s, arg_FCMLA_zzxz *a)
if (sve_access_check(s)) {
unsigned vsz = vec_full_reg_size(s);
TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
- tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd),
+ tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd),
vec_full_reg_offset(s, a->rn),
vec_full_reg_offset(s, a->rm),
+ vec_full_reg_offset(s, a->ra),
status, vsz, vsz,
a->index * 4 + a->rot,
fns[a->esz - 1]);
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
index f88e572132..b19877e0d3 100644
--- a/target/arm/vec_helper.c
+++ b/target/arm/vec_helper.c
@@ -657,13 +657,11 @@ void HELPER(gvec_fcaddd)(void *vd, void *vn, void *vm,
clear_tail(d, opr_sz, simd_maxsz(desc));
}
-void HELPER(gvec_fcmlah)(void *vd, void *vn, void *vm,
+void HELPER(gvec_fcmlah)(void *vd, void *vn, void *vm, void *va,
void *vfpst, uint32_t desc)
{
uintptr_t opr_sz = simd_oprsz(desc);
- float16 *d = vd;
- float16 *n = vn;
- float16 *m = vm;
+ float16 *d = vd, *n = vn, *m = vm, *a = va;
float_status *fpst = vfpst;
intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
@@ -680,19 +678,17 @@ void HELPER(gvec_fcmlah)(void *vd, void *vn, void *vm,
float16 e4 = e2;
float16 e3 = m[H2(i + 1 - flip)] ^ neg_imag;
- d[H2(i)] = float16_muladd(e2, e1, d[H2(i)], 0, fpst);
- d[H2(i + 1)] = float16_muladd(e4, e3, d[H2(i + 1)], 0, fpst);
+ d[H2(i)] = float16_muladd(e2, e1, a[H2(i)], 0, fpst);
+ d[H2(i + 1)] = float16_muladd(e4, e3, a[H2(i + 1)], 0, fpst);
}
clear_tail(d, opr_sz, simd_maxsz(desc));
}
-void HELPER(gvec_fcmlah_idx)(void *vd, void *vn, void *vm,
+void HELPER(gvec_fcmlah_idx)(void *vd, void *vn, void *vm, void *va,
void *vfpst, uint32_t desc)
{
uintptr_t opr_sz = simd_oprsz(desc);
- float16 *d = vd;
- float16 *n = vn;
- float16 *m = vm;
+ float16 *d = vd, *n = vn, *m = vm, *a = va;
float_status *fpst = vfpst;
intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
@@ -716,20 +712,18 @@ void HELPER(gvec_fcmlah_idx)(void *vd, void *vn, void *vm,
float16 e2 = n[H2(j + flip)];
float16 e4 = e2;
- d[H2(j)] = float16_muladd(e2, e1, d[H2(j)], 0, fpst);
- d[H2(j + 1)] = float16_muladd(e4, e3, d[H2(j + 1)], 0, fpst);
+ d[H2(j)] = float16_muladd(e2, e1, a[H2(j)], 0, fpst);
+ d[H2(j + 1)] = float16_muladd(e4, e3, a[H2(j + 1)], 0, fpst);
}
}
clear_tail(d, opr_sz, simd_maxsz(desc));
}
-void HELPER(gvec_fcmlas)(void *vd, void *vn, void *vm,
+void HELPER(gvec_fcmlas)(void *vd, void *vn, void *vm, void *va,
void *vfpst, uint32_t desc)
{
uintptr_t opr_sz = simd_oprsz(desc);
- float32 *d = vd;
- float32 *n = vn;
- float32 *m = vm;
+ float32 *d = vd, *n = vn, *m = vm, *a = va;
float_status *fpst = vfpst;
intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
@@ -746,19 +740,17 @@ void HELPER(gvec_fcmlas)(void *vd, void *vn, void *vm,
float32 e4 = e2;
float32 e3 = m[H4(i + 1 - flip)] ^ neg_imag;
- d[H4(i)] = float32_muladd(e2, e1, d[H4(i)], 0, fpst);
- d[H4(i + 1)] = float32_muladd(e4, e3, d[H4(i + 1)], 0, fpst);
+ d[H4(i)] = float32_muladd(e2, e1, a[H4(i)], 0, fpst);
+ d[H4(i + 1)] = float32_muladd(e4, e3, a[H4(i + 1)], 0, fpst);
}
clear_tail(d, opr_sz, simd_maxsz(desc));
}
-void HELPER(gvec_fcmlas_idx)(void *vd, void *vn, void *vm,
+void HELPER(gvec_fcmlas_idx)(void *vd, void *vn, void *vm, void *va,
void *vfpst, uint32_t desc)
{
uintptr_t opr_sz = simd_oprsz(desc);
- float32 *d = vd;
- float32 *n = vn;
- float32 *m = vm;
+ float32 *d = vd, *n = vn, *m = vm, *a = va;
float_status *fpst = vfpst;
intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
@@ -782,20 +774,18 @@ void HELPER(gvec_fcmlas_idx)(void *vd, void *vn, void *vm,
float32 e2 = n[H4(j + flip)];
float32 e4 = e2;
- d[H4(j)] = float32_muladd(e2, e1, d[H4(j)], 0, fpst);
- d[H4(j + 1)] = float32_muladd(e4, e3, d[H4(j + 1)], 0, fpst);
+ d[H4(j)] = float32_muladd(e2, e1, a[H4(j)], 0, fpst);
+ d[H4(j + 1)] = float32_muladd(e4, e3, a[H4(j + 1)], 0, fpst);
}
}
clear_tail(d, opr_sz, simd_maxsz(desc));
}
-void HELPER(gvec_fcmlad)(void *vd, void *vn, void *vm,
+void HELPER(gvec_fcmlad)(void *vd, void *vn, void *vm, void *va,
void *vfpst, uint32_t desc)
{
uintptr_t opr_sz = simd_oprsz(desc);
- float64 *d = vd;
- float64 *n = vn;
- float64 *m = vm;
+ float64 *d = vd, *n = vn, *m = vm, *a = va;
float_status *fpst = vfpst;
intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
uint64_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
@@ -812,8 +802,8 @@ void HELPER(gvec_fcmlad)(void *vd, void *vn, void *vm,
float64 e4 = e2;
float64 e3 = m[i + 1 - flip] ^ neg_imag;
- d[i] = float64_muladd(e2, e1, d[i], 0, fpst);
- d[i + 1] = float64_muladd(e4, e3, d[i + 1], 0, fpst);
+ d[i] = float64_muladd(e2, e1, a[i], 0, fpst);
+ d[i + 1] = float64_muladd(e4, e3, a[i + 1], 0, fpst);
}
clear_tail(d, opr_sz, simd_maxsz(desc));
}
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
index 4f8903a6a4..17657c2310 100644
--- a/target/arm/translate-neon.c.inc
+++ b/target/arm/translate-neon.c.inc
@@ -146,7 +146,7 @@ static bool trans_VCMLA(DisasContext *s, arg_VCMLA *a)
{
int opr_sz;
TCGv_ptr fpst;
- gen_helper_gvec_3_ptr *fn_gvec_ptr;
+ gen_helper_gvec_4_ptr *fn_gvec_ptr;
if (!dc_isar_feature(aa32_vcma, s)
|| (a->size == MO_16 && !dc_isar_feature(aa32_fp16_arith, s))) {
@@ -171,9 +171,10 @@ static bool trans_VCMLA(DisasContext *s, arg_VCMLA *a)
fpst = fpstatus_ptr(a->size == MO_16 ? FPST_STD_F16 : FPST_STD);
fn_gvec_ptr = (a->size == MO_16) ?
gen_helper_gvec_fcmlah : gen_helper_gvec_fcmlas;
- tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
+ tcg_gen_gvec_4_ptr(vfp_reg_offset(1, a->vd),
vfp_reg_offset(1, a->vn),
vfp_reg_offset(1, a->vm),
+ vfp_reg_offset(1, a->vd),
fpst, opr_sz, opr_sz, a->rot,
fn_gvec_ptr);
tcg_temp_free_ptr(fpst);
@@ -284,7 +285,7 @@ static bool trans_VFML(DisasContext *s, arg_VFML *a)
static bool trans_VCMLA_scalar(DisasContext *s, arg_VCMLA_scalar *a)
{
- gen_helper_gvec_3_ptr *fn_gvec_ptr;
+ gen_helper_gvec_4_ptr *fn_gvec_ptr;
int opr_sz;
TCGv_ptr fpst;
@@ -313,9 +314,10 @@ static bool trans_VCMLA_scalar(DisasContext *s, arg_VCMLA_scalar *a)
gen_helper_gvec_fcmlah_idx : gen_helper_gvec_fcmlas_idx;
opr_sz = (1 + a->q) * 8;
fpst = fpstatus_ptr(a->size == MO_16 ? FPST_STD_F16 : FPST_STD);
- tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
+ tcg_gen_gvec_4_ptr(vfp_reg_offset(1, a->vd),
vfp_reg_offset(1, a->vn),
vfp_reg_offset(1, a->vm),
+ vfp_reg_offset(1, a->vd),
fpst, opr_sz, opr_sz,
(a->index << 2) | a->rot, fn_gvec_ptr);
tcg_temp_free_ptr(fpst);
--
2.25.1
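
For reference, the call pattern this patch leaves behind is sketched below
with illustrative variable names (it is not a quote of any one call site
above): every caller currently passes the destination register offset as the
addend operand too, which is why no results change until a movprfx
optimization supplies a distinct ra.

    /* Sketch, assuming rd/rn/rm/fpst/data/fn as in the call sites above;
     * today the addend offset equals the destination offset. */
    tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm),
                       vec_full_reg_offset(s, rd),  /* addend: ra == rd */
                       fpst, oprsz, maxsz, data, fn);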