[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH v3 18/23] i386: Dot product AVX helper prep
From: Paolo Bonzini
Subject: [PATCH v3 18/23] i386: Dot product AVX helper prep
Date: Thu, 1 Sep 2022 09:48:37 +0200
From: Paul Brook <paul@nowt.org>
Make the dpps and dppd helpers AVX-ready
I can't see any obvious reason why dppd shouldn't work on 256 bit ymm
registers, but both AMD and Intel agree that it's xmm only.
Signed-off-by: Paul Brook <paul@nowt.org>
Message-Id: <20220424220204.2493824-17-paul@nowt.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
target/i386/ops_sse.h | 80 ++++++++++++++++++++++++-------------------
1 file changed, 45 insertions(+), 35 deletions(-)
diff --git a/target/i386/ops_sse.h b/target/i386/ops_sse.h
index de874e136f..59ed30071e 100644
--- a/target/i386/ops_sse.h
+++ b/target/i386/ops_sse.h
@@ -1903,55 +1903,64 @@ SSE_HELPER_I(helper_blendps, L, 4, FBLENDP)
SSE_HELPER_I(helper_blendpd, Q, 2, FBLENDP)
SSE_HELPER_I(helper_pblendw, W, 8, FBLENDP)
-void glue(helper_dpps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t mask)
+void glue(helper_dpps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
+ uint32_t mask)
{
+ Reg *v = d;
float32 prod1, prod2, temp2, temp3, temp4;
+ int i;
- /*
- * We must evaluate (A+B)+(C+D), not ((A+B)+C)+D
- * to correctly round the intermediate results
- */
- if (mask & (1 << 4)) {
- prod1 = float32_mul(d->ZMM_S(0), s->ZMM_S(0), &env->sse_status);
- } else {
- prod1 = float32_zero;
- }
- if (mask & (1 << 5)) {
- prod2 = float32_mul(d->ZMM_S(1), s->ZMM_S(1), &env->sse_status);
- } else {
- prod2 = float32_zero;
- }
- temp2 = float32_add(prod1, prod2, &env->sse_status);
- if (mask & (1 << 6)) {
- prod1 = float32_mul(d->ZMM_S(2), s->ZMM_S(2), &env->sse_status);
- } else {
- prod1 = float32_zero;
- }
- if (mask & (1 << 7)) {
- prod2 = float32_mul(d->ZMM_S(3), s->ZMM_S(3), &env->sse_status);
- } else {
- prod2 = float32_zero;
- }
- temp3 = float32_add(prod1, prod2, &env->sse_status);
- temp4 = float32_add(temp2, temp3, &env->sse_status);
+ for (i = 0; i < 2 << SHIFT; i += 4) {
+ /*
+ * We must evaluate (A+B)+(C+D), not ((A+B)+C)+D
+ * to correctly round the intermediate results
+ */
+ if (mask & (1 << 4)) {
+ prod1 = float32_mul(v->ZMM_S(i), s->ZMM_S(i), &env->sse_status);
+ } else {
+ prod1 = float32_zero;
+ }
+ if (mask & (1 << 5)) {
+            prod2 = float32_mul(v->ZMM_S(i+1), s->ZMM_S(i+1),
+                                &env->sse_status);
+ } else {
+ prod2 = float32_zero;
+ }
+ temp2 = float32_add(prod1, prod2, &env->sse_status);
+ if (mask & (1 << 6)) {
+            prod1 = float32_mul(v->ZMM_S(i+2), s->ZMM_S(i+2),
+                                &env->sse_status);
+ } else {
+ prod1 = float32_zero;
+ }
+ if (mask & (1 << 7)) {
+            prod2 = float32_mul(v->ZMM_S(i+3), s->ZMM_S(i+3),
+                                &env->sse_status);
+ } else {
+ prod2 = float32_zero;
+ }
+ temp3 = float32_add(prod1, prod2, &env->sse_status);
+ temp4 = float32_add(temp2, temp3, &env->sse_status);
- d->ZMM_S(0) = (mask & (1 << 0)) ? temp4 : float32_zero;
- d->ZMM_S(1) = (mask & (1 << 1)) ? temp4 : float32_zero;
- d->ZMM_S(2) = (mask & (1 << 2)) ? temp4 : float32_zero;
- d->ZMM_S(3) = (mask & (1 << 3)) ? temp4 : float32_zero;
+ d->ZMM_S(i) = (mask & (1 << 0)) ? temp4 : float32_zero;
+ d->ZMM_S(i+1) = (mask & (1 << 1)) ? temp4 : float32_zero;
+ d->ZMM_S(i+2) = (mask & (1 << 2)) ? temp4 : float32_zero;
+ d->ZMM_S(i+3) = (mask & (1 << 3)) ? temp4 : float32_zero;
+ }
}
-void glue(helper_dppd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t mask)
+#if SHIFT == 1
+/* Oddly, there is no ymm version of dppd */
+void glue(helper_dppd, SUFFIX)(CPUX86State *env,
+ Reg *d, Reg *s, uint32_t mask)
{
+ Reg *v = d;
float64 prod1, prod2, temp2;
if (mask & (1 << 4)) {
- prod1 = float64_mul(d->ZMM_D(0), s->ZMM_D(0), &env->sse_status);
+ prod1 = float64_mul(v->ZMM_D(0), s->ZMM_D(0), &env->sse_status);
} else {
prod1 = float64_zero;
}
if (mask & (1 << 5)) {
- prod2 = float64_mul(d->ZMM_D(1), s->ZMM_D(1), &env->sse_status);
+ prod2 = float64_mul(v->ZMM_D(1), s->ZMM_D(1), &env->sse_status);
} else {
prod2 = float64_zero;
}
@@ -1959,6 +1968,7 @@ void glue(helper_dppd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t mask)
d->ZMM_D(0) = (mask & (1 << 0)) ? temp2 : float64_zero;
d->ZMM_D(1) = (mask & (1 << 1)) ? temp2 : float64_zero;
}
+#endif
void glue(helper_mpsadbw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
uint32_t offset)
--
2.37.1
- Re: [PATCH v3 07/23] i386: check SSE table flags instead of hardcoding opcodes, (continued)
- [PATCH v3 05/23] i386: Rework sse_op_table6/7, Paolo Bonzini, 2022/09/01
- [PATCH v3 03/23] i386: Add ZMM_OFFSET macro, Paolo Bonzini, 2022/09/01
- [PATCH v3 06/23] i386: Move 3DNOW decoder, Paolo Bonzini, 2022/09/01
- [PATCH v3 08/23] i386: isolate MMX code more, Paolo Bonzini, 2022/09/01
- [PATCH v3 09/23] i386: Add size suffix to vector FP helpers, Paolo Bonzini, 2022/09/01
- [PATCH v3 04/23] i386: Rework sse_op_table1, Paolo Bonzini, 2022/09/01
- [PATCH v3 10/23] i386: do not cast gen_helper_* function pointers, Paolo Bonzini, 2022/09/01
- [PATCH v3 11/23] i386: Add CHECK_NO_VEX, Paolo Bonzini, 2022/09/01
- [PATCH v3 18/23] i386: Dot product AVX helper prep, Paolo Bonzini <=
- [PATCH v3 21/23] i386: Rewrite blendv helpers, Paolo Bonzini, 2022/09/01
- [PATCH v3 12/23] i386: Rewrite vector shift helper, Paolo Bonzini, 2022/09/01
- [PATCH v3 14/23] i386: Misc integer AVX helper prep, Paolo Bonzini, 2022/09/01
- [PATCH v3 13/23] i386: Rewrite simple integer vector helpers, Paolo Bonzini, 2022/09/01
- [PATCH v3 17/23] i386: reimplement AVX comparison helpers, Paolo Bonzini, 2022/09/01
- [PATCH v3 20/23] i386: Misc AVX helper prep, Paolo Bonzini, 2022/09/01
- [PATCH v3 19/23] i386: Destructive FP helpers for AVX, Paolo Bonzini, 2022/09/01
- [PATCH v3 15/23] i386: Destructive vector helpers for AVX, Paolo Bonzini, 2022/09/01