[PATCH v4 17/36] tcg: Split out tcg_gen_nonatomic_cmpxchg_i{32,64}
From: Richard Henderson
Subject: [PATCH v4 17/36] tcg: Split out tcg_gen_nonatomic_cmpxchg_i{32,64}
Date: Sat, 7 Jan 2023 18:37:00 -0800
Normally this is automatically handled by the CF_PARALLEL checks
within tcg_gen_atomic_cmpxchg_i{32,64}, but x86 has a special
case of !PREFIX_LOCK where it always wants the non-atomic version.
Split these out so that x86 does not have to roll its own.
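
As an illustrative sketch only (not part of this patch): with the split
entry points, an x86-style front end can select the helper directly from
the LOCK prefix.  gen_cmpxchg_example is hypothetical; s->prefix,
PREFIX_LOCK and s->mem_index stand in for the usual i386 translator
context.

/*
 * Hypothetical front-end sketch, not part of this patch: pick the
 * entry point from the LOCK prefix instead of open-coding the
 * non-atomic sequence in the target.
 */
static void gen_cmpxchg_example(DisasContext *s, TCGv addr,
                                TCGv_i32 oldv, TCGv_i32 newv, MemOp ot)
{
    if (s->prefix & PREFIX_LOCK) {
        /* Atomicity required; honors CF_PARALLEL internally. */
        tcg_gen_atomic_cmpxchg_i32(oldv, addr, oldv, newv,
                                   s->mem_index, ot);
    } else {
        /* x86 CMPXCHG without LOCK is always non-atomic. */
        tcg_gen_nonatomic_cmpxchg_i32(oldv, addr, oldv, newv,
                                      s->mem_index, ot);
    }
}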
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg-op.h |   4 ++
 tcg/tcg-op.c         | 154 +++++++++++++++++++++++++++----------------
 2 files changed, 101 insertions(+), 57 deletions(-)
diff --git a/include/tcg/tcg-op.h b/include/tcg/tcg-op.h
index 31bf3d287e..839d91c0c7 100644
--- a/include/tcg/tcg-op.h
+++ b/include/tcg/tcg-op.h
@@ -910,6 +910,10 @@ void tcg_gen_atomic_cmpxchg_i64(TCGv_i64, TCGv, TCGv_i64, TCGv_i64,
                                 TCGArg, MemOp);
 void tcg_gen_atomic_cmpxchg_i128(TCGv_i128, TCGv, TCGv_i128, TCGv_i128,
                                  TCGArg, MemOp);
+void tcg_gen_nonatomic_cmpxchg_i32(TCGv_i32, TCGv, TCGv_i32, TCGv_i32,
+ TCGArg, MemOp);
+void tcg_gen_nonatomic_cmpxchg_i64(TCGv_i64, TCGv, TCGv_i64, TCGv_i64,
+ TCGArg, MemOp);
void tcg_gen_nonatomic_cmpxchg_i128(TCGv_i128, TCGv, TCGv_i128, TCGv_i128,
TCGArg, MemOp);
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index 66f9c894ad..e7e4951a3c 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -3323,82 +3323,122 @@ static void * const table_cmpxchg[(MO_SIZE | MO_BSWAP) + 1] = {
WITH_ATOMIC128([MO_128 | MO_BE] = gen_helper_atomic_cmpxchgo_be)
};
+void tcg_gen_nonatomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv,
+ TCGv_i32 newv, TCGArg idx, MemOp memop)
+{
+ TCGv_i32 t1 = tcg_temp_new_i32();
+ TCGv_i32 t2 = tcg_temp_new_i32();
+
+ tcg_gen_ext_i32(t2, cmpv, memop & MO_SIZE);
+
+ tcg_gen_qemu_ld_i32(t1, addr, idx, memop & ~MO_SIGN);
+ tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, t2, newv, t1);
+ tcg_gen_qemu_st_i32(t2, addr, idx, memop);
+ tcg_temp_free_i32(t2);
+
+ if (memop & MO_SIGN) {
+ tcg_gen_ext_i32(retv, t1, memop);
+ } else {
+ tcg_gen_mov_i32(retv, t1);
+ }
+ tcg_temp_free_i32(t1);
+}
+
void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv,
TCGv_i32 newv, TCGArg idx, MemOp memop)
{
- memop = tcg_canonicalize_memop(memop, 0, 0);
+ gen_atomic_cx_i32 gen;
+ MemOpIdx oi;
if (!(tcg_ctx->tb_cflags & CF_PARALLEL)) {
- TCGv_i32 t1 = tcg_temp_new_i32();
- TCGv_i32 t2 = tcg_temp_new_i32();
-
- tcg_gen_ext_i32(t2, cmpv, memop & MO_SIZE);
-
- tcg_gen_qemu_ld_i32(t1, addr, idx, memop & ~MO_SIGN);
- tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, t2, newv, t1);
- tcg_gen_qemu_st_i32(t2, addr, idx, memop);
- tcg_temp_free_i32(t2);
-
- if (memop & MO_SIGN) {
- tcg_gen_ext_i32(retv, t1, memop);
- } else {
- tcg_gen_mov_i32(retv, t1);
- }
- tcg_temp_free_i32(t1);
- } else {
- gen_atomic_cx_i32 gen;
- MemOpIdx oi;
-
- gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
- tcg_debug_assert(gen != NULL);
-
- oi = make_memop_idx(memop & ~MO_SIGN, idx);
- gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi));
-
- if (memop & MO_SIGN) {
- tcg_gen_ext_i32(retv, retv, memop);
- }
+ tcg_gen_nonatomic_cmpxchg_i32(retv, addr, cmpv, newv, idx, memop);
+ return;
}
+
+ memop = tcg_canonicalize_memop(memop, 0, 0);
+ gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
+ tcg_debug_assert(gen != NULL);
+
+ oi = make_memop_idx(memop & ~MO_SIGN, idx);
+ gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi));
+
+ if (memop & MO_SIGN) {
+ tcg_gen_ext_i32(retv, retv, memop);
+ }
+}
+
+void tcg_gen_nonatomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv,
+ TCGv_i64 newv, TCGArg idx, MemOp memop)
+{
+ TCGv_i64 t1, t2;
+
+ if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
+ tcg_gen_nonatomic_cmpxchg_i32(TCGV_LOW(retv), addr, TCGV_LOW(cmpv),
+ TCGV_LOW(newv), idx, memop);
+ if (memop & MO_SIGN) {
+ tcg_gen_sari_i32(TCGV_HIGH(retv), TCGV_LOW(retv), 31);
+ } else {
+ tcg_gen_movi_i32(TCGV_HIGH(retv), 0);
+ }
+ return;
+ }
+
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+
+ tcg_gen_ext_i64(t2, cmpv, memop & MO_SIZE);
+
+ tcg_gen_qemu_ld_i64(t1, addr, idx, memop & ~MO_SIGN);
+ tcg_gen_movcond_i64(TCG_COND_EQ, t2, t1, t2, newv, t1);
+ tcg_gen_qemu_st_i64(t2, addr, idx, memop);
+ tcg_temp_free_i64(t2);
+
+ if (memop & MO_SIGN) {
+ tcg_gen_ext_i64(retv, t1, memop);
+ } else {
+ tcg_gen_mov_i64(retv, t1);
+ }
+ tcg_temp_free_i64(t1);
}
void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv,
TCGv_i64 newv, TCGArg idx, MemOp memop)
{
- memop = tcg_canonicalize_memop(memop, 1, 0);
-
if (!(tcg_ctx->tb_cflags & CF_PARALLEL)) {
- TCGv_i64 t1 = tcg_temp_new_i64();
- TCGv_i64 t2 = tcg_temp_new_i64();
+ tcg_gen_nonatomic_cmpxchg_i64(retv, addr, cmpv, newv, idx, memop);
+ return;
+ }
- tcg_gen_ext_i64(t2, cmpv, memop & MO_SIZE);
-
- tcg_gen_qemu_ld_i64(t1, addr, idx, memop & ~MO_SIGN);
- tcg_gen_movcond_i64(TCG_COND_EQ, t2, t1, t2, newv, t1);
- tcg_gen_qemu_st_i64(t2, addr, idx, memop);
- tcg_temp_free_i64(t2);
-
- if (memop & MO_SIGN) {
- tcg_gen_ext_i64(retv, t1, memop);
- } else {
- tcg_gen_mov_i64(retv, t1);
- }
- tcg_temp_free_i64(t1);
- } else if ((memop & MO_SIZE) == MO_64) {
-#ifdef CONFIG_ATOMIC64
+ if ((memop & MO_SIZE) == MO_64) {
gen_atomic_cx_i64 gen;
- MemOpIdx oi;
+ memop = tcg_canonicalize_memop(memop, 1, 0);
gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
- tcg_debug_assert(gen != NULL);
+ if (gen) {
+ MemOpIdx oi = make_memop_idx(memop, idx);
+ gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi));
+ return;
+ }
- oi = make_memop_idx(memop, idx);
- gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi));
-#else
gen_helper_exit_atomic(cpu_env);
- /* Produce a result, so that we have a well-formed opcode stream
- with respect to uses of the result in the (dead) code following. */
+
+ /*
+ * Produce a result for a well-formed opcode stream. This satisfies
+ * liveness for set before used, which happens before this dead code
+ * is removed.
+ */
tcg_gen_movi_i64(retv, 0);
-#endif /* CONFIG_ATOMIC64 */
+ return;
+ }
+
+ if (TCG_TARGET_REG_BITS == 32) {
+ tcg_gen_atomic_cmpxchg_i32(TCGV_LOW(retv), addr, TCGV_LOW(cmpv),
+ TCGV_LOW(newv), idx, memop);
+ if (memop & MO_SIGN) {
+ tcg_gen_sari_i32(TCGV_HIGH(retv), TCGV_LOW(retv), 31);
+ } else {
+ tcg_gen_movi_i32(TCGV_HIGH(retv), 0);
+ }
} else {
TCGv_i32 c32 = tcg_temp_new_i32();
TCGv_i32 n32 = tcg_temp_new_i32();
--
2.34.1
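
For reference, the movcond sequence in the new non-atomic helpers is the
usual formulation of compare-and-swap with an unconditional store: the
selected value (newv on match, the loaded value otherwise) is always
written back, and retv receives the loaded value.  An illustrative C
model (an assumption-laden simplification: MO_SIGN extension and byte
swapping omitted) is:

#include <stdint.h>

/* Illustrative C model of the non-atomic sequence above (simplified:
 * sign extension and byte swapping omitted). */
static uint32_t nonatomic_cmpxchg32(uint32_t *addr, uint32_t cmpv,
                                    uint32_t newv)
{
    uint32_t old = *addr;              /* tcg_gen_qemu_ld_i32 */
    *addr = (old == cmpv) ? newv : old; /* movcond selects the value;
                                           the store is unconditional */
    return old;                        /* retv receives the loaded value */
}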