[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH for-8.0 09/29] tcg/tci: Use cpu_{ld,st}_mmu
From: Richard Henderson
Subject: [PATCH for-8.0 09/29] tcg/tci: Use cpu_{ld,st}_mmu
Date: Fri, 18 Nov 2022 01:47:34 -0800
Unify the softmmu and the user-only paths by using the
official memory interface. Avoid double logging of memory
operations to plugins by relying on the ones within the
cpu_*_mmu functions.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/tcg-op.c | 9 +++-
tcg/tci.c | 127 ++++++++-------------------------------------------
2 files changed, 26 insertions(+), 110 deletions(-)
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index e7e4951a3c..1f81c3dbb3 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -2914,7 +2914,12 @@ static void tcg_gen_req_mo(TCGBar type)
static inline TCGv plugin_prep_mem_callbacks(TCGv vaddr)
{
-#ifdef CONFIG_PLUGIN
+ /*
+ * With TCI, we get memory tracing via cpu_{ld,st}_mmu.
+ * No need to instrument memory operations inline, and
+ * we don't want to log the same memory operation twice.
+ */
+#if defined(CONFIG_PLUGIN) && !defined(CONFIG_TCG_INTERPRETER)
if (tcg_ctx->plugin_insn != NULL) {
/* Save a copy of the vaddr for use after a load. */
TCGv temp = tcg_temp_new();
@@ -2928,7 +2933,7 @@ static inline TCGv plugin_prep_mem_callbacks(TCGv vaddr)
static void plugin_gen_mem_callbacks(TCGv vaddr, MemOpIdx oi,
enum qemu_plugin_mem_rw rw)
{
-#ifdef CONFIG_PLUGIN
+#if defined(CONFIG_PLUGIN) && !defined(CONFIG_TCG_INTERPRETER)
if (tcg_ctx->plugin_insn != NULL) {
qemu_plugin_meminfo_t info = make_plugin_meminfo(oi, rw);
plugin_gen_empty_mem_callback(vaddr, info);
diff --git a/tcg/tci.c b/tcg/tci.c
index 022fe9d0f8..52fdd3f5ec 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -293,87 +293,34 @@ static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr,
MemOp mop = get_memop(oi);
uintptr_t ra = (uintptr_t)tb_ptr;
-#ifdef CONFIG_SOFTMMU
switch (mop & (MO_BSWAP | MO_SSIZE)) {
case MO_UB:
- return helper_ret_ldub_mmu(env, taddr, oi, ra);
+ return cpu_ldb_mmu(env, taddr, oi, ra);
case MO_SB:
- return helper_ret_ldsb_mmu(env, taddr, oi, ra);
+ return (int8_t)cpu_ldb_mmu(env, taddr, oi, ra);
case MO_LEUW:
- return helper_le_lduw_mmu(env, taddr, oi, ra);
+ return cpu_ldw_le_mmu(env, taddr, oi, ra);
case MO_LESW:
- return helper_le_ldsw_mmu(env, taddr, oi, ra);
+ return (int16_t)cpu_ldw_le_mmu(env, taddr, oi, ra);
case MO_LEUL:
- return helper_le_ldul_mmu(env, taddr, oi, ra);
+ return cpu_ldl_le_mmu(env, taddr, oi, ra);
case MO_LESL:
- return helper_le_ldsl_mmu(env, taddr, oi, ra);
+ return (int32_t)cpu_ldl_le_mmu(env, taddr, oi, ra);
case MO_LEUQ:
- return helper_le_ldq_mmu(env, taddr, oi, ra);
+ return cpu_ldq_le_mmu(env, taddr, oi, ra);
case MO_BEUW:
- return helper_be_lduw_mmu(env, taddr, oi, ra);
+ return cpu_ldw_be_mmu(env, taddr, oi, ra);
case MO_BESW:
- return helper_be_ldsw_mmu(env, taddr, oi, ra);
+ return (int16_t)cpu_ldw_be_mmu(env, taddr, oi, ra);
case MO_BEUL:
- return helper_be_ldul_mmu(env, taddr, oi, ra);
+ return cpu_ldl_be_mmu(env, taddr, oi, ra);
case MO_BESL:
- return helper_be_ldsl_mmu(env, taddr, oi, ra);
+ return (int32_t)cpu_ldl_be_mmu(env, taddr, oi, ra);
case MO_BEUQ:
- return helper_be_ldq_mmu(env, taddr, oi, ra);
+ return cpu_ldq_be_mmu(env, taddr, oi, ra);
default:
g_assert_not_reached();
}
-#else
- void *haddr = g2h(env_cpu(env), taddr);
- unsigned a_mask = (1u << get_alignment_bits(mop)) - 1;
- uint64_t ret;
-
- set_helper_retaddr(ra);
- if (taddr & a_mask) {
- helper_unaligned_ld(env, taddr);
- }
- switch (mop & (MO_BSWAP | MO_SSIZE)) {
- case MO_UB:
- ret = ldub_p(haddr);
- break;
- case MO_SB:
- ret = ldsb_p(haddr);
- break;
- case MO_LEUW:
- ret = lduw_le_p(haddr);
- break;
- case MO_LESW:
- ret = ldsw_le_p(haddr);
- break;
- case MO_LEUL:
- ret = (uint32_t)ldl_le_p(haddr);
- break;
- case MO_LESL:
- ret = (int32_t)ldl_le_p(haddr);
- break;
- case MO_LEUQ:
- ret = ldq_le_p(haddr);
- break;
- case MO_BEUW:
- ret = lduw_be_p(haddr);
- break;
- case MO_BESW:
- ret = ldsw_be_p(haddr);
- break;
- case MO_BEUL:
- ret = (uint32_t)ldl_be_p(haddr);
- break;
- case MO_BESL:
- ret = (int32_t)ldl_be_p(haddr);
- break;
- case MO_BEUQ:
- ret = ldq_be_p(haddr);
- break;
- default:
- g_assert_not_reached();
- }
- clear_helper_retaddr();
- return ret;
-#endif
}
static void tci_qemu_st(CPUArchState *env, target_ulong taddr, uint64_t val,
@@ -382,67 +329,31 @@ static void tci_qemu_st(CPUArchState *env, target_ulong taddr, uint64_t val,
MemOp mop = get_memop(oi);
uintptr_t ra = (uintptr_t)tb_ptr;
-#ifdef CONFIG_SOFTMMU
switch (mop & (MO_BSWAP | MO_SIZE)) {
case MO_UB:
- helper_ret_stb_mmu(env, taddr, val, oi, ra);
+ cpu_stb_mmu(env, taddr, val, oi, ra);
break;
case MO_LEUW:
- helper_le_stw_mmu(env, taddr, val, oi, ra);
+ cpu_stw_le_mmu(env, taddr, val, oi, ra);
break;
case MO_LEUL:
- helper_le_stl_mmu(env, taddr, val, oi, ra);
+ cpu_stl_le_mmu(env, taddr, val, oi, ra);
break;
case MO_LEUQ:
- helper_le_stq_mmu(env, taddr, val, oi, ra);
+ cpu_stq_le_mmu(env, taddr, val, oi, ra);
break;
case MO_BEUW:
- helper_be_stw_mmu(env, taddr, val, oi, ra);
+ cpu_stw_be_mmu(env, taddr, val, oi, ra);
break;
case MO_BEUL:
- helper_be_stl_mmu(env, taddr, val, oi, ra);
+ cpu_stl_be_mmu(env, taddr, val, oi, ra);
break;
case MO_BEUQ:
- helper_be_stq_mmu(env, taddr, val, oi, ra);
+ cpu_stq_be_mmu(env, taddr, val, oi, ra);
break;
default:
g_assert_not_reached();
}
-#else
- void *haddr = g2h(env_cpu(env), taddr);
- unsigned a_mask = (1u << get_alignment_bits(mop)) - 1;
-
- set_helper_retaddr(ra);
- if (taddr & a_mask) {
- helper_unaligned_st(env, taddr);
- }
- switch (mop & (MO_BSWAP | MO_SIZE)) {
- case MO_UB:
- stb_p(haddr, val);
- break;
- case MO_LEUW:
- stw_le_p(haddr, val);
- break;
- case MO_LEUL:
- stl_le_p(haddr, val);
- break;
- case MO_LEUQ:
- stq_le_p(haddr, val);
- break;
- case MO_BEUW:
- stw_be_p(haddr, val);
- break;
- case MO_BEUL:
- stl_be_p(haddr, val);
- break;
- case MO_BEUQ:
- stq_be_p(haddr, val);
- break;
- default:
- g_assert_not_reached();
- }
- clear_helper_retaddr();
-#endif
}
#if TCG_TARGET_REG_BITS == 64
--
2.34.1
- [PATCH for-8.0 03/29] accel/tcg: Add cpu_in_serial_context, (continued)
- [PATCH for-8.0 03/29] accel/tcg: Add cpu_in_serial_context, Richard Henderson, 2022/11/18
- [PATCH for-8.0 04/29] accel/tcg: Introduce tlb_read_idx, Richard Henderson, 2022/11/18
- [PATCH for-8.0 05/29] accel/tcg: Reorg system mode load helpers, Richard Henderson, 2022/11/18
- [PATCH for-8.0 01/29] include/qemu/cpuid: Introduce xgetbv_low, Richard Henderson, 2022/11/18
- [PATCH for-8.0 06/29] accel/tcg: Reorg system mode store helpers, Richard Henderson, 2022/11/18
- [PATCH for-8.0 07/29] accel/tcg: Honor atomicity of loads, Richard Henderson, 2022/11/18
- [PATCH for-8.0 09/29] tcg/tci: Use cpu_{ld,st}_mmu, Richard Henderson <=
- [PATCH for-8.0 08/29] accel/tcg: Honor atomicity of stores, Richard Henderson, 2022/11/18
- [PATCH for-8.0 11/29] accel/tcg: Implement helper_{ld, st}*_mmu for user-only, Richard Henderson, 2022/11/18
- [PATCH for-8.0 10/29] tcg: Unify helper_{be,le}_{ld,st}*, Richard Henderson, 2022/11/18
- [PATCH for-8.0 12/29] tcg: Add 128-bit guest memory primitives, Richard Henderson, 2022/11/18
- [PATCH for-8.0 21/29] tcg/i386: Introduce tcg_out_mov2, Richard Henderson, 2022/11/18
- [PATCH for-8.0 24/29] tcg/i386: Replace is64 with type in qemu_ld/st routines, Richard Henderson, 2022/11/18