 target/riscv/cpu.h                      |  3 -
 target/riscv/cpu.c                      |  2 +-
 target/riscv/cpu_helper.c               | 87 ------------------------
 target/riscv/tcg/tcg-cpu.c              | 88 +++++++++++++++++++++++++
 target/riscv/insn_trans/trans_rvv.c.inc |  2 +-
 5 files changed, 90 insertions(+), 92 deletions(-)
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index 3b1a02b944..d00d1be235 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -704,9 +704,6 @@ static inline uint32_t vext_get_vlmax(uint32_t vlenb, uint32_t vsew,
return vlen >> (vsew + 3 - lmul);
}
-void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *pflags);
-
void riscv_cpu_update_mask(CPURISCVState *env);
bool riscv_cpu_is_32bit(RISCVCPU *cpu);
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index c160b9216b..ca537d0e0a 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -889,7 +889,7 @@ static vaddr riscv_cpu_get_pc(CPUState *cs)
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
- /* Match cpu_get_tb_cpu_state. */
+ /* Match riscv_get_cpu_state. */
if (env->xl == MXL_RV32) {
return env->pc & UINT32_MAX;
}
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index ce7322011d..e18a269358 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -25,7 +25,6 @@
#include "pmu.h"
#include "exec/exec-all.h"
#include "instmap.h"
-#include "tcg/tcg-op.h"
#include "trace.h"
#include "semihosting/common-semi.h"
#include "sysemu/cpu-timers.h"
@@ -62,92 +61,6 @@ int riscv_env_mmu_index(CPURISCVState *env, bool ifetch)
#endif
}
-void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *pflags)
-{
- RISCVCPU *cpu = env_archcpu(env);
- RISCVExtStatus fs, vs;
- uint32_t flags = 0;
-
- *pc = env->xl == MXL_RV32 ? env->pc & UINT32_MAX : env->pc;
- *cs_base = 0;
-
- if (cpu->cfg.ext_zve32f) {
- /*
- * If env->vl equals to VLMAX, we can use generic vector operation
- * expanders (GVEC) to accerlate the vector operations.
- * However, as LMUL could be a fractional number. The maximum
- * vector size can be operated might be less than 8 bytes,
- * which is not supported by GVEC. So we set vl_eq_vlmax flag to true
- * only when maxsz >= 8 bytes.
- */
-
- /* lmul encoded as in DisasContext::lmul */
- int8_t lmul = sextract32(FIELD_EX64(env->vtype, VTYPE, VLMUL), 0, 3);
- uint32_t vsew = FIELD_EX64(env->vtype, VTYPE, VSEW);
- uint32_t vlmax = vext_get_vlmax(cpu->cfg.vlenb, vsew, lmul);
- uint32_t maxsz = vlmax << vsew;
- bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) &&
- (maxsz >= 8);
- flags = FIELD_DP32(flags, TB_FLAGS, VILL, env->vill);
- flags = FIELD_DP32(flags, TB_FLAGS, SEW, vsew);
- flags = FIELD_DP32(flags, TB_FLAGS, LMUL,
- FIELD_EX64(env->vtype, VTYPE, VLMUL));
- flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax);
- flags = FIELD_DP32(flags, TB_FLAGS, VTA,
- FIELD_EX64(env->vtype, VTYPE, VTA));
- flags = FIELD_DP32(flags, TB_FLAGS, VMA,
- FIELD_EX64(env->vtype, VTYPE, VMA));
- flags = FIELD_DP32(flags, TB_FLAGS, VSTART_EQ_ZERO, env->vstart == 0);
- } else {
- flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1);
- }
-
-#ifdef CONFIG_USER_ONLY
- fs = EXT_STATUS_DIRTY;
- vs = EXT_STATUS_DIRTY;
-#else
- flags = FIELD_DP32(flags, TB_FLAGS, PRIV, env->priv);
-
- flags |= riscv_env_mmu_index(env, 0);
- fs = get_field(env->mstatus, MSTATUS_FS);
- vs = get_field(env->mstatus, MSTATUS_VS);
-
- if (env->virt_enabled) {
- flags = FIELD_DP32(flags, TB_FLAGS, VIRT_ENABLED, 1);
- /*
- * Merge DISABLED and !DIRTY states using MIN.
- * We will set both fields when dirtying.
- */
- fs = MIN(fs, get_field(env->mstatus_hs, MSTATUS_FS));
- vs = MIN(vs, get_field(env->mstatus_hs, MSTATUS_VS));
- }
-
- /* With Zfinx, floating point is enabled/disabled by Smstateen. */
- if (!riscv_has_ext(env, RVF)) {
- fs = (smstateen_acc_ok(env, 0, SMSTATEEN0_FCSR) == RISCV_EXCP_NONE)
- ? EXT_STATUS_DIRTY : EXT_STATUS_DISABLED;
- }
-
- if (cpu->cfg.debug && !icount_enabled()) {
- flags = FIELD_DP32(flags, TB_FLAGS, ITRIGGER, env->itrigger_enabled);
- }
-#endif
-
- flags = FIELD_DP32(flags, TB_FLAGS, FS, fs);
- flags = FIELD_DP32(flags, TB_FLAGS, VS, vs);
- flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl);
- flags = FIELD_DP32(flags, TB_FLAGS, AXL, cpu_address_xl(env));
- if (env->cur_pmmask != 0) {
- flags = FIELD_DP32(flags, TB_FLAGS, PM_MASK_ENABLED, 1);
- }
- if (env->cur_pmbase != 0) {
- flags = FIELD_DP32(flags, TB_FLAGS, PM_BASE_ENABLED, 1);
- }
-
- *pflags = flags;
-}
-
void riscv_cpu_update_mask(CPURISCVState *env)
{
target_ulong mask = 0, base = 0;
diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
index ab6db817db..934007673e 100644
--- a/target/riscv/tcg/tcg-cpu.c
+++ b/target/riscv/tcg/tcg-cpu.c
@@ -31,6 +31,7 @@
#include "hw/core/accel-cpu.h"
#include "hw/core/tcg-cpu-ops.h"
#include "tcg/tcg.h"
+#include "sysemu/cpu-timers.h"
/* Hash that stores user set extensions */
static GHashTable *multi_ext_user_opts;
@@ -129,10 +130,97 @@ static void riscv_restore_state_to_opc(CPUState *cs,
env->bins = data[1];
}
+static void riscv_get_cpu_state(CPURISCVState *env, vaddr *pc,
+ uint64_t *cs_base, uint32_t *pflags)
+{
+ RISCVCPU *cpu = env_archcpu(env);
+ RISCVExtStatus fs, vs;
+ uint32_t flags = 0;
+
+ *pc = env->xl == MXL_RV32 ? env->pc & UINT32_MAX : env->pc;
+ *cs_base = 0;
+
+ if (cpu->cfg.ext_zve32f) {
+ /*
+ * If env->vl equals to VLMAX, we can use generic vector operation
+ * expanders (GVEC) to accerlate the vector operations.
+ * However, as LMUL could be a fractional number. The maximum
+ * vector size can be operated might be less than 8 bytes,
+ * which is not supported by GVEC. So we set vl_eq_vlmax flag to true
+ * only when maxsz >= 8 bytes.
+ */
+
+ /* lmul encoded as in DisasContext::lmul */
+ int8_t lmul = sextract32(FIELD_EX64(env->vtype, VTYPE, VLMUL), 0, 3);
+ uint32_t vsew = FIELD_EX64(env->vtype, VTYPE, VSEW);
+ uint32_t vlmax = vext_get_vlmax(cpu->cfg.vlenb, vsew, lmul);
+ uint32_t maxsz = vlmax << vsew;
+ bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) &&
+ (maxsz >= 8);
+ flags = FIELD_DP32(flags, TB_FLAGS, VILL, env->vill);
+ flags = FIELD_DP32(flags, TB_FLAGS, SEW, vsew);
+ flags = FIELD_DP32(flags, TB_FLAGS, LMUL,
+ FIELD_EX64(env->vtype, VTYPE, VLMUL));
+ flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax);
+ flags = FIELD_DP32(flags, TB_FLAGS, VTA,
+ FIELD_EX64(env->vtype, VTYPE, VTA));
+ flags = FIELD_DP32(flags, TB_FLAGS, VMA,
+ FIELD_EX64(env->vtype, VTYPE, VMA));
+ flags = FIELD_DP32(flags, TB_FLAGS, VSTART_EQ_ZERO, env->vstart == 0);
+ } else {
+ flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1);
+ }
+
+#ifdef CONFIG_USER_ONLY
+ fs = EXT_STATUS_DIRTY;
+ vs = EXT_STATUS_DIRTY;
+#else
+ flags = FIELD_DP32(flags, TB_FLAGS, PRIV, env->priv);
+
+ flags |= riscv_env_mmu_index(env, 0);
+ fs = get_field(env->mstatus, MSTATUS_FS);
+ vs = get_field(env->mstatus, MSTATUS_VS);
+
+ if (env->virt_enabled) {
+ flags = FIELD_DP32(flags, TB_FLAGS, VIRT_ENABLED, 1);
+ /*
+ * Merge DISABLED and !DIRTY states using MIN.
+ * We will set both fields when dirtying.
+ */
+ fs = MIN(fs, get_field(env->mstatus_hs, MSTATUS_FS));
+ vs = MIN(vs, get_field(env->mstatus_hs, MSTATUS_VS));
+ }
+
+ /* With Zfinx, floating point is enabled/disabled by Smstateen. */
+ if (!riscv_has_ext(env, RVF)) {
+ fs = (smstateen_acc_ok(env, 0, SMSTATEEN0_FCSR) == RISCV_EXCP_NONE)
+ ? EXT_STATUS_DIRTY : EXT_STATUS_DISABLED;
+ }
+
+ if (cpu->cfg.debug && !icount_enabled()) {
+ flags = FIELD_DP32(flags, TB_FLAGS, ITRIGGER, env->itrigger_enabled);
+ }
+#endif
+
+ flags = FIELD_DP32(flags, TB_FLAGS, FS, fs);
+ flags = FIELD_DP32(flags, TB_FLAGS, VS, vs);
+ flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl);
+ flags = FIELD_DP32(flags, TB_FLAGS, AXL, cpu_address_xl(env));
+ if (env->cur_pmmask != 0) {
+ flags = FIELD_DP32(flags, TB_FLAGS, PM_MASK_ENABLED, 1);
+ }
+ if (env->cur_pmbase != 0) {
+ flags = FIELD_DP32(flags, TB_FLAGS, PM_BASE_ENABLED, 1);
+ }
+
+ *pflags = flags;
+}
+
static const TCGCPUOps riscv_tcg_ops = {
.initialize = riscv_translate_init,
.synchronize_from_tb = riscv_cpu_synchronize_from_tb,
.restore_state_to_opc = riscv_restore_state_to_opc,
+ .get_cpu_state = riscv_get_cpu_state,
#ifndef CONFIG_USER_ONLY
.tlb_fill = riscv_cpu_tlb_fill,
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
index e42728990e..3c16c4852b 100644
--- a/target/riscv/insn_trans/trans_rvv.c.inc
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
@@ -578,7 +578,7 @@ static bool vext_check_slide(DisasContext *s, int vd, int vs2,
}
/*
- * In cpu_get_tb_cpu_state(), set VILL if RVV was not present.
+ * In riscv_get_cpu_state(), set VILL if RVV was not present.
* So RVV is also be checked in this function.
*/
static bool vext_check_isa_ill(DisasContext *s)