qemu-riscv
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Qemu-riscv] [Qemu-devel] [PATCH 16/26] target/riscv: Convert to CPU


From: Alistair Francis
Subject: Re: [Qemu-riscv] [Qemu-devel] [PATCH 16/26] target/riscv: Convert to CPUClass::tlb_fill
Date: Wed, 3 Apr 2019 16:02:44 -0700

On Tue, Apr 2, 2019 at 8:57 PM Richard Henderson
<address@hidden> wrote:
>
> Note that env->pc is removed from the qemu_log as that value is garbage.
> The PC isn't recovered until cpu_restore_state, called from
> cpu_loop_exit_restore, called from riscv_raise_exception.
>
> Cc: address@hidden
> Cc: Palmer Dabbelt <address@hidden>
> Cc: Alistair Francis <address@hidden>
> Signed-off-by: Richard Henderson <address@hidden>

This is going to conflict with some two-stage MMU work I have, but it
shouldn't be too bad.

Reviewed-by: Alistair Francis <address@hidden>

Alistair

> ---
>  target/riscv/cpu.h        |  5 +++--
>  target/riscv/cpu.c        |  5 ++---
>  target/riscv/cpu_helper.c | 46 ++++++++++++++++++---------------------
>  3 files changed, 26 insertions(+), 30 deletions(-)
>
> diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
> index 20bce8742e..40c1254408 100644
> --- a/target/riscv/cpu.h
> +++ b/target/riscv/cpu.h
> @@ -261,8 +261,9 @@ hwaddr riscv_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
>  void  riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
>                                      MMUAccessType access_type, int mmu_idx,
>                                      uintptr_t retaddr);
> -int riscv_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size,
> -                              int rw, int mmu_idx);
> +bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
> +                        MMUAccessType access_type, int mmu_idx,
> +                        bool probe, uintptr_t retaddr);
>  char *riscv_isa_string(RISCVCPU *cpu);
>  void riscv_cpu_list(FILE *f, fprintf_function cpu_fprintf);
>
> diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
> index d61bce6d55..e9f569c665 100644
> --- a/target/riscv/cpu.c
> +++ b/target/riscv/cpu.c
> @@ -355,9 +355,8 @@ static void riscv_cpu_class_init(ObjectClass *c, void *data)
>  #endif
>      cc->gdb_stop_before_watchpoint = true;
>      cc->disas_set_info = riscv_cpu_disas_set_info;
> -#ifdef CONFIG_USER_ONLY
> -    cc->handle_mmu_fault = riscv_cpu_handle_mmu_fault;
> -#else
> +    cc->tlb_fill = riscv_cpu_tlb_fill;
> +#ifndef CONFIG_USER_ONLY
>      cc->do_unaligned_access = riscv_cpu_do_unaligned_access;
>      cc->get_phys_page_debug = riscv_cpu_get_phys_page_debug;
>  #endif
> diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
> index b17f169681..2535435260 100644
> --- a/target/riscv/cpu_helper.c
> +++ b/target/riscv/cpu_helper.c
> @@ -379,53 +379,49 @@ void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
>      riscv_raise_exception(env, cs->exception_index, retaddr);
>  }
>
> -/* called by qemu's softmmu to fill the qemu tlb */
>  void tlb_fill(CPUState *cs, target_ulong addr, int size,
>          MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
>  {
> -    int ret;
> -    ret = riscv_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
> -    if (ret == TRANSLATE_FAIL) {
> -        RISCVCPU *cpu = RISCV_CPU(cs);
> -        CPURISCVState *env = &cpu->env;
> -        riscv_raise_exception(env, cs->exception_index, retaddr);
> -    }
> +    riscv_cpu_tlb_fill(cs, addr, size, access_type, mmu_idx, false, retaddr);
>  }
> -
>  #endif
>
> -int riscv_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
> -        int rw, int mmu_idx)
> +bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
> +                        MMUAccessType access_type, int mmu_idx,
> +                        bool probe, uintptr_t retaddr)
>  {
> +#ifndef CONFIG_USER_ONLY
>      RISCVCPU *cpu = RISCV_CPU(cs);
>      CPURISCVState *env = &cpu->env;
> -#if !defined(CONFIG_USER_ONLY)
>      hwaddr pa = 0;
>      int prot;
> -#endif
>      int ret = TRANSLATE_FAIL;
>
> -    qemu_log_mask(CPU_LOG_MMU,
> -            "%s pc " TARGET_FMT_lx " ad %" VADDR_PRIx " rw %d mmu_idx \
> -             %d\n", __func__, env->pc, address, rw, mmu_idx);
> +    qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
> +                  __func__, address, access_type, mmu_idx);
> +
> +    ret = get_physical_address(env, &pa, &prot, address, access_type, mmu_idx);
>
> -#if !defined(CONFIG_USER_ONLY)
> -    ret = get_physical_address(env, &pa, &prot, address, rw, mmu_idx);
>      qemu_log_mask(CPU_LOG_MMU,
> -            "%s address=%" VADDR_PRIx " ret %d physical " TARGET_FMT_plx
> -             " prot %d\n", __func__, address, ret, pa, prot);
> +                  "%s address=%" VADDR_PRIx " ret %d physical " TARGET_FMT_plx
> +                  " prot %d\n", __func__, address, ret, pa, prot);
> +
>      if (riscv_feature(env, RISCV_FEATURE_PMP) &&
> -        !pmp_hart_has_privs(env, pa, TARGET_PAGE_SIZE, 1 << rw)) {
> +        !pmp_hart_has_privs(env, pa, TARGET_PAGE_SIZE, 1 << access_type)) {
>          ret = TRANSLATE_FAIL;
>      }
>      if (ret == TRANSLATE_SUCCESS) {
>          tlb_set_page(cs, address & TARGET_PAGE_MASK, pa & TARGET_PAGE_MASK,
>                       prot, mmu_idx, TARGET_PAGE_SIZE);
> -    } else if (ret == TRANSLATE_FAIL) {
> -        raise_mmu_exception(env, address, rw);
> +        return true;
> +    } else if (probe) {
> +        return false;
> +    } else {
> +        raise_mmu_exception(env, address, access_type);
> +        riscv_raise_exception(env, cs->exception_index, retaddr);
>      }
>  #else
> -    switch (rw) {
> +    switch (access_type) {
>      case MMU_INST_FETCH:
>          cs->exception_index = RISCV_EXCP_INST_PAGE_FAULT;
>          break;
> @@ -436,8 +432,8 @@ int riscv_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
>          cs->exception_index = RISCV_EXCP_STORE_PAGE_FAULT;
>          break;
>      }
> +    cpu_loop_exit_restore(cs, retaddr);
>  #endif
> -    return ret;
>  }
>
>  /*
> --
> 2.17.1
>
>



reply via email to

[Prev in Thread] Current Thread [Next in Thread]