From: Alistair Francis
Subject: Re: [Qemu-riscv] [Qemu-devel] [PATCH v5 05/30] riscv: hw: Change to use qemu_log_mask(LOG_GUEST_ERROR, ...) instead
Date: Fri, 23 Aug 2019 10:38:57 -0700

On Thu, Aug 22, 2019 at 10:21 PM Bin Meng <address@hidden> wrote:
>
> Replace the calls to hw_error() with qemu_log_mask(LOG_GUEST_ERROR, ...)
> in the various SiFive models.
>
> Signed-off-by: Bin Meng <address@hidden>

Thanks for this cleanup!

Reviewed-by: Alistair Francis <address@hidden>

Alistair
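
For context on the change quoted here: hw_error() prints a "hardware error"
report, dumps CPU state and then abort()s the whole of QEMU, so a guest
poking an unimplemented register offset could take down the emulator.
qemu_log_mask(LOG_GUEST_ERROR, ...) instead reports the access only when
guest-error logging is enabled (run QEMU with "-d guest_errors") and lets
emulation continue. A minimal sketch of the before/after pattern; the
function name mmio_read is made up for illustration and is not taken from
the patch:

    #include "qemu/osdep.h"
    #include "hw/sysbus.h"
    #include "qemu/log.h"

    /* Hypothetical MMIO read handler, sketching the pattern the patch
     * applies to the sifive_prci, sifive_test and sifive_uart models.
     */
    static uint64_t mmio_read(void *opaque, hwaddr addr, unsigned int size)
    {
        /* ...recognized offsets would be handled in a switch above... */

        /* Old style: report and abort() all of QEMU on a bad access:
         *     hw_error("%s: read: addr=0x%x\n", __func__, (int)addr);
         */

        /* New style: log the bad guest access only under -d guest_errors,
         * then return a harmless value and keep emulating.
         */
        qemu_log_mask(LOG_GUEST_ERROR, "%s: read: addr=0x%x\n",
                      __func__, (int)addr);
        return 0;
    }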

>
> ---
>
> Changes in v5:
> - new patch: use qemu_log_mask(LOG_GUEST_ERROR, ...) instead of hw_error()
>   in various sifive models
>
> Changes in v4: None
> Changes in v3: None
> Changes in v2: None
>
>  hw/riscv/sifive_prci.c | 8 +++++---
>  hw/riscv/sifive_test.c | 5 +++--
>  hw/riscv/sifive_uart.c | 9 +++++----
>  3 files changed, 13 insertions(+), 9 deletions(-)
>
> diff --git a/hw/riscv/sifive_prci.c b/hw/riscv/sifive_prci.c
> index f406682..1ab98d4 100644
> --- a/hw/riscv/sifive_prci.c
> +++ b/hw/riscv/sifive_prci.c
> @@ -20,6 +20,7 @@
>
>  #include "qemu/osdep.h"
>  #include "hw/sysbus.h"
> +#include "qemu/log.h"
>  #include "qemu/module.h"
>  #include "target/riscv/cpu.h"
>  #include "hw/riscv/sifive_prci.h"
> @@ -37,7 +38,8 @@ static uint64_t sifive_prci_read(void *opaque, hwaddr addr, unsigned int size)
>      case SIFIVE_PRCI_PLLOUTDIV:
>          return s->plloutdiv;
>      }
> -    hw_error("%s: read: addr=0x%x\n", __func__, (int)addr);
> +    qemu_log_mask(LOG_GUEST_ERROR, "%s: read: addr=0x%x\n",
> +                  __func__, (int)addr);
>      return 0;
>  }
>
> @@ -65,8 +67,8 @@ static void sifive_prci_write(void *opaque, hwaddr addr,
>          s->plloutdiv = (uint32_t) val64;
>          break;
>      default:
> -        hw_error("%s: bad write: addr=0x%x v=0x%x\n",
> -                 __func__, (int)addr, (int)val64);
> +        qemu_log_mask(LOG_GUEST_ERROR, "%s: bad write: addr=0x%x v=0x%x\n",
> +                      __func__, (int)addr, (int)val64);
>      }
>  }
>
> diff --git a/hw/riscv/sifive_test.c b/hw/riscv/sifive_test.c
> index cd86831..655a3d7 100644
> --- a/hw/riscv/sifive_test.c
> +++ b/hw/riscv/sifive_test.c
> @@ -20,6 +20,7 @@
>
>  #include "qemu/osdep.h"
>  #include "hw/sysbus.h"
> +#include "qemu/log.h"
>  #include "qemu/module.h"
>  #include "sysemu/sysemu.h"
>  #include "target/riscv/cpu.h"
> @@ -48,8 +49,8 @@ static void sifive_test_write(void *opaque, hwaddr addr,
>              break;
>          }
>      }
> -    hw_error("%s: write: addr=0x%x val=0x%016" PRIx64 "\n",
> -        __func__, (int)addr, val64);
> +    qemu_log_mask(LOG_GUEST_ERROR, "%s: write: addr=0x%x val=0x%016" PRIx64 "\n",
> +                  __func__, (int)addr, val64);
>  }
>
>  static const MemoryRegionOps sifive_test_ops = {
> diff --git a/hw/riscv/sifive_uart.c b/hw/riscv/sifive_uart.c
> index 3b3f94f..cd74043 100644
> --- a/hw/riscv/sifive_uart.c
> +++ b/hw/riscv/sifive_uart.c
> @@ -18,6 +18,7 @@
>
>  #include "qemu/osdep.h"
>  #include "qapi/error.h"
> +#include "qemu/log.h"
>  #include "hw/sysbus.h"
>  #include "chardev/char.h"
>  #include "chardev/char-fe.h"
> @@ -93,8 +94,8 @@ uart_read(void *opaque, hwaddr addr, unsigned int size)
>          return s->div;
>      }
>
> -    hw_error("%s: bad read: addr=0x%x\n",
> -        __func__, (int)addr);
> +    qemu_log_mask(LOG_GUEST_ERROR, "%s: bad read: addr=0x%x\n",
> +                  __func__, (int)addr);
>      return 0;
>  }
>
> @@ -125,8 +126,8 @@ uart_write(void *opaque, hwaddr addr,
>          s->div = val64;
>          return;
>      }
> -    hw_error("%s: bad write: addr=0x%x v=0x%x\n",
> -        __func__, (int)addr, (int)value);
> +    qemu_log_mask(LOG_GUEST_ERROR, "%s: bad write: addr=0x%x v=0x%x\n",
> +                  __func__, (int)addr, (int)value);
>  }
>
>  static const MemoryRegionOps uart_ops = {
> --
> 2.7.4
>
>


