From: Alistair Francis
Subject: Re: [RFC PATCH v2 1/2] hw/riscv: hart: replace array access with qemu_get_cpu()
Date: Mon, 18 Sep 2023 11:50:23 +1000
On Thu, Sep 14, 2023 at 6:09 PM Nikita Shubin <nikita.shubin@maquefel.me> wrote:
>
> From: Nikita Shubin <n.shubin@yadro.com>
>
> Replace all RISCVHartArrayState->harts[idx] with
> qemu_get_cpu()/cpu_by_arch_id().
Thanks for the patch!
Why do we want this change though?
>
> cpu_index is guaranteed to be contiguous by cpu_get_free_index(), so the
> CPUs can be accessed in the same order they were added.
>
> "Hart IDs might not necessarily be numbered contiguously in a
> multiprocessor system, but at least one hart must have
> a hart ID of zero."
>
> This states that hart ID zero should always be present, which makes using
> cpu_by_arch_id(0) safe.
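In other words, the conversion swaps direct indexing of the hart array for
the generic CPU lookup helpers. Roughly (an illustrative sketch with
placeholder names, not code from the patch):

    RISCVCPU *hart;
    hart = &harts->harts[idx];            /* old: direct array access */
    hart = RISCV_CPU(qemu_get_cpu(idx));  /* new: lookup by cpu_index  */
    hart = RISCV_CPU(cpu_by_arch_id(0));  /* new: lookup by hart ID 0  */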
>
> Signed-off-by: Nikita Shubin <n.shubin@yadro.com>
> ---
> hw/riscv/boot.c | 6 ++++--
> hw/riscv/sifive_u.c | 7 +++++--
> hw/riscv/spike.c | 17 ++++++++++-------
> hw/riscv/virt-acpi-build.c | 2 +-
> hw/riscv/virt.c | 17 +++++++++--------
> 5 files changed, 29 insertions(+), 20 deletions(-)
>
> diff --git a/hw/riscv/boot.c b/hw/riscv/boot.c
> index 52bf8e67de..041f966e58 100644
> --- a/hw/riscv/boot.c
> +++ b/hw/riscv/boot.c
> @@ -36,7 +36,8 @@
>
> bool riscv_is_32bit(RISCVHartArrayState *harts)
> {
> - return harts->harts[0].env.misa_mxl_max == MXL_RV32;
> + RISCVCPU *hart = RISCV_CPU(cpu_by_arch_id(0));
> + return hart->env.misa_mxl_max == MXL_RV32;
> }
>
> /*
> @@ -385,6 +386,7 @@ void riscv_setup_rom_reset_vec(MachineState *machine, RISCVHartArrayState *harts
> uint64_t fdt_load_addr)
> {
> int i;
> + RISCVCPU *hart = RISCV_CPU(cpu_by_arch_id(0));
> uint32_t start_addr_hi32 = 0x00000000;
> uint32_t fdt_load_addr_hi32 = 0x00000000;
>
> @@ -414,7 +416,7 @@ void riscv_setup_rom_reset_vec(MachineState *machine, RISCVHartArrayState *harts
> reset_vec[4] = 0x0182b283; /* ld t0, 24(t0) */
> }
>
> - if (!harts->harts[0].cfg.ext_icsr) {
> + if (!hart->cfg.ext_icsr) {
> /*
> * The Zicsr extension has been disabled, so let's ensure we don't
> * run the CSR instruction. Let's fill the address with a non
> diff --git a/hw/riscv/sifive_u.c b/hw/riscv/sifive_u.c
> index ec76dce6c9..3d09d0ee0e 100644
> --- a/hw/riscv/sifive_u.c
> +++ b/hw/riscv/sifive_u.c
> @@ -168,6 +168,7 @@ static void create_fdt(SiFiveUState *s, const MemMapEntry *memmap,
> qemu_fdt_setprop_cell(fdt, "/cpus", "#address-cells", 0x1);
>
> for (cpu = ms->smp.cpus - 1; cpu >= 0; cpu--) {
> + RISCVCPU *hart;
> int cpu_phandle = phandle++;
> nodename = g_strdup_printf("/cpus/cpu@%d", cpu);
> char *intc = g_strdup_printf("/cpus/cpu@%d/interrupt-controller", cpu);
> @@ -180,9 +181,11 @@ static void create_fdt(SiFiveUState *s, const MemMapEntry *memmap,
> } else {
> qemu_fdt_setprop_string(fdt, nodename, "mmu-type",
> "riscv,sv48");
> }
> - isa = riscv_isa_string(&s->soc.u_cpus.harts[cpu - 1]);
> + hart = RISCV_CPU(qemu_get_cpu(cpu - 1));
> + isa = riscv_isa_string(hart);
This doesn't look right. The existing code accesses the u_cpus/e_cpus
arrays separately, but the new code indexes a single flat CPU list. You
need to adjust this offset based on the number of E/U CPUs (depending on
the order they are instantiated in).
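For illustration, something along these lines might be needed. This is
only a sketch: it assumes the single E core is instantiated before the
U cores, so that QEMU cpu_index 0 is the E core and the U cores start
at index 1.

    if (cpu != 0) {
        /* U core: with the E core occupying cpu_index 0, U core 'cpu'
         * lives at cpu_index 'cpu', not 'cpu - 1'. */
        hart = RISCV_CPU(qemu_get_cpu(cpu));
    } else {
        /* E core (management hart) */
        hart = RISCV_CPU(qemu_get_cpu(0));
    }
    isa = riscv_isa_string(hart);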
Alistair
> } else {
> - isa = riscv_isa_string(&s->soc.e_cpus.harts[0]);
> + hart = RISCV_CPU(qemu_get_cpu(0));
> + isa = riscv_isa_string(hart);
> }
> qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", isa);
> qemu_fdt_setprop_string(fdt, nodename, "compatible", "riscv");
> diff --git a/hw/riscv/spike.c b/hw/riscv/spike.c
> index 81f7e53aed..f3ec6427a1 100644
> --- a/hw/riscv/spike.c
> +++ b/hw/riscv/spike.c
> @@ -97,29 +97,32 @@ static void create_fdt(SpikeState *s, const MemMapEntry *memmap,
> qemu_fdt_add_subnode(fdt, "/cpus/cpu-map");
>
> for (socket = (riscv_socket_count(ms) - 1); socket >= 0; socket--) {
> + uint32_t num_harts = s->soc[socket].num_harts;
> + uint32_t hartid_base = s->soc[socket].hartid_base;
> +
> clust_name = g_strdup_printf("/cpus/cpu-map/cluster%d", socket);
> qemu_fdt_add_subnode(fdt, clust_name);
>
> - clint_cells = g_new0(uint32_t, s->soc[socket].num_harts * 4);
> + clint_cells = g_new0(uint32_t, num_harts * 4);
>
> - for (cpu = s->soc[socket].num_harts - 1; cpu >= 0; cpu--) {
> + for (cpu = num_harts - 1; cpu >= 0; cpu--) {
> + int cpu_index = hartid_base + cpu;
> + RISCVCPU *hart = RISCV_CPU(qemu_get_cpu(cpu_index));
> cpu_phandle = phandle++;
>
> - cpu_name = g_strdup_printf("/cpus/cpu@%d",
> - s->soc[socket].hartid_base + cpu);
> + cpu_name = g_strdup_printf("/cpus/cpu@%d", cpu_index);
> qemu_fdt_add_subnode(fdt, cpu_name);
> if (is_32_bit) {
> qemu_fdt_setprop_string(fdt, cpu_name, "mmu-type",
> "riscv,sv32");
> } else {
> qemu_fdt_setprop_string(fdt, cpu_name, "mmu-type",
> "riscv,sv48");
> }
> - name = riscv_isa_string(&s->soc[socket].harts[cpu]);
> + name = riscv_isa_string(hart);
> qemu_fdt_setprop_string(fdt, cpu_name, "riscv,isa", name);
> g_free(name);
> qemu_fdt_setprop_string(fdt, cpu_name, "compatible", "riscv");
> qemu_fdt_setprop_string(fdt, cpu_name, "status", "okay");
> - qemu_fdt_setprop_cell(fdt, cpu_name, "reg",
> - s->soc[socket].hartid_base + cpu);
> + qemu_fdt_setprop_cell(fdt, cpu_name, "reg", cpu_index);
> qemu_fdt_setprop_string(fdt, cpu_name, "device_type", "cpu");
> riscv_socket_fdt_write_id(ms, cpu_name, socket);
> qemu_fdt_setprop_cell(fdt, cpu_name, "phandle", cpu_phandle);
> diff --git a/hw/riscv/virt-acpi-build.c b/hw/riscv/virt-acpi-build.c
> index 7331248f59..d885220cc9 100644
> --- a/hw/riscv/virt-acpi-build.c
> +++ b/hw/riscv/virt-acpi-build.c
> @@ -158,7 +158,7 @@ static void build_rhct(GArray *table_data,
> isa_offset = table_data->len - table.table_offset;
> build_append_int_noprefix(table_data, 0, 2); /* Type 0 */
>
> - cpu = &s->soc[0].harts[0];
> + cpu = RISCV_CPU(cpu_by_arch_id(0));
> isa = riscv_isa_string(cpu);
> len = 8 + strlen(isa) + 1;
> aligned_len = (len % 2) ? (len + 1) : len;
> diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
> index 5edc1d98d2..f3132ecc1b 100644
> --- a/hw/riscv/virt.c
> +++ b/hw/riscv/virt.c
> @@ -239,16 +239,18 @@ static void create_fdt_socket_cpus(RISCVVirtState *s, int socket,
> uint32_t cpu_phandle;
> MachineState *ms = MACHINE(s);
> char *name, *cpu_name, *core_name, *intc_name, *sv_name;
> + uint32_t num_harts = s->soc[socket].num_harts;
> + uint32_t hartid_base = s->soc[socket].hartid_base;
> bool is_32_bit = riscv_is_32bit(&s->soc[0]);
> uint8_t satp_mode_max;
>
> - for (cpu = s->soc[socket].num_harts - 1; cpu >= 0; cpu--) {
> - RISCVCPU *cpu_ptr = &s->soc[socket].harts[cpu];
> + for (cpu = num_harts - 1; cpu >= 0; cpu--) {
> + int cpu_index = hartid_base + cpu;
> + RISCVCPU *cpu_ptr = RISCV_CPU(qemu_get_cpu(cpu_index));
>
> cpu_phandle = (*phandle)++;
>
> - cpu_name = g_strdup_printf("/cpus/cpu@%d",
> - s->soc[socket].hartid_base + cpu);
> + cpu_name = g_strdup_printf("/cpus/cpu@%d", cpu_index);
> qemu_fdt_add_subnode(ms->fdt, cpu_name);
>
> if (cpu_ptr->cfg.satp_mode.supported != 0) {
> @@ -275,8 +277,7 @@ static void create_fdt_socket_cpus(RISCVVirtState *s, int socket,
>
> qemu_fdt_setprop_string(ms->fdt, cpu_name, "compatible", "riscv");
> qemu_fdt_setprop_string(ms->fdt, cpu_name, "status", "okay");
> - qemu_fdt_setprop_cell(ms->fdt, cpu_name, "reg",
> - s->soc[socket].hartid_base + cpu);
> + qemu_fdt_setprop_cell(ms->fdt, cpu_name, "reg", cpu_index);
> qemu_fdt_setprop_string(ms->fdt, cpu_name, "device_type", "cpu");
> riscv_socket_fdt_write_id(ms, cpu_name, socket);
> qemu_fdt_setprop_cell(ms->fdt, cpu_name, "phandle", cpu_phandle);
> @@ -717,12 +718,12 @@ static void create_fdt_pmu(RISCVVirtState *s)
> {
> char *pmu_name;
> MachineState *ms = MACHINE(s);
> - RISCVCPU hart = s->soc[0].harts[0];
> + RISCVCPU *hart = RISCV_CPU(qemu_get_cpu(0));
>
> pmu_name = g_strdup_printf("/pmu");
> qemu_fdt_add_subnode(ms->fdt, pmu_name);
> qemu_fdt_setprop_string(ms->fdt, pmu_name, "compatible", "riscv,pmu");
> - riscv_pmu_generate_fdt_node(ms->fdt, hart.cfg.pmu_num, pmu_name);
> + riscv_pmu_generate_fdt_node(ms->fdt, hart->cfg.pmu_num, pmu_name);
>
> g_free(pmu_name);
> }
> --
> 2.39.2