qemu-ppc
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Qemu-ppc] [RFC PATCH v2 23/23] spapr: Memory hotplug support


From: David Gibson
Subject: Re: [Qemu-ppc] [RFC PATCH v2 23/23] spapr: Memory hotplug support
Date: Thu, 26 Mar 2015 14:57:45 +1100
User-agent: Mutt/1.5.23 (2014-03-12)

On Mon, Mar 23, 2015 at 07:06:04PM +0530, Bharata B Rao wrote:
> Make use of pc-dimm infrastructure to support memory hotplug
> for PowerPC.
> 
> Modelled on i386 memory hotplug.
> 
> Signed-off-by: Bharata B Rao <address@hidden>
> ---
>  hw/ppc/spapr.c        | 119 
> +++++++++++++++++++++++++++++++++++++++++++++++++-
>  hw/ppc/spapr_events.c |   3 ++
>  2 files changed, 120 insertions(+), 2 deletions(-)
> 
> diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
> index 4e844ab..bc46acd 100644
> --- a/hw/ppc/spapr.c
> +++ b/hw/ppc/spapr.c
> @@ -61,7 +61,8 @@
>  #include "hw/nmi.h"
>  
>  #include "hw/compat.h"
> -
> +#include "hw/mem/pc-dimm.h"
> +#include "qapi/qmp/qerror.h"
>  #include <libfdt.h>
>  
>  /* SLOF memory layout:
> @@ -902,6 +903,10 @@ int spapr_h_cas_compose_response(target_ulong addr, 
> target_ulong size,
>          _FDT((spapr_populate_memory(spapr, fdt)));
>      }
>  
> +    if (spapr->dr_lmb_enabled) {
> +        _FDT(spapr_drc_populate_dt(fdt, 0, NULL, 
> SPAPR_DR_CONNECTOR_TYPE_LMB));
> +    }
> +
>      /* Pack resulting tree */
>      _FDT((fdt_pack(fdt)));
>  
> @@ -2185,6 +2190,109 @@ static void spapr_cpu_socket_unplug(HotplugHandler 
> *hotplug_dev,
>      object_child_foreach(OBJECT(dev), spapr_cpu_core_unplug, errp);
>  }
>  
> +static void spapr_add_lmbs(uint64_t addr, uint64_t size, Error **errp)
> +{
> +    sPAPRDRConnector *drc;
> +    uint32_t nr_lmbs = size/SPAPR_MEMORY_BLOCK_SIZE;
> +    int i;
> +
> +    if (size % SPAPR_MEMORY_BLOCK_SIZE) {
> +        error_setg(errp, "Hotplugged memory size must be a multiple of "
> +                      "%d MB", SPAPR_MEMORY_BLOCK_SIZE/(1024 * 1024));

s/MB/MiB/ — there seems to be precedent for using the mebibyte term in
QEMU.

Also, you can use the MiB #define instead of (1024 * 1024).

> +        return;
> +    }
> +
> +    for (i = 0; i < nr_lmbs; i++) {
> +        drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_LMB,
> +                addr/SPAPR_MEMORY_BLOCK_SIZE);
> +        g_assert(drc);
> +        spapr_hotplug_req_add_event(drc);
> +        addr += SPAPR_MEMORY_BLOCK_SIZE;
> +    }
> +}
> +
> +static void spapr_memory_plug(HotplugHandler *hotplug_dev,
> +                         DeviceState *dev, Error **errp)
> +{
> +    int slot;
> +    Error *local_err = NULL;
> +    sPAPRMachineState *ms = SPAPR_MACHINE(hotplug_dev);
> +    MachineState *machine = MACHINE(hotplug_dev);
> +    PCDIMMDevice *dimm = PC_DIMM(dev);
> +    PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
> +    MemoryRegion *mr = ddc->get_memory_region(dimm);
> +    uint64_t existing_dimms_capacity = 0;
> +    uint64_t align = TARGET_PAGE_SIZE;
> +    uint64_t addr;
> +
> +    addr = object_property_get_int(OBJECT(dimm), PC_DIMM_ADDR_PROP, 
> &local_err);
> +    if (local_err) {
> +        goto out;
> +    }
> +
> +    if (memory_region_get_alignment(mr) && ms->enforce_aligned_dimm) {
> +        align = memory_region_get_alignment(mr);
> +    }
> +
> +    addr = pc_dimm_get_free_addr(ms->hotplug_memory_base,
> +                                 memory_region_size(&ms->hotplug_memory),
> +                                 !addr ? NULL : &addr, align,
> +                                 memory_region_size(mr), &local_err);
> +    if (local_err) {
> +        goto out;
> +    }
> +
> +    existing_dimms_capacity = pc_existing_dimms_capacity(&local_err);
> +    if (local_err) {
> +        goto out;
> +    }
> +
> +    if (existing_dimms_capacity + memory_region_size(mr) >
> +        machine->maxram_size - machine->ram_size) {
> +        error_setg(&local_err, "not enough space, currently 0x%" PRIx64
> +                   " in use of total hot pluggable 0x" RAM_ADDR_FMT,
> +                   existing_dimms_capacity,
> +                   machine->maxram_size - machine->ram_size);
> +        goto out;
> +    }
> +
> +    object_property_set_int(OBJECT(dev), addr, PC_DIMM_ADDR_PROP, 
> &local_err);
> +    if (local_err) {
> +        goto out;
> +    }
> +    trace_mhp_pc_dimm_assigned_address(addr);
> +
> +    slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP, 
> &local_err);
> +    if (local_err) {
> +        goto out;
> +    }
> +
> +    slot = pc_dimm_get_free_slot(slot == PC_DIMM_UNASSIGNED_SLOT ? NULL : 
> &slot,
> +                                 machine->ram_slots, &local_err);
> +    if (local_err) {
> +        goto out;
> +    }
> +    object_property_set_int(OBJECT(dev), slot, PC_DIMM_SLOT_PROP, 
> &local_err);
> +    if (local_err) {
> +        goto out;
> +    }
> +    trace_mhp_pc_dimm_assigned_slot(slot);
> +
> +    if (kvm_enabled() && !kvm_has_free_slot(machine)) {
> +        error_setg(&local_err, "hypervisor has no free memory slots left");
> +        goto out;
> +    }
> +
> +    memory_region_add_subregion(&ms->hotplug_memory,
> +                                addr - ms->hotplug_memory_base, mr);
> +    vmstate_register_ram(mr, dev);
> +
> +    spapr_add_lmbs(addr, memory_region_size(mr), &local_err);

It really looks as if it should be possible to make a common function
covering most of this and pc_dimm_plug, with the only difference being
the final call to do the arch-specific stuff.  Even that might be able
to just be a hotplug handler call if you add a parameter for the
appropriate hotplug handler object, instead of assuming the ACPI device
on the PC side.

> +out:
> +    error_propagate(errp, local_err);
> +}
> +
>  static void spapr_machine_device_plug(HotplugHandler *hotplug_dev,
>                                        DeviceState *dev, Error **errp)
>  {
> @@ -2197,6 +2305,9 @@ static void spapr_machine_device_plug(HotplugHandler 
> *hotplug_dev,
>          if (dev->hotplugged && spapr->dr_cpu_enabled) {
>              spapr_cpu_plug(hotplug_dev, dev, errp);
>          }
> +    } else if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) &&
> +                spapr->dr_lmb_enabled) {
> +        spapr_memory_plug(hotplug_dev, dev, errp);
>      }
>  }
>  
> @@ -2207,6 +2318,9 @@ static void spapr_machine_device_unplug(HotplugHandler 
> *hotplug_dev,
>          if (dev->hotplugged && spapr->dr_cpu_enabled) {
>              spapr_cpu_socket_unplug(hotplug_dev, dev, errp);
>          }
> +    } else if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) &&
> +                spapr->dr_lmb_enabled) {
> +        error_setg(errp, "Memory hot unplug not supported by sPAPR");
>      }
>  }
>  
> @@ -2214,7 +2328,8 @@ static HotplugHandler 
> *spapr_get_hotpug_handler(MachineState *machine,
>                                               DeviceState *dev)
>  {
>      if (object_dynamic_cast(OBJECT(dev), TYPE_CPU) ||
> -        object_dynamic_cast(OBJECT(dev), TYPE_CPU_SOCKET)) {
> +        object_dynamic_cast(OBJECT(dev), TYPE_CPU_SOCKET) ||
> +        object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
>          return HOTPLUG_HANDLER(machine);
>      }
>      return NULL;
> diff --git a/hw/ppc/spapr_events.c b/hw/ppc/spapr_events.c
> index 4ae818a..e2a22d2 100644
> --- a/hw/ppc/spapr_events.c
> +++ b/hw/ppc/spapr_events.c
> @@ -431,6 +431,9 @@ static void spapr_hotplug_req_event(sPAPRDRConnector 
> *drc, uint8_t hp_action)
>      case SPAPR_DR_CONNECTOR_TYPE_CPU:
>          hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_CPU;
>          break;
> +    case SPAPR_DR_CONNECTOR_TYPE_LMB:
> +        hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_MEMORY;
> +        break;
>      default:
>          /* we shouldn't be signaling hotplug events for resources
>           * that don't support them
> -- 
> 2.1.0
> 

-- 
David Gibson                    | I'll have my music baroque, and my code
david AT gibson.dropbear.id.au  | minimalist, thank you.  NOT _the_ _other_
                                | _way_ _around_!
http://www.ozlabs.org/~dgibson

Attachment: pgpimb7w5btYF.pgp
Description: PGP signature


reply via email to

[Prev in Thread] Current Thread [Next in Thread]