Re: [RFC PATCH v8 06/23] target/arm: Add support for Non-maskable Interrupt


From: Jinjie Ruan
Subject: Re: [RFC PATCH v8 06/23] target/arm: Add support for Non-maskable Interrupt
Date: Thu, 21 Mar 2024 17:26:32 +0800
User-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:102.0) Gecko/20100101 Thunderbird/102.2.0


On 2024/3/20 1:28, Peter Maydell wrote:
> On Mon, 18 Mar 2024 at 09:37, Jinjie Ruan <ruanjinjie@huawei.com> wrote:
>>
>> This only implements the external delivery method via the GICv3.
>>
>> Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
>> Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
>> ---
>> v8:
>> - Fix the rcu stall after sending a VNMI in qemu VM.
>> v7:
>> - Add Reviewed-by.
>> v6:
>> - env->cp15.hcr_el2 -> arm_hcr_el2_eff().
>> - env->cp15.hcrx_el2 -> arm_hcrx_el2_eff().
>> - Not include VF && VFNMI in CPU_INTERRUPT_VNMI.
>> v4:
>> - Accept NMI unconditionally for arm_cpu_has_work() but add comment.
>> - Change from & to && for EXCP_IRQ or EXCP_FIQ.
>> - Refactor NMI mask in arm_excp_unmasked().
>> - Also handle VNMI in arm_cpu_exec_interrupt() and arm_cpu_set_irq().
>> - Rename virtual to Virtual.
>> v3:
>> - Not include CPU_INTERRUPT_NMI when FEAT_NMI not enabled
>> - Add ARM_CPU_VNMI.
>> - Refactor NMI mask in arm_excp_unmasked().
>> - Test SCTLR_ELx.NMI for ALLINT mask for NMI.
>> ---
>>  target/arm/cpu-qom.h   |  4 +-
>>  target/arm/cpu.c       | 85 +++++++++++++++++++++++++++++++++++++++---
>>  target/arm/cpu.h       |  4 ++
>>  target/arm/helper.c    |  2 +
>>  target/arm/internals.h |  9 +++++
>>  5 files changed, 97 insertions(+), 7 deletions(-)
>>
>> diff --git a/target/arm/cpu-qom.h b/target/arm/cpu-qom.h
>> index 8e032691db..e0c9e18036 100644
>> --- a/target/arm/cpu-qom.h
>> +++ b/target/arm/cpu-qom.h
>> @@ -36,11 +36,13 @@ DECLARE_CLASS_CHECKERS(AArch64CPUClass, AARCH64_CPU,
>>  #define ARM_CPU_TYPE_SUFFIX "-" TYPE_ARM_CPU
>>  #define ARM_CPU_TYPE_NAME(name) (name ARM_CPU_TYPE_SUFFIX)
>>
>> -/* Meanings of the ARMCPU object's four inbound GPIO lines */
>> +/* Meanings of the ARMCPU object's six inbound GPIO lines */
>>  #define ARM_CPU_IRQ 0
>>  #define ARM_CPU_FIQ 1
>>  #define ARM_CPU_VIRQ 2
>>  #define ARM_CPU_VFIQ 3
>> +#define ARM_CPU_NMI 4
>> +#define ARM_CPU_VNMI 5
>>
>>  /* For M profile, some registers are banked secure vs non-secure;
>>   * these are represented as a 2-element array where the first element
> 
>> @@ -678,13 +687,31 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
>>          return false;
>>      }
>>
>> +    if (cpu_isar_feature(aa64_nmi, env_archcpu(env)) &&
>> +        env->cp15.sctlr_el[target_el] & SCTLR_NMI && cur_el == target_el) {
>> +        allIntMask = env->pstate & PSTATE_ALLINT ||
>> +                     ((env->cp15.sctlr_el[target_el] & SCTLR_SPINTMASK) &&
>> +                      (env->pstate & PSTATE_SP));
>> +    }
>> +
>>      switch (excp_idx) {
>> +    case EXCP_NMI:
>> +        pstate_unmasked = !allIntMask;
>> +        break;
>> +
>> +    case EXCP_VNMI:
>> +        if ((!(hcr_el2 & HCR_IMO) && !(hcr_el2 & HCR_FMO)) ||
>> +             (hcr_el2 & HCR_TGE)) {
>> +            /* VNMIs(VIRQs or VFIQs) are only taken when hypervized.  */
>> +            return false;
>> +        }
> 
> VINMI and VFNMI aren't the same thing: do we definitely want to
> merge them into one EXCP_VNMI ? It feels like it would be simpler
> to keep them separate. Similarly CPU_INTERRUPT_VNMI, and
> arm_cpu_update_vnmi() probably want VINMI and VFNMI versions.

It's not quite like that. The VFNMI cannot be reported from the GIC, so
there will be no opportunity to call arm_cpu_update_vfnmi().
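
If we did split them as you suggest, as far as I can see the VFNMI side
would only ever be driven by the virtual-interrupt register bits, since
there is no GIC input line for it. A rough, untested sketch (the
CPU_INTERRUPT_VFNMI and HCRX_VFNMI names are assumptions here, not part
of this patch):

    /*
     * Hypothetical sketch: a split-out VFNMI update would have no
     * env->irq_line_state term, because the GIC never drives a VFNMI line.
     */
    void arm_cpu_update_vfnmi(ARMCPU *cpu)
    {
        CPUARMState *env = &cpu->env;
        CPUState *cs = CPU(cpu);

        bool new_state = (arm_hcr_el2_eff(env) & HCR_VF) &&
                         (arm_hcrx_el2_eff(env) & HCRX_VFNMI);

        if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VFNMI) != 0)) {
            if (new_state) {
                cpu_interrupt(cs, CPU_INTERRUPT_VFNMI);
            } else {
                cpu_reset_interrupt(cs, CPU_INTERRUPT_VFNMI);
            }
        }
    }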
> 
>> +        return !allIntMask;
>>      case EXCP_FIQ:
>> -        pstate_unmasked = !(env->daif & PSTATE_F);
>> +        pstate_unmasked = (!(env->daif & PSTATE_F)) && (!allIntMask);
>>          break;
>>
>>      case EXCP_IRQ:
>> -        pstate_unmasked = !(env->daif & PSTATE_I);
>> +        pstate_unmasked = (!(env->daif & PSTATE_I)) && (!allIntMask);
>>          break;
>>
>>      case EXCP_VFIQ:
>> @@ -692,13 +719,13 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
>>              /* VFIQs are only taken when hypervized.  */
>>              return false;
>>          }
>> -        return !(env->daif & PSTATE_F);
>> +        return !(env->daif & PSTATE_F) && (!allIntMask);
>>      case EXCP_VIRQ:
>>          if (!(hcr_el2 & HCR_IMO) || (hcr_el2 & HCR_TGE)) {
>>              /* VIRQs are only taken when hypervized.  */
>>              return false;
>>          }
>> -        return !(env->daif & PSTATE_I);
>> +        return !(env->daif & PSTATE_I) && (!allIntMask);
>>      case EXCP_VSERR:
>>          if (!(hcr_el2 & HCR_AMO) || (hcr_el2 & HCR_TGE)) {
>>              /* VIRQs are only taken when hypervized.  */
>> @@ -804,6 +831,24 @@ static bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
>>
>>      /* The prioritization of interrupts is IMPLEMENTATION DEFINED. */
>>
>> +    if (cpu_isar_feature(aa64_nmi, env_archcpu(env))) {
>> +        if (interrupt_request & CPU_INTERRUPT_NMI) {
>> +            excp_idx = EXCP_NMI;
>> +            target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
>> +            if (arm_excp_unmasked(cs, excp_idx, target_el,
>> +                                  cur_el, secure, hcr_el2)) {
>> +                goto found;
>> +            }
>> +        }
>> +        if (interrupt_request & CPU_INTERRUPT_VNMI) {
>> +            excp_idx = EXCP_VNMI;
>> +            target_el = 1;
>> +            if (arm_excp_unmasked(cs, excp_idx, target_el,
>> +                                  cur_el, secure, hcr_el2)) {
>> +                goto found;
>> +            }
>> +        }
>> +    }
>>      if (interrupt_request & CPU_INTERRUPT_FIQ) {
>>          excp_idx = EXCP_FIQ;
>>          target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
>> @@ -900,6 +945,28 @@ void arm_cpu_update_vfiq(ARMCPU *cpu)
>>      }
>>  }
>>
>> +void arm_cpu_update_vnmi(ARMCPU *cpu)
>> +{
>> +    /*
>> +     * Update the interrupt level for VNMI, which is the logical OR of
>> +     * the HCRX_EL2.VINMI bit and the input line level from the GIC.
>> +     */
>> +    CPUARMState *env = &cpu->env;
>> +    CPUState *cs = CPU(cpu);
>> +
>> +    bool new_state = ((arm_hcr_el2_eff(env) & HCR_VI) &&
>> +                      (arm_hcrx_el2_eff(env) & HCRX_VINMI)) ||
>> +        (env->irq_line_state & CPU_INTERRUPT_VNMI);
>> +
>> +    if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VNMI) != 0)) {
>> +        if (new_state) {
>> +            cpu_interrupt(cs, CPU_INTERRUPT_VNMI);
>> +        } else {
>> +            cpu_reset_interrupt(cs, CPU_INTERRUPT_VNMI);
>> +        }
>> +    }
>> +}
> 
> I think Richard noted on a previous version of the series that
> the existing arm_cpu_update_virq() and arm_cpu_update_vfiq()
> also need changing so they don't set CPU_INTERRUPT_VIRQ
> or CPU_INTERRUPT_VFIQ if the HCRX_EL2 bits indicate that
> we should be signalling a VINMI or VFNMI instead.
> That also means that VIRQ and VFIQ will change values based
> on changes in HCRX_EL2, which means that hcrx_write() needs
> to have calls to arm_cpu_update_{virq,vfiq,vnmi} the way
> that do_hcr_write() already does.
> 
> The use of the _eff() versions of the functions here is
> correct but it introduces a new case where we need to
> reevaluate the status of the VNMI etc interrupt status:
> when we change from Secure to NonSecure or when we change
> SCR_EL3.EEL2 or SCR_EL3.HXEN. We either need to make sure
> we reevaluate when we drop from EL3 to EL2 (which would be
> OK since VINMI and VFNMI can't be taken at EL3 and none of
> these bits can change except at EL3) or else make the calls
> to reevaluate them when we write to SCR_EL3. At least, I don't
> think we currently reevaluate these bits on an EL change.
> 
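If I follow, arm_cpu_update_virq() (and arm_cpu_update_vfiq()) would then
only raise the plain VIRQ/VFIQ bits when the corresponding HCRX_EL2 bit
does not turn the pending virtual interrupt into a VINMI/VFNMI. A rough,
untested sketch for the VIRQ side:

    void arm_cpu_update_virq(ARMCPU *cpu)
    {
        /*
         * Update the interrupt level for VIRQ: the logical OR of the
         * HCR_EL2.VI bit and the input line level from the GIC, except
         * that HCR_EL2.VI with HCRX_EL2.VINMI set signals a VINMI instead.
         */
        CPUARMState *env = &cpu->env;
        CPUState *cs = CPU(cpu);

        bool new_state = ((arm_hcr_el2_eff(env) & HCR_VI) &&
                          !(arm_hcrx_el2_eff(env) & HCRX_VINMI)) ||
            (env->irq_line_state & CPU_INTERRUPT_VIRQ);

        if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VIRQ) != 0)) {
            if (new_state) {
                cpu_interrupt(cs, CPU_INTERRUPT_VIRQ);
            } else {
                cpu_reset_interrupt(cs, CPU_INTERRUPT_VIRQ);
            }
        }
    }

with hcrx_write() then calling arm_cpu_update_{virq,vfiq,vnmi}() the same
way do_hcr_write() already does.
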
>> +
>>  void arm_cpu_update_vserr(ARMCPU *cpu)
>>  {
>>      /*
>> @@ -929,7 +996,9 @@ static void arm_cpu_set_irq(void *opaque, int irq, int level)
>>          [ARM_CPU_IRQ] = CPU_INTERRUPT_HARD,
>>          [ARM_CPU_FIQ] = CPU_INTERRUPT_FIQ,
>>          [ARM_CPU_VIRQ] = CPU_INTERRUPT_VIRQ,
>> -        [ARM_CPU_VFIQ] = CPU_INTERRUPT_VFIQ
>> +        [ARM_CPU_VFIQ] = CPU_INTERRUPT_VFIQ,
>> +        [ARM_CPU_NMI] = CPU_INTERRUPT_NMI,
>> +        [ARM_CPU_VNMI] = CPU_INTERRUPT_VNMI
>>      };
>>
>>      if (!arm_feature(env, ARM_FEATURE_EL2) &&
>> @@ -955,8 +1024,12 @@ static void arm_cpu_set_irq(void *opaque, int irq, int level)
>>      case ARM_CPU_VFIQ:
>>          arm_cpu_update_vfiq(cpu);
>>          break;
>> +    case ARM_CPU_VNMI:
>> +        arm_cpu_update_vnmi(cpu);
>> +        break;
>>      case ARM_CPU_IRQ:
>>      case ARM_CPU_FIQ:
>> +    case ARM_CPU_NMI:
>>          if (level) {
>>              cpu_interrupt(cs, mask[irq]);
>>          } else {
>> @@ -1355,7 +1428,7 @@ static void arm_cpu_initfn(Object *obj)
>>           */
>>          qdev_init_gpio_in(DEVICE(cpu), arm_cpu_kvm_set_irq, 4);
>>      } else {
>> -        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, 4);
>> +        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, 6);
> 
> You should also update the value passed when we init the
> GPIOs in the arm_cpu_kvm_set_irq case, and update the comment
> that explains why, which currently reads:
> 
>         /* VIRQ and VFIQ are unused with KVM but we add them to maintain
>          * the same interface as non-KVM CPUs.
>          */
> 
> so it mentions also the NMI and VNMI inputs.
> 
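For the KVM branch that would presumably become something like (sketch
only, mirroring the existing comment):

        /* VIRQ, VFIQ, NMI and VNMI are unused with KVM but we add them
         * to maintain the same interface as non-KVM CPUs.
         */
        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_kvm_set_irq, 6);
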
>>      }
>>
>>      qdev_init_gpio_out(DEVICE(cpu), cpu->gt_timer_outputs,
>> diff --git a/target/arm/cpu.h b/target/arm/cpu.h
>> index de740d223f..629221e1a9 100644
>> --- a/target/arm/cpu.h
>> +++ b/target/arm/cpu.h
>> @@ -61,6 +61,8 @@
>>  #define EXCP_DIVBYZERO      23   /* v7M DIVBYZERO UsageFault */
>>  #define EXCP_VSERR          24
>>  #define EXCP_GPC            25   /* v9 Granule Protection Check Fault */
>> +#define EXCP_NMI            26
>> +#define EXCP_VNMI           27
>>  /* NB: add new EXCP_ defines to the array in arm_log_exception() too */
>>
>>  #define ARMV7M_EXCP_RESET   1
>> @@ -80,6 +82,8 @@
>>  #define CPU_INTERRUPT_VIRQ  CPU_INTERRUPT_TGT_EXT_2
>>  #define CPU_INTERRUPT_VFIQ  CPU_INTERRUPT_TGT_EXT_3
>>  #define CPU_INTERRUPT_VSERR CPU_INTERRUPT_TGT_INT_0
>> +#define CPU_INTERRUPT_NMI   CPU_INTERRUPT_TGT_EXT_4
>> +#define CPU_INTERRUPT_VNMI  CPU_INTERRUPT_TGT_EXT_0
> 
>>  /* The usual mapping for an AArch64 system register to its AArch32
>>   * counterpart is for the 32 bit world to have access to the lower
>> diff --git a/target/arm/helper.c b/target/arm/helper.c
>> index aa0151c775..875a7fa8da 100644
>> --- a/target/arm/helper.c
>> +++ b/target/arm/helper.c
>> @@ -10793,6 +10793,8 @@ void arm_log_exception(CPUState *cs)
>>              [EXCP_DIVBYZERO] = "v7M DIVBYZERO UsageFault",
>>              [EXCP_VSERR] = "Virtual SERR",
>>              [EXCP_GPC] = "Granule Protection Check",
>> +            [EXCP_NMI] = "NMI",
>> +            [EXCP_VNMI] = "Virtual NMI"
>>          };
>>
>>          if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
>> diff --git a/target/arm/internals.h b/target/arm/internals.h
>> index 516e0584bf..cb217a9ce7 100644
>> --- a/target/arm/internals.h
>> +++ b/target/arm/internals.h
>> @@ -1109,6 +1109,15 @@ void arm_cpu_update_virq(ARMCPU *cpu);
>>   */
>>  void arm_cpu_update_vfiq(ARMCPU *cpu);
>>
>> +/**
>> + * arm_cpu_update_vnmi: Update CPU_INTERRUPT_VNMI bit in cs->interrupt_request
>> + *
>> + * Update the CPU_INTERRUPT_VNMI bit in cs->interrupt_request, following
>> + * a change to either the input VNMI line from the GIC or the HCRX_EL2.VINMI.
>> + * Must be called with the BQL held.
>> + */
>> +void arm_cpu_update_vnmi(ARMCPU *cpu);
> 
> thanks
> -- PMM


