From: Gavin Shan
Subject: [PATCH v4 1/3] target/arm: Support SError injection
Date: Tue, 18 Feb 2020 13:04:14 +1100
This supports SError injection, which will be used by the "virt" board to
simulate the behavior of NMI injection in the next patch. As Peter Maydell
suggested, this adds a new interrupt (ARM_CPU_SERROR), which is parallel
to CPU_INTERRUPT_HARD. The backend depends on whether KVM is enabled:
with KVM, kvm_vcpu_ioctl(cpu, KVM_SET_VCPU_EVENTS) is leveraged to inject
the SError or data abort into the guest; with TCG, the same behavior is
emulated by injecting the SError or data abort into the guest directly.
Signed-off-by: Gavin Shan <address@hidden>
---
target/arm/cpu.c | 69 +++++++++++++++++++++++++++++++++++--------
target/arm/cpu.h | 20 ++++++++-----
target/arm/helper.c | 12 ++++++++
target/arm/m_helper.c | 8 +++++
target/arm/machine.c | 3 +-
5 files changed, 91 insertions(+), 21 deletions(-)
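
A minimal sketch (not part of this patch) of how a board model could drive
the new per-CPU SError input, for reviewers unfamiliar with the wiring.
Only ARM_CPU_SERROR and the two set_irq handlers mentioned in the comments
come from this series; the helper name example_set_serror() and the exact
include list are illustrative assumptions.

    #include "qemu/osdep.h"
    #include "hw/irq.h"
    #include "hw/qdev-core.h"
    #include "cpu.h"    /* per-target ARM cpu.h, which defines ARM_CPU_SERROR */

    static void example_set_serror(ARMCPU *cpu, int level)
    {
        /*
         * With TCG this lands in arm_cpu_set_irq(), which latches or clears
         * CPU_INTERRUPT_SERROR; with KVM it lands in arm_cpu_kvm_set_irq(),
         * which (for a non-zero level) issues KVM_SET_VCPU_EVENTS with
         * exception.serror_pending = 1.
         */
        qemu_set_irq(qdev_get_gpio_in(DEVICE(cpu), ARM_CPU_SERROR), level);
    }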
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index de733aceeb..e5750080bc 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -78,7 +78,7 @@ static bool arm_cpu_has_work(CPUState *cs)
&& cs->interrupt_request &
(CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD
| CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ
- | CPU_INTERRUPT_EXITTB);
+ | CPU_INTERRUPT_SERROR | CPU_INTERRUPT_EXITTB);
}
void arm_register_pre_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
@@ -449,6 +449,9 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
return false;
}
return !(env->daif & PSTATE_I);
+ case EXCP_SERROR:
+ pstate_unmasked = !(env->daif & PSTATE_A);
+ break;
default:
g_assert_not_reached();
}
@@ -538,6 +541,15 @@ bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
/* The prioritization of interrupts is IMPLEMENTATION DEFINED. */
+ if (interrupt_request & CPU_INTERRUPT_SERROR) {
+ excp_idx = EXCP_SERROR;
+ target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
+ if (arm_excp_unmasked(cs, excp_idx, target_el,
+ cur_el, secure, hcr_el2)) {
+ goto found;
+ }
+ }
+
if (interrupt_request & CPU_INTERRUPT_FIQ) {
excp_idx = EXCP_FIQ;
target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
@@ -570,6 +582,7 @@ bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
goto found;
}
}
+
return false;
found:
@@ -585,7 +598,7 @@ static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
CPUClass *cc = CPU_GET_CLASS(cs);
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
- bool ret = false;
+ uint32_t excp_idx;
/* ARMv7-M interrupt masking works differently than -A or -R.
* There is no FIQ/IRQ distinction. Instead of I and F bits
@@ -594,13 +607,26 @@ static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
* (which depends on state like BASEPRI, FAULTMASK and the
* currently active exception).
*/
- if (interrupt_request & CPU_INTERRUPT_HARD
- && (armv7m_nvic_can_take_pending_exception(env->nvic))) {
- cs->exception_index = EXCP_IRQ;
- cc->do_interrupt(cs);
- ret = true;
+ if (!armv7m_nvic_can_take_pending_exception(env->nvic)) {
+ return false;
+ }
+
+ if (interrupt_request & CPU_INTERRUPT_SERROR) {
+ excp_idx = EXCP_SERROR;
+ goto found;
+ }
+
+ if (interrupt_request & CPU_INTERRUPT_HARD) {
+ excp_idx = EXCP_IRQ;
+ goto found;
}
- return ret;
+
+ return false;
+
+found:
+ cs->exception_index = excp_idx;
+ cc->do_interrupt(cs);
+ return true;
}
#endif
@@ -656,7 +682,8 @@ static void arm_cpu_set_irq(void *opaque, int irq, int level)
[ARM_CPU_IRQ] = CPU_INTERRUPT_HARD,
[ARM_CPU_FIQ] = CPU_INTERRUPT_FIQ,
[ARM_CPU_VIRQ] = CPU_INTERRUPT_VIRQ,
- [ARM_CPU_VFIQ] = CPU_INTERRUPT_VFIQ
+ [ARM_CPU_VFIQ] = CPU_INTERRUPT_VFIQ,
+ [ARM_CPU_SERROR] = CPU_INTERRUPT_SERROR,
};
if (level) {
@@ -676,6 +703,7 @@ static void arm_cpu_set_irq(void *opaque, int irq, int level)
break;
case ARM_CPU_IRQ:
case ARM_CPU_FIQ:
+ case ARM_CPU_SERROR:
if (level) {
cpu_interrupt(cs, mask[irq]);
} else {
@@ -693,8 +721,10 @@ static void arm_cpu_kvm_set_irq(void *opaque, int irq, int level)
ARMCPU *cpu = opaque;
CPUARMState *env = &cpu->env;
CPUState *cs = CPU(cpu);
+ struct kvm_vcpu_events events;
uint32_t linestate_bit;
int irq_id;
+ bool inject_irq = true;
switch (irq) {
case ARM_CPU_IRQ:
@@ -705,6 +735,14 @@ static void arm_cpu_kvm_set_irq(void *opaque, int irq, int level)
irq_id = KVM_ARM_IRQ_CPU_FIQ;
linestate_bit = CPU_INTERRUPT_FIQ;
break;
+ case ARM_CPU_SERROR:
+ if (!kvm_has_vcpu_events()) {
+ return;
+ }
+
+ inject_irq = false;
+ linestate_bit = CPU_INTERRUPT_SERROR;
+ break;
default:
g_assert_not_reached();
}
@@ -714,7 +752,14 @@ static void arm_cpu_kvm_set_irq(void *opaque, int irq, int level)
} else {
env->irq_line_state &= ~linestate_bit;
}
- kvm_arm_set_irq(cs->cpu_index, KVM_ARM_IRQ_TYPE_CPU, irq_id, !!level);
+
+ if (inject_irq) {
+ kvm_arm_set_irq(cs->cpu_index, KVM_ARM_IRQ_TYPE_CPU, irq_id, !!level);
+ } else if (level) {
+ memset(&events, 0, sizeof(events));
+ events.exception.serror_pending = 1;
+ kvm_vcpu_ioctl(cs, KVM_SET_VCPU_EVENTS, &events);
+ }
#endif
}
@@ -1064,9 +1109,9 @@ static void arm_cpu_initfn(Object *obj)
/* VIRQ and VFIQ are unused with KVM but we add them to maintain
* the same interface as non-KVM CPUs.
*/
- qdev_init_gpio_in(DEVICE(cpu), arm_cpu_kvm_set_irq, 4);
+ qdev_init_gpio_in(DEVICE(cpu), arm_cpu_kvm_set_irq, ARM_CPU_NUM_IRQ);
} else {
- qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, 4);
+ qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, ARM_CPU_NUM_IRQ);
}
qdev_init_gpio_out(DEVICE(cpu), cpu->gt_timer_outputs,
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index e943ffe8a9..23e9f7ee2d 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -49,6 +49,7 @@
#define EXCP_LAZYFP 20 /* v7M fault during lazy FP stacking */
#define EXCP_LSERR 21 /* v8M LSERR SecureFault */
#define EXCP_UNALIGNED 22 /* v7M UNALIGNED UsageFault */
+#define EXCP_SERROR 23 /* SError Interrupt */
/* NB: add new EXCP_ defines to the array in arm_log_exception() too */
#define ARMV7M_EXCP_RESET 1
@@ -79,9 +80,10 @@ enum {
};
/* ARM-specific interrupt pending bits. */
-#define CPU_INTERRUPT_FIQ CPU_INTERRUPT_TGT_EXT_1
-#define CPU_INTERRUPT_VIRQ CPU_INTERRUPT_TGT_EXT_2
-#define CPU_INTERRUPT_VFIQ CPU_INTERRUPT_TGT_EXT_3
+#define CPU_INTERRUPT_FIQ CPU_INTERRUPT_TGT_EXT_1
+#define CPU_INTERRUPT_VIRQ CPU_INTERRUPT_TGT_EXT_2
+#define CPU_INTERRUPT_VFIQ CPU_INTERRUPT_TGT_EXT_3
+#define CPU_INTERRUPT_SERROR CPU_INTERRUPT_TGT_EXT_4
/* The usual mapping for an AArch64 system register to its AArch32
* counterpart is for the 32 bit world to have access to the lower
@@ -97,11 +99,13 @@ enum {
#define offsetofhigh32(S, M) (offsetof(S, M) + sizeof(uint32_t))
#endif
-/* Meanings of the ARMCPU object's four inbound GPIO lines */
-#define ARM_CPU_IRQ 0
-#define ARM_CPU_FIQ 1
-#define ARM_CPU_VIRQ 2
-#define ARM_CPU_VFIQ 3
+/* ARMCPU object's inbound GPIO lines */
+#define ARM_CPU_IRQ 0
+#define ARM_CPU_FIQ 1
+#define ARM_CPU_VIRQ 2
+#define ARM_CPU_VFIQ 3
+#define ARM_CPU_SERROR 4
+#define ARM_CPU_NUM_IRQ 5
/* ARM-specific extra insn start words:
* 1: Conditional execution bits
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 366dbcf460..3f00af4c41 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -1969,6 +1969,12 @@ static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
}
}
+ if (!allow_virt || !(hcr_el2 & HCR_AMO)) {
+ if (cs->interrupt_request & CPU_INTERRUPT_SERROR) {
+ ret |= CPSR_A;
+ }
+ }
+
/* External aborts are not possible in QEMU so A bit is always clear */
return ret;
}
@@ -8598,6 +8604,7 @@ void arm_log_exception(int idx)
[EXCP_LAZYFP] = "v7M exception during lazy FP stacking",
[EXCP_LSERR] = "v8M LSERR UsageFault",
[EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
+ [EXCP_SERROR] = "SError Interrupt",
};
if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
@@ -8923,6 +8930,7 @@ static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
addr = 0x0c;
break;
case EXCP_DATA_ABORT:
+ case EXCP_SERROR:
env->cp15.dfar_s = env->exception.vaddress;
qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
(uint32_t)env->exception.vaddress);
@@ -9051,6 +9059,7 @@ static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
offset = 4;
break;
case EXCP_DATA_ABORT:
+ case EXCP_SERROR:
A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
@@ -9213,6 +9222,9 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
case EXCP_VFIQ:
addr += 0x100;
break;
+ case EXCP_SERROR:
+ addr += 0x180;
+ break;
default:
cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
}
diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c
index 33d414a684..a7271cc386 100644
--- a/target/arm/m_helper.c
+++ b/target/arm/m_helper.c
@@ -2211,6 +2211,14 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
* v7m_preserve_fp_state() helper function.
*/
break;
+ case EXCP_SERROR:
+ env->v7m.cfsr[M_REG_NS] |=
+ (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
+ env->v7m.bfar = env->exception.vaddress;
+ qemu_log_mask(CPU_LOG_INT,
+ "...with CFSR.PRECISERR and BFAR 0x%x\n",
+ env->v7m.bfar);
+ break;
default:
cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
return; /* Never happens. Keep compiler happy. */
diff --git a/target/arm/machine.c b/target/arm/machine.c
index 241890ac8c..e2ad2f156e 100644
--- a/target/arm/machine.c
+++ b/target/arm/machine.c
@@ -714,7 +714,8 @@ static int cpu_post_load(void *opaque, int version_id)
env->irq_line_state = cs->interrupt_request &
(CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIQ |
- CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_VFIQ);
+ CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_VFIQ |
+ CPU_INTERRUPT_SERROR);
}
/* Update the values list from the incoming migration data.
--
2.23.0