[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH v2 66/66] target/arm: Implement FEAT_HAFDBS
From: Richard Henderson
Subject: [PATCH v2 66/66] target/arm: Implement FEAT_HAFDBS
Date: Mon, 22 Aug 2022 08:27:41 -0700
Perform the atomic update for hardware management of the access flag
and the dirty bit.
A limitation of the implementation so far is that the page table
itself must already be writable — that is, the dirty bit for the
stage-2 page table must already be set — so we cannot set both
dirty bits at the same time.
This is allowed because it is CONSTRAINED UNPREDICTABLE whether any
atomic update happens at all. The implementation is allowed to simply
fall back on software update at any time.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
docs/system/arm/emulation.rst | 1 +
target/arm/cpu64.c | 1 +
target/arm/ptw.c | 115 ++++++++++++++++++++++++++++++++--
3 files changed, 113 insertions(+), 4 deletions(-)
diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst
index 8e494c8bea..3eee95c39b 100644
--- a/docs/system/arm/emulation.rst
+++ b/docs/system/arm/emulation.rst
@@ -30,6 +30,7 @@ the following architecture extensions:
- FEAT_FRINTTS (Floating-point to integer instructions)
- FEAT_FlagM (Flag manipulation instructions v2)
- FEAT_FlagM2 (Enhancements to flag manipulation instructions)
+- FEAT_HAFDBS (Hardware management of the access flag and dirty bit state)
- FEAT_HCX (Support for the HCRX_EL2 register)
- FEAT_HPDS (Hierarchical permission disables)
- FEAT_I8MM (AArch64 Int8 matrix multiplication instructions)
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index 78e27f778a..98771918c2 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -1037,6 +1037,7 @@ static void aarch64_max_initfn(Object *obj)
cpu->isar.id_aa64mmfr0 = t;
t = cpu->isar.id_aa64mmfr1;
+ t = FIELD_DP64(t, ID_AA64MMFR1, HAFDBS, 2); /* FEAT_HAFDBS */
t = FIELD_DP64(t, ID_AA64MMFR1, VMIDBITS, 2); /* FEAT_VMID16 */
t = FIELD_DP64(t, ID_AA64MMFR1, VH, 1); /* FEAT_VHE */
t = FIELD_DP64(t, ID_AA64MMFR1, HPDS, 1); /* FEAT_HPDS */
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index c38c7d2a65..c81c51f60c 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -193,6 +193,7 @@ static bool regime_translation_disabled(CPUARMState *env,
ARMMMUIdx mmu_idx,
typedef struct {
bool is_secure;
bool be;
+ bool rw;
void *hphys;
hwaddr gphys;
} S1TranslateResult;
@@ -221,6 +222,8 @@ static bool S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
return false;
}
+ res->rw = full->prot & PAGE_WRITE;
+
if (s2_mmu_idx == ARMMMUIdx_Stage2 || s2_mmu_idx == ARMMMUIdx_Stage2_S) {
uint64_t hcr = arm_hcr_el2_eff_secstate(env, is_secure);
uint8_t s2attrs = full->pte_attrs;
@@ -333,6 +336,56 @@ static uint64_t arm_ldq_ptw(CPUARMState *env, const S1TranslateResult *s1,
return data;
}
+static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val,
+ uint64_t new_val, const S1TranslateResult *s1,
+ ARMMMUFaultInfo *fi)
+{
+ uint64_t cur_val;
+
+ if (unlikely(!s1->hphys)) {
+ fi->type = ARMFault_UnsuppAtomicUpdate;
+ fi->s1ptw = true;
+ return 0;
+ }
+
+#ifndef CONFIG_ATOMIC64
+ /*
+ * We can't support the atomic operation on the host. We should be
+ * running in round-robin mode though, which means that we would only
+ * race with dma i/o.
+ */
+ qemu_mutex_lock_iothread();
+ if (s1->be) {
+ cur_val = ldq_be_p(s1->hphys);
+ if (cur_val == old_val) {
+ stq_be_p(s1->hphys, new_val);
+ }
+ } else {
+ cur_val = ldq_le_p(s1->hphys);
+ if (cur_val == old_val) {
+ stq_le_p(s1->hphys, new_val);
+ }
+ }
+ qemu_mutex_unlock_iothread();
+#else
+ if (s1->be) {
+ old_val = cpu_to_be64(old_val);
+ new_val = cpu_to_be64(new_val);
+ cur_val = qatomic_cmpxchg__nocheck((uint64_t *)s1->hphys,
+ old_val, new_val);
+ cur_val = be64_to_cpu(cur_val);
+ } else {
+ old_val = cpu_to_le64(old_val);
+ new_val = cpu_to_le64(new_val);
+ cur_val = qatomic_cmpxchg__nocheck((uint64_t *)s1->hphys,
+ old_val, new_val);
+ cur_val = le64_to_cpu(cur_val);
+ }
+#endif
+
+ return cur_val;
+}
+
static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
uint32_t *table, uint32_t address)
{
@@ -1240,6 +1293,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
goto do_fault;
}
+ restart_atomic_update:
if (!(descriptor & 1) || (!(descriptor & 2) && (level == 3))) {
/* Invalid, or the Reserved level 3 encoding */
goto do_translation_fault;
@@ -1317,8 +1371,26 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
*/
if ((attrs & (1 << 10)) == 0) {
/* Access flag */
- fi->type = ARMFault_AccessFlag;
- goto do_fault;
+ uint64_t new_des, old_des;
+
+ /*
+ * If HA is disabled, or if the pte is not writable,
+ * pass on the access fault to software.
+ */
+ if (!param.ha || !s1.rw) {
+ fi->type = ARMFault_AccessFlag;
+ goto do_fault;
+ }
+
+ old_des = descriptor;
+ new_des = descriptor | (1 << 10); /* AF */
+ descriptor = arm_casq_ptw(env, old_des, new_des, &s1, fi);
+ if (fi->type != ARMFault_None) {
+ goto do_fault;
+ }
+ if (old_des != descriptor) {
+ goto restart_atomic_update;
+ }
}
ap = extract32(attrs, 6, 2);
@@ -1335,8 +1407,43 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
}
if (!(result->f.prot & (1 << access_type))) {
- fi->type = ARMFault_Permission;
- goto do_fault;
+ uint64_t new_des, old_des;
+
+ /* Writes may set dirty if DBM attribute is set. */
+ if (!param.hd
+ || access_type != MMU_DATA_STORE
+ || !extract64(attrs, 51, 1) /* DBM */
+ || !s1.rw) {
+ fi->type = ARMFault_Permission;
+ goto do_fault;
+ }
+
+ old_des = descriptor;
+ if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
+ new_des = descriptor | (1ull << 7); /* S2AP[1] */
+ } else {
+ new_des = descriptor & ~(1ull << 7); /* AP[2] */
+ }
+
+ /*
+ * If the descriptor didn't change, then attributes weren't the
+ * reason for the permission fault, so deliver it.
+ */
+ if (old_des == new_des) {
+ fi->type = ARMFault_Permission;
+ goto do_fault;
+ }
+
+ descriptor = arm_casq_ptw(env, old_des, new_des, &s1, fi);
+ if (fi->type != ARMFault_None) {
+ goto do_fault;
+ }
+ if (old_des != descriptor) {
+ goto restart_atomic_update;
+ }
+
+ /* Success: the page is now writable. */
+ result->f.prot |= 1 << MMU_DATA_STORE;
}
if (ns) {
--
2.34.1
- [PATCH v2 61/66] target/arm: Add ARMFault_UnsuppAtomicUpdate, (continued)
- [PATCH v2 61/66] target/arm: Add ARMFault_UnsuppAtomicUpdate, Richard Henderson, 2022/08/22
- [PATCH v2 45/66] target/arm: Use probe_access_full for MTE, Richard Henderson, 2022/08/22
- [PATCH v2 60/66] target/arm: Move S1_ptw_translate outside arm_ld[lq]_ptw, Richard Henderson, 2022/08/22
- [PATCH v2 53/66] target/arm: Use bool consistently for get_phys_addr subroutines, Richard Henderson, 2022/08/22
- [PATCH v2 56/66] target/arm: Add isar predicates for FEAT_HAFDBS, Richard Henderson, 2022/08/22
- [PATCH v2 62/66] target/arm: Remove loop from get_phys_addr_lpae, Richard Henderson, 2022/08/22
- [PATCH v2 54/66] target/arm: Only use ARMMMUIdx_Stage1* for two-stage translation, Richard Henderson, 2022/08/22
- [PATCH v2 57/66] target/arm: Extract HA and HD in aa64_va_parameters, Richard Henderson, 2022/08/22
- [PATCH v2 58/66] target/arm: Split out S1TranslateResult type, Richard Henderson, 2022/08/22
- [PATCH v2 65/66] target/arm: Consider GP an attribute in get_phys_addr_lpae, Richard Henderson, 2022/08/22
- [PATCH v2 66/66] target/arm: Implement FEAT_HAFDBS, Richard Henderson <=
- [PATCH v2 63/66] target/arm: Fix fault reporting in get_phys_addr_lpae, Richard Henderson, 2022/08/22
- [PATCH v2 64/66] target/arm: Don't shift attrs in get_phys_addr_lpae, Richard Henderson, 2022/08/22