[PATCH v2 45/66] target/arm: Use probe_access_full for MTE
From: Richard Henderson
Subject: [PATCH v2 45/66] target/arm: Use probe_access_full for MTE
Date: Mon, 22 Aug 2022 08:27:20 -0700
The CPUTLBEntryFull structure now stores the original pte attributes, as
well as the physical address. Therefore, we no longer need a separate
bit in MemTxAttrs, nor do we need to walk the tree of memory regions.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/arm/cpu.h | 1 -
target/arm/sve_ldst_internal.h | 1 +
target/arm/mte_helper.c | 61 +++++++++-------------------------
target/arm/sve_helper.c | 54 ++++++++++--------------------
target/arm/tlb_helper.c | 4 ---
5 files changed, 35 insertions(+), 86 deletions(-)
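Before the diff itself: the heart of the change in allocation_tag_mem() is that a single probe_access_full() call now returns the CPUTLBEntryFull entry alongside the host pointer, so both the Tagged check and the physical address come straight from the TLB entry. A condensed sketch of that path, using the types and calls from the hunks below; the helper name and out-parameters here are illustrative, not part of the patch, and QEMU-internal declarations are assumed:

/* Returns true if @ptr's page has MemAttr Tagged (MAIR attr 0xf0). */
static bool probe_tagged_page_sketch(CPUARMState *env, uint64_t ptr,
                                     MMUAccessType ptr_access,
                                     int ptr_mmu_idx, uintptr_t ra,
                                     hwaddr *ptr_paddr, MemTxAttrs *attrs)
{
    CPUTLBEntryFull *full;
    void *host;
    int flags;

    /* One probe yields flags, the host pointer, and the full TLB entry. */
    flags = probe_access_full(env, ptr, ptr_access, ptr_mmu_idx,
                              ra == 0, &host, &full, ra);
    assert(!(flags & TLB_INVALID_MASK));

    /* MAIR attribute 0xf0 is Tagged Normal memory. */
    if (full->pte_attrs != 0xf0) {
        return false;
    }

    /*
     * Physical address and attrs come straight from the entry; the old
     * memory_region_from_host() container walk to recover the paddr is gone.
     */
    *ptr_paddr = full->phys_addr;
    *attrs = full->attrs;
    return true;
}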
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index a08e546de4..8230a0b141 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -3388,7 +3388,6 @@ static inline MemTxAttrs *typecheck_memtxattrs(MemTxAttrs *x)
* generic target bits directly.
*/
#define arm_tlb_bti_gp(x) (typecheck_memtxattrs(x)->target_tlb_bit0)
-#define arm_tlb_mte_tagged(x) (typecheck_memtxattrs(x)->target_tlb_bit1)
/*
* AArch64 usage of the PAGE_TARGET_* bits for linux-user.
diff --git a/target/arm/sve_ldst_internal.h b/target/arm/sve_ldst_internal.h
index b5c473fc48..4f159ec4ad 100644
--- a/target/arm/sve_ldst_internal.h
+++ b/target/arm/sve_ldst_internal.h
@@ -134,6 +134,7 @@ typedef struct {
void *host;
int flags;
MemTxAttrs attrs;
+ bool tagged;
} SVEHostPage;
bool sve_probe_page(SVEHostPage *info, bool nofault, CPUARMState *env,
diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
index fdd23ab3f8..a81c4a3318 100644
--- a/target/arm/mte_helper.c
+++ b/target/arm/mte_helper.c
@@ -105,10 +105,9 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
TARGET_PAGE_BITS - LOG2_TAG_GRANULE - 1);
return tags + index;
#else
- uintptr_t index;
CPUTLBEntryFull *full;
+ MemTxAttrs attrs;
int in_page, flags;
- ram_addr_t ptr_ra;
hwaddr ptr_paddr, tag_paddr, xlat;
MemoryRegion *mr;
ARMASIdx tag_asi;
@@ -124,30 +123,12 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
* valid. Indicate to probe_access_flags no-fault, then assert that
* we received a valid page.
*/
- flags = probe_access_flags(env, ptr, ptr_access, ptr_mmu_idx,
- ra == 0, &host, ra);
+ flags = probe_access_full(env, ptr, ptr_access, ptr_mmu_idx,
+ ra == 0, &host, &full, ra);
assert(!(flags & TLB_INVALID_MASK));
- /*
- * Find the CPUTLBEntryFull for ptr. This *must* be present in the TLB
- * because we just found the mapping.
- * TODO: Perhaps there should be a cputlb helper that returns a
- * matching tlb entry + iotlb entry.
- */
- index = tlb_index(env, ptr_mmu_idx, ptr);
-# ifdef CONFIG_DEBUG_TCG
- {
- CPUTLBEntry *entry = tlb_entry(env, ptr_mmu_idx, ptr);
- target_ulong comparator = (ptr_access == MMU_DATA_LOAD
- ? entry->addr_read
- : tlb_addr_write(entry));
- g_assert(tlb_hit(comparator, ptr));
- }
-# endif
- full = &env_tlb(env)->d[ptr_mmu_idx].fulltlb[index];
-
/* If the virtual page MemAttr != Tagged, access unchecked. */
- if (!arm_tlb_mte_tagged(&full->attrs)) {
+ if (full->pte_attrs != 0xf0) {
return NULL;
}
@@ -162,6 +143,13 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
return NULL;
}
+ /*
+ * Remember these values across the second lookup below,
+ * which may invalidate this pointer via tlb resize.
+ */
+ ptr_paddr = full->phys_addr;
+ attrs = full->attrs;
+
/*
* The Normal memory access can extend to the next page. E.g. a single
* 8-byte access to the last byte of a page will check only the last
@@ -170,9 +158,8 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
*/
in_page = -(ptr | TARGET_PAGE_MASK);
if (unlikely(ptr_size > in_page)) {
- void *ignore;
- flags |= probe_access_flags(env, ptr + in_page, ptr_access,
- ptr_mmu_idx, ra == 0, &ignore, ra);
+ flags |= probe_access_full(env, ptr + in_page, ptr_access,
+ ptr_mmu_idx, ra == 0, &host, &full, ra);
assert(!(flags & TLB_INVALID_MASK));
}
@@ -180,33 +167,17 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
if (unlikely(flags & TLB_WATCHPOINT)) {
int wp = ptr_access == MMU_DATA_LOAD ? BP_MEM_READ : BP_MEM_WRITE;
assert(ra != 0);
- cpu_check_watchpoint(env_cpu(env), ptr, ptr_size,
- full->attrs, wp, ra);
+ cpu_check_watchpoint(env_cpu(env), ptr, ptr_size, attrs, wp, ra);
}
- /*
- * Find the physical address within the normal mem space.
- * The memory region lookup must succeed because TLB_MMIO was
- * not set in the cputlb lookup above.
- */
- mr = memory_region_from_host(host, &ptr_ra);
- tcg_debug_assert(mr != NULL);
- tcg_debug_assert(memory_region_is_ram(mr));
- ptr_paddr = ptr_ra;
- do {
- ptr_paddr += mr->addr;
- mr = mr->container;
- } while (mr);
-
/* Convert to the physical address in tag space. */
tag_paddr = ptr_paddr >> (LOG2_TAG_GRANULE + 1);
/* Look up the address in tag space. */
- tag_asi = full->attrs.secure ? ARMASIdx_TagS : ARMASIdx_TagNS;
+ tag_asi = attrs.secure ? ARMASIdx_TagS : ARMASIdx_TagNS;
tag_as = cpu_get_address_space(env_cpu(env), tag_asi);
mr = address_space_translate(tag_as, tag_paddr, &xlat, NULL,
- tag_access == MMU_DATA_STORE,
- full->attrs);
+ tag_access == MMU_DATA_STORE, attrs);
/*
* Note that @mr will never be NULL. If there is nothing in the address
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index 9cae8fd352..3d0d2987cd 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -5351,8 +5351,19 @@ bool sve_probe_page(SVEHostPage *info, bool nofault, CPUARMState *env,
*/
addr = useronly_clean_ptr(addr);
+#ifdef CONFIG_USER_ONLY
flags = probe_access_flags(env, addr, access_type, mmu_idx, nofault,
&info->host, retaddr);
+ memset(&info->attrs, 0, sizeof(info->attrs));
+ /* Require both ANON and MTE; see allocation_tag_mem(). */
+ info->tagged = (flags & PAGE_ANON) && (flags & PAGE_MTE);
+#else
+ CPUTLBEntryFull *full;
+ flags = probe_access_full(env, addr, access_type, mmu_idx, nofault,
+ &info->host, &full, retaddr);
+ info->attrs = full->attrs;
+ info->tagged = full->pte_attrs == 0xf0;
+#endif
info->flags = flags;
if (flags & TLB_INVALID_MASK) {
@@ -5362,33 +5373,6 @@ bool sve_probe_page(SVEHostPage *info, bool nofault, CPUARMState *env,
/* Ensure that info->host[] is relative to addr, not addr + mem_off. */
info->host -= mem_off;
-
-#ifdef CONFIG_USER_ONLY
- memset(&info->attrs, 0, sizeof(info->attrs));
- /* Require both MAP_ANON and PROT_MTE -- see allocation_tag_mem. */
- arm_tlb_mte_tagged(&info->attrs) =
- (flags & PAGE_ANON) && (flags & PAGE_MTE);
-#else
- /*
- * Find the iotlbentry for addr and return the transaction attributes.
- * This *must* be present in the TLB because we just found the mapping.
- */
- {
- uintptr_t index = tlb_index(env, mmu_idx, addr);
-
-# ifdef CONFIG_DEBUG_TCG
- CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
- target_ulong comparator = (access_type == MMU_DATA_LOAD
- ? entry->addr_read
- : tlb_addr_write(entry));
- g_assert(tlb_hit(comparator, addr));
-# endif
-
- CPUTLBEntryFull *full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
- info->attrs = full->attrs;
- }
-#endif
-
return true;
}
@@ -5617,7 +5601,7 @@ void sve_cont_ldst_mte_check(SVEContLdSt *info, CPUARMState *env,
intptr_t mem_off, reg_off, reg_last;
/* Process the page only if MemAttr == Tagged. */
- if (arm_tlb_mte_tagged(&info->page[0].attrs)) {
+ if (info->page[0].tagged) {
mem_off = info->mem_off_first[0];
reg_off = info->reg_off_first[0];
reg_last = info->reg_off_split;
@@ -5638,7 +5622,7 @@ void sve_cont_ldst_mte_check(SVEContLdSt *info, CPUARMState *env,
}
mem_off = info->mem_off_first[1];
- if (mem_off >= 0 && arm_tlb_mte_tagged(&info->page[1].attrs)) {
+ if (mem_off >= 0 && info->page[1].tagged) {
reg_off = info->reg_off_first[1];
reg_last = info->reg_off_last[1];
@@ -6017,7 +6001,7 @@ void sve_ldnfff1_r(CPUARMState *env, void *vg, const target_ulong addr,
* Disable MTE checking if the Tagged bit is not set. Since TBI must
* be set within MTEDESC for MTE, !mtedesc => !mte_active.
*/
- if (!arm_tlb_mte_tagged(&info.page[0].attrs)) {
+ if (!info.page[0].tagged) {
mtedesc = 0;
}
@@ -6568,7 +6552,7 @@ void sve_ld1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
cpu_check_watchpoint(env_cpu(env), addr, msize,
info.attrs, BP_MEM_READ, retaddr);
}
- if (mtedesc && arm_tlb_mte_tagged(&info.attrs)) {
+ if (mtedesc && info.tagged) {
mte_check(env, mtedesc, addr, retaddr);
}
if (unlikely(info.flags & TLB_MMIO)) {
@@ -6585,7 +6569,7 @@ void sve_ld1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
msize, info.attrs,
BP_MEM_READ, retaddr);
}
- if (mtedesc && arm_tlb_mte_tagged(&info.attrs)) {
+ if (mtedesc && info.tagged) {
mte_check(env, mtedesc, addr, retaddr);
}
tlb_fn(env, &scratch, reg_off, addr, retaddr);
@@ -6786,9 +6770,7 @@ void sve_ldff1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
(env_cpu(env), addr, msize) & BP_MEM_READ)) {
goto fault;
}
- if (mtedesc &&
- arm_tlb_mte_tagged(&info.attrs) &&
- !mte_probe(env, mtedesc, addr)) {
+ if (mtedesc && info.tagged && !mte_probe(env, mtedesc, addr)) {
goto fault;
}
@@ -6974,7 +6956,7 @@ void sve_st1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
info.attrs, BP_MEM_WRITE, retaddr);
}
- if (mtedesc && arm_tlb_mte_tagged(&info.attrs)) {
+ if (mtedesc && info.tagged) {
mte_check(env, mtedesc, addr, retaddr);
}
}
diff --git a/target/arm/tlb_helper.c b/target/arm/tlb_helper.c
index 353edbeb1d..3462a6ea14 100644
--- a/target/arm/tlb_helper.c
+++ b/target/arm/tlb_helper.c
@@ -231,10 +231,6 @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
res.f.phys_addr &= TARGET_PAGE_MASK;
address &= TARGET_PAGE_MASK;
}
- /* Notice and record tagged memory. */
- if (cpu_isar_feature(aa64_mte, cpu) && res.cacheattrs.attrs == 0xf0) {
- arm_tlb_mte_tagged(&res.f.attrs) = true;
- }
res.f.pte_attrs = res.cacheattrs.attrs;
res.f.shareability = res.cacheattrs.shareability;
--
2.34.1
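
For reference on the tag-space arithmetic the patch keeps unchanged (tag_paddr = ptr_paddr >> (LOG2_TAG_GRANULE + 1)): MTE stores one 4-bit allocation tag per 16-byte granule, so two tags pack into each byte of tag storage and the tag-space address is the data physical address shifted right by five. A minimal standalone sketch, assuming LOG2_TAG_GRANULE == 4 as in target/arm/internals.h:

#include <assert.h>
#include <stdint.h>

/* 16-byte tag granule; value assumed from target/arm/internals.h. */
#define LOG2_TAG_GRANULE 4

/* Two 4-bit tags per byte of tag storage, hence the extra +1 shift. */
static uint64_t tag_space_addr(uint64_t ptr_paddr)
{
    return ptr_paddr >> (LOG2_TAG_GRANULE + 1);
}

int main(void)
{
    /* Granules 0 and 1 (data bytes 0..31) share tag byte 0. */
    assert(tag_space_addr(0x00) == 0);
    assert(tag_space_addr(0x1f) == 0);
    /* Granule 2 starts the next tag byte. */
    assert(tag_space_addr(0x20) == 1);
    return 0;
}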