[PATCH v2 7/9] accel/tcg: Add TLB_CHECK_ALIGNED
From: Richard Henderson
Subject: [PATCH v2 7/9] accel/tcg: Add TLB_CHECK_ALIGNED
Date: Wed, 21 Jun 2023 14:19:00 +0200
This creates a per-page method for checking alignment.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/exec/cpu-all.h | 4 +++-
accel/tcg/cputlb.c | 27 ++++++++++++++++++++++++++-
2 files changed, 29 insertions(+), 2 deletions(-)
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index 8018ce783e..e61100fc80 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -346,8 +346,10 @@ CPUArchState *cpu_copy(CPUArchState *env);
#define TLB_BSWAP (1 << 0)
/* Set if TLB entry contains a watchpoint. */
#define TLB_WATCHPOINT (1 << 1)
+/* Set if TLB entry requires aligned accesses. */
+#define TLB_CHECK_ALIGNED (1 << 2)
-#define TLB_SLOW_FLAGS_MASK (TLB_BSWAP | TLB_WATCHPOINT)
+#define TLB_SLOW_FLAGS_MASK (TLB_BSWAP | TLB_WATCHPOINT | TLB_CHECK_ALIGNED)
/* The two sets of flags must not overlap. */
QEMU_BUILD_BUG_ON(TLB_FLAGS_MASK & TLB_SLOW_FLAGS_MASK);
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 61f4d94a4d..cb7b4b01e9 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1553,7 +1553,7 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
flags |= full->slow_flags[access_type];
/* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */
- if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) {
+ if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY | TLB_CHECK_ALIGNED))) {
*phost = NULL;
return TLB_MMIO;
}
@@ -1909,6 +1909,31 @@ static bool mmu_lookup(CPUArchState *env, target_ulong addr, MemOpIdx oi,
tcg_debug_assert((flags & TLB_BSWAP) == 0);
}
+ /*
+ * This alignment check differs from the one above, in that this is
+ * based on the atomicity of the operation. The intended use case is
+ * the ARM memory type field of each PTE, where access to pages with
+ * Device memory type requires alignment.
+ */
+ if (unlikely(flags & TLB_CHECK_ALIGNED)) {
+ MemOp size = l->memop & MO_SIZE;
+
+ switch (l->memop & MO_ATOM_MASK) {
+ case MO_ATOM_NONE:
+ size = MO_8;
+ break;
+ case MO_ATOM_IFALIGN_PAIR:
+ case MO_ATOM_WITHIN16_PAIR:
+ size = size ? size - 1 : 0;
+ break;
+ default:
+ break;
+ }
+ if (addr & ((1 << size) - 1)) {
+ cpu_unaligned_access(env_cpu(env), addr, type, l->mmu_idx, ra);
+ }
+ }
+
return crosspage;
}
--
2.34.1
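
For readers following the series: the new bit only has an effect once a target sets it on a page, presumably through the tlb_fill_flags field that patch 6 adds to CPUTLBEntryFull, with the Arm memory-type wiring following in patches 8 and 9. Below is a minimal, self-contained sketch of that usage; the struct is a mock standing in for CPUTLBEntryFull and page_is_device() is hypothetical, with only the flag value taken from this patch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TLB_CHECK_ALIGNED (1 << 2)      /* bit value from this patch */

typedef struct {                        /* mock of CPUTLBEntryFull, for illustration only */
    uint8_t tlb_fill_flags;
} MockTLBEntryFull;

/* page_is_device is hypothetical; a real target would derive it from the PTE. */
static void fill_page_flags(MockTLBEntryFull *full, bool page_is_device)
{
    if (page_is_device) {
        /* Request that every access to this page be alignment-checked. */
        full->tlb_fill_flags |= TLB_CHECK_ALIGNED;
    }
}

int main(void)
{
    MockTLBEntryFull full = { 0 };
    fill_page_flags(&full, true);
    printf("tlb_fill_flags = %#x\n", (unsigned)full.tlb_fill_flags);   /* prints 0x4 */
    return 0;
}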
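
The switch added to mmu_lookup scales the checked alignment with the atomicity of the access: an operation that may legitimately be split into two halves (MO_ATOM_IFALIGN_PAIR, MO_ATOM_WITHIN16_PAIR) only needs half-size alignment, and one with no atomicity requirement (MO_ATOM_NONE) needs nothing beyond byte access. A standalone sketch of that computation, using simplified stand-ins for the MO_SIZE and MO_ATOM_* encodings rather than the real QEMU headers:

#include <stdio.h>

/* Simplified stand-ins for QEMU's MemOp size and atomicity fields. */
enum { MO_8 = 0, MO_16 = 1, MO_32 = 2, MO_64 = 3, MO_128 = 4 };
enum { ATOM_IFALIGN, ATOM_IFALIGN_PAIR, ATOM_WITHIN16, ATOM_WITHIN16_PAIR, ATOM_NONE };

/* Return the log2 alignment enforced by the TLB_CHECK_ALIGNED path. */
static unsigned checked_align_log2(unsigned size_log2, unsigned atom)
{
    switch (atom) {
    case ATOM_NONE:
        /* No atomicity requirement: treat as byte-sized, nothing can be misaligned. */
        return MO_8;
    case ATOM_IFALIGN_PAIR:
    case ATOM_WITHIN16_PAIR:
        /* The access may be split into two halves: half-size alignment. */
        return size_log2 ? size_log2 - 1 : 0;
    default:
        /* Fully atomic: natural alignment of the full access size. */
        return size_log2;
    }
}

int main(void)
{
    /* 16-byte pair-atomic access: only 8-byte alignment is enforced. */
    printf("%u\n", 1u << checked_align_log2(MO_128, ATOM_IFALIGN_PAIR)); /* 8 */
    /* 4-byte fully-atomic access: 4-byte alignment. */
    printf("%u\n", 1u << checked_align_log2(MO_32, ATOM_IFALIGN));       /* 4 */
    /* No atomicity requirement: byte alignment only. */
    printf("%u\n", 1u << checked_align_log2(MO_64, ATOM_NONE));          /* 1 */
    return 0;
}

Compiled and run, this prints 8, 4 and 1, i.e. the alignment in bytes that the mask test addr & ((1 << size) - 1) in the hunk above ends up enforcing.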
- [PATCH v2 0/9] {tcg,aarch64}: Add TLB_CHECK_ALIGNED, Richard Henderson, 2023/06/21
- [PATCH v2 1/9] accel/tcg: Store some tlb flags in CPUTLBEntryFull, Richard Henderson, 2023/06/21
- [PATCH v2 2/9] accel/tcg: Move TLB_WATCHPOINT to TLB_SLOW_FLAGS_MASK, Richard Henderson, 2023/06/21
- [PATCH v2 3/9] accel/tcg: Renumber TLB_DISCARD_WRITE, Richard Henderson, 2023/06/21
- [PATCH v2 4/9] target/arm: Support 32-byte alignment in pow2_align, Richard Henderson, 2023/06/21
- [PATCH v2 5/9] exec/memattrs: Remove target_tlb_bit*, Richard Henderson, 2023/06/21
- [PATCH v2 6/9] accel/tcg: Add tlb_fill_flags to CPUTLBEntryFull, Richard Henderson, 2023/06/21
- [PATCH v2 7/9] accel/tcg: Add TLB_CHECK_ALIGNED, Richard Henderson, 2023/06/21 (this message)
- [PATCH v2 8/9] target/arm: Do memory type alignment check when translation disabled, Richard Henderson, 2023/06/21
- [PATCH v2 9/9] target/arm: Do memory type alignment check when translation enabled, Richard Henderson, 2023/06/21