[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH v2 1/2] accel/tcg/cputlb: Extract load_helper_unaligned() from lo
From: Philippe Mathieu-Daudé
Subject: [PATCH v2 1/2] accel/tcg/cputlb: Extract load_helper_unaligned() from load_helper()
Date: Wed, 9 Jun 2021 16:10:09 +0200
Replace a goto statement with an inlined function for easier review.
No logical change intended.
Inspired-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
---
accel/tcg/cputlb.c | 54 ++++++++++++++++++++++++++++------------------
1 file changed, 33 insertions(+), 21 deletions(-)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index f24348e9793..2b5d569412c 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1851,6 +1851,34 @@ load_memop(const void *haddr, MemOp op)
}
}
+static inline uint64_t QEMU_ALWAYS_INLINE
+load_helper_unaligned(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
+ uintptr_t retaddr, MemOp op, bool code_read,
+ FullLoadHelper *full_load)
+{
+ size_t size = memop_size(op);
+ target_ulong addr1, addr2;
+ uint64_t res;
+ uint64_t r1, r2;
+ unsigned shift;
+
+ addr1 = addr & ~((target_ulong)size - 1);
+ addr2 = addr1 + size;
+ r1 = full_load(env, addr1, oi, retaddr);
+ r2 = full_load(env, addr2, oi, retaddr);
+ shift = (addr & (size - 1)) * 8;
+
+ if (memop_big_endian(op)) {
+ /* Big-endian combine. */
+ res = (r1 << shift) | (r2 >> ((size * 8) - shift));
+ } else {
+ /* Little-endian combine. */
+ res = (r1 >> shift) | (r2 << ((size * 8) - shift));
+ }
+
+ return res & MAKE_64BIT_MASK(0, size * 8);
+}
+
static inline uint64_t QEMU_ALWAYS_INLINE
load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
uintptr_t retaddr, MemOp op, bool code_read,
@@ -1866,7 +1894,6 @@ load_helper(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi,
code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
unsigned a_bits = get_alignment_bits(get_memop(oi));
void *haddr;
- uint64_t res;
size_t size = memop_size(op);
/* Handle CPU specific unaligned behaviour */
@@ -1893,9 +1920,10 @@ load_helper(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi,
CPUIOTLBEntry *iotlbentry;
bool need_swap;
- /* For anything that is unaligned, recurse through full_load. */
+    /* For anything that is unaligned, recurse through load_helper_unaligned. */
if ((addr & (size - 1)) != 0) {
- goto do_unaligned_access;
+ return load_helper_unaligned(env, addr, oi, retaddr, op,
+ code_read, full_load);
}
iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
@@ -1932,24 +1960,8 @@ load_helper(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi,
if (size > 1
&& unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
>= TARGET_PAGE_SIZE)) {
- target_ulong addr1, addr2;
- uint64_t r1, r2;
- unsigned shift;
- do_unaligned_access:
- addr1 = addr & ~((target_ulong)size - 1);
- addr2 = addr1 + size;
- r1 = full_load(env, addr1, oi, retaddr);
- r2 = full_load(env, addr2, oi, retaddr);
- shift = (addr & (size - 1)) * 8;
-
- if (memop_big_endian(op)) {
- /* Big-endian combine. */
- res = (r1 << shift) | (r2 >> ((size * 8) - shift));
- } else {
- /* Little-endian combine. */
- res = (r1 >> shift) | (r2 << ((size * 8) - shift));
- }
- return res & MAKE_64BIT_MASK(0, size * 8);
+ return load_helper_unaligned(env, addr, oi, retaddr, op,
+ code_read, full_load);
}
haddr = (void *)((uintptr_t)addr + entry->addend);
--
2.31.1