[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH 02/15] accel/tcg: Extract load_helper_unaligned from load_helper
From: Richard Henderson
Subject: [PATCH 02/15] accel/tcg: Extract load_helper_unaligned from load_helper
Date: Sat, 19 Jun 2021 10:26:13 -0700
From: Philippe Mathieu-Daudé <f4bug@amsat.org>
Replace a goto statement by an inlined function for easier review.
No logical change intended.
Inspired-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-Id: <20210609141010.1066750-2-f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/cputlb.c | 52 ++++++++++++++++++++++++++++------------------
1 file changed, 32 insertions(+), 20 deletions(-)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index f24348e979..a94de90099 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1851,6 +1851,34 @@ load_memop(const void *haddr, MemOp op)
}
}
+static inline uint64_t QEMU_ALWAYS_INLINE
+load_helper_unaligned(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
+ uintptr_t retaddr, MemOp op, bool code_read,
+ FullLoadHelper *full_load)
+{
+ size_t size = memop_size(op);
+ target_ulong addr1, addr2;
+ uint64_t res;
+ uint64_t r1, r2;
+ unsigned shift;
+
+ addr1 = addr & ~((target_ulong)size - 1);
+ addr2 = addr1 + size;
+ r1 = full_load(env, addr1, oi, retaddr);
+ r2 = full_load(env, addr2, oi, retaddr);
+ shift = (addr & (size - 1)) * 8;
+
+ if (memop_big_endian(op)) {
+ /* Big-endian combine. */
+ res = (r1 << shift) | (r2 >> ((size * 8) - shift));
+ } else {
+ /* Little-endian combine. */
+ res = (r1 >> shift) | (r2 << ((size * 8) - shift));
+ }
+
+ return res & MAKE_64BIT_MASK(0, size * 8);
+}
+
static inline uint64_t QEMU_ALWAYS_INLINE
load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
uintptr_t retaddr, MemOp op, bool code_read,
@@ -1866,7 +1894,6 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
unsigned a_bits = get_alignment_bits(get_memop(oi));
void *haddr;
- uint64_t res;
size_t size = memop_size(op);
/* Handle CPU specific unaligned behaviour */
@@ -1895,7 +1922,8 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
/* For anything that is unaligned, recurse through full_load. */
if ((addr & (size - 1)) != 0) {
- goto do_unaligned_access;
+ return load_helper_unaligned(env, addr, oi, retaddr, op,
+ code_read, full_load);
}
iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
@@ -1932,24 +1960,8 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
if (size > 1
&& unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
>= TARGET_PAGE_SIZE)) {
- target_ulong addr1, addr2;
- uint64_t r1, r2;
- unsigned shift;
- do_unaligned_access:
- addr1 = addr & ~((target_ulong)size - 1);
- addr2 = addr1 + size;
- r1 = full_load(env, addr1, oi, retaddr);
- r2 = full_load(env, addr2, oi, retaddr);
- shift = (addr & (size - 1)) * 8;
-
- if (memop_big_endian(op)) {
- /* Big-endian combine. */
- res = (r1 << shift) | (r2 >> ((size * 8) - shift));
- } else {
- /* Little-endian combine. */
- res = (r1 >> shift) | (r2 << ((size * 8) - shift));
- }
- return res & MAKE_64BIT_MASK(0, size * 8);
+ return load_helper_unaligned(env, addr, oi, retaddr, op,
+ code_read, full_load);
}
haddr = (void *)((uintptr_t)addr + entry->addend);
--
2.25.1
- [PATCH 00/15] accel/tcg: Fix for #360 and other i/o alignment issues, Richard Henderson, 2021/06/19
- [PATCH 01/15] NOTFORMERGE q800: test case for do_unaligned_access issue, Richard Henderson, 2021/06/19
- [PATCH 04/15] accel/tcg: Don't test for watchpoints for code read, Richard Henderson, 2021/06/19
- [PATCH 05/15] accel/tcg: Handle page span access before i/o access, Richard Henderson, 2021/06/19
- [PATCH 03/15] accel/tcg: Use byte ops for unaligned loads, Richard Henderson, 2021/06/19
- [PATCH 06/15] softmmu/memory: Inline memory_region_dispatch_read1, Richard Henderson, 2021/06/19
- [PATCH 02/15] accel/tcg: Extract load_helper_unaligned from load_helper, Richard Henderson <=
- [PATCH 08/15] hw/net/e1000e: Fix size of io operations, Richard Henderson, 2021/06/19
- [PATCH 09/15] hw/net/e1000e: Fix impl.min_access_size, Richard Henderson, 2021/06/19
- [PATCH 07/15] softmmu/memory: Simplify access_with_adjusted_size interface, Richard Henderson, 2021/06/19
- [PATCH 11/15] hw/scsi/megasas: Fix megasas_mmio_ops sizes, Richard Henderson, 2021/06/19
- [PATCH 10/15] hw/pci-host/q35: Improve blackhole_ops, Richard Henderson, 2021/06/19
- [PATCH 13/15] softmmu/memory: Disallow short writes, Richard Henderson, 2021/06/19
- [PATCH 12/15] hw/scsi/megasas: Improve megasas_queue_ops min_access_size, Richard Henderson, 2021/06/19