From: Alvise Rigo
Subject: [Qemu-devel] [RFC v6 12/14] softmmu: Simplify helper_*_st_name, wrap RAM code
Date: Mon, 14 Dec 2015 09:41:36 +0100
In an attempt to simplify the helper_*_st_name helpers, wrap the code
that handles a RAM access in an inline function.
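For illustration, the resulting shape is roughly the following; all names
here are made-up stand-ins with plain C types, not the actual
softmmu_template.h macro expansion:

    /* Illustrative sketch only, not part of the patch: the same
     * extract-the-fast-path pattern applied to a toy store helper. */
    #include <stdint.h>

    /* The RAM fast path, factored out into its own inline function so
     * it can be reused without duplicating the helper body. */
    static inline void toy_st_do_ram_access(uint8_t *ram, uintptr_t haddr,
                                            uint32_t val)
    {
        ram[haddr] = (uint8_t)val;   /* the actual host store */
    }

    void toy_st(uint8_t *ram, uintptr_t addr, uint32_t val)
    {
        /* ... TLB lookup and MMIO dispatch would happen here ... */
        toy_st_do_ram_access(ram, addr, val);
    }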
Suggested-by: Jani Kokkonen <address@hidden>
Suggested-by: Claudio Fontana <address@hidden>
Signed-off-by: Alvise Rigo <address@hidden>
---
softmmu_template.h | 110 +++++++++++++++++++++++++++++++++--------------------
1 file changed, 68 insertions(+), 42 deletions(-)
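For context, softmmu_template.h is included once per access size, and
glue() is QEMU's token-pasting macro, so each glue(helper_le_st_name,
_do_ram_access) below defines a distinct function per size. A sketch of
the mechanism; the expanded name is shown for illustration only and
assumes a 32-bit access (SUFFIX == l) with MMUSUFFIX == _mmu:

    /* glue() is a two-level token-pasting macro: */
    #define xglue(x, y) x ## y
    #define glue(x, y)  xglue(x, y)

    /*
     * helper_le_st_name is itself a macro built with glue(), so for a
     * 32-bit access the new inline function's name expands roughly as:
     *
     *   glue(helper_le_st_name, _do_ram_access)
     *     -> helper_le_stl_mmu_do_ram_access
     */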
diff --git a/softmmu_template.h b/softmmu_template.h
index 2ebf527..262c95f 100644
--- a/softmmu_template.h
+++ b/softmmu_template.h
@@ -416,13 +416,46 @@ static inline void glue(helper_le_st_name, _do_mmio_access)(CPUArchState *env,
glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr);
}
+static inline void glue(helper_le_st_name, _do_ram_access)(CPUArchState *env,
+ DATA_TYPE val,
+ target_ulong addr,
+ TCGMemOpIdx oi,
+ unsigned mmu_idx,
+ int index,
+ uintptr_t retaddr)
+{
+ uintptr_t haddr;
+
+ /* Handle slow unaligned access (it spans two pages or IO). */
+ if (DATA_SIZE > 1
+ && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
+ >= TARGET_PAGE_SIZE)) {
+ glue(helper_le_st_name, _do_unl_access)(env, val, addr, oi, mmu_idx,
+ retaddr);
+ return;
+ }
+
+ /* Handle aligned access or unaligned access in the same page. */
+ if ((addr & (DATA_SIZE - 1)) != 0
+ && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+ cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+ mmu_idx, retaddr);
+ }
+
+ haddr = addr + env->tlb_table[mmu_idx][index].addend;
+#if DATA_SIZE == 1
+ glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
+#else
+ glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
+#endif
+}
+
void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
TCGMemOpIdx oi, uintptr_t retaddr)
{
unsigned mmu_idx = get_mmuidx(oi);
int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
- uintptr_t haddr;
/* Adjust the given return address. */
retaddr -= GETPC_ADJ;
@@ -484,28 +517,8 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
}
}
- /* Handle slow unaligned access (it spans two pages or IO). */
- if (DATA_SIZE > 1
- && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
- >= TARGET_PAGE_SIZE)) {
- glue(helper_le_st_name, _do_unl_access)(env, val, addr, oi, mmu_idx,
- retaddr);
- return;
- }
-
- /* Handle aligned access or unaligned access in the same page. */
- if ((addr & (DATA_SIZE - 1)) != 0
- && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
- mmu_idx, retaddr);
- }
-
- haddr = addr + env->tlb_table[mmu_idx][index].addend;
-#if DATA_SIZE == 1
- glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
-#else
- glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
-#endif
+ glue(helper_le_st_name, _do_ram_access)(env, val, addr, oi, mmu_idx, index,
+ retaddr);
}
#if DATA_SIZE > 1
@@ -555,13 +568,42 @@ static inline void glue(helper_be_st_name, _do_mmio_access)(CPUArchState *env,
glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr);
}
+static inline void glue(helper_be_st_name, _do_ram_access)(CPUArchState *env,
+ DATA_TYPE val,
+ target_ulong addr,
+ TCGMemOpIdx oi,
+ unsigned mmu_idx,
+ int index,
+ uintptr_t retaddr)
+{
+ uintptr_t haddr;
+
+ /* Handle slow unaligned access (it spans two pages or IO). */
+ if (DATA_SIZE > 1
+ && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
+ >= TARGET_PAGE_SIZE)) {
+ glue(helper_be_st_name, _do_unl_access)(env, val, addr, oi, mmu_idx,
+ retaddr);
+ return;
+ }
+
+ /* Handle aligned access or unaligned access in the same page. */
+ if ((addr & (DATA_SIZE - 1)) != 0
+ && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+ cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+ mmu_idx, retaddr);
+ }
+
+ haddr = addr + env->tlb_table[mmu_idx][index].addend;
+ glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
+}
+
void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
TCGMemOpIdx oi, uintptr_t retaddr)
{
unsigned mmu_idx = get_mmuidx(oi);
int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
- uintptr_t haddr;
/* Adjust the given return address. */
retaddr -= GETPC_ADJ;
@@ -623,24 +665,8 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
}
}
- /* Handle slow unaligned access (it spans two pages or IO). */
- if (DATA_SIZE > 1
- && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
- >= TARGET_PAGE_SIZE)) {
- glue(helper_be_st_name, _do_unl_access)(env, val, addr, oi, mmu_idx,
- retaddr);
- return;
- }
-
- /* Handle aligned access or unaligned access in the same page. */
- if ((addr & (DATA_SIZE - 1)) != 0
- && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
- mmu_idx, retaddr);
- }
-
- haddr = addr + env->tlb_table[mmu_idx][index].addend;
- glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
+ glue(helper_be_st_name, _do_ram_access)(env, val, addr, oi, mmu_idx, index,
+ retaddr);
}
#endif /* DATA_SIZE > 1 */
--
2.6.4
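As a worked illustration of the cross-page test that moves into the new
inline functions: (addr & ~TARGET_PAGE_MASK) is the offset of the access
within its page, adding DATA_SIZE - 1 gives the offset of its last byte,
and the slow path triggers exactly when that last byte spills onto the
next page. A standalone sketch, assuming a hypothetical 4 KiB page size:

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096ull
    #define PAGE_MASK (~(PAGE_SIZE - 1))    /* ...fffffffffffff000 */

    /* Mirrors the guard in the patch: true when an access of the given
     * size starting at addr does not fit within a single page. */
    static int spans_two_pages(uint64_t addr, unsigned size)
    {
        return (addr & ~PAGE_MASK) + size - 1 >= PAGE_SIZE;
    }

    int main(void)
    {
        assert(!spans_two_pages(0x1000, 4)); /* bytes 0x1000..0x1003 */
        assert(!spans_two_pages(0x1ffc, 4)); /* last byte 0x1fff: fits */
        assert(spans_two_pages(0x1ffe, 4));  /* 0x1ffe..0x2001: split */
        return 0;
    }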