Re: [PATCH v3 10/20] cputlb: Introduce TLB_BSWAP
From: David Hildenbrand
Subject: Re: [PATCH v3 10/20] cputlb: Introduce TLB_BSWAP
Date: Mon, 23 Sep 2019 10:33:00 +0200
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Thunderbird/60.8.0
On 22.09.19 05:54, Richard Henderson wrote:
> Handle bswap on RAM directly in load/store_helper.  This fixes a bug
> in the previous implementation: the I/O path cannot be used for RAM.
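(For readers following the series: the key point is that a byte-swapping page now gets its own TLB flag instead of being forced through TLB_MMIO, so a RAM-backed page can still be accessed through a host pointer. A rough, self-contained sketch of the resulting dispatch -- the flag values and names below are stand-ins for illustration, not the QEMU code itself:

    #include <stdint.h>

    /* Stand-ins for the real TLB flag bits; values are illustrative only. */
    #define TLB_MMIO_FLAG   (1u << 3)
    #define TLB_BSWAP_FLAG  (1u << 4)

    typedef uint64_t LoadFn(const void *haddr);

    /* One aligned load: MMIO pages still take the I/O path, but a RAM page
     * that merely wants the opposite endianness is read directly through
     * the byte-swapped accessor instead of bouncing through I/O. */
    static uint64_t dispatch_load(uintptr_t tlb_flags, const void *haddr,
                                  LoadFn *direct, LoadFn *direct_swap)
    {
        if (tlb_flags & TLB_MMIO_FLAG) {
            return 0;                   /* the real code calls io_readx() here */
        }
        if (tlb_flags & TLB_BSWAP_FLAG) {
            return direct_swap(haddr);  /* e.g. a big-endian read for an LE access */
        }
        return direct(haddr);
    }

The store side mirrors this with a byte-swapped store callback.)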
>
> Fixes: a26fc6f5152b47f1
> Signed-off-by: Richard Henderson <address@hidden>
> ---
> include/exec/cpu-all.h | 4 +-
> accel/tcg/cputlb.c | 108 +++++++++++++++++++++--------------------
> 2 files changed, 59 insertions(+), 53 deletions(-)
>
> diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
> index 2db73c7a27..1ebd1b59ab 100644
> --- a/include/exec/cpu-all.h
> +++ b/include/exec/cpu-all.h
> @@ -346,12 +346,14 @@ CPUArchState *cpu_copy(CPUArchState *env);
> #define TLB_MMIO (1 << (TARGET_PAGE_BITS_MIN - 3))
> /* Set if TLB entry contains a watchpoint. */
> #define TLB_WATCHPOINT (1 << (TARGET_PAGE_BITS_MIN - 4))
> +/* Set if TLB entry requires byte swap. */
> +#define TLB_BSWAP (1 << (TARGET_PAGE_BITS_MIN - 5))
>
> /* Use this mask to check interception with an alignment mask
> * in a TCG backend.
> */
> #define TLB_FLAGS_MASK \
> - (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO | TLB_WATCHPOINT)
> + (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO | TLB_WATCHPOINT | TLB_BSWAP)
>
> /**
> * tlb_hit_page: return true if page aligned @addr is a hit against the
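(A note on where these flag bits live: they sit in the low bits of the TLB entry's address field, below TARGET_PAGE_BITS_MIN, so any set flag makes the fast-path page compare fail and the access drops into the slow-path helper, which then inspects the flags. A tiny sketch with assumed values -- PAGE_BITS stands in for TARGET_PAGE_BITS_MIN; this is not the actual QEMU comparison code:

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_BITS   12                                /* assumed value */
    #define PAGE_MASK   (~(((uint64_t)1 << PAGE_BITS) - 1))
    #define FLAG_BSWAP  ((uint64_t)1 << (PAGE_BITS - 5))  /* mirrors TLB_BSWAP */

    /* The fast path compares the page-aligned guest address with the cached
     * comparator; a flag bit stored below PAGE_BITS forces a mismatch, so
     * the access is routed to the slow-path helper instead. */
    static bool fast_hit(uint64_t tlb_comparator, uint64_t guest_addr)
    {
        return (guest_addr & PAGE_MASK) == tlb_comparator;
    }

    /* fast_hit(0x7f000, 0x7f123)              -> true  (plain RAM page)  */
    /* fast_hit(0x7f000 | FLAG_BSWAP, 0x7f123) -> false (slow path taken) */
)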
> diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
> index b4a63d3928..cb603917a2 100644
> --- a/accel/tcg/cputlb.c
> +++ b/accel/tcg/cputlb.c
> @@ -737,8 +737,7 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
> address |= TLB_INVALID_MASK;
> }
> if (attrs.byte_swap) {
> - /* Force the access through the I/O slow path. */
> - address |= TLB_MMIO;
> + address |= TLB_BSWAP;
> }
> if (!memory_region_is_ram(section->mr) &&
> !memory_region_is_romd(section->mr)) {
> @@ -901,10 +900,6 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
> bool locked = false;
> MemTxResult r;
>
> - if (iotlbentry->attrs.byte_swap) {
> - op ^= MO_BSWAP;
> - }
> -
> section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
> mr = section->mr;
> mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
> @@ -947,10 +942,6 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
> bool locked = false;
> MemTxResult r;
>
> - if (iotlbentry->attrs.byte_swap) {
> - op ^= MO_BSWAP;
> - }
> -
> section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
> mr = section->mr;
> mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
> @@ -1133,8 +1124,8 @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
> wp_access, retaddr);
> }
>
> - if (tlb_addr & (TLB_NOTDIRTY | TLB_MMIO)) {
> - /* I/O access */
> + /* Reject I/O access, or other required slow-path. */
> + if (tlb_addr & (TLB_NOTDIRTY | TLB_MMIO | TLB_BSWAP)) {
> return NULL;
> }
>
> @@ -1311,7 +1302,8 @@ static inline uint64_t wrap_ldul_le(const void *haddr)
> static inline uint64_t QEMU_ALWAYS_INLINE
> load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
> uintptr_t retaddr, MemOp op, bool code_read,
> - FullLoadHelper *full_load, LoadHelper *direct)
> + FullLoadHelper *full_load, LoadHelper *direct,
> + LoadHelper *direct_swap)
> {
> uintptr_t mmu_idx = get_mmuidx(oi);
> uintptr_t index = tlb_index(env, mmu_idx, addr);
> @@ -1361,17 +1353,21 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
> /* On watchpoint hit, this will longjmp out. */
> cpu_check_watchpoint(env_cpu(env), addr, size,
> iotlbentry->attrs, BP_MEM_READ, retaddr);
> -
> - /* The backing page may or may not require I/O. */
> - tlb_addr &= ~TLB_WATCHPOINT;
> - if ((tlb_addr & ~TARGET_PAGE_MASK) == 0) {
> - goto do_aligned_access;
> - }
> }
>
> /* Handle I/O access. */
> - return io_readx(env, iotlbentry, mmu_idx, addr,
> - retaddr, access_type, op);
> + if (likely(tlb_addr & TLB_MMIO)) {
> + return io_readx(env, iotlbentry, mmu_idx, addr,
> + retaddr, access_type,
> + op ^ (tlb_addr & TLB_BSWAP ? MO_BSWAP : 0));
> + }
> +
> + haddr = (void *)((uintptr_t)addr + entry->addend);
> +
> + if (unlikely(tlb_addr & TLB_BSWAP)) {
> + return direct_swap(haddr);
> + }
> + return direct(haddr);
> }
>
> /* Handle slow unaligned access (it spans two pages or IO). */
> @@ -1398,7 +1394,6 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
> return res & MAKE_64BIT_MASK(0, size * 8);
> }
>
> - do_aligned_access:
> haddr = (void *)((uintptr_t)addr + entry->addend);
> return direct(haddr);
> }
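(The helper updates below all follow the same pattern: each one passes its usual host accessor as 'direct' and the opposite-endian accessor as 'direct_swap'. That works because the big-endian and little-endian accessors differ only by a byte swap, which is exactly what the op ^ MO_BSWAP adjustment expresses on the I/O path. A rough sketch of that relationship, using stand-ins rather than QEMU's real ldq_le_p/ldq_be_p:

    #include <stdint.h>
    #include <string.h>

    static uint64_t ldq_le_sketch(const void *p)
    {
        uint64_t v;
        memcpy(&v, p, sizeof(v));               /* read host bytes */
    #if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
        v = __builtin_bswap64(v);               /* normalize to an LE result */
    #endif
        return v;
    }

    static uint64_t ldq_be_sketch(const void *p)
    {
        /* The big-endian accessor is the little-endian one byte-swapped,
         * so passing "the other" accessor as direct_swap applies MO_BSWAP. */
        return __builtin_bswap64(ldq_le_sketch(p));
    }
)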
> @@ -1417,7 +1412,7 @@ static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
> TCGMemOpIdx oi, uintptr_t retaddr)
> {
> return load_helper(env, addr, oi, retaddr, MO_UB, false,
> - full_ldub_mmu, wrap_ldub);
> + full_ldub_mmu, wrap_ldub, wrap_ldub);
> }
>
> tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
> @@ -1430,7 +1425,7 @@ static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
> TCGMemOpIdx oi, uintptr_t retaddr)
> {
> return load_helper(env, addr, oi, retaddr, MO_LEUW, false,
> - full_le_lduw_mmu, wrap_lduw_le);
> + full_le_lduw_mmu, wrap_lduw_le, wrap_lduw_be);
> }
>
> tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
> @@ -1443,7 +1438,7 @@ static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
> TCGMemOpIdx oi, uintptr_t retaddr)
> {
> return load_helper(env, addr, oi, retaddr, MO_BEUW, false,
> - full_be_lduw_mmu, wrap_lduw_be);
> + full_be_lduw_mmu, wrap_lduw_be, wrap_lduw_le);
> }
>
> tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
> @@ -1456,7 +1451,7 @@ static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
> TCGMemOpIdx oi, uintptr_t retaddr)
> {
> return load_helper(env, addr, oi, retaddr, MO_LEUL, false,
> - full_le_ldul_mmu, wrap_ldul_le);
> + full_le_ldul_mmu, wrap_ldul_le, wrap_ldul_be);
> }
>
> tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
> @@ -1469,7 +1464,7 @@ static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
> TCGMemOpIdx oi, uintptr_t retaddr)
> {
> return load_helper(env, addr, oi, retaddr, MO_BEUL, false,
> - full_be_ldul_mmu, wrap_ldul_be);
> + full_be_ldul_mmu, wrap_ldul_be, wrap_ldul_le);
> }
>
> tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
> @@ -1482,14 +1477,14 @@ uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
> TCGMemOpIdx oi, uintptr_t retaddr)
> {
> return load_helper(env, addr, oi, retaddr, MO_LEQ, false,
> - helper_le_ldq_mmu, ldq_le_p);
> + helper_le_ldq_mmu, ldq_le_p, ldq_be_p);
> }
>
> uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
> TCGMemOpIdx oi, uintptr_t retaddr)
> {
> return load_helper(env, addr, oi, retaddr, MO_BEQ, false,
> - helper_be_ldq_mmu, ldq_be_p);
> + helper_be_ldq_mmu, ldq_be_p, ldq_le_p);
> }
>
> /*
> @@ -1563,7 +1558,7 @@ static inline void wrap_stl_le(void *haddr, uint64_t val)
> static inline void QEMU_ALWAYS_INLINE
> store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
> TCGMemOpIdx oi, uintptr_t retaddr, MemOp op,
> - StoreHelper *direct)
> + StoreHelper *direct, StoreHelper *direct_swap)
> {
> uintptr_t mmu_idx = get_mmuidx(oi);
> uintptr_t index = tlb_index(env, mmu_idx, addr);
> @@ -1608,16 +1603,22 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
> /* On watchpoint hit, this will longjmp out. */
> cpu_check_watchpoint(env_cpu(env), addr, size,
> iotlbentry->attrs, BP_MEM_WRITE, retaddr);
> -
> - /* The backing page may or may not require I/O. */
> - tlb_addr &= ~TLB_WATCHPOINT;
> - if ((tlb_addr & ~TARGET_PAGE_MASK) == 0) {
> - goto do_aligned_access;
> - }
> }
>
> /* Handle I/O access. */
> - io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr, op);
> + if (likely(tlb_addr & (TLB_MMIO | TLB_NOTDIRTY))) {
> + io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr,
> + op ^ (tlb_addr & TLB_BSWAP ? MO_BSWAP : 0));
> + return;
> + }
> +
> + haddr = (void *)((uintptr_t)addr + entry->addend);
> +
> + if (unlikely(tlb_addr & TLB_BSWAP)) {
> + direct_swap(haddr, val);
> + } else {
> + direct(haddr, val);
> + }
> return;
> }
>
> @@ -1686,7 +1687,6 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
> return;
> }
>
> - do_aligned_access:
> haddr = (void *)((uintptr_t)addr + entry->addend);
> direct(haddr, val);
> }
> @@ -1694,43 +1694,47 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
> void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
> TCGMemOpIdx oi, uintptr_t retaddr)
> {
> - store_helper(env, addr, val, oi, retaddr, MO_UB, wrap_stb);
> + store_helper(env, addr, val, oi, retaddr, MO_UB, wrap_stb, wrap_stb);
> }
>
> void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
> TCGMemOpIdx oi, uintptr_t retaddr)
> {
> - store_helper(env, addr, val, oi, retaddr, MO_LEUW, wrap_stw_le);
> + store_helper(env, addr, val, oi, retaddr, MO_LEUW,
> + wrap_stw_le, wrap_stw_be);
> }
>
> void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
> TCGMemOpIdx oi, uintptr_t retaddr)
> {
> - store_helper(env, addr, val, oi, retaddr, MO_BEUW, wrap_stw_be);
> + store_helper(env, addr, val, oi, retaddr, MO_BEUW,
> + wrap_stw_be, wrap_stw_le);
> }
>
> void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
> TCGMemOpIdx oi, uintptr_t retaddr)
> {
> - store_helper(env, addr, val, oi, retaddr, MO_LEUL, wrap_stl_le);
> + store_helper(env, addr, val, oi, retaddr, MO_LEUL,
> + wrap_stl_le, wrap_stl_be);
> }
>
> void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
> TCGMemOpIdx oi, uintptr_t retaddr)
> {
> - store_helper(env, addr, val, oi, retaddr, MO_BEUL, wrap_stl_be);
> + store_helper(env, addr, val, oi, retaddr, MO_BEUL,
> + wrap_stl_be, wrap_stl_le);
> }
>
> void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
> TCGMemOpIdx oi, uintptr_t retaddr)
> {
> - store_helper(env, addr, val, oi, retaddr, MO_LEQ, stq_le_p);
> + store_helper(env, addr, val, oi, retaddr, MO_LEQ, stq_le_p, stq_be_p);
> }
>
> void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
> TCGMemOpIdx oi, uintptr_t retaddr)
> {
> - store_helper(env, addr, val, oi, retaddr, MO_BEQ, stq_be_p);
> + store_helper(env, addr, val, oi, retaddr, MO_BEQ, stq_be_p, stq_le_p);
> }
>
> /* First set of helpers allows passing in of OI and RETADDR. This makes
> @@ -1796,7 +1800,7 @@ static uint64_t full_ldub_cmmu(CPUArchState *env, target_ulong addr,
> TCGMemOpIdx oi, uintptr_t retaddr)
> {
> return load_helper(env, addr, oi, retaddr, MO_8, true,
> - full_ldub_cmmu, wrap_ldub);
> + full_ldub_cmmu, wrap_ldub, wrap_ldub);
> }
>
> uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
> @@ -1809,7 +1813,7 @@ static uint64_t full_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
> TCGMemOpIdx oi, uintptr_t retaddr)
> {
> return load_helper(env, addr, oi, retaddr, MO_LEUW, true,
> - full_le_lduw_cmmu, wrap_lduw_le);
> + full_le_lduw_cmmu, wrap_lduw_le, wrap_lduw_be);
> }
>
> uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr,
> @@ -1822,7 +1826,7 @@ static uint64_t full_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
> TCGMemOpIdx oi, uintptr_t retaddr)
> {
> return load_helper(env, addr, oi, retaddr, MO_BEUW, true,
> - full_be_lduw_cmmu, wrap_lduw_be);
> + full_be_lduw_cmmu, wrap_lduw_be, wrap_lduw_le);
> }
>
> uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr,
> @@ -1835,7 +1839,7 @@ static uint64_t full_le_ldul_cmmu(CPUArchState *env, target_ulong addr,
> TCGMemOpIdx oi, uintptr_t retaddr)
> {
> return load_helper(env, addr, oi, retaddr, MO_LEUL, true,
> - full_le_ldul_cmmu, wrap_ldul_le);
> + full_le_ldul_cmmu, wrap_ldul_le, wrap_ldul_be);
> }
>
> uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
> @@ -1848,7 +1852,7 @@ static uint64_t full_be_ldul_cmmu(CPUArchState *env, target_ulong addr,
> TCGMemOpIdx oi, uintptr_t retaddr)
> {
> return load_helper(env, addr, oi, retaddr, MO_BEUL, true,
> - full_be_ldul_cmmu, wrap_ldul_be);
> + full_be_ldul_cmmu, wrap_ldul_be, wrap_ldul_le);
> }
>
> uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
> @@ -1861,12 +1865,12 @@ uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
> TCGMemOpIdx oi, uintptr_t retaddr)
> {
> return load_helper(env, addr, oi, retaddr, MO_LEQ, true,
> - helper_le_ldq_cmmu, ldq_le_p);
> + helper_le_ldq_cmmu, ldq_le_p, ldq_be_p);
> }
>
> uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
> TCGMemOpIdx oi, uintptr_t retaddr)
> {
> return load_helper(env, addr, oi, retaddr, MO_BEQ, true,
> - helper_be_ldq_cmmu, ldq_be_p);
> + helper_be_ldq_cmmu, ldq_be_p, ldq_le_p);
> }
>
Reviewed-by: David Hildenbrand <address@hidden>
--
Thanks,
David / dhildenb