[PATCH v3 50/57] tcg/mips: Reorg tlb load within prepare_host_addr
From: Richard Henderson
Subject: [PATCH v3 50/57] tcg/mips: Reorg tlb load within prepare_host_addr
Date: Mon, 24 Apr 2023 06:40:58 +0100
Compare the address vs the tlb entry with sign-extended values.
This simplifies the page+alignment mask constant, and the
generation of the last byte address for the misaligned test.
Move the tlb addend load up, and the zero-extension down.
This frees up a register, which allows us to use TMP3 as the returned base
address register instead of A0, which we were using as a 5th temporary.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/mips/tcg-target.c.inc | 38 ++++++++++++++++++--------------------
1 file changed, 18 insertions(+), 20 deletions(-)
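As an aside for reviewers, here is a small standalone C sketch (not code from
this patch; the 4K page size and 8-byte alignment mask are assumptions chosen
for the demo) showing why keeping the comparison values sign-extended makes
the TARGET_PAGE_MASK | a_mask constant cheaper to materialize on a 64-bit host:

/*
 * Standalone illustration (not part of the patch): why comparing with
 * sign-extended values simplifies the page+alignment mask constant.
 * The TARGET_PAGE_BITS and a_mask values below are assumptions for the demo.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_BITS 12
#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK (~(TARGET_PAGE_SIZE - 1))

int main(void)
{
    int a_mask = 7;   /* e.g. an 8-byte alignment check */

    /* Old scheme: the 32-bit guest mask is zero-extended into the 64-bit
     * host register, so the constant has bit 31 set but the high word
     * clear, and needs extra instructions to build. */
    uint64_t zext_mask = (uint64_t)(uint32_t)TARGET_PAGE_MASK | (uint32_t)a_mask;

    /* New scheme: everything stays sign-extended, so the constant is an
     * ordinary sign-extended 32-bit value, which the movi expansion can
     * load with a short LUI+ORI sequence. */
    int64_t sext_mask = (int64_t)TARGET_PAGE_MASK | a_mask;

    printf("zero-extended mask: 0x%016" PRIx64 "\n", zext_mask);
    printf("sign-extended mask: 0x%016" PRIx64 "\n", (uint64_t)sext_mask);
    return 0;
}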
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index 31d58e1977..695c137023 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -370,6 +370,8 @@ typedef enum {
ALIAS_PADDI = sizeof(void *) == 4 ? OPC_ADDIU : OPC_DADDIU,
ALIAS_TSRL = TARGET_LONG_BITS == 32 || TCG_TARGET_REG_BITS == 32
? OPC_SRL : OPC_DSRL,
+ ALIAS_TADDI = TARGET_LONG_BITS == 32 || TCG_TARGET_REG_BITS == 32
+ ? OPC_ADDIU : OPC_DADDIU,
} MIPSInsn;
/*
@@ -1263,14 +1265,12 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
int add_off = offsetof(CPUTLBEntry, addend);
int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read)
: offsetof(CPUTLBEntry, addr_write);
- target_ulong tlb_mask;
ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
ldst->oi = oi;
ldst->addrlo_reg = addrlo;
ldst->addrhi_reg = addrhi;
- base = TCG_REG_A0;
/* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
@@ -1290,15 +1290,12 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
tcg_out_ldst(s, OPC_LW, TCG_TMP0, TCG_TMP3, cmp_off + LO_OFF);
} else {
- tcg_out_ldst(s, (TARGET_LONG_BITS == 64 ? OPC_LD
- : TCG_TARGET_REG_BITS == 64 ? OPC_LWU : OPC_LW),
- TCG_TMP0, TCG_TMP3, cmp_off);
+ tcg_out_ld(s, TCG_TYPE_TL, TCG_TMP0, TCG_TMP3, cmp_off);
}
- /* Zero extend a 32-bit guest address for a 64-bit host. */
- if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
- tcg_out_ext32u(s, base, addrlo);
- addrlo = base;
+ if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
+ /* Load the tlb addend for the fast path. */
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP3, TCG_TMP3, add_off);
}
/*
@@ -1306,18 +1303,18 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
* For unaligned accesses, compare against the end of the access to
* verify that it does not cross a page boundary.
*/
- tlb_mask = (target_ulong)TARGET_PAGE_MASK | a_mask;
- tcg_out_movi(s, TCG_TYPE_I32, TCG_TMP1, tlb_mask);
- if (a_mask >= s_mask) {
- tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, addrlo);
- } else {
- tcg_out_opc_imm(s, ALIAS_PADDI, TCG_TMP2, addrlo, s_mask - a_mask);
+ tcg_out_movi(s, TCG_TYPE_TL, TCG_TMP1, TARGET_PAGE_MASK | a_mask);
+ if (a_mask < s_mask) {
+ tcg_out_opc_imm(s, ALIAS_TADDI, TCG_TMP2, addrlo, s_mask - a_mask);
tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, TCG_TMP2);
+ } else {
+ tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, addrlo);
}
- if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
- /* Load the tlb addend for the fast path. */
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP2, TCG_TMP3, add_off);
+ /* Zero extend a 32-bit guest address for a 64-bit host. */
+ if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
+ tcg_out_ext32u(s, TCG_TMP2, addrlo);
+ addrlo = TCG_TMP2;
}
ldst->label_ptr[0] = s->code_ptr;
@@ -1329,14 +1326,15 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
tcg_out_ldst(s, OPC_LW, TCG_TMP0, TCG_TMP3, cmp_off + HI_OFF);
/* Load the tlb addend for the fast path. */
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP2, TCG_TMP3, add_off);
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP3, TCG_TMP3, add_off);
ldst->label_ptr[1] = s->code_ptr;
tcg_out_opc_br(s, OPC_BNE, addrhi, TCG_TMP0);
}
/* delay slot */
- tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_TMP2, addrlo);
+ base = TCG_TMP3;
+ tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_TMP3, addrlo);
#else
if (a_mask && (use_mips32r6_instructions || a_bits != s_bits)) {
ldst = new_ldst_label(s);
--
2.34.1