[PATCH v4 35/54] tcg/i386: Convert tcg_out_qemu_ld_slow_path
From: Richard Henderson
Subject: [PATCH v4 35/54] tcg/i386: Convert tcg_out_qemu_ld_slow_path
Date: Wed, 3 May 2023 07:57:10 +0100
Use tcg_out_ld_helper_args and tcg_out_ld_helper_ret.
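This uses the common slow-path infrastructure added by the previous patch,
"tcg: Add routines for calling slow-path helpers".  For reference, the
per-backend parameter block those routines consume looks roughly like this
(a sketch based on patch 34/54; see that patch for the exact layout):

    typedef struct TCGLdstHelperParam {
        /* Optional hook: emit code to load the return address.  */
        TCGReg (*ra_gen)(TCGContext *s, const TCGLabelQemuLdst *l,
                         int arg_reg);
        /* Scratch registers the backend lends to the common code.  */
        unsigned ntmp;
        int tmp[3];
    } TCGLdstHelperParam;

The ra_gen hook is called with the argument register chosen for the
return-address parameter, or a negative value if that parameter is passed
on the stack; it must emit code that loads l->raddr and return the
register actually used.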
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/i386/tcg-target.c.inc | 71 +++++++++++++++------------------------
1 file changed, 28 insertions(+), 43 deletions(-)
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index 8752968af2..17ad3c5963 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -1802,13 +1802,37 @@ static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = {
     [MO_BEUQ] = helper_be_stq_mmu,
 };
 
+/*
+ * Because i686 has no register parameters and because x86_64 has xchg
+ * to handle addr/data register overlap, we have placed all input arguments
+ * before we might need a scratch reg.
+ *
+ * Even then, a scratch is only needed for l->raddr.  Rather than expose
+ * a general-purpose scratch when we don't actually know it's available,
+ * use the ra_gen hook to load into RAX if needed.
+ */
+#if TCG_TARGET_REG_BITS == 64
+static TCGReg ldst_ra_gen(TCGContext *s, const TCGLabelQemuLdst *l, int arg)
+{
+    if (arg < 0) {
+        arg = TCG_REG_RAX;
+    }
+    tcg_out_movi(s, TCG_TYPE_PTR, arg, (uintptr_t)l->raddr);
+    return arg;
+}
+static const TCGLdstHelperParam ldst_helper_param = {
+    .ra_gen = ldst_ra_gen
+};
+#else
+static const TCGLdstHelperParam ldst_helper_param = { };
+#endif
+
 /*
  * Generate code for the slow path for a load at the end of block
  */
 static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 {
-    MemOpIdx oi = l->oi;
-    MemOp opc = get_memop(oi);
+    MemOp opc = get_memop(l->oi);
     tcg_insn_unit **label_ptr = &l->label_ptr[0];
 
     /* resolve label address */
@@ -1817,49 +1841,10 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
         tcg_patch32(label_ptr[1], s->code_ptr - label_ptr[1] - 4);
     }
 
-    if (TCG_TARGET_REG_BITS == 32) {
-        int ofs = 0;
-
-        tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);
-        ofs += 4;
-
-        tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);
-        ofs += 4;
-
-        if (TARGET_LONG_BITS == 64) {
-            tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);
-            ofs += 4;
-        }
-
-        tcg_out_sti(s, TCG_TYPE_I32, oi, TCG_REG_ESP, ofs);
-        ofs += 4;
-
-        tcg_out_sti(s, TCG_TYPE_PTR, (uintptr_t)l->raddr, TCG_REG_ESP, ofs);
-    } else {
-        tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
-        tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
-                    l->addrlo_reg);
-        tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2], oi);
-        tcg_out_movi(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[3],
-                     (uintptr_t)l->raddr);
-    }
-
+    tcg_out_ld_helper_args(s, l, &ldst_helper_param);
     tcg_out_branch(s, 1, qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)]);
+    tcg_out_ld_helper_ret(s, l, false, &ldst_helper_param);
 
-    if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) {
-        TCGMovExtend ext[2] = {
-            { .dst = l->datalo_reg, .dst_type = TCG_TYPE_I32,
-              .src = TCG_REG_EAX, .src_type = TCG_TYPE_I32, .src_ext = MO_UL },
-            { .dst = l->datahi_reg, .dst_type = TCG_TYPE_I32,
-              .src = TCG_REG_EDX, .src_type = TCG_TYPE_I32, .src_ext = MO_UL },
-        };
-        tcg_out_movext2(s, &ext[0], &ext[1], -1);
-    } else {
-        tcg_out_movext(s, l->type, l->datalo_reg,
-                       TCG_TYPE_REG, opc & MO_SSIZE, TCG_REG_EAX);
-    }
-
-    /* Jump to the code corresponding to next IR of qemu_st */
     tcg_out_jmp(s, l->raddr);
     return true;
 }
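For readers skimming the series: after this conversion the entire load
slow path is the four calls below, copied from the hunk above.  The
comments are editorial annotations, and the bool passed to
tcg_out_ld_helper_ret is the load_sign flag from patch 34/54:

    /* Marshal env, address and oi into argument registers/slots,
       loading the return address via the ra_gen hook if needed.  */
    tcg_out_ld_helper_args(s, l, &ldst_helper_param);
    /* Call the size/endian-specific load helper (1 = call, not jmp).  */
    tcg_out_branch(s, 1, qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)]);
    /* Move the helper result from EAX (EDX:EAX for a 64-bit result
       on i686) into the data register(s).  */
    tcg_out_ld_helper_ret(s, l, false, &ldst_helper_param);
    /* Return to the translated code after the memory operation.  */
    tcg_out_jmp(s, l->raddr);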
--
2.34.1