From: Richard Henderson
Subject: [PATCH v4 11/36] tcg/tci: Add TCG_TARGET_CALL_{RET,ARG}_I128
Date: Sat, 7 Jan 2023 18:36:54 -0800
Fill in the libffi parameters for Int128.

Adjust the interpreter to allow for 16-byte return values.
Adjust tcg_out_call to record the return value length.

Call parameters are no longer all the same size, so we
cannot reuse the same call_slots array for every function.
Compute it each time now, but only fill in slots required
for the call we're about to make.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
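
Note for readers, not part of the patch: tcg_gen_callN leaves every
argument "left-aligned" in one or more 64-bit stack slots, so once
Int128 arguments exist, an argument may occupy two consecutive slots
and the slot index no longer equals the argument index. The following
minimal, standalone sketch mirrors the per-call computation done by
the new INDEX_op_call code below; the mock_* types and the example
(i32, i128, i64) signature are illustrative stand-ins, not QEMU code.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Illustrative stand-ins for the libffi descriptors used by TCI. */
typedef struct { size_t size; } mock_ffi_type;
typedef struct {
    unsigned nargs;
    mock_ffi_type **arg_types;
} mock_ffi_cif;

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    /* A hypothetical helper taking (i32, i128, i64). */
    mock_ffi_type u32 = { 4 }, u64 = { 8 }, i128 = { 16 };
    mock_ffi_type *args[] = { &u32, &i128, &u64 };
    mock_ffi_cif cif = { 3, args };

    uint64_t stack[8];
    void *call_slots[8];

    /*
     * Same loop shape as the new INDEX_op_call code: each argument
     * begins at the next free 64-bit slot, and a 16-byte argument
     * consumes two slots.
     */
    for (unsigned i = 0, s = 0; i < cif.nargs; ++i) {
        call_slots[i] = &stack[s];
        printf("arg %u -> stack[%u] (%zu bytes)\n",
               i, s, cif.arg_types[i]->size);
        s += DIV_ROUND_UP(cif.arg_types[i]->size, 8);
    }
    /* Prints slots 0, 1 and 3: the i128 in arg 1 covers stack[1..2]. */
    return 0;
}

This is why the fixed identity mapping call_slots[i] = &stack[i] had
to go.
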
 tcg/tci/tcg-target.h     |  3 +++
 tcg/tcg.c                | 19 +++++++++++++++++
 tcg/tci.c                | 44 ++++++++++++++++++++--------------------
 tcg/tci/tcg-target.c.inc | 10 ++++-----
 4 files changed, 49 insertions(+), 27 deletions(-)
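
Not part of the patch, but for context on the tcg.c hunk below: libffi
has no primitive 128-bit integer type, so Int128 is described to it as
an FFI_TYPE_STRUCT of two uint64 elements. The same pattern works in a
self-contained program; everything here (pair128, make_pair) is a
hypothetical example, not QEMU code. Build with "cc demo.c -lffi".

#include <ffi.h>
#include <inttypes.h>
#include <stdio.h>

/* A 128-bit value modeled as two 64-bit halves, analogous to the
 * structure form of Int128 that the patch relies on. */
typedef struct { uint64_t lo, hi; } pair128;

static pair128 make_pair(uint64_t lo, uint64_t hi)
{
    pair128 p = { lo, hi };
    return p;
}

int main(void)
{
    /* Describe pair128 as a struct of two uint64, as the patch does
     * for Int128.  Leaving size/alignment zero lets ffi_prep_cif
     * compute them; the patch fills them in statically instead. */
    ffi_type *elements[3] = { &ffi_type_uint64, &ffi_type_uint64, NULL };
    ffi_type pair_type = {
        .size = 0,
        .alignment = 0,
        .type = FFI_TYPE_STRUCT,
        .elements = elements,
    };

    ffi_cif cif;
    ffi_type *arg_types[2] = { &ffi_type_uint64, &ffi_type_uint64 };
    if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &pair_type, arg_types)
        != FFI_OK) {
        return 1;
    }

    uint64_t lo = 1, hi = 2;
    void *avalues[2] = { &lo, &hi };
    pair128 result;

    /* For a struct return, ffi_call writes the full 16 bytes through
     * the rvalue pointer, which is what lets TCI copy 16 bytes out of
     * its stack buffer afterward. */
    ffi_call(&cif, FFI_FN(make_pair), &result, avalues);

    printf("lo=%" PRIu64 " hi=%" PRIu64 "\n", result.lo, result.hi);
    return 0;
}
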
diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h
index 94ec541b4e..9d569c9e04 100644
--- a/tcg/tci/tcg-target.h
+++ b/tcg/tci/tcg-target.h
@@ -161,10 +161,13 @@ typedef enum {
 #if TCG_TARGET_REG_BITS == 32
 # define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_EVEN
 # define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_EVEN
+# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_EVEN
 #else
 # define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL
 # define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL
+# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL
 #endif
+#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL
 
 #define HAVE_TCG_QEMU_TB_EXEC
 #define TCG_TARGET_NEED_POOL_LABELS
diff --git a/tcg/tcg.c b/tcg/tcg.c
index c032606b21..6f72d4157a 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -549,6 +549,22 @@ static GHashTable *helper_table;
 #ifdef CONFIG_TCG_INTERPRETER
 static ffi_type *typecode_to_ffi(int argmask)
 {
+    /*
+     * libffi does not support __int128_t, so we have forced Int128
+     * to use the structure definition instead of the builtin type.
+     */
+    static ffi_type *ffi_type_i128_elements[3] = {
+        &ffi_type_uint64,
+        &ffi_type_uint64,
+        NULL
+    };
+    static ffi_type ffi_type_i128 = {
+        .size = 16,
+        .alignment = __alignof__(Int128),
+        .type = FFI_TYPE_STRUCT,
+        .elements = ffi_type_i128_elements,
+    };
+
     switch (argmask) {
     case dh_typecode_void:
         return &ffi_type_void;
@@ -562,6 +578,8 @@ static ffi_type *typecode_to_ffi(int argmask)
         return &ffi_type_sint64;
     case dh_typecode_ptr:
         return &ffi_type_pointer;
+    case dh_typecode_i128:
+        return &ffi_type_i128;
     }
     g_assert_not_reached();
 }
@@ -592,6 +610,7 @@ static void init_ffi_layouts(void)
         /* Ignoring the return type, find the last non-zero field. */
         nargs = 32 - clz32(typemask >> 3);
         nargs = DIV_ROUND_UP(nargs, 3);
+        assert(nargs <= MAX_CALL_IARGS);
 
         ca = g_malloc0(sizeof(*ca) + nargs * sizeof(ffi_type *));
         ca->cif.rtype = typecode_to_ffi(typemask & 7);
diff --git a/tcg/tci.c b/tcg/tci.c
index eeccdde8bc..022fe9d0f8 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -470,12 +470,9 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
     tcg_target_ulong regs[TCG_TARGET_NB_REGS];
     uint64_t stack[(TCG_STATIC_CALL_ARGS_SIZE + TCG_STATIC_FRAME_SIZE)
                    / sizeof(uint64_t)];
-    void *call_slots[TCG_STATIC_CALL_ARGS_SIZE / sizeof(uint64_t)];
 
     regs[TCG_AREG0] = (tcg_target_ulong)env;
     regs[TCG_REG_CALL_STACK] = (uintptr_t)stack;
-    /* Other call_slots entries initialized at first use (see below). */
-    call_slots[0] = NULL;
     tci_assert(tb_ptr);
 
     for (;;) {
@@ -498,26 +495,26 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
 
         switch (opc) {
         case INDEX_op_call:
-            /*
-             * Set up the ffi_avalue array once, delayed until now
-             * because many TB's do not make any calls. In tcg_gen_callN,
-             * we arranged for every real argument to be "left-aligned"
-             * in each 64-bit slot.
-             */
-            if (unlikely(call_slots[0] == NULL)) {
-                for (int i = 0; i < ARRAY_SIZE(call_slots); ++i) {
-                    call_slots[i] = &stack[i];
-                }
-            }
-
-            tci_args_nl(insn, tb_ptr, &len, &ptr);
-
-            /* Helper functions may need to access the "return address" */
-            tci_tb_ptr = (uintptr_t)tb_ptr;
-
             {
-                void **pptr = ptr;
-                ffi_call(pptr[1], pptr[0], stack, call_slots);
+                void *call_slots[MAX_CALL_IARGS];
+                ffi_cif *cif;
+                void *func;
+                unsigned i, s, n;
+
+                tci_args_nl(insn, tb_ptr, &len, &ptr);
+                func = ((void **)ptr)[0];
+                cif = ((void **)ptr)[1];
+
+                n = cif->nargs;
+                for (i = s = 0; i < n; ++i) {
+                    ffi_type *t = cif->arg_types[i];
+                    call_slots[i] = &stack[s];
+                    s += DIV_ROUND_UP(t->size, 8);
+                }
+
+                /* Helper functions may need to access the "return address" */
+                tci_tb_ptr = (uintptr_t)tb_ptr;
+                ffi_call(cif, func, stack, call_slots);
             }
 
             switch (len) {
@@ -542,6 +539,9 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
                  */
                 memcpy(&regs[TCG_REG_R0], stack, 8);
                 break;
+            case 3: /* Int128 */
+                memcpy(&regs[TCG_REG_R0], stack, 16);
+                break;
             default:
                 g_assert_not_reached();
             }
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index cd53cb6b6b..357888a532 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -203,7 +203,7 @@ static const int tcg_target_call_iarg_regs[] = { };
 static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
 {
     tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
-    tcg_debug_assert(slot >= 0 && slot < 64 / TCG_TARGET_REG_BITS);
+    tcg_debug_assert(slot >= 0 && slot < 128 / TCG_TARGET_REG_BITS);
     return TCG_REG_R0 + slot;
 }
 
@@ -573,11 +573,11 @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *func,
 
     if (cif->rtype == &ffi_type_void) {
         which = 0;
-    } else if (cif->rtype->size == 4) {
-        which = 1;
     } else {
-        tcg_debug_assert(cif->rtype->size == 8);
-        which = 2;
+        tcg_debug_assert(cif->rtype->size == 4 ||
+                         cif->rtype->size == 8 ||
+                         cif->rtype->size == 16);
+        which = ctz32(cif->rtype->size) - 1;
     }
     new_pool_l2(s, 20, s->code_ptr, 0, (uintptr_t)func, (uintptr_t)cif);
     insn = deposit32(insn, 0, 8, INDEX_op_call);
--
2.34.1
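
A closing note on the encoding, not part of the patch: tcg_out_call
stores a return-size class that the interpreter later decodes as
"len": 0 for void, then 1, 2, 3 for 4-, 8- and 16-byte results.
Because those sizes are powers of two, which = ctz32(size) - 1 yields
the class directly, and case 3 copies 16 bytes into consecutive
return registers; that is also why tcg_target_call_oarg_reg now
accepts slots up to 128 / TCG_TARGET_REG_BITS. A quick standalone
check of the mapping, using __builtin_ctz as a stand-in for QEMU's
ctz32:

#include <assert.h>

int main(void)
{
    /* void return -> which = 0 (handled before the ctz32 path). */
    assert(__builtin_ctz(4) - 1 == 1);   /* 32-bit result -> len 1 */
    assert(__builtin_ctz(8) - 1 == 2);   /* 64-bit result -> len 2 */
    assert(__builtin_ctz(16) - 1 == 3);  /* Int128 result -> len 3 */
    return 0;
}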