[PATCH v4 13/36] tcg: Add temp allocation for TCGv_i128
From: Richard Henderson
Subject: [PATCH v4 13/36] tcg: Add temp allocation for TCGv_i128
Date: Sat, 7 Jan 2023 18:36:56 -0800
This enables allocation of i128. The type is not yet
usable, as we have not yet added data movement ops.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg.h | 32 +++++++++++++++++++++++++
 tcg/tcg.c         | 60 +++++++++++++++++++++++++++++++++--------------
 2 files changed, 74 insertions(+), 18 deletions(-)
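For orientation, a minimal sketch of how a front end would use the new
interface; the surrounding translator code is hypothetical, and nothing
beyond allocate/free works until the data movement and load/store ops
arrive later in this series (patches 14/36 and 15/36):

    /* Allocate a 128-bit temporary; on a 64-bit host this reserves
     * two consecutive register-sized TCGTemps (four on 32-bit). */
    TCGv_i128 t = tcg_temp_new_i128();

    /* ... emit ops producing/consuming t (added in later patches) ... */

    tcg_temp_free_i128(t);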
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
index 7d346192ca..a996da60b5 100644
--- a/include/tcg/tcg.h
+++ b/include/tcg/tcg.h
@@ -697,6 +697,11 @@ static inline TCGTemp *tcgv_i64_temp(TCGv_i64 v)
     return tcgv_i32_temp((TCGv_i32)v);
 }
 
+static inline TCGTemp *tcgv_i128_temp(TCGv_i128 v)
+{
+    return tcgv_i32_temp((TCGv_i32)v);
+}
+
 static inline TCGTemp *tcgv_ptr_temp(TCGv_ptr v)
 {
     return tcgv_i32_temp((TCGv_i32)v);
@@ -717,6 +722,11 @@ static inline TCGArg tcgv_i64_arg(TCGv_i64 v)
     return temp_arg(tcgv_i64_temp(v));
 }
 
+static inline TCGArg tcgv_i128_arg(TCGv_i128 v)
+{
+    return temp_arg(tcgv_i128_temp(v));
+}
+
 static inline TCGArg tcgv_ptr_arg(TCGv_ptr v)
 {
     return temp_arg(tcgv_ptr_temp(v));
@@ -738,6 +748,11 @@ static inline TCGv_i64 temp_tcgv_i64(TCGTemp *t)
     return (TCGv_i64)temp_tcgv_i32(t);
 }
 
+static inline TCGv_i128 temp_tcgv_i128(TCGTemp *t)
+{
+    return (TCGv_i128)temp_tcgv_i32(t);
+}
+
 static inline TCGv_ptr temp_tcgv_ptr(TCGTemp *t)
 {
     return (TCGv_ptr)temp_tcgv_i32(t);
@@ -860,6 +875,11 @@ static inline void tcg_temp_free_i64(TCGv_i64 arg)
     tcg_temp_free_internal(tcgv_i64_temp(arg));
 }
 
+static inline void tcg_temp_free_i128(TCGv_i128 arg)
+{
+    tcg_temp_free_internal(tcgv_i128_temp(arg));
+}
+
 static inline void tcg_temp_free_ptr(TCGv_ptr arg)
 {
     tcg_temp_free_internal(tcgv_ptr_temp(arg));
@@ -908,6 +928,18 @@ static inline TCGv_i64 tcg_temp_local_new_i64(void)
     return temp_tcgv_i64(t);
 }
 
+static inline TCGv_i128 tcg_temp_new_i128(void)
+{
+    TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I128, false);
+    return temp_tcgv_i128(t);
+}
+
+static inline TCGv_i128 tcg_temp_local_new_i128(void)
+{
+    TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I128, true);
+    return temp_tcgv_i128(t);
+}
+
 static inline TCGv_ptr tcg_global_mem_new_ptr(TCGv_ptr reg, intptr_t offset,
                                               const char *name)
 {
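The casts above mirror the existing i32/i64 helpers: a TCGv_i128 is an
opaque typed handle onto the same underlying TCGTemp, so the conversions
round-trip. As a sketch of the invariant (hypothetical assertions, using
only the helpers defined above):

    TCGv_i128 v = tcg_temp_new_i128();
    TCGTemp *ts = tcgv_i128_temp(v);

    /* Handle <-> temp conversion round-trips ... */
    tcg_debug_assert(temp_tcgv_i128(ts) == v);
    /* ... and the opcode argument is just the temp's arg. */
    tcg_debug_assert(tcgv_i128_arg(v) == temp_arg(ts));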
diff --git a/tcg/tcg.c b/tcg/tcg.c
index e9bb1f329f..2ab012a095 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -1251,26 +1251,45 @@ TCGTemp *tcg_temp_new_internal(TCGType type, bool temp_local)
         tcg_debug_assert(ts->base_type == type);
         tcg_debug_assert(ts->kind == kind);
     } else {
+        int i, n;
+
+        switch (type) {
+        case TCG_TYPE_I32:
+        case TCG_TYPE_V64:
+        case TCG_TYPE_V128:
+        case TCG_TYPE_V256:
+            n = 1;
+            break;
+        case TCG_TYPE_I64:
+            n = 64 / TCG_TARGET_REG_BITS;
+            break;
+        case TCG_TYPE_I128:
+            n = 128 / TCG_TARGET_REG_BITS;
+            break;
+        default:
+            g_assert_not_reached();
+        }
+
         ts = tcg_temp_alloc(s);
-        if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
-            TCGTemp *ts2 = tcg_temp_alloc(s);
+        ts->base_type = type;
+        ts->temp_allocated = 1;
+        ts->kind = kind;
 
-            ts->base_type = type;
-            ts->type = TCG_TYPE_I32;
-            ts->temp_allocated = 1;
-            ts->kind = kind;
-
-            tcg_debug_assert(ts2 == ts + 1);
-            ts2->base_type = TCG_TYPE_I64;
-            ts2->type = TCG_TYPE_I32;
-            ts2->temp_allocated = 1;
-            ts2->temp_subindex = 1;
-            ts2->kind = kind;
-        } else {
-            ts->base_type = type;
+        if (n == 1) {
             ts->type = type;
-            ts->temp_allocated = 1;
-            ts->kind = kind;
+        } else {
+            ts->type = TCG_TYPE_REG;
+
+            for (i = 1; i < n; ++i) {
+                TCGTemp *ts2 = tcg_temp_alloc(s);
+
+                tcg_debug_assert(ts2 == ts + i);
+                ts2->base_type = type;
+                ts2->type = TCG_TYPE_REG;
+                ts2->temp_allocated = 1;
+                ts2->temp_subindex = i;
+                ts2->kind = kind;
+            }
         }
     }
@@ -3359,9 +3378,14 @@ static void temp_allocate_frame(TCGContext *s, TCGTemp *ts)
     case TCG_TYPE_V64:
         align = 8;
         break;
+    case TCG_TYPE_I128:
     case TCG_TYPE_V128:
     case TCG_TYPE_V256:
-        /* Note that we do not require aligned storage for V256. */
+        /*
+         * Note that we do not require aligned storage for V256,
+         * and that we provide alignment for I128 to match V128,
+         * even if that's above what the host ABI requires.
+         */
         align = 16;
         break;
     default:
--
2.34.1
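For reference, the allocation change above generalizes the old
TCG_TARGET_REG_BITS == 32 special case for I64: a scalar temp now
occupies n consecutive register-sized TCGTemps, with n the type width
divided by the host register width. A standalone illustration of that
arithmetic (plain C, independent of QEMU; the helper name is made up):

    #include <assert.h>

    /* Consecutive host-register-sized pieces needed for a scalar
     * TCG type, mirroring the switch in tcg_temp_new_internal(). */
    static int npieces(int type_bits, int reg_bits)
    {
        return type_bits / reg_bits;
    }

    int main(void)
    {
        assert(npieces(64, 64) == 1);   /* I64, 64-bit host */
        assert(npieces(128, 64) == 2);  /* I128, 64-bit host */
        assert(npieces(64, 32) == 2);   /* I64, 32-bit host */
        assert(npieces(128, 32) == 4);  /* I128, 32-bit host */
        return 0;
    }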