From: Richard Henderson
Subject: [PATCH v5 25/54] tcg/sparc64: Rename tcg_out_movi_imm13 to tcg_out_movi_s13
Date: Mon, 15 May 2023 07:32:44 -0700
Emphasize that the constant is signed.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
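For context: SPARC arithmetic immediates are 13 bits wide and sign-extended
by the hardware, so the new name makes the accepted range explicit. Below is
a minimal sketch, assuming the same semantics as the check_fit_tl(arg, 13)
test used in the diff, of when a constant qualifies (the helper name here is
illustrative, not QEMU's):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative helper, not QEMU source: a constant can be loaded with a
   single "or %g0, simm13, rd" only if sign-extending its low 13 bits
   reproduces the full value. */
static bool fits_s13(int64_t arg)
{
    /* Move bit 12 into the sign position (64 - 13 = 51 bits up), then
       arithmetic-shift back down to sign-extend bits [12:0]. */
    int64_t sext = (int64_t)((uint64_t)arg << 51) >> 51;
    return arg == sext;   /* true exactly for -4096 <= arg <= 4095 */
}

With a test of this shape, tcg_out_movi_int below emits the single-insn form
and returns early, falling through to wider sequences only when the value
does not fit.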
tcg/sparc64/tcg-target.c.inc | 21 +++++++++++----------
1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index 64464ab363..15d6a9fd73 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -399,7 +399,8 @@ static void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg)
tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
}
-static void tcg_out_movi_imm13(TCGContext *s, TCGReg ret, int32_t arg)
+/* A 13-bit constant sign-extended to 64 bits. */
+static void tcg_out_movi_s13(TCGContext *s, TCGReg ret, int32_t arg)
{
tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
}
@@ -408,7 +409,7 @@ static void tcg_out_movi_imm32(TCGContext *s, TCGReg ret, int32_t arg)
{
if (check_fit_i32(arg, 13)) {
/* A 13-bit constant sign-extended to 64-bits. */
- tcg_out_movi_imm13(s, ret, arg);
+ tcg_out_movi_s13(s, ret, arg);
} else {
/* A 32-bit constant zero-extended to 64 bits. */
tcg_out_sethi(s, ret, arg);
@@ -433,7 +434,7 @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
/* A 13-bit constant sign-extended to 64-bits. */
if (check_fit_tl(arg, 13)) {
- tcg_out_movi_imm13(s, ret, arg);
+ tcg_out_movi_s13(s, ret, arg);
return;
}
@@ -767,7 +768,7 @@ static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
default:
tcg_out_cmp(s, c1, c2, c2const);
- tcg_out_movi_imm13(s, ret, 0);
+ tcg_out_movi_s13(s, ret, 0);
tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1);
return;
}
@@ -803,11 +804,11 @@ static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
/* For 64-bit signed comparisons vs zero, we can avoid the compare
if the input does not overlap the output. */
if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
- tcg_out_movi_imm13(s, ret, 0);
+ tcg_out_movi_s13(s, ret, 0);
tcg_out_movr(s, cond, ret, c1, 1, 1);
} else {
tcg_out_cmp(s, c1, c2, c2const);
- tcg_out_movi_imm13(s, ret, 0);
+ tcg_out_movi_s13(s, ret, 0);
tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1);
}
}
@@ -844,7 +845,7 @@ static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
if (use_vis3_instructions && !is_sub) {
/* Note that ADDXC doesn't accept immediates. */
if (bhconst && bh != 0) {
- tcg_out_movi_imm13(s, TCG_REG_T2, bh);
+ tcg_out_movi_s13(s, TCG_REG_T2, bh);
bh = TCG_REG_T2;
}
tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC);
@@ -866,7 +867,7 @@ static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
* so the adjustment fits 12 bits.
*/
if (bhconst) {
- tcg_out_movi_imm13(s, TCG_REG_T2, bh + (is_sub ? -1 : 1));
+ tcg_out_movi_s13(s, TCG_REG_T2, bh + (is_sub ? -1 : 1));
} else {
tcg_out_arithi(s, TCG_REG_T2, bh, 1,
is_sub ? ARITH_SUB : ARITH_ADD);
@@ -1036,7 +1037,7 @@ static void tcg_target_qemu_prologue(TCGContext *s)
tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
/* delay slot */
- tcg_out_movi_imm13(s, TCG_REG_O0, 0);
+ tcg_out_movi_s13(s, TCG_REG_O0, 0);
build_trampolines(s);
}
@@ -1430,7 +1431,7 @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
{
if (check_fit_ptr(a0, 13)) {
tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
- tcg_out_movi_imm13(s, TCG_REG_O0, a0);
+ tcg_out_movi_s13(s, TCG_REG_O0, a0);
return;
} else {
intptr_t tb_diff = tcg_tbrel_diff(s, (void *)a0);
--
2.34.1
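For readers new to the idiom: the renamed helper expands to
tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR), i.e. "or %g0, simm13, rd".
A hypothetical C model (not QEMU code) of what that instruction computes at
run time, assuming the hardware sign-extends the 13-bit immediate field
before the OR:

#include <stdint.h>

/* Hypothetical model of "or %g0, simm13, rd": %g0 always reads as zero,
   and the 13-bit immediate field is sign-extended before the OR, so rd
   receives the constant sign-extended to 64 bits. */
static uint64_t or_g0_simm13(uint32_t imm13_field)   /* low 13 insn bits */
{
    int64_t imm = (int64_t)((uint64_t)imm13_field << 51) >> 51;
    return 0 | (uint64_t)imm;   /* OR with the zero register is identity */
}

Because %g0 is hardwired to zero, OR doubles as a move, which is why a
single ARITH_OR suffices for the signed 13-bit case.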