[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [RFC 03/14] tcg-aarch64: Allow immediate operands to compare
From: Richard Henderson
Subject: [Qemu-devel] [RFC 03/14] tcg-aarch64: Allow immediate operands to compare
Date: Mon, 12 Aug 2013 11:44:44 -0700
Signed-off-by: Richard Henderson <address@hidden>
---
tcg/aarch64/tcg-target.c | 74 +++++++++++++++++++++++++++++++++---------------
1 file changed, 51 insertions(+), 23 deletions(-)
diff --git a/tcg/aarch64/tcg-target.c b/tcg/aarch64/tcg-target.c
index e8370a9..76595b4 100644
--- a/tcg/aarch64/tcg-target.c
+++ b/tcg/aarch64/tcg-target.c
@@ -109,6 +109,7 @@ static inline void patch_reloc(uint8_t *code_ptr, int type,
#define TCG_CT_CONST_S25 0x100
#define TCG_CT_CONST_LI32 0x200
#define TCG_CT_CONST_LI64 0x400
+#define TCG_CT_CONST_CMP 0x800
#include "bitmask-table.h"
@@ -135,6 +136,14 @@ static int find_bitmask32(uint32_t val)
return find_bitmask64(((uint64_t)val << 32) | val);
}
+static int can_cmpi(tcg_target_long val)
+{
+ if (val < 0) {
+ val = ~val;
+ }
+ return (val & ~0xfff) == 0 || (val & ~0xfff000) == 0;
+}
+
/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct,
const char **pct_str)
@@ -161,6 +170,9 @@ static int target_parse_constraint(TCGArgConstraint *ct,
case 'A': /* 25-bit signed, to be added or subtracted. */
ct->ct |= TCG_CT_CONST_S25;
break;
+ case 'C': /* a 12-bit constant (maybe inverted) to be used with compare. */
+ ct->ct |= TCG_CT_CONST_CMP;
+ break;
case 'K': /* logical immediate 32-bit */
ct->ct |= TCG_CT_CONST_LI32;
break;
@@ -193,6 +205,9 @@ static inline int tcg_target_const_match(tcg_target_long
val,
if ((ct & TCG_CT_CONST_LI32) && find_bitmask32(val) >= 0) {
return 1;
}
+ if ((ct & TCG_CT_CONST_CMP) && can_cmpi(val)) {
+ return 1;
+ }
return 0;
}
@@ -247,19 +262,21 @@ enum aarch64_ldst_op_type { /* type of operation */
};
enum aarch64_arith_opc {
- ARITH_AND = 0x0a,
- ARITH_ADD = 0x0b,
- ARITH_ADDI = 0x11,
- ARITH_ANDI = 0x12,
- ARITH_OR = 0x2a,
- ARITH_ADDS = 0x2b,
- ARITH_ORI = 0x32,
- ARITH_XOR = 0x4a,
- ARITH_SUB = 0x4b,
- ARITH_SUBI = 0x51,
- ARITH_XORI = 0x52,
- ARITH_ANDS = 0x6a,
- ARITH_SUBS = 0x6b,
+ ARITH_AND = 0x0a,
+ ARITH_ADD = 0x0b,
+ ARITH_ADDI = 0x11,
+ ARITH_ANDI = 0x12,
+ ARITH_OR = 0x2a,
+ ARITH_ADDS = 0x2b,
+ ARITH_ADDSI = 0x31,
+ ARITH_ORI = 0x32,
+ ARITH_XOR = 0x4a,
+ ARITH_SUB = 0x4b,
+ ARITH_SUBI = 0x51,
+ ARITH_XORI = 0x52,
+ ARITH_ANDS = 0x6a,
+ ARITH_SUBS = 0x6b,
+ ARITH_SUBSI = 0x71,
};
enum aarch64_srr_opc {
@@ -609,11 +626,22 @@ static inline void tcg_out_rotl(TCGContext *s, int ext,
tcg_out_extr(s, ext, rd, rn, rn, bits - (m & max));
}
-static inline void tcg_out_cmp(TCGContext *s, int ext, TCGReg rn, TCGReg rm,
- int shift_imm)
+static void tcg_out_cmp(TCGContext *s, int ext, TCGReg a,
+ tcg_target_long b, bool const_b)
{
- /* Using CMP alias SUBS wzr, Wn, Wm */
- tcg_out_arith(s, ARITH_SUBS, ext, TCG_REG_XZR, rn, rm, shift_imm);
+ if (const_b) {
+ /* Using CMP alias SUBS xzr, Xn, const */
+ enum aarch64_arith_opc opc = ARITH_SUBSI;
+
+ if (b < 0) {
+ b = ~b;
+ opc = ARITH_ADDSI;
+ }
+ tcg_out_aimm(s, opc, ext, TCG_REG_XZR, a, b);
+ } else {
+ /* Using CMP alias SUBS wzr, Wn, Wm */
+ tcg_out_arith(s, ARITH_SUBS, ext, TCG_REG_XZR, a, b, 0);
+ }
}
static inline void tcg_out_cset(TCGContext *s, int ext, TCGReg rd, TCGCond c)
@@ -1348,14 +1376,14 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_brcond_i64:
ext = 1; /* fall through */
case INDEX_op_brcond_i32: /* CMP 0, 1, cond(2), label 3 */
- tcg_out_cmp(s, ext, args[0], args[1], 0);
+ tcg_out_cmp(s, ext, args[0], args[1], const_args[1]);
tcg_out_goto_label_cond(s, args[2], args[3]);
break;
case INDEX_op_setcond_i64:
ext = 1; /* fall through */
case INDEX_op_setcond_i32:
- tcg_out_cmp(s, ext, args[1], args[2], 0);
+ tcg_out_cmp(s, ext, args[1], args[2], const_args[2]);
tcg_out_cset(s, 0, args[0], args[3]);
break;
@@ -1494,10 +1522,10 @@ static const TCGTargetOpDef aarch64_op_defs[] = {
{ INDEX_op_rotl_i64, { "r", "r", "ri" } },
{ INDEX_op_rotr_i64, { "r", "r", "ri" } },
- { INDEX_op_brcond_i32, { "r", "r" } },
- { INDEX_op_setcond_i32, { "r", "r", "r" } },
- { INDEX_op_brcond_i64, { "r", "r" } },
- { INDEX_op_setcond_i64, { "r", "r", "r" } },
+ { INDEX_op_brcond_i32, { "r", "rC" } },
+ { INDEX_op_setcond_i32, { "r", "r", "rC" } },
+ { INDEX_op_brcond_i64, { "r", "rC" } },
+ { INDEX_op_setcond_i64, { "r", "r", "rC" } },
{ INDEX_op_qemu_ld8u, { "r", "l" } },
{ INDEX_op_qemu_ld8s, { "r", "l" } },
--
1.8.3.1
- [Qemu-devel] [RFC 00/14] tcg aarch64 improvements, Richard Henderson, 2013/08/12
- [Qemu-devel] [RFC 01/14] tcg-aarch64: Allow immediate operands to add and sub, Richard Henderson, 2013/08/12
- [Qemu-devel] [RFC 03/14] tcg-aarch64: Allow immediate operands to compare,
Richard Henderson <=
- [Qemu-devel] [RFC 04/14] tcg-aarch64: Convert from opcode enums to insn enums, Richard Henderson, 2013/08/12
- [Qemu-devel] [RFC 05/14] tcg-aarch64: Support andc, orc, eqv, not, Richard Henderson, 2013/08/12
- [Qemu-devel] [RFC 06/14] tcg-aarch64: Handle zero as first argument to sub, Richard Henderson, 2013/08/12
- [Qemu-devel] [RFC 07/14] tcg-aarch64: Support movcond, Richard Henderson, 2013/08/12
- [Qemu-devel] [RFC 08/14] tcg-aarch64: Support deposit, Richard Henderson, 2013/08/12
- [Qemu-devel] [RFC 09/14] tcg-aarch64: Support add2, sub2, Richard Henderson, 2013/08/12
- [Qemu-devel] [RFC 10/14] tcg-aarch64: Support div, mulu2, Richard Henderson, 2013/08/12
- [Qemu-devel] [RFC 11/14] tcg-aarch64: Improve tcg_out_movi, Richard Henderson, 2013/08/12
- [Qemu-devel] [RFC 12/14] tcg-aarch64: Avoid add with zero in tlb load, Richard Henderson, 2013/08/12
- [Qemu-devel] [RFC 13/14] tcg-aarch64: Use adrp in tcg_out_movi, Richard Henderson, 2013/08/12