[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH v2 04/14] tcg/loongarch64: Lower add/sub_vec to vadd/vsub
From: Jiajie Chen
Subject: [PATCH v2 04/14] tcg/loongarch64: Lower add/sub_vec to vadd/vsub
Date: Fri, 1 Sep 2023 17:30:57 +0800
Lower the following ops:
- add_vec
- sub_vec
Signed-off-by: Jiajie Chen <c@jia.je>
---
tcg/loongarch64/tcg-target-con-set.h | 1 +
tcg/loongarch64/tcg-target.c.inc | 58 ++++++++++++++++++++++++++++
2 files changed, 59 insertions(+)
diff --git a/tcg/loongarch64/tcg-target-con-set.h b/tcg/loongarch64/tcg-target-con-set.h
index d04916db25..eaa015e813 100644
--- a/tcg/loongarch64/tcg-target-con-set.h
+++ b/tcg/loongarch64/tcg-target-con-set.h
@@ -31,5 +31,6 @@ C_O1_I2(r, 0, rZ)
C_O1_I2(r, rZ, ri)
C_O1_I2(r, rZ, rJ)
C_O1_I2(r, rZ, rZ)
+C_O1_I2(w, w, wi)
C_O1_I2(w, w, wJ)
C_O1_I4(r, rZ, rJ, rZ, rZ)
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index 18fe5fc148..555080f2b0 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -1641,6 +1641,18 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
[TCG_COND_LTU] = {OPC_VSLTI_BU, OPC_VSLTI_HU, OPC_VSLTI_WU,
OPC_VSLTI_DU},
};
LoongArchInsn insn;
+ static const LoongArchInsn add_vec_insn[4] = {
+ OPC_VADD_B, OPC_VADD_H, OPC_VADD_W, OPC_VADD_D
+ };
+ static const LoongArchInsn add_vec_imm_insn[4] = {
+ OPC_VADDI_BU, OPC_VADDI_HU, OPC_VADDI_WU, OPC_VADDI_DU
+ };
+ static const LoongArchInsn sub_vec_insn[4] = {
+ OPC_VSUB_B, OPC_VSUB_H, OPC_VSUB_W, OPC_VSUB_D
+ };
+ static const LoongArchInsn sub_vec_imm_insn[4] = {
+ OPC_VSUBI_BU, OPC_VSUBI_HU, OPC_VSUBI_WU, OPC_VSUBI_DU
+ };
a0 = args[0];
a1 = args[1];
@@ -1707,6 +1719,46 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
}
tcg_out32(s, encode_vdvjvk_insn(insn, a0, a1, a2));
break;
+ case INDEX_op_add_vec:
+ if (const_args[2]) {
+ int64_t value = sextract64(a2, 0, 8 << vece);
+ /* Try vaddi/vsubi */
+ if (0 <= value && value <= 0x1f) {
+ tcg_out32(s, encode_vdvjuk5_insn(add_vec_imm_insn[vece], a0, \
+ a1, value));
+ break;
+ } else if (-0x1f <= value && value < 0) {
+ tcg_out32(s, encode_vdvjuk5_insn(sub_vec_imm_insn[vece], a0, \
+ a1, -value));
+ break;
+ }
+
+ /* Fallback to dupi + vadd */
+ tcg_out_dupi_vec(s, type, vece, temp_vec, a2);
+ a2 = temp_vec;
+ }
+ tcg_out32(s, encode_vdvjvk_insn(add_vec_insn[vece], a0, a1, a2));
+ break;
+ case INDEX_op_sub_vec:
+ if (const_args[2]) {
+ int64_t value = sextract64(a2, 0, 8 << vece);
+ /* Try vaddi/vsubi */
+ if (0 <= value && value <= 0x1f) {
+ tcg_out32(s, encode_vdvjuk5_insn(sub_vec_imm_insn[vece], a0, \
+ a1, value));
+ break;
+ } else if (-0x1f <= value && value < 0) {
+ tcg_out32(s, encode_vdvjuk5_insn(add_vec_imm_insn[vece], a0, \
+ a1, -value));
+ break;
+ }
+
+ /* Fallback to dupi + vsub */
+ tcg_out_dupi_vec(s, type, vece, temp_vec, a2);
+ a2 = temp_vec;
+ }
+ tcg_out32(s, encode_vdvjvk_insn(sub_vec_insn[vece], a0, a1, a2));
+ break;
case INDEX_op_dupm_vec:
tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
break;
@@ -1723,6 +1775,8 @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
case INDEX_op_dup_vec:
case INDEX_op_dupm_vec:
case INDEX_op_cmp_vec:
+ case INDEX_op_add_vec:
+ case INDEX_op_sub_vec:
return 1;
default:
return 0;
@@ -1887,6 +1941,10 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
case INDEX_op_cmp_vec:
return C_O1_I2(w, w, wJ);
+ case INDEX_op_add_vec:
+ case INDEX_op_sub_vec:
+ return C_O1_I2(w, w, wi);
+
default:
g_assert_not_reached();
}
--
2.42.0
[PATCH v2 04/14] tcg/loongarch64: Lower add/sub_vec to vadd/vsub, Jiajie Chen <=
[PATCH v2 02/14] tcg/loongarch64: Lower basic tcg vec ops to LSX, Jiajie Chen, 2023/09/01
[PATCH v2 05/14] tcg/loongarch64: Lower vector bitwise operations, Jiajie Chen, 2023/09/01
[PATCH v2 06/14] tcg/loongarch64: Lower neg_vec to vneg, Jiajie Chen, 2023/09/01
[PATCH v2 07/14] tcg/loongarch64: Lower mul_vec to vmul, Jiajie Chen, 2023/09/01
[PATCH v2 08/14] tcg/loongarch64: Lower vector min max ops, Jiajie Chen, 2023/09/01
[PATCH v2 09/14] tcg/loongarch64: Lower vector saturated ops, Jiajie Chen, 2023/09/01
[PATCH v2 10/14] tcg/loongarch64: Lower vector shift vector ops, Jiajie Chen, 2023/09/01