[PATCH v2 23/23] target/arm: Convert load/store tags insns to decodetree
From: Peter Maydell
Subject: [PATCH v2 23/23] target/arm: Convert load/store tags insns to decodetree
Date: Sun, 11 Jun 2023 17:00:32 +0100
Convert the instructions in the load/store memory tags instruction
group to decodetree.
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230602155223.2040685-21-peter.maydell@linaro.org
---
target/arm/tcg/a64.decode | 25 +++
target/arm/tcg/translate-a64.c | 360 ++++++++++++++++-----------------
2 files changed, 199 insertions(+), 186 deletions(-)
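The new %tag_offset field in a64.decode (first hunk below) uses
decodetree's !function hook: the decoder extracts the signed 9-bit
immediate at bit 12 and passes it through scale_by_log2_tag_granule()
before it reaches a->imm.  A minimal sketch of the net effect, which
mirrors the open-coded "sextract64(insn, 12, 9) << LOG2_TAG_GRANULE"
being deleted from disas_ldst_tag() (decode_tag_offset is an
illustrative name, not part of the generated decoder; TAG_GRANULE is
the 16-byte MTE granule, so LOG2_TAG_GRANULE is 4):

static int64_t decode_tag_offset(uint32_t insn)
{
    /* "12:s9": signed 9-bit immediate field starting at bit 12 */
    int imm9 = sextract32(insn, 12, 9);
    /* !function=scale_by_log2_tag_granule: scale tag-granule units
     * up to a byte offset */
    return (int64_t)imm9 << LOG2_TAG_GRANULE;
}

This is why the trans_* functions below can add a->imm to the address
directly, with no further scaling.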
diff --git a/target/arm/tcg/a64.decode b/target/arm/tcg/a64.decode
index 4ffdc91865f..ef64a3f9cba 100644
--- a/target/arm/tcg/a64.decode
+++ b/target/arm/tcg/a64.decode
@@ -528,3 +528,28 @@ LD_single 0 . 001101 . 1 . ..... 10 . 001 ..... ..... @ldst_single_d
# Replicating load case
LD_single_repl 0 q:1 001101 p:1 1 . rm:5 11 . 0 scale:2 rn:5 rt:5 selem=%ldst_single_selem
+
+%tag_offset 12:s9 !function=scale_by_log2_tag_granule
+&ldst_tag rn rt imm p w
+@ldst_tag ........ .. . ......... .. rn:5 rt:5 &ldst_tag imm=%tag_offset
+@ldst_tag_mult ........ .. . 000000000 .. rn:5 rt:5 &ldst_tag imm=0
+
+STZGM 11011001 00 1 ......... 00 ..... ..... @ldst_tag_mult p=0 w=0
+STG 11011001 00 1 ......... 01 ..... ..... @ldst_tag p=1 w=1
+STG 11011001 00 1 ......... 10 ..... ..... @ldst_tag p=0 w=0
+STG 11011001 00 1 ......... 11 ..... ..... @ldst_tag p=0 w=1
+
+LDG 11011001 01 1 ......... 00 ..... ..... @ldst_tag p=0 w=0
+STZG 11011001 01 1 ......... 01 ..... ..... @ldst_tag p=1 w=1
+STZG 11011001 01 1 ......... 10 ..... ..... @ldst_tag p=0 w=0
+STZG 11011001 01 1 ......... 11 ..... ..... @ldst_tag p=0 w=1
+
+STGM 11011001 10 1 ......... 00 ..... ..... @ldst_tag_mult p=0 w=0
+ST2G 11011001 10 1 ......... 01 ..... ..... @ldst_tag p=1 w=1
+ST2G 11011001 10 1 ......... 10 ..... ..... @ldst_tag p=0 w=0
+ST2G 11011001 10 1 ......... 11 ..... ..... @ldst_tag p=0 w=1
+
+LDGM 11011001 11 1 ......... 00 ..... ..... @ldst_tag_mult p=0 w=0
+STZ2G 11011001 11 1 ......... 01 ..... ..... @ldst_tag p=1 w=1
+STZ2G 11011001 11 1 ......... 10 ..... ..... @ldst_tag p=0 w=0
+STZ2G 11011001 11 1 ......... 11 ..... ..... @ldst_tag p=0 w=1
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index 6e7fe1f35cf..43963287a8c 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -62,6 +62,12 @@ static int uimm_scaled(DisasContext *s, int x)
return imm << scale;
}
+/* For load/store memory tags: scale offset by LOG2_TAG_GRANULE */
+static int scale_by_log2_tag_granule(DisasContext *s, int x)
+{
+ return x << LOG2_TAG_GRANULE;
+}
+
/*
* Include the generated decoders.
*/
@@ -3712,185 +3718,184 @@ static bool trans_LD_single_repl(DisasContext *s, arg_LD_single_repl *a)
return true;
}
-/*
- * Load/Store memory tags
- *
- * 31 30 29 24 22 21 12 10 5 0
- * +-----+-------------+-----+---+------+-----+------+------+
- * | 1 1 | 0 1 1 0 0 1 | op1 | 1 | imm9 | op2 | Rn | Rt |
- * +-----+-------------+-----+---+------+-----+------+------+
- */
-static void disas_ldst_tag(DisasContext *s, uint32_t insn)
+static bool trans_STZGM(DisasContext *s, arg_ldst_tag *a)
{
- int rt = extract32(insn, 0, 5);
- int rn = extract32(insn, 5, 5);
- uint64_t offset = sextract64(insn, 12, 9) << LOG2_TAG_GRANULE;
- int op2 = extract32(insn, 10, 2);
- int op1 = extract32(insn, 22, 2);
- bool is_load = false, is_pair = false, is_zero = false, is_mult = false;
- int index = 0;
TCGv_i64 addr, clean_addr, tcg_rt;
+ int size = 4 << s->dcz_blocksize;
- /* We checked insn bits [29:24,21] in the caller. */
- if (extract32(insn, 30, 2) != 3) {
- goto do_unallocated;
+ if (!dc_isar_feature(aa64_mte, s)) {
+ return false;
+ }
+ if (s->current_el == 0) {
+ return false;
}
- /*
- * @index is a tri-state variable which has 3 states:
- * < 0 : post-index, writeback
- * = 0 : signed offset
- * > 0 : pre-index, writeback
- */
- switch (op1) {
- case 0:
- if (op2 != 0) {
- /* STG */
- index = op2 - 2;
- } else {
- /* STZGM */
- if (s->current_el == 0 || offset != 0) {
- goto do_unallocated;
- }
- is_mult = is_zero = true;
- }
- break;
- case 1:
- if (op2 != 0) {
- /* STZG */
- is_zero = true;
- index = op2 - 2;
- } else {
- /* LDG */
- is_load = true;
- }
- break;
- case 2:
- if (op2 != 0) {
- /* ST2G */
- is_pair = true;
- index = op2 - 2;
- } else {
- /* STGM */
- if (s->current_el == 0 || offset != 0) {
- goto do_unallocated;
- }
- is_mult = true;
- }
- break;
- case 3:
- if (op2 != 0) {
- /* STZ2G */
- is_pair = is_zero = true;
- index = op2 - 2;
- } else {
- /* LDGM */
- if (s->current_el == 0 || offset != 0) {
- goto do_unallocated;
- }
- is_mult = is_load = true;
- }
- break;
-
- default:
- do_unallocated:
- unallocated_encoding(s);
- return;
- }
-
- if (is_mult
- ? !dc_isar_feature(aa64_mte, s)
- : !dc_isar_feature(aa64_mte_insn_reg, s)) {
- goto do_unallocated;
- }
-
- if (rn == 31) {
+ if (a->rn == 31) {
gen_check_sp_alignment(s);
}
- addr = read_cpu_reg_sp(s, rn, true);
- if (index >= 0) {
+ addr = read_cpu_reg_sp(s, a->rn, true);
+ tcg_gen_addi_i64(addr, addr, a->imm);
+ tcg_rt = cpu_reg(s, a->rt);
+
+ if (s->ata) {
+ gen_helper_stzgm_tags(cpu_env, addr, tcg_rt);
+ }
+ /*
+ * The non-tags portion of STZGM is mostly like DC_ZVA,
+ * except the alignment happens before the access.
+ */
+ clean_addr = clean_data_tbi(s, addr);
+ tcg_gen_andi_i64(clean_addr, clean_addr, -size);
+ gen_helper_dc_zva(cpu_env, clean_addr);
+ return true;
+}
+
+static bool trans_STGM(DisasContext *s, arg_ldst_tag *a)
+{
+ TCGv_i64 addr, clean_addr, tcg_rt;
+
+ if (!dc_isar_feature(aa64_mte, s)) {
+ return false;
+ }
+ if (s->current_el == 0) {
+ return false;
+ }
+
+ if (a->rn == 31) {
+ gen_check_sp_alignment(s);
+ }
+
+ addr = read_cpu_reg_sp(s, a->rn, true);
+ tcg_gen_addi_i64(addr, addr, a->imm);
+ tcg_rt = cpu_reg(s, a->rt);
+
+ if (s->ata) {
+ gen_helper_stgm(cpu_env, addr, tcg_rt);
+ } else {
+ MMUAccessType acc = MMU_DATA_STORE;
+ int size = 4 << GMID_EL1_BS;
+
+ clean_addr = clean_data_tbi(s, addr);
+ tcg_gen_andi_i64(clean_addr, clean_addr, -size);
+ gen_probe_access(s, clean_addr, acc, size);
+ }
+ return true;
+}
+
+static bool trans_LDGM(DisasContext *s, arg_ldst_tag *a)
+{
+ TCGv_i64 addr, clean_addr, tcg_rt;
+
+ if (!dc_isar_feature(aa64_mte, s)) {
+ return false;
+ }
+ if (s->current_el == 0) {
+ return false;
+ }
+
+ if (a->rn == 31) {
+ gen_check_sp_alignment(s);
+ }
+
+ addr = read_cpu_reg_sp(s, a->rn, true);
+ tcg_gen_addi_i64(addr, addr, a->imm);
+ tcg_rt = cpu_reg(s, a->rt);
+
+ if (s->ata) {
+ gen_helper_ldgm(tcg_rt, cpu_env, addr);
+ } else {
+ MMUAccessType acc = MMU_DATA_LOAD;
+ int size = 4 << GMID_EL1_BS;
+
+ clean_addr = clean_data_tbi(s, addr);
+ tcg_gen_andi_i64(clean_addr, clean_addr, -size);
+ gen_probe_access(s, clean_addr, acc, size);
+ /* The result tags are zeros. */
+ tcg_gen_movi_i64(tcg_rt, 0);
+ }
+ return true;
+}
+
+static bool trans_LDG(DisasContext *s, arg_ldst_tag *a)
+{
+ TCGv_i64 addr, clean_addr, tcg_rt;
+
+ if (!dc_isar_feature(aa64_mte_insn_reg, s)) {
+ return false;
+ }
+
+ if (a->rn == 31) {
+ gen_check_sp_alignment(s);
+ }
+
+ addr = read_cpu_reg_sp(s, a->rn, true);
+ if (!a->p) {
/* pre-index or signed offset */
- tcg_gen_addi_i64(addr, addr, offset);
+ tcg_gen_addi_i64(addr, addr, a->imm);
}
- if (is_mult) {
- tcg_rt = cpu_reg(s, rt);
+ tcg_gen_andi_i64(addr, addr, -TAG_GRANULE);
+ tcg_rt = cpu_reg(s, a->rt);
+ if (s->ata) {
+ gen_helper_ldg(tcg_rt, cpu_env, addr, tcg_rt);
+ } else {
+ /*
+ * Tag access disabled: we must check for aborts on the
+ * load from [rn+offset], and then insert a 0 tag into rt.
+ */
+ clean_addr = clean_data_tbi(s, addr);
+ gen_probe_access(s, clean_addr, MMU_DATA_LOAD, MO_8);
+ gen_address_with_allocation_tag0(tcg_rt, tcg_rt);
+ }
- if (is_zero) {
- int size = 4 << s->dcz_blocksize;
-
- if (s->ata) {
- gen_helper_stzgm_tags(cpu_env, addr, tcg_rt);
- }
- /*
- * The non-tags portion of STZGM is mostly like DC_ZVA,
- * except the alignment happens before the access.
- */
- clean_addr = clean_data_tbi(s, addr);
- tcg_gen_andi_i64(clean_addr, clean_addr, -size);
- gen_helper_dc_zva(cpu_env, clean_addr);
- } else if (s->ata) {
- if (is_load) {
- gen_helper_ldgm(tcg_rt, cpu_env, addr);
- } else {
- gen_helper_stgm(cpu_env, addr, tcg_rt);
- }
- } else {
- MMUAccessType acc = is_load ? MMU_DATA_LOAD : MMU_DATA_STORE;
- int size = 4 << GMID_EL1_BS;
-
- clean_addr = clean_data_tbi(s, addr);
- tcg_gen_andi_i64(clean_addr, clean_addr, -size);
- gen_probe_access(s, clean_addr, acc, size);
-
- if (is_load) {
- /* The result tags are zeros. */
- tcg_gen_movi_i64(tcg_rt, 0);
- }
+ if (a->w) {
+ /* pre-index or post-index */
+ if (a->p) {
+ /* post-index */
+ tcg_gen_addi_i64(addr, addr, a->imm);
}
- return;
+ tcg_gen_mov_i64(cpu_reg_sp(s, a->rn), addr);
+ }
+ return true;
+}
+
+static bool do_STG(DisasContext *s, arg_ldst_tag *a, bool is_zero, bool is_pair)
+{
+ TCGv_i64 addr, tcg_rt;
+
+ if (a->rn == 31) {
+ gen_check_sp_alignment(s);
}
- if (is_load) {
- tcg_gen_andi_i64(addr, addr, -TAG_GRANULE);
- tcg_rt = cpu_reg(s, rt);
- if (s->ata) {
- gen_helper_ldg(tcg_rt, cpu_env, addr, tcg_rt);
+ addr = read_cpu_reg_sp(s, a->rn, true);
+ if (!a->p) {
+ /* pre-index or signed offset */
+ tcg_gen_addi_i64(addr, addr, a->imm);
+ }
+ tcg_rt = cpu_reg_sp(s, a->rt);
+ if (!s->ata) {
+ /*
+ * For STG and ST2G, we need to check alignment and probe memory.
+ * TODO: For STZG and STZ2G, we could rely on the stores below,
+ * at least for system mode; user-only won't enforce alignment.
+ */
+ if (is_pair) {
+ gen_helper_st2g_stub(cpu_env, addr);
} else {
- /*
- * Tag access disabled: we must check for aborts on the load
- * load from [rn+offset], and then insert a 0 tag into rt.
- */
- clean_addr = clean_data_tbi(s, addr);
- gen_probe_access(s, clean_addr, MMU_DATA_LOAD, MO_8);
- gen_address_with_allocation_tag0(tcg_rt, tcg_rt);
+ gen_helper_stg_stub(cpu_env, addr);
+ }
+ } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
+ if (is_pair) {
+ gen_helper_st2g_parallel(cpu_env, addr, tcg_rt);
+ } else {
+ gen_helper_stg_parallel(cpu_env, addr, tcg_rt);
}
} else {
- tcg_rt = cpu_reg_sp(s, rt);
- if (!s->ata) {
- /*
- * For STG and ST2G, we need to check alignment and probe memory.
- * TODO: For STZG and STZ2G, we could rely on the stores below,
- * at least for system mode; user-only won't enforce alignment.
- */
- if (is_pair) {
- gen_helper_st2g_stub(cpu_env, addr);
- } else {
- gen_helper_stg_stub(cpu_env, addr);
- }
- } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
- if (is_pair) {
- gen_helper_st2g_parallel(cpu_env, addr, tcg_rt);
- } else {
- gen_helper_stg_parallel(cpu_env, addr, tcg_rt);
- }
+ if (is_pair) {
+ gen_helper_st2g(cpu_env, addr, tcg_rt);
} else {
- if (is_pair) {
- gen_helper_st2g(cpu_env, addr, tcg_rt);
- } else {
- gen_helper_stg(cpu_env, addr, tcg_rt);
- }
+ gen_helper_stg(cpu_env, addr, tcg_rt);
}
}
@@ -3911,32 +3916,21 @@ static void disas_ldst_tag(DisasContext *s, uint32_t insn)
}
}
- if (index != 0) {
+ if (a->w) {
/* pre-index or post-index */
- if (index < 0) {
+ if (a->p) {
/* post-index */
- tcg_gen_addi_i64(addr, addr, offset);
+ tcg_gen_addi_i64(addr, addr, a->imm);
}
- tcg_gen_mov_i64(cpu_reg_sp(s, rn), addr);
+ tcg_gen_mov_i64(cpu_reg_sp(s, a->rn), addr);
}
+ return true;
}
-/* Loads and stores */
-static void disas_ldst(DisasContext *s, uint32_t insn)
-{
- switch (extract32(insn, 24, 6)) {
- case 0x19:
- if (extract32(insn, 21, 1) != 0) {
- disas_ldst_tag(s, insn);
- } else {
- unallocated_encoding(s);
- }
- break;
- default:
- unallocated_encoding(s);
- break;
- }
-}
+TRANS_FEAT(STG, aa64_mte_insn_reg, do_STG, a, false, false)
+TRANS_FEAT(STZG, aa64_mte_insn_reg, do_STG, a, true, false)
+TRANS_FEAT(ST2G, aa64_mte_insn_reg, do_STG, a, false, true)
+TRANS_FEAT(STZ2G, aa64_mte_insn_reg, do_STG, a, true, true)
typedef void ArithTwoOp(TCGv_i64, TCGv_i64, TCGv_i64);
@@ -13832,12 +13826,6 @@ static bool btype_destination_ok(uint32_t insn, bool bt, int btype)
static void disas_a64_legacy(DisasContext *s, uint32_t insn)
{
switch (extract32(insn, 25, 4)) {
- case 0x4:
- case 0x6:
- case 0xc:
- case 0xe: /* Loads and stores */
- disas_ldst(s, insn);
- break;
case 0x5:
case 0xd: /* Data processing - register */
disas_data_proc_reg(s, insn);
--
2.34.1