From: Peter Maydell
Subject: [PATCH 08/20] target/arm: Convert load/store exclusive and ordered to decodetree
Date: Fri, 2 Jun 2023 16:52:11 +0100
Convert the instructions in the load/store exclusive group (STXR,
STLXR, LDXR, LDAXR) and the load/store ordered group (STLR, STLLR,
LDAR, LDLAR) to decodetree.
Note that for STLR, STLLR, LDAR, LDLAR this fixes an under-decoding
in the legacy decoder where we were not checking that the RES1 bits
in the Rs and Rt2 fields were set.
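As a concrete illustration (the encodings here are my own worked
example, not taken from the patch): 0xc89ffc20 is STLR x0, [x1], with
Rs and Rt2 both 0b11111 as required. The otherwise identical word
0xc880fc20, with Rs cleared to 0b00000, was still accepted as STLR by
the legacy decoder; after this patch no pattern matches it, so it
falls through to unallocated_encoding().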
The new function ldst_iss_sf() is equivalent to the existing
disas_ldst_compute_iss_sf(), but it takes the pre-decoded 'ext' field
rather than taking an undecoded two-bit opc field and extracting
'ext' from it. Once all the loads and stores have been converted
to decodetree, disas_ldst_compute_iss_sf() will be unused and
can be deleted.
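For reference, the legacy helper derives the register size from the
raw opc field itself. A sketch of it, paraphrased from the current
translate-a64.c (treat it as an approximation rather than a verbatim
quote):

static bool disas_ldst_compute_iss_sf(int size, bool is_signed, int opc)
{
    /*
     * opc bit 0 distinguishes a sign-extension into a 32-bit
     * register from a sign-extension into a 64-bit register.
     */
    int opc0 = extract32(opc, 0, 1);
    int regsize;

    if (is_signed) {
        regsize = opc0 ? 32 : 64;
    } else {
        regsize = size == 3 ? 64 : 32;
    }
    return regsize == 64;
}

The new ldst_iss_sf() below makes the same decision once the caller
has already turned opc into 'sign' and 'ext'.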
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/tcg/a64.decode | 11 +++
target/arm/tcg/translate-a64.c | 164 ++++++++++++++++++++-------------
2 files changed, 110 insertions(+), 65 deletions(-)
diff --git a/target/arm/tcg/a64.decode b/target/arm/tcg/a64.decode
index aba27047b56..b9b32490cef 100644
--- a/target/arm/tcg/a64.decode
+++ b/target/arm/tcg/a64.decode
@@ -230,3 +230,14 @@ HLT 1101 0100 010 ................ 000 00 @i16
 # DCPS1         1101 0100 101 ................ 000 01 @i16
 # DCPS2         1101 0100 101 ................ 000 10 @i16
 # DCPS3         1101 0100 101 ................ 000 11 @i16
+
+# Loads and stores
+
+&stxr           rn rt rt2 rs sz lasr
+&stlr           rn rt sz lasr
+@stxr           sz:2 ...... ... rs:5 lasr:1 rt2:5 rn:5 rt:5 &stxr
+@stlr           sz:2 ...... ... ..... lasr:1 ..... rn:5 rt:5 &stlr
+STXR            .. 001000 000 ..... . ..... ..... ..... @stxr # inc STLXR
+LDXR            .. 001000 010 ..... . ..... ..... ..... @stxr # inc LDAXR
+STLR            .. 001000 100 11111 . 11111 ..... ..... @stlr # inc STLLR
+LDAR            .. 001000 110 11111 . 11111 ..... ..... @stlr # inc LDLAR
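(An aside for reviewers unfamiliar with decodetree, not part of the
patch itself: from the &stxr argument set and @stxr format above, the
generated decoder in decode-a64.c.inc ends up doing roughly the
following. The field names and positions are real; the surrounding
code is a simplified sketch of the generated output, not a copy of
it.)

typedef struct arg_stxr {
    int rn, rt, rt2, rs, sz, lasr;
} arg_stxr;

static bool decode_stxr_example(DisasContext *s, uint32_t insn)
{
    arg_stxr a;

    /* STXR/STLXR: fixed bits [29:21] must be 001000 000 */
    if ((insn & 0x3fe00000) != 0x08000000) {
        return false; /* no match; try the next pattern */
    }
    a.sz   = extract32(insn, 30, 2); /* sz:2 at [31:30] */
    a.rs   = extract32(insn, 16, 5);
    a.lasr = extract32(insn, 15, 1);
    a.rt2  = extract32(insn, 10, 5);
    a.rn   = extract32(insn, 5, 5);
    a.rt   = extract32(insn, 0, 5);
    return trans_STXR(s, &a);
}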
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index 94410f6ece5..2a5c4eea02f 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -2560,6 +2560,102 @@ static bool disas_ldst_compute_iss_sf(int size, bool is_signed, int opc)
     return regsize == 64;
 }
+static bool ldst_iss_sf(int size, bool sign, bool ext)
+{
+    if (sign) {
+        /*
+         * Signed loads are 64 bit results if we are not going to
+         * do a zero-extend from 32 to 64 after the load.
+         * (For a store, sign and ext are always false.)
+         */
+        return !ext;
+    } else {
+        /* Unsigned loads/stores work at the specified size */
+        return size == MO_64;
+    }
+}
+
+static bool trans_STXR(DisasContext *s, arg_stxr *a)
+{
+    TCGv_i64 clean_addr;
+
+    if (a->rn == 31) {
+        gen_check_sp_alignment(s);
+    }
+    if (a->lasr) {
+        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
+    }
+    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, a->rn),
+                                true, a->rn != 31, a->sz);
+    gen_store_exclusive(s, a->rs, a->rt, a->rt2, clean_addr, a->sz, false);
+    return true;
+}
+
+static bool trans_LDXR(DisasContext *s, arg_stxr *a)
+{
+    TCGv_i64 clean_addr;
+
+    if (a->rn == 31) {
+        gen_check_sp_alignment(s);
+    }
+    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, a->rn),
+                                false, a->rn != 31, a->sz);
+    s->is_ldex = true;
+    gen_load_exclusive(s, a->rt, a->rt2, clean_addr, a->sz, false);
+    if (a->lasr) {
+        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
+    }
+    return true;
+}
+
+static bool trans_STLR(DisasContext *s, arg_stlr *a)
+{
+    TCGv_i64 clean_addr;
+    bool iss_sf = ldst_iss_sf(a->sz, false, false);
+
+    /*
+     * StoreLORelease is the same as Store-Release for QEMU, but
+     * needs the feature-test.
+     */
+    if (!a->lasr && !dc_isar_feature(aa64_lor, s)) {
+        return false;
+    }
+    /* Generate ISS for non-exclusive accesses including LASR. */
+    if (a->rn == 31) {
+        gen_check_sp_alignment(s);
+    }
+    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
+    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, a->rn),
+                                true, a->rn != 31, a->sz);
+    /* TODO: ARMv8.4-LSE SCTLR.nAA */
+    do_gpr_st(s, cpu_reg(s, a->rt), clean_addr, a->sz | MO_ALIGN, true, a->rt,
+              iss_sf, a->lasr);
+    return true;
+}
+
+static bool trans_LDAR(DisasContext *s, arg_stlr *a)
+{
+    TCGv_i64 clean_addr;
+    bool iss_sf = ldst_iss_sf(a->sz, false, false);
+
+    /* LoadLOAcquire is the same as Load-Acquire for QEMU. */
+    if (!a->lasr && !dc_isar_feature(aa64_lor, s)) {
+        return false;
+    }
+    /* Generate ISS for non-exclusive accesses including LASR. */
+    if (a->rn == 31) {
+        gen_check_sp_alignment(s);
+    }
+    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, a->rn),
+                                false, a->rn != 31, a->sz);
+    /* TODO: ARMv8.4-LSE SCTLR.nAA */
+    do_gpr_ld(s, cpu_reg(s, a->rt), clean_addr, a->sz | MO_ALIGN, false, true,
+              a->rt, iss_sf, a->lasr);
+    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
+    return true;
+}
+
 /* Load/store exclusive
  *
  *  31 30 29 24 23 22 21 20 16 15 14 10 9 5 4 0
@@ -2585,71 +2681,6 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
     TCGv_i64 clean_addr;

     switch (o2_L_o1_o0) {
-    case 0x0: /* STXR */
-    case 0x1: /* STLXR */
-        if (rn == 31) {
-            gen_check_sp_alignment(s);
-        }
-        if (is_lasr) {
-            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
-        }
-        clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
-                                    true, rn != 31, size);
-        gen_store_exclusive(s, rs, rt, rt2, clean_addr, size, false);
-        return;
-
-    case 0x4: /* LDXR */
-    case 0x5: /* LDAXR */
-        if (rn == 31) {
-            gen_check_sp_alignment(s);
-        }
-        clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
-                                    false, rn != 31, size);
-        s->is_ldex = true;
-        gen_load_exclusive(s, rt, rt2, clean_addr, size, false);
-        if (is_lasr) {
-            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
-        }
-        return;
-
-    case 0x8: /* STLLR */
-        if (!dc_isar_feature(aa64_lor, s)) {
-            break;
-        }
-        /* StoreLORelease is the same as Store-Release for QEMU. */
-        /* fall through */
-    case 0x9: /* STLR */
-        /* Generate ISS for non-exclusive accesses including LASR. */
-        if (rn == 31) {
-            gen_check_sp_alignment(s);
-        }
-        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
-        clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
-                                    true, rn != 31, size);
-        /* TODO: ARMv8.4-LSE SCTLR.nAA */
-        do_gpr_st(s, cpu_reg(s, rt), clean_addr, size | MO_ALIGN, true, rt,
-                  disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
-        return;
-
-    case 0xc: /* LDLAR */
-        if (!dc_isar_feature(aa64_lor, s)) {
-            break;
-        }
-        /* LoadLOAcquire is the same as Load-Acquire for QEMU. */
-        /* fall through */
-    case 0xd: /* LDAR */
-        /* Generate ISS for non-exclusive accesses including LASR. */
-        if (rn == 31) {
-            gen_check_sp_alignment(s);
-        }
-        clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
-                                    false, rn != 31, size);
-        /* TODO: ARMv8.4-LSE SCTLR.nAA */
-        do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size | MO_ALIGN, false, true,
-                  rt, disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
-        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
-        return;
-
     case 0x2: case 0x3: /* CASP / STXP */
         if (size & 2) { /* STXP / STLXP */
             if (rn == 31) {
@@ -2704,6 +2735,9 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
             return;
         }
         break;
+    default:
+        /* Handled in decodetree */
+        break;
     }
     unallocated_encoding(s);
 }
--
2.34.1