[PATCH v5 26/31] target/arm: Use finalize_memop for aa64 fpr load/store
From: Richard Henderson
Subject: [PATCH v5 26/31] target/arm: Use finalize_memop for aa64 fpr load/store
Date: Mon, 19 Apr 2021 13:22:52 -0700
For 128-bit load/store, use 16-byte alignment. This
requires that we perform the two operations in the
correct order so that we generate the alignment fault
before modifying memory.
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/arm/translate-a64.c | 42 +++++++++++++++++++++++---------------
1 file changed, 26 insertions(+), 16 deletions(-)
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index f2995d2b74..b90d6880e7 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -963,25 +963,33 @@ static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size)
{
/* This writes the bottom N bits of a 128 bit wide vector to memory */
- TCGv_i64 tmp = tcg_temp_new_i64();
- tcg_gen_ld_i64(tmp, cpu_env, fp_reg_offset(s, srcidx, MO_64));
+ TCGv_i64 tmplo = tcg_temp_new_i64();
+ MemOp mop;
+
+ tcg_gen_ld_i64(tmplo, cpu_env, fp_reg_offset(s, srcidx, MO_64));
+
if (size < 4) {
- tcg_gen_qemu_st_i64(tmp, tcg_addr, get_mem_index(s),
- s->be_data + size);
+ mop = finalize_memop(s, size);
+ tcg_gen_qemu_st_i64(tmplo, tcg_addr, get_mem_index(s), mop);
} else {
bool be = s->be_data == MO_BE;
TCGv_i64 tcg_hiaddr = tcg_temp_new_i64();
+ TCGv_i64 tmphi = tcg_temp_new_i64();
+ tcg_gen_ld_i64(tmphi, cpu_env, fp_reg_hi_offset(s, srcidx));
+
+ mop = s->be_data | MO_Q;
+ tcg_gen_qemu_st_i64(be ? tmphi : tmplo, tcg_addr, get_mem_index(s),
+ mop | (s->align_mem ? MO_ALIGN_16 : 0));
tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
- tcg_gen_qemu_st_i64(tmp, be ? tcg_hiaddr : tcg_addr, get_mem_index(s),
- s->be_data | MO_Q);
- tcg_gen_ld_i64(tmp, cpu_env, fp_reg_hi_offset(s, srcidx));
- tcg_gen_qemu_st_i64(tmp, be ? tcg_addr : tcg_hiaddr, get_mem_index(s),
- s->be_data | MO_Q);
+ tcg_gen_qemu_st_i64(be ? tmplo : tmphi, tcg_hiaddr,
+ get_mem_index(s), mop);
+
tcg_temp_free_i64(tcg_hiaddr);
+ tcg_temp_free_i64(tmphi);
}
- tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tmplo);
}
/*
@@ -992,10 +1000,11 @@ static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
/* This always zero-extends and writes to a full 128 bit wide vector */
TCGv_i64 tmplo = tcg_temp_new_i64();
TCGv_i64 tmphi = NULL;
+ MemOp mop;
if (size < 4) {
- MemOp memop = s->be_data + size;
- tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), memop);
+ mop = finalize_memop(s, size);
+ tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), mop);
} else {
bool be = s->be_data == MO_BE;
TCGv_i64 tcg_hiaddr;
@@ -1003,11 +1012,12 @@ static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
tmphi = tcg_temp_new_i64();
tcg_hiaddr = tcg_temp_new_i64();
+ mop = s->be_data | MO_Q;
+ tcg_gen_qemu_ld_i64(be ? tmphi : tmplo, tcg_addr, get_mem_index(s),
+ mop | (s->align_mem ? MO_ALIGN_16 : 0));
tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
- tcg_gen_qemu_ld_i64(tmplo, be ? tcg_hiaddr : tcg_addr, get_mem_index(s),
- s->be_data | MO_Q);
- tcg_gen_qemu_ld_i64(tmphi, be ? tcg_addr : tcg_hiaddr, get_mem_index(s),
- s->be_data | MO_Q);
+ tcg_gen_qemu_ld_i64(be ? tmplo : tmphi, tcg_hiaddr,
+ get_mem_index(s), mop);
tcg_temp_free_i64(tcg_hiaddr);
}
--
2.25.1
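
To illustrate the ordering constraint described in the commit message outside of the TCG API: below is a minimal, self-contained C sketch of a 16-byte store split into two 8-byte halves, where the 16-byte alignment check happens before either half is written, so a misaligned access faults without leaving memory partially updated. This mirrors what the patch achieves by putting MO_ALIGN_16 on the first of the two stores. The names store128_aligned and alignment_fault are hypothetical, for illustration only, and are not QEMU functions; byte order within each half is ignored here (in QEMU it is handled by the MemOp endianness).

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <stdbool.h>

/* Hypothetical fault hook: stands in for raising the guest alignment exception. */
static void alignment_fault(uintptr_t addr)
{
    fprintf(stderr, "alignment fault at %#lx\n", (unsigned long)addr);
}

/*
 * Store a 128-bit value as two 64-bit halves.  The alignment check is
 * performed up front, before either half is written, so a faulting
 * access cannot leave memory partially modified.
 */
static bool store128_aligned(void *mem, uint64_t lo, uint64_t hi, bool big_endian)
{
    uintptr_t addr = (uintptr_t)mem;

    if (addr & 15) {            /* 16-byte alignment required */
        alignment_fault(addr);
        return false;           /* memory left untouched */
    }

    /* A big-endian layout keeps the high half at the lower address. */
    memcpy(mem, big_endian ? &hi : &lo, 8);
    memcpy((char *)mem + 8, big_endian ? &lo : &hi, 8);
    return true;
}

int main(void)
{
    _Alignas(16) uint8_t buf[16];

    store128_aligned(buf, 0x1122334455667788ull, 0x99aabbccddeeff00ull, false);
    store128_aligned(buf + 1, 0, 0, false);   /* misaligned: faults, nothing stored */
    return 0;
}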